From ef2bd10f54d7a577764ad81e2b9d6a6f5c4e610a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:29:57 -0800 Subject: [PATCH 01/16] Changes for azure-ai-projects release v2.0.1 --- sdk/ai/azure-ai-projects/CHANGELOG.md | 22 +++++++++++++++++++ .../azure/ai/projects/_version.py | 2 +- sdk/ai/azure-ai-projects/tsp-location.yaml | 4 ++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 sdk/ai/azure-ai-projects/tsp-location.yaml diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 4c8384d88d48..776a2a6f6b17 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -1,5 +1,27 @@ # Release History +## 2.0.1 (Unreleased) + +### Features Added + +* Placeholder + +### Breaking Changes + +* Placeholder + +### Bug Fixes + +* Placeholder + +### Sample updates + +* Placeholder + +### Other Changes + +* Placeholder + ## 2.0.0 (2026-03-06) First stable release of the client library that uses the Generally Available (GA) version "v1" of the Foundry REST APIs. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py index 8f2350dd3b0c..38c04a589ff2 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "2.0.0" +VERSION = "2.0.1" diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml new file mode 100644 index 000000000000..d614f684d1c5 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/ai-foundry/data-plane/Foundry +commit: 05c0bec52a73ca6cd87c700ea6f4d5f68b9b433b +repo: Azure/azure-rest-api-specs +additionalDirectories: From 38e2bfea780875b4088aa170366d1d3639f12401 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 6 Mar 2026 11:51:11 -0800 Subject: [PATCH 02/16] x --- sdk/ai/azure-ai-projects/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 776a2a6f6b17..7676c008b571 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -10,7 +10,7 @@ * Placeholder -### Bug Fixes +### Bugs Fixed * Placeholder From 95df96439f9839eb7cb81d1d1ac46bec18457ff4 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 6 Mar 2026 15:56:01 -0800 Subject: [PATCH 03/16] Set beta version --- sdk/ai/azure-ai-projects/CHANGELOG.md | 2 +- sdk/ai/azure-ai-projects/azure/ai/projects/_version.py | 2 +- sdk/ai/azure-ai-projects/pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 7676c008b571..34c72e0a6fa5 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 2.0.1 (Unreleased) +## 2.0.1b1 (Unreleased) ### Features Added diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py index 38c04a589ff2..f70bfecdef88 
100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "2.0.1" +VERSION = "2.0.1b1" diff --git a/sdk/ai/azure-ai-projects/pyproject.toml b/sdk/ai/azure-ai-projects/pyproject.toml index f5babde606a2..e2ae27e26534 100644 --- a/sdk/ai/azure-ai-projects/pyproject.toml +++ b/sdk/ai/azure-ai-projects/pyproject.toml @@ -17,7 +17,7 @@ authors = [ description = "Microsoft Corporation Azure AI Projects Client Library for Python" license = "MIT" classifiers = [ - "Development Status :: 5 - Production/Stable", + "Development Status :: 4 - Beta", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", From d04d08b088fbec0afe44f84bf84c24d2e043a233 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 9 Mar 2026 07:51:50 -0700 Subject: [PATCH 04/16] Re-emit --- .../azure-ai-projects/apiview-properties.json | 3 +- .../ai/projects/aio/operations/_operations.py | 339 ++++++++++++++- .../azure/ai/projects/models/__init__.py | 2 + .../azure/ai/projects/models/_models.py | 50 ++- .../ai/projects/operations/_operations.py | 402 ++++++++++++++++++ sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 6 files changed, 789 insertions(+), 9 deletions(-) diff --git a/sdk/ai/azure-ai-projects/apiview-properties.json b/sdk/ai/azure-ai-projects/apiview-properties.json index bd38f0ef92c5..e57c56c7a12e 100644 --- a/sdk/ai/azure-ai-projects/apiview-properties.json +++ b/sdk/ai/azure-ai-projects/apiview-properties.json @@ -102,6 +102,7 @@ "azure.ai.projects.models.ScheduleTask": "Azure.AI.Projects.ScheduleTask", "azure.ai.projects.models.EvaluationScheduleTask": "Azure.AI.Projects.EvaluationScheduleTask", 
"azure.ai.projects.models.EvaluationTaxonomy": "Azure.AI.Projects.EvaluationTaxonomy", + "azure.ai.projects.models.EvaluatorCredentialRequest": "Azure.AI.Projects.EvaluatorCredentialRequest", "azure.ai.projects.models.EvaluatorMetric": "Azure.AI.Projects.EvaluatorMetric", "azure.ai.projects.models.EvaluatorVersion": "Azure.AI.Projects.EvaluatorVersion", "azure.ai.projects.models.FabricDataAgentToolParameters": "Azure.AI.Projects.FabricDataAgentToolParameters", @@ -220,6 +221,7 @@ "azure.ai.projects.models.EvaluatorDefinitionType": "Azure.AI.Projects.EvaluatorDefinitionType", "azure.ai.projects.models.EvaluatorMetricType": "Azure.AI.Projects.EvaluatorMetricType", "azure.ai.projects.models.EvaluatorMetricDirection": "Azure.AI.Projects.EvaluatorMetricDirection", + "azure.ai.projects.models.PendingUploadType": "Azure.AI.Projects.PendingUploadType", "azure.ai.projects.models.OperationState": "Azure.Core.Foundations.OperationState", "azure.ai.projects.models.InsightType": "Azure.AI.Projects.InsightType", "azure.ai.projects.models.SampleType": "Azure.AI.Projects.SampleType", @@ -260,7 +262,6 @@ "azure.ai.projects.models.ConnectionType": "Azure.AI.Projects.ConnectionType", "azure.ai.projects.models.CredentialType": "Azure.AI.Projects.CredentialType", "azure.ai.projects.models.DatasetType": "Azure.AI.Projects.DatasetType", - "azure.ai.projects.models.PendingUploadType": "Azure.AI.Projects.PendingUploadType", "azure.ai.projects.models.DeploymentType": "Azure.AI.Projects.DeploymentType", "azure.ai.projects.models.IndexType": "Azure.AI.Projects.IndexType", "azure.ai.projects.models.MemoryStoreUpdateStatus": "Azure.AI.Projects.MemoryStoreUpdateStatus", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 55291e178dd2..f5d68ab4c376 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ 
b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -57,9 +57,11 @@ build_beta_evaluation_taxonomies_update_request, build_beta_evaluators_create_version_request, build_beta_evaluators_delete_version_request, + build_beta_evaluators_get_credentials_request, build_beta_evaluators_get_version_request, build_beta_evaluators_list_request, build_beta_evaluators_list_versions_request, + build_beta_evaluators_pending_upload_request, build_beta_evaluators_update_version_request, build_beta_insights_generate_request, build_beta_insights_get_request, @@ -110,9 +112,8 @@ T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] List = list -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - +# _SERIALIZER = Serializer() +# _SERIALIZER.client_side_validation = False class BetaOperations: """ @@ -4662,6 +4663,338 @@ async def update_version( return deserialized # type: ignore + @overload + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: _models.PendingUploadRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse.
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Is one of the following + types: PendingUploadRequest, JSON, IO[bytes] Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or + IO[bytes] + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(pending_upload_request, (IOBase, bytes)): + _content = pending_upload_request + else: + _content = json.dumps(pending_upload_request, 
cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_evaluators_pending_upload_request( + name=name, + version=version, + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.PendingUploadResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def get_credentials( + self, + name: str, + version: str, + credential_request: _models.EvaluatorCredentialRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. 
+ :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def get_credentials( + self, + name: str, + version: str, + credential_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def get_credentials( + self, + name: str, + version: str, + credential_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. 
+ :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def get_credentials( + self, + name: str, + version: str, + credential_request: Union[_models.EvaluatorCredentialRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Is one of the following types: + EvaluatorCredentialRequest, JSON, IO[bytes] Required. + :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest or JSON or + IO[bytes] + :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(credential_request, (IOBase, bytes)): + _content = credential_request + else: + _content = json.dumps(credential_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_evaluators_get_credentials_request( + name=name, + version=version, + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, 
StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.DatasetCredential, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + class BetaInsightsOperations: """ diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py index bb537cc97636..110c948952f0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py @@ -102,6 +102,7 @@ EvaluationScheduleTask, EvaluationTaxonomy, EvaluationTaxonomyInput, + EvaluatorCredentialRequest, EvaluatorDefinition, EvaluatorMetric, EvaluatorVersion, @@ -372,6 +373,7 @@ "EvaluationScheduleTask", "EvaluationTaxonomy", "EvaluationTaxonomyInput", + "EvaluatorCredentialRequest", "EvaluatorDefinition", "EvaluatorMetric", "EvaluatorVersion", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 7e7b260b7626..bae28a926cec 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -2230,23 +2230,35 @@ class CodeBasedEvaluatorDefinition(EvaluatorDefinition, discriminator="code"): :vartype metrics: dict[str, ~azure.ai.projects.models.EvaluatorMetric] :ivar type: Required. Code-based definition. :vartype type: str or ~azure.ai.projects.models.CODE - :ivar code_text: Inline code text for the evaluator. Required. + :ivar code_text: Inline code text for the evaluator. :vartype code_text: str + :ivar entry_point: The entry point Python file name for the uploaded evaluator code (e.g. 
+ 'answer_length_evaluator.py'). + :vartype entry_point: str + :ivar image_tag: The container image tag to use for evaluator code execution. + :vartype image_tag: str """ type: Literal[EvaluatorDefinitionType.CODE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Required. Code-based definition.""" - code_text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Inline code text for the evaluator. Required.""" + code_text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Inline code text for the evaluator.""" + entry_point: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The entry point Python file name for the uploaded evaluator code (e.g. + 'answer_length_evaluator.py').""" + image_tag: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The container image tag to use for evaluator code execution.""" @overload def __init__( self, *, - code_text: str, init_parameters: Optional[dict[str, Any]] = None, data_schema: Optional[dict[str, Any]] = None, metrics: Optional[dict[str, "_models.EvaluatorMetric"]] = None, + code_text: Optional[str] = None, + entry_point: Optional[str] = None, + image_tag: Optional[str] = None, ) -> None: ... @overload @@ -4243,6 +4255,36 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class EvaluatorCredentialRequest(_Model): + """Request body for getting evaluator credentials. + + :ivar blob_uri: The blob URI for the evaluator storage. Example: + ``https://account.blob.core.windows.net:443/container``. Required. + :vartype blob_uri: str + """ + + blob_uri: str = rest_field(name="blobUri", visibility=["read", "create", "update", "delete", "query"]) + """The blob URI for the evaluator storage. Example: + ``https://account.blob.core.windows.net:443/container``. 
Required.""" + + @overload + def __init__( + self, + *, + blob_uri: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class EvaluatorMetric(_Model): """Evaluator Metric. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 371aa81b2b00..21502f0de2bb 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -1255,6 +1255,76 @@ def build_beta_evaluators_update_version_request( # pylint: disable=name-too-lo return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) +def build_beta_evaluators_pending_upload_request( # pylint: disable=name-too-long + name: str, + version: str, + *, + foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW], + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluators/{name}/versions/{version}/startPendingUpload" + path_format_arguments = { + "name": _SERIALIZER.url("name", name, "str"), + "version": _SERIALIZER.url("version", version, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Foundry-Features"] 
= _SERIALIZER.header("foundry_features", foundry_features, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_beta_evaluators_get_credentials_request( # pylint: disable=name-too-long + name: str, + version: str, + *, + foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW], + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluators/{name}/versions/{version}/credentials" + path_format_arguments = { + "name": _SERIALIZER.url("name", name, "str"), + "version": _SERIALIZER.url("version", version, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Foundry-Features"] = _SERIALIZER.header("foundry_features", foundry_features, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + def build_beta_insights_generate_request( *, foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW], **kwargs: Any ) -> HttpRequest: @@ -6394,6 +6464,338 @@ def update_version( return deserialized # type: ignore + @overload + def pending_upload( 
+ self, + name: str, + version: str, + pending_upload_request: _models.PendingUploadRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Is one of the following + types: PendingUploadRequest, JSON, IO[bytes] Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or + IO[bytes] + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(pending_upload_request, (IOBase, bytes)): + _content = pending_upload_request + else: + _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_evaluators_pending_upload_request( + name=name, + version=version, + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except 
(StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.PendingUploadResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def get_credentials( + self, + name: str, + version: str, + credential_request: _models.EvaluatorCredentialRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def get_credentials( + self, + name: str, + version: str, + credential_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. 
+ :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def get_credentials( + self, + name: str, + version: str, + credential_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def get_credentials( + self, + name: str, + version: str, + credential_request: Union[_models.EvaluatorCredentialRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the EvaluatorVersion to operate on. Required. 
+ :type version: str + :param credential_request: The credential request parameters. Is one of the following types: + EvaluatorCredentialRequest, JSON, IO[bytes] Required. + :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest or JSON or + IO[bytes] + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(credential_request, (IOBase, bytes)): + _content = credential_request + else: + _content = json.dumps(credential_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_evaluators_get_credentials_request( + name=name, + version=version, + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = 
self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.DatasetCredential, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + class BetaInsightsOperations: """ diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index d614f684d1c5..52212df81322 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai-foundry/data-plane/Foundry -commit: 05c0bec52a73ca6cd87c700ea6f4d5f68b9b433b +commit: b09cb5a69be1c014d9f67f463d6ede22035b1088 repo: Azure/azure-rest-api-specs additionalDirectories: From 06c4c26936c96ef4609c3617b4671890a693f66b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 9 Mar 2026 07:55:13 -0700 Subject: [PATCH 05/16] fix --- .../azure/ai/projects/aio/operations/_operations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index f5d68ab4c376..6f2e077a1e91 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -112,8 +112,8 @@ T = TypeVar("T") ClsType = 
Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] List = list -REM _SERIALIZER = Serializer() -REM _SERIALIZER.client_side_validation = False +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False class BetaOperations: """ From 86d53c53e8ffa1fac49c9f7388ee1b3f9e9f321f Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Mon, 9 Mar 2026 17:54:18 -0700 Subject: [PATCH 06/16] Rename env varrs name --- sdk/ai/azure-ai-projects/.env.template | 4 +- sdk/ai/azure-ai-projects/CHANGELOG.md | 4 +- sdk/ai/azure-ai-projects/README.md | 18 ++-- .../samples/agents/sample_agent_basic.py | 8 +- .../agents/sample_agent_basic_async.py | 8 +- .../agents/sample_agent_retrieve_basic.py | 8 +- .../sample_agent_retrieve_basic_async.py | 8 +- .../agents/sample_agent_stream_events.py | 8 +- .../agents/sample_agent_structured_output.py | 8 +- .../sample_agent_structured_output_async.py | 8 +- .../agents/sample_workflow_multi_agent.py | 10 +-- .../sample_workflow_multi_agent_async.py | 10 +-- ..._agent_basic_with_azure_monitor_tracing.py | 8 +- ...sample_agent_basic_with_console_tracing.py | 8 +- ..._with_console_tracing_custom_attributes.py | 8 +- .../agents/tools/sample_agent_ai_search.py | 8 +- .../tools/sample_agent_azure_function.py | 8 +- .../tools/sample_agent_bing_custom_search.py | 8 +- .../tools/sample_agent_bing_grounding.py | 8 +- .../tools/sample_agent_browser_automation.py | 8 +- .../tools/sample_agent_code_interpreter.py | 8 +- .../sample_agent_code_interpreter_async.py | 8 +- ...ample_agent_code_interpreter_with_files.py | 8 +- ...agent_code_interpreter_with_files_async.py | 8 +- .../agents/tools/sample_agent_computer_use.py | 4 +- .../tools/sample_agent_computer_use_async.py | 4 +- .../agents/tools/sample_agent_fabric.py | 8 +- .../agents/tools/sample_agent_file_search.py | 8 +- .../sample_agent_file_search_in_stream.py | 8 +- ...ample_agent_file_search_in_stream_async.py | 8 +- .../tools/sample_agent_function_tool.py 
| 8 +- .../tools/sample_agent_function_tool_async.py | 8 +- .../tools/sample_agent_image_generation.py | 8 +- .../sample_agent_image_generation_async.py | 8 +- .../samples/agents/tools/sample_agent_mcp.py | 8 +- .../agents/tools/sample_agent_mcp_async.py | 8 +- ...ample_agent_mcp_with_project_connection.py | 8 +- ...agent_mcp_with_project_connection_async.py | 8 +- .../tools/sample_agent_memory_search.py | 8 +- .../tools/sample_agent_memory_search_async.py | 8 +- .../agents/tools/sample_agent_openapi.py | 8 +- ...e_agent_openapi_with_project_connection.py | 8 +- .../agents/tools/sample_agent_sharepoint.py | 8 +- .../agents/tools/sample_agent_to_agent.py | 8 +- .../agents/tools/sample_agent_web_search.py | 8 +- .../tools/sample_agent_web_search_preview.py | 8 +- ...ple_agent_web_search_with_custom_search.py | 8 +- .../samples/connections/sample_connections.py | 4 +- .../connections/sample_connections_async.py | 4 +- .../samples/datasets/sample_datasets.py | 4 +- .../samples/datasets/sample_datasets_async.py | 4 +- .../datasets/sample_datasets_download.py | 4 +- .../samples/deployments/sample_deployments.py | 8 +- .../deployments/sample_deployments_async.py | 8 +- .../samples/evaluations/README.md | 8 +- .../agentic_evaluators/sample_coherence.py | 8 +- .../agentic_evaluators/sample_fluency.py | 8 +- .../agent_utils.py | 2 +- .../sample_generic_agentic_evaluator.py | 6 +- .../agentic_evaluators/sample_groundedness.py | 8 +- .../sample_intent_resolution.py | 8 +- .../agentic_evaluators/sample_relevance.py | 8 +- .../sample_response_completeness.py | 8 +- .../sample_task_adherence.py | 8 +- .../sample_task_completion.py | 8 +- .../sample_task_navigation_efficiency.py | 4 +- .../sample_tool_call_accuracy.py | 8 +- .../sample_tool_call_success.py | 8 +- .../sample_tool_input_accuracy.py | 8 +- .../sample_tool_output_utilization.py | 8 +- .../sample_tool_selection.py | 8 +- .../evaluations/sample_agent_evaluation.py | 8 +- .../sample_agent_response_evaluation.py | 8 +- 
..._response_evaluation_with_function_tool.py | 8 +- .../sample_continuous_evaluation_rule.py | 8 +- .../evaluations/sample_eval_catalog.py | 4 +- ...mple_eval_catalog_code_based_evaluators.py | 8 +- ...le_eval_catalog_prompt_based_evaluators.py | 8 +- .../sample_evaluation_cluster_insight.py | 10 +-- .../sample_evaluation_compare_insight.py | 8 +- .../sample_evaluations_ai_assisted.py | 8 +- ...ple_evaluations_builtin_with_dataset_id.py | 8 +- ...le_evaluations_builtin_with_inline_data.py | 8 +- ...valuations_builtin_with_inline_data_oai.py | 8 +- .../sample_evaluations_builtin_with_traces.py | 8 +- .../evaluations/sample_evaluations_graders.py | 8 +- ...aluations_score_model_grader_with_image.py | 8 +- .../evaluations/sample_model_evaluation.py | 8 +- .../evaluations/sample_redteam_evaluations.py | 6 +- .../sample_scheduled_evaluations.py | 12 +-- .../samples/files/sample_files.py | 4 +- .../samples/files/sample_files_async.py | 4 +- .../finetuning/sample_finetuning_dpo_job.py | 4 +- .../sample_finetuning_dpo_job_async.py | 4 +- ...le_finetuning_oss_models_supervised_job.py | 4 +- ...etuning_oss_models_supervised_job_async.py | 4 +- .../sample_finetuning_reinforcement_job.py | 4 +- ...mple_finetuning_reinforcement_job_async.py | 4 +- .../sample_finetuning_supervised_job.py | 4 +- .../sample_finetuning_supervised_job_async.py | 4 +- .../samples/indexes/sample_indexes.py | 4 +- .../samples/indexes/sample_indexes_async.py | 4 +- .../mcp_client/sample_mcp_tool_async.py | 6 +- .../memories/sample_memory_advanced.py | 4 +- .../memories/sample_memory_advanced_async.py | 4 +- .../samples/memories/sample_memory_basic.py | 4 +- .../memories/sample_memory_basic_async.py | 4 +- .../samples/memories/sample_memory_crud.py | 4 +- .../memories/sample_memory_crud_async.py | 4 +- .../samples/red_team/sample_red_team.py | 8 +- .../samples/red_team/sample_red_team_async.py | 8 +- .../responses/sample_responses_basic.py | 10 +-- .../responses/sample_responses_basic_async.py | 10 +-- 
...responses_basic_without_aiprojectclient.py | 8 +- ...ses_basic_without_aiprojectclient_async.py | 8 +- .../responses/sample_responses_image_input.py | 8 +- .../sample_responses_stream_events.py | 8 +- .../sample_responses_stream_manager.py | 8 +- .../sample_responses_structured_output.py | 8 +- .../samples/telemetry/sample_telemetry.py | 4 +- .../telemetry/sample_telemetry_async.py | 4 +- .../telemetry/test_ai_agents_instrumentor.py | 8 +- .../test_ai_agents_instrumentor_async.py | 8 +- .../telemetry/test_responses_instrumentor.py | 84 +++++++++---------- .../test_responses_instrumentor_async.py | 72 ++++++++-------- ...sponses_instrumentor_browser_automation.py | 8 +- ...s_instrumentor_browser_automation_async.py | 8 +- ...responses_instrumentor_code_interpreter.py | 8 +- ...ses_instrumentor_code_interpreter_async.py | 8 +- ...test_responses_instrumentor_file_search.py | 8 +- ...esponses_instrumentor_file_search_async.py | 8 +- .../test_responses_instrumentor_mcp.py | 8 +- .../test_responses_instrumentor_mcp_async.py | 8 +- .../test_responses_instrumentor_metrics.py | 2 +- .../test_responses_instrumentor_workflow.py | 8 +- ...t_responses_instrumentor_workflow_async.py | 8 +- .../tests/agents/test_agent_responses_crud.py | 4 +- .../agents/test_agent_responses_crud_async.py | 4 +- .../tests/agents/test_agents_crud.py | 2 +- .../tests/agents/test_agents_crud_async.py | 2 +- ...est_agent_code_interpreter_and_function.py | 4 +- ..._agent_file_search_and_code_interpreter.py | 4 +- .../test_agent_file_search_and_function.py | 8 +- ...t_file_search_code_interpreter_function.py | 2 +- .../test_multitool_with_conversations.py | 2 +- .../agents/tools/test_agent_ai_search.py | 2 +- .../tools/test_agent_ai_search_async.py | 2 +- .../agents/tools/test_agent_bing_grounding.py | 4 +- .../tools/test_agent_code_interpreter.py | 4 +- .../test_agent_code_interpreter_async.py | 2 +- .../agents/tools/test_agent_file_search.py | 4 +- .../tools/test_agent_file_search_async.py | 4 +- 
.../tools/test_agent_file_search_stream.py | 2 +- .../test_agent_file_search_stream_async.py | 2 +- .../agents/tools/test_agent_function_tool.py | 6 +- .../tools/test_agent_function_tool_async.py | 6 +- .../tools/test_agent_image_generation.py | 2 +- .../test_agent_image_generation_async.py | 2 +- .../tests/agents/tools/test_agent_mcp.py | 4 +- .../agents/tools/test_agent_mcp_async.py | 2 +- .../agents/tools/test_agent_memory_search.py | 2 +- .../tools/test_agent_memory_search_async.py | 2 +- .../tests/agents/tools/test_agent_openapi.py | 2 +- .../agents/tools/test_agent_openapi_async.py | 2 +- .../test_agent_tools_with_conversations.py | 8 +- .../agents/tools/test_agent_web_search.py | 2 +- .../tools/test_agent_web_search_async.py | 2 +- .../tests/datasets/test_datasets.py | 2 +- .../tests/datasets/test_datasets_async.py | 2 +- .../tests/deployments/test_deployments.py | 4 +- .../deployments/test_deployments_async.py | 4 +- .../tests/finetuning/test_finetuning.py | 4 +- .../tests/finetuning/test_finetuning_async.py | 4 +- .../tests/responses/test_responses.py | 2 +- .../tests/responses/test_responses_async.py | 2 +- .../azure-ai-projects/tests/samples/README.md | 12 +-- .../tests/samples/test_samples.py | 32 +++---- .../tests/samples/test_samples_async.py | 28 +++---- .../tests/samples/test_samples_evaluations.py | 16 ++-- sdk/ai/azure-ai-projects/tests/test_base.py | 12 +-- 180 files changed, 668 insertions(+), 666 deletions(-) diff --git a/sdk/ai/azure-ai-projects/.env.template b/sdk/ai/azure-ai-projects/.env.template index df66f2a71199..89e81956f322 100644 --- a/sdk/ai/azure-ai-projects/.env.template +++ b/sdk/ai/azure-ai-projects/.env.template @@ -20,8 +20,8 @@ AZURE_AI_PROJECTS_CONSOLE_LOGGING= # Project endpoint has the format: # `https://.services.ai.azure.com/api/projects/` -AZURE_AI_PROJECT_ENDPOINT= -AZURE_AI_MODEL_DEPLOYMENT_NAME= +PROJECT_ENDPOINT= +MODEL_DEPLOYMENT_NAME= AZURE_AI_AGENT_NAME= CONVERSATION_ID= CONNECTION_NAME= diff --git 
a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 34c72e0a6fa5..80b7bcd0be13 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -20,7 +20,9 @@ ### Other Changes -* Placeholder +* Renamed the environment variable `AZURE_AI_PROJECT_ENDPOINT` to `PROJECT_ENDPOINT`. +* Renamed the environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` to `MODEL_DEPLOYMENT_NAME`. + ## 2.0.0 (2026-03-06) First stable release of the client library that uses the Generally Available (GA) version "v1" of the Foundry REST APIs. diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 8c741a297add..a9b17e078b94 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -54,7 +54,7 @@ To report an issue with the client library, or request additional features, plea * Python 3.9 or later. * An [Azure subscription][azure_sub]. * A [project in Microsoft Foundry](https://learn.microsoft.com/azure/foundry/how-to/create-projects). -* A Foundry project endpoint URL of the form `https://your-ai-services-account-name.services.ai.azure.com/api/projects/your-project-name`. It can be found in your Microsoft Foundry Project home page. Below we will assume the environment variable `AZURE_AI_PROJECT_ENDPOINT` was defined to hold this value. +* A Foundry project endpoint URL of the form `https://your-ai-services-account-name.services.ai.azure.com/api/projects/your-project-name`. It can be found in your Microsoft Foundry Project home page. Below we will assume the environment variable `PROJECT_ENDPOINT` was defined to hold this value. * An Entra ID token for authentication. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: * An appropriate role assignment.
See [Role-based access control in Microsoft Foundry portal](https://learn.microsoft.com/azure/foundry/concepts/rbac-foundry). Role assignment can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed. @@ -87,7 +87,7 @@ from azure.identity import DefaultAzureCredential with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client, ): ``` @@ -107,7 +107,7 @@ from azure.identity.aio import DefaultAzureCredential async with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client, ): ``` @@ -117,20 +117,20 @@ async with ( Your Microsoft Foundry project may have one or more AI models deployed. These could be OpenAI models, Microsoft models, or models from other providers. Use the code below to get an authenticated [OpenAI](https://github.com/openai/openai-python?tab=readme-ov-file#usage) client from the [openai](https://pypi.org/project/openai/) package, and execute an example multi-turn "Responses" calls. -The code below assumes the environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). +The code below assumes the environment variable `MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). 
```python with project_client.get_openai_client() as openai_client: response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) @@ -145,7 +145,7 @@ See the "responses" folder in the [package samples][samples] for additional samp The `.agents` property on the `AIProjectClient` gives you access to all Agent operations. Agents use an extension of the OpenAI Responses protocol, so you will need to get an `OpenAI` client to do Agent operations, as shown in the example below. -The code below assumes environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). +The code below assumes environment variable `MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). See the "agents" folder in the [package samples][samples] for an extensive set of samples, including streaming, tool usage and memory store usage. 
@@ -156,7 +156,7 @@ with project_client.get_openai_client() as openai_client: agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) @@ -1357,7 +1357,7 @@ By default logs redact the values of URL query strings, the values of some HTTP ```python project_client = AIProjectClient( credential=DefaultAzureCredential(), - endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + endpoint=os.environ["PROJECT_ENDPOINT"], logging_enable=True ) ``` diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py index 1b57d0bcd29c..a494cd33cd1e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py index 69404d31ae35..c4ddae1518c3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main() -> None: @@ -48,7 +48,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions.", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py index 876dc8daebcd..89d6192b6c7b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py @@ -22,9 +22,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -36,8 +36,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["PROJECT_ENDPOINT"] +model = os.environ["MODEL_DEPLOYMENT_NAME"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py index 8baa7034c139..89320902dd05 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py @@ -22,9 +22,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -37,8 +37,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["PROJECT_ENDPOINT"] +model = os.environ["MODEL_DEPLOYMENT_NAME"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py index 5ab97c36b587..cfb11d16d92b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py index dfeb5c961720..83f7e7803384 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py @@ -24,9 +24,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv pydantic Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -51,7 +51,7 @@ class CalendarEvent(BaseModel): participants: list[str] -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -62,7 +62,7 @@ class CalendarEvent(BaseModel): agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], text=PromptAgentDefinitionTextOptions( format=TextResponseFormatJsonSchema(name="CalendarEvent", schema=CalendarEvent.model_json_schema()) ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py index 12bdaf31231a..a21d54e8426d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py @@ -24,9 +24,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp pydantic Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] class CalendarEvent(BaseModel): @@ -63,7 +63,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], text=PromptAgentDefinitionTextOptions( format=TextResponseFormatJsonSchema(name="CalendarEvent", schema=CalendarEvent.model_json_schema()) ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py index b8674dc5c146..5def14deed3e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ teacher_agent = project_client.agents.create_version( agent_name="teacher-agent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="""You are a teacher that create pre-school math question for student and check answer. If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", @@ -58,7 +58,7 @@ student_agent = project_client.agents.create_version( agent_name="student-agent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py index b565a04be074..160e059dbd39 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
- 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. """ @@ -36,7 +36,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main(): @@ -50,7 +50,7 @@ async def main(): teacher_agent = await project_client.agents.create_version( agent_name="teacher-agent-async", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="""You are a teacher that create pre-school math question for student and check answer. If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", @@ -61,7 +61,7 @@ async def main(): student_agent = await project_client.agents.create_version( agent_name="student-agent-async", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py index 0f318459182f..cdf10921d9a0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-monitor-opentelemetry Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -46,7 +46,7 @@ with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client, ): # [START setup_azure_monitor_tracing] # Enable Azure Monitor tracing @@ -62,7 +62,7 @@ # [END create_span_for_scenario] with project_client.get_openai_client() as openai_client: agent_definition = PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py index e5aa0582e9a6..5495550a2131 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv opentelemetry-sdk azure-core-tracing-opentelemetry Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -90,11 +90,11 @@ def display_conversation_item(item: Any) -> None: # [END create_span_for_scenario] with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client, project_client.get_openai_client() as openai_client, ): agent_definition = PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py index 64786e3a00e0..c35bc71b8823 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv opentelemetry-sdk azure-core-tracing-opentelemetry Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] # Define the custom span processor that is used for adding the custom @@ -94,7 +94,7 @@ def on_end(self, span: ReadableSpan): ): agent_definition = PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py index 6f082269687a..e0e5cc4267ea 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AI_SEARCH_PROJECT_CONNECTION_ID - The AI Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -66,7 +66,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="""You are a helpful assistant. You must always provide citations for answers using the tool and render them as: `\u3010message_idx:search_idx\u2020source\u3011`.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py index 242ee742229b..d6f5aed57571 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0b1" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) STORAGE_INPUT_QUEUE_NAME - The name of the Azure Storage Queue to use for input and output in the Azure Function tool. 4) STORAGE_OUTPUT_QUEUE_NAME - The name of the Azure Storage Queue to use for output in the Azure Function tool. 
@@ -44,7 +44,7 @@ agent = None -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -82,7 +82,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py index 1ebf4c6d213b..43c3246fb6f9 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py @@ -27,9 +27,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID - The Bing Custom Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -50,7 +50,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -74,7 +74,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="""You are a helpful agent that can use Bing Custom Search tools to assist users. Use the available Bing Custom Search tools to answer questions and perform tasks.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py index 22fb479f0109..7ca03afa983a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py @@ -35,9 +35,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_PROJECT_CONNECTION_ID - The Bing project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
""" @@ -55,7 +55,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -76,7 +76,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py index aa660411a6cc..38129dea7b78 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BROWSER_AUTOMATION_PROJECT_CONNECTION_ID - The browser automation project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="""You are an Agent helping with browser automation tasks. You can answer questions, provide information, and assist with various tasks related to web browsing using the Browser Automation tool available to you.""", diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py index 6007468f8439..72f9d84a4aa9 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -47,7 +47,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py index edde775f47aa..de936fcfff6b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main() -> None: @@ -46,7 +46,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[CodeInterpreterTool()], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py index 3cb511cfcc3b..972d9ad84447 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -57,7 +57,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py index 69ddd048bcc8..e275ab430db7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main() -> None: @@ -59,7 +59,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[CodeInterpreterTool(container=AutoCodeInterpreterToolParam(file_ids=[file.id]))], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py index 3b1ec849257c..ec91666a1fa2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py @@ -23,7 +23,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) (Optional) COMPUTER_USE_MODEL_DEPLOYMENT_NAME - The deployment name of the computer-use-preview model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -45,7 +45,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py index c2c85a6c9906..65b027d6e281 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py @@ -23,7 +23,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) (Optional) COMPUTER_USE_MODEL_DEPLOYMENT_NAME - The deployment name of the computer-use-preview model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py index 20bcd70f597d..c780ff71b510 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
- 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) FABRIC_PROJECT_CONNECTION_ID - The Fabric project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -59,7 +59,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py index 9e14fdd34461..c92e0cfc1e0f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search through product information.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py index 4bac22ce828c..c8ce27101fc2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -63,7 +63,7 @@ agent = project_client.agents.create_version( agent_name="StreamingFileSearchAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search through product information and provide detailed responses. Use the file search tool to find relevant information before answering.", tools=[FileSearchTool(vector_store_ids=[vector_store.id])], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py index 7f02ea13a7d6..1ca1c6ec2ec5 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main() -> None: @@ -63,7 +63,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="StreamingFileSearchAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search through product information and provide detailed responses. Use the file search tool to find relevant information before answering.", tools=[FileSearchTool(vector_store_ids=[vector_store.id])], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py index 701201d55d99..a8ee3ce16447 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. """ @@ -39,7 +39,7 @@ def get_horoscope(sign: str) -> str: return f"{sign}: Next Tuesday you will befriend a baby otter." 
-endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -69,7 +69,7 @@ def get_horoscope(sign: str) -> str: agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can use function tools.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py index ccbc373e6585..8f7a416abeb1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def get_horoscope(sign: str) -> str: @@ -70,7 +70,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can use function tools.", tools=[func_tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py index 5b9ea7011b3c..db372ec52647 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py @@ -28,9 +28,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) used by the Agent for understanding and responding to prompts. This is NOT the image generation model. 3) IMAGE_GENERATION_MODEL_DEPLOYMENT_NAME - The deployment name of the image generation model (e.g. gpt-image-1) used by the ImageGenTool. 
@@ -53,7 +53,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -73,7 +73,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="Generate images based on user prompts", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py index b7859d6483eb..34010e739490 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py @@ -28,9 +28,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) used by the Agent for understanding and responding to prompts. This is NOT the image generation model. 3) IMAGE_GENERATION_MODEL_DEPLOYMENT_NAME - The deployment name of the image generation model (e.g. gpt-image-1) used by the ImageGenTool. 
@@ -53,7 +53,7 @@ from azure.ai.projects.models import PromptAgentDefinition, ImageGenTool load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main(): @@ -68,7 +68,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="Generate images based on user prompts", tools=[ImageGenTool(model=image_generation_model, quality="low", size="1024x1024")], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py index c318f9004f1e..95a4010ced59 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -50,7 +50,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful agent that can use MCP tools to assist users. Use the available MCP tools to answer questions and perform tasks.", tools=[mcp_tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py index cc12b02a8fdd..0e980be777ad 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main(): @@ -55,7 +55,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful agent that can use MCP tools to assist users. Use the available MCP tools to answer questions and perform tasks.", tools=tools, ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py index be1036a9fde8..27af3ffcd510 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MCP_PROJECT_CONNECTION_ID - The connection resource ID in Custom keys with key equals to "Authorization" and value to be "Bearer ". 
@@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -55,7 +55,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent7", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="Use MCP tools as needed", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py index 71ec422987a4..7d25ec8836ae 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MCP_PROJECT_CONNECTION_ID - The connection resource ID in Custom keys with key equals to "Authorization" and value to be "Bearer ". 
@@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main(): @@ -58,7 +58,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="Use MCP tools as needed", tools=tools, ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py index 26f9221631f1..6888ad8263a5 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py @@ -23,9 +23,9 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model for memory, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -48,7 +48,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -96,7 +96,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py index 6e959c0c3a88..dd3792a89249 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py @@ -23,9 +23,9 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model for memory, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -51,7 +51,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, @@ -90,7 +90,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", tools=[ MemorySearchPreviewTool( diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py index aba7820c69d6..33080b31243c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv jsonref Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -38,7 +38,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -65,7 +65,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py index 886288df69de..e4d56f1ba0f6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv jsonref Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) OPENAPI_PROJECT_CONNECTION_ID - The OpenAPI project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -42,7 +42,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -75,7 +75,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py index 6a108dfe83a9..273b6c18a73e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) SHAREPOINT_PROJECT_CONNECTION_ID - The SharePoint project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -59,7 +59,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="""You are a helpful agent that can use SharePoint tools to assist users. Use the available SharePoint tools to answer questions and perform tasks.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py index 133b99d589e0..2a0475de9bfa 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) A2A_PROJECT_CONNECTION_ID - The A2A project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -61,7 +61,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py index 01729d031854..6837cb0b13a2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py @@ -26,9 +26,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -46,7 +46,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search the web", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py index bf09f23786ff..d433fb8cf8e7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py @@ -26,9 +26,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,7 +42,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -56,7 +56,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent105", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search the web", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py index 41d8d6f75aa2..f086c37c2517 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py @@ -27,9 +27,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID - The Bing Custom Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -51,7 +51,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -70,7 +70,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search the web and bing", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index a867acc2106e..f970221e5068 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - The name of a connection, as found in the "Connected resources" tab in the Management Center of your Microsoft Foundry project. 
@@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] connection_name = os.environ["CONNECTION_NAME"] with ( diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py index e5814abf27fc..fce1328c7d36 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - The name of a connection, as found in the "Connected resources" tab in the Management Center of your Microsoft Foundry project. @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] connection_name = os.environ["CONNECTION_NAME"] diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py index eb664060a12c..c0ef2fb68bf9 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. 
The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. @@ -36,7 +36,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version_1 = os.environ.get("DATASET_VERSION_1", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py index 7740507124d9..ebf84028bbc8 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 
@@ -45,7 +45,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version_1 = os.environ.get("DATASET_VERSION_1", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py index 2a9aa4fc73a2..138431174b93 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py @@ -19,7 +19,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version = os.environ.get("DATASET_VERSION", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py index cee409590d7a..f6faeb5037bd 100644 --- a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py +++ b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. 3) MODEL_PUBLISHER - Optional. The publisher of the model to filter by. 4) MODEL_NAME - Optional. The name of the model to filter by. 
""" @@ -31,8 +31,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] model_publisher = os.environ.get("MODEL_PUBLISHER", "Microsoft") model_name = os.environ.get("MODEL_NAME", "Phi-4") diff --git a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py index ae2a1151ba17..00c7d1a3e812 100644 --- a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py +++ b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. 3) MODEL_PUBLISHER - Optional. The publisher of the model to filter by. 4) MODEL_NAME - Optional. The name of the model to filter by. 
""" @@ -35,8 +35,8 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] - model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + endpoint = os.environ["PROJECT_ENDPOINT"] + model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] model_publisher = os.environ.get("MODEL_PUBLISHER", "Microsoft") model_name = os.environ.get("MODEL_NAME", "Phi-4") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/README.md b/sdk/ai/azure-ai-projects/samples/evaluations/README.md index 628468aa20e4..078a9edc6cfc 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/README.md +++ b/sdk/ai/azure-ai-projects/samples/evaluations/README.md @@ -11,8 +11,8 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv ``` Set these environment variables: -- `AZURE_AI_PROJECT_ENDPOINT` - Your Azure AI Project endpoint (e.g., `https://.services.ai.azure.com/api/projects/`) -- `AZURE_AI_MODEL_DEPLOYMENT_NAME` - The model deployment name (e.g., `gpt-4o-mini`) +- `PROJECT_ENDPOINT` - Your Azure AI Project endpoint (e.g., `https://.services.ai.azure.com/api/projects/`) +- `MODEL_DEPLOYMENT_NAME` - The model deployment name (e.g., `gpt-4o-mini`) ## Sample Index @@ -94,8 +94,8 @@ Located in the [agentic_evaluators](https://github.com/Azure/azure-sdk-for-pytho ```bash # Set environment variables -export AZURE_AI_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" -export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini" # Replace with your model +export PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +export MODEL_DEPLOYMENT_NAME="gpt-4o-mini" # Replace with your model # Run a sample python sample_evaluations_builtin_with_inline_data.py diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py index 13bbaf3726bc..19ebfbc845fa 100644 --- 
a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py index dc0a7199df78..0e747921b2dd 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. 
The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py index 1650b0d1cc5d..49f206917552 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py @@ -28,7 +28,7 @@ def run_evaluator( data_mapping: dict[str, str], ) -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ with ( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py index df15b21b6699..d54b7e664ed0 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py 
+++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -36,7 +36,7 @@ def _get_evaluator_initialization_parameters(evaluator_name: str) -> dict[str, s if evaluator_name == "task_navigation_efficiency": return {"matching_mode": "exact_match"} # Can be "exact_match", "in_order_match", or "any_order_match" else: - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini return {"deployment_name": model_deployment_name} diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py index 654adfb1faec..4832d3f1d926 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. 
The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py index e434a78db732..06861dcb05ea 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py index 1d3cc28dc6cb..0ca65a06c49c 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py index cd6ce53055ea..a73734676532 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py index b2f5381c54e4..c884df5ecf08 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py index 92d39f2ddddc..b5439678f8eb 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py index 6c9bc015d529..9383602a6194 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 
""" @@ -42,7 +42,7 @@ def main() -> None: endpoint = os.environ.get( - "AZURE_AI_PROJECT_ENDPOINT", "" + "PROJECT_ENDPOINT", "" ) # Sample : https://.services.ai.azure.com/api/projects/ with ( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py index c2f8980e503e..c5384ce3c16c 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py index 5404c8bb1183..ef82d24014cd 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -44,9 +44,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py index 5809882fb1ee..22dcc2c0f242 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py index 3437f8fd674f..287a36a417f5 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py index e7646aeaabe8..eb21c7c1042e 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py index 7cafd28186f2..b3c6d2a45a27 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py @@ -19,10 +19,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 3) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,8 +39,8 @@ from openai.types.evals.run_retrieve_response import RunRetrieveResponse load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini # [START agent_evaluation_basic] with ( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py index 9c81014daa52..dae8f5f97596 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py @@ -19,10 +19,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 3) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -50,7 +50,7 @@ agent = project_client.agents.create_version( agent_name=os.environ["AZURE_AI_AGENT_NAME"], definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py index 37d7244c3808..b69f22681c64 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -40,8 +40,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Define a function tool for the model to use func_tool = FunctionTool( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py index 7c38cb9a2d5d..7a37829a1110 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py @@ -28,10 +28,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 3) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -50,7 +50,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -63,7 +63,7 @@ agent = project_client.agents.create_version( agent_name=os.environ["AZURE_AI_AGENT_NAME"], definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py index 255ea0c2e660..ff0d08897d8d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 
""" @@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py index c3227497204d..abd5526eb45b 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. 
""" @@ -42,8 +42,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py index b3dfb581b3ce..fc00c7831b57 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. 
For Custom Prompt Based Evaluators: @@ -75,8 +75,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py index 77093c5d704b..d7a7886e5900 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -51,11 +51,11 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME") if not model_deployment_name: - raise ValueError("AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set") + raise ValueError("MODEL_DEPLOYMENT_NAME environment variable is not set") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py index c0fe4424bc85..b830d1111548 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -47,7 +47,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -64,7 +64,7 @@ TestingCriterionLabelModel( type="label_model", name="sentiment_analysis", - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input=[ { "role": "developer", diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py index 8ad0899d2a01..4d1f0b66289c 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -39,8 +39,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py index c113c8b67e06..0aebda63159e 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 4) DATASET_VERSION - Optional. The version of the Dataset to create and use in this sample. 5) DATA_FOLDER - Optional. The folder path where the data files for upload are located. 
@@ -44,8 +44,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") dataset_name = os.environ.get("DATASET_NAME", "") dataset_version = os.environ.get("DATASET_VERSION", "1") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py index fa4ec52105a3..1cf2e9328207 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -41,9 +41,9 @@ endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini # Construct the paths to the data folder and data file used in this sample script_dir = os.path.dirname(os.path.abspath(__file__)) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py index 9d968bbbf296..b2d9b79453b6 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py @@ -18,9 +18,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -42,10 +42,10 @@ client = OpenAI( api_key=get_bearer_token_provider(DefaultAzureCredential(), "https://ai.azure.com/.default"), - base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini data_source_config = DataSourceConfigCustom( { diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py index 33aff799eb4f..e24649f8fc78 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py @@ -19,12 +19,12 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-monitor-query Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 2) APPINSIGHTS_RESOURCE_ID - Required. The Azure Application Insights resource ID that stores agent traces. It has the form: /subscriptions//resourceGroups//providers/Microsoft.Insights/components/. 3) AGENT_ID - Required. The agent identifier emitted by the Azure tracing integration, used to filter traces. - 4) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The Azure OpenAI deployment name to use with the built-in evaluators. + 4) MODEL_DEPLOYMENT_NAME - Required. The Azure OpenAI deployment name to use with the built-in evaluators. 5) TRACE_LOOKBACK_HOURS - Optional. 
Number of hours to look back when querying traces and in the evaluation run. Defaults to 1. """ @@ -44,12 +44,12 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] appinsights_resource_id = os.environ[ "APPINSIGHTS_RESOURCE_ID" ] # Sample : /subscriptions//resourceGroups//providers/Microsoft.Insights/components/ agent_id = os.environ["AGENT_ID"] -model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] trace_query_hours = int(os.environ.get("TRACE_LOOKBACK_HOURS", "1")) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py index ecb0f7fc86c7..e382f1ce7c3d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -39,8 +39,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py index 0f9377ef9fbf..7f14453d4e26 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv pillow Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -47,8 +47,8 @@ file_path = os.path.abspath(__file__) folder_path = os.path.dirname(file_path) -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") def image_to_data_uri(image_path: str) -> str: diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py index 8b0170031dd3..c66866f7ed27 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -68,7 +68,7 @@ ) print(f"Evaluation created (id: {eval_object.id}, name: {eval_object.name})") - model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + model = os.environ["MODEL_DEPLOYMENT_NAME"] data_source = { "type": "azure_ai_target_completions", "source": { diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py index 6516c637796e..6f75a6bad70f 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 2) AZURE_AI_AGENT_NAME - Required. The name of the Agent to perform red teaming evaluation on. 
""" @@ -45,7 +45,7 @@ def main() -> None: load_dotenv() # - endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") + endpoint = os.environ.get("PROJECT_ENDPOINT", "") agent_name = os.environ.get("AZURE_AI_AGENT_NAME", "") with ( @@ -56,7 +56,7 @@ def main() -> None: agent_version = project_client.agents.create_version( agent_name=agent_name, definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py index 29397e006414..fcde3de826ca 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-mgmt-authorization azure-mgmt-resource Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 2) AZURE_SUBSCRIPTION_ID - Required for RBAC assignment. The Azure subscription ID where the project is located. 3) AZURE_RESOURCE_GROUP_NAME - Required for RBAC assignment. The resource group name where the project is located. 
@@ -75,13 +75,13 @@ def assign_rbac(): """ load_dotenv() - endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") + endpoint = os.environ.get("PROJECT_ENDPOINT", "") subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", "") resource_group_name = os.environ.get("AZURE_RESOURCE_GROUP_NAME", "") if not endpoint or not subscription_id or not resource_group_name: print( - "Error: AZURE_AI_PROJECT_ENDPOINT, AZURE_SUBSCRIPTION_ID, and AZURE_RESOURCE_GROUP_NAME environment variables are required" + "Error: PROJECT_ENDPOINT, AZURE_SUBSCRIPTION_ID, and AZURE_RESOURCE_GROUP_NAME environment variables are required" ) return @@ -214,7 +214,7 @@ def assign_rbac(): def schedule_dataset_evaluation() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["PROJECT_ENDPOINT"] dataset_name = os.environ.get("DATASET_NAME", "") dataset_version = os.environ.get("DATASET_VERSION", "1") # Construct the paths to the data folder and data file used in this sample @@ -327,7 +327,7 @@ def schedule_dataset_evaluation() -> None: def schedule_redteam_evaluation() -> None: load_dotenv() # - endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") + endpoint = os.environ.get("PROJECT_ENDPOINT", "") agent_name = os.environ.get("AZURE_AI_AGENT_NAME", "") # Construct the paths to the data folder and data file used in this sample @@ -343,7 +343,7 @@ def schedule_redteam_evaluation() -> None: agent_version = project_client.agents.create_version( agent_name=agent_name, definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/files/sample_files.py b/sdk/ai/azure-ai-projects/samples/files/sample_files.py index 57f8e3fb3187..ba156aff897e 100644 --- a/sdk/ai/azure-ai-projects/samples/files/sample_files.py +++ b/sdk/ai/azure-ai-projects/samples/files/sample_files.py @@ 
-16,7 +16,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) FILE_PATH - Optional. Path to the file to upload. Defaults to the `data` folder. """ @@ -29,7 +29,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] script_dir = Path(__file__).parent file_path = os.environ.get("FILE_PATH", os.path.join(script_dir, "data", "test_file.jsonl")) diff --git a/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py b/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py index d6bb1491a678..0fbdcb80223c 100644 --- a/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py +++ b/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py @@ -16,7 +16,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) FILE_PATH - Optional. Path to the file to upload. Defaults to the `data` folder. 
""" @@ -30,7 +30,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] script_dir = Path(__file__).parent file_path = os.environ.get("FILE_PATH", os.path.join(script_dir, "data", "test_file.jsonl")) diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py index b8fe46419490..2e66c9edfb27 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4o` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "dpo_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "dpo_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py index 4e7b5dc91ec0..340159571043 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4o` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "dpo_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "dpo_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py index c4a44e2d727f..5918ff361cdf 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "Ministral-3B") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py index 2fcd5ee8e93c..01267d2d3ecf 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `Ministral-3B` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "Ministral-3B") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py index 6b1bed171863..576587bfb4cc 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `o4-mini` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "o4-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "rft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "rft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py index 701bf384731f..0034551f6005 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `o4-mini` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "o4-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "rft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "rft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py index 626af3c4bedd..4edd2d93f1ee 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py @@ -21,7 +21,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-mgmt-cognitiveservices Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -43,7 +43,7 @@ load_dotenv() # For fine-tuning -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4.1") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py index cd967034fd92..47d3136f2ced 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py @@ -21,7 +21,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp azure-mgmt-cognitiveservices Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -43,7 +43,7 @@ load_dotenv() # For fine-tuning -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4.1") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py index fd865b8d8ca1..594e651a917d 100644 --- a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py +++ b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) INDEX_NAME - Optional. The name of the Index to create and use in this sample. 3) INDEX_VERSION - Optional. The version of the Index to create and use in this sample. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] index_name = os.environ.get("INDEX_NAME", "index-test") index_version = os.environ.get("INDEX_VERSION", "1.0") ai_search_connection_name = os.environ.get("AI_SEARCH_CONNECTION_NAME", "my-ai-search-connection-name") diff --git a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py index 9ace80a10df8..1cd9c00f1328 100644 --- a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py +++ b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) INDEX_NAME - Optional. The name of the Index to create and use in this sample. 3) INDEX_VERSION - Optional. The version of the Index to create and use in this sample. 
@@ -37,7 +37,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["PROJECT_ENDPOINT"] index_name = os.environ.get("INDEX_NAME", "index-test") index_version = os.environ.get("INDEX_VERSION", "1.0") ai_search_connection_name = os.environ.get("AI_SEARCH_CONNECTION_NAME", "my-ai-search-connection-name") diff --git a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py index d09bb48a72ad..4623807d024d 100644 --- a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py @@ -8,7 +8,7 @@ DESCRIPTION: This sample demonstrates how to directly interact with MCP (Model Context Protocol) tools using the low-level MCP client library to connect to the Foundry Project's MCP tools API: - {AZURE_AI_PROJECT_ENDPOINT}/mcp_tools?api-version=2025-05-15-preview + {PROJECT_ENDPOINT}/mcp_tools?api-version=2025-05-15-preview For agent-based MCP tool usage, see samples in samples/agents/tools/sample_agent_mcp.py and related files in that directory. @@ -29,7 +29,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv mcp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) IMAGE_GEN_DEPLOYMENT_NAME - The deployment name of the image generation model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -64,7 +64,7 @@ # Enable httpx logging to see HTTP requests at the same level logging.getLogger("httpx").setLevel(getattr(logging, log_level, logging.CRITICAL)) -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py index 6af7c9c90f3c..85f8e3e5aec9 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py @@ -24,7 +24,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -46,7 +46,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py index f8b9643f7547..bde8a22a1c3e 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py @@ -24,7 +24,7 @@ Once you have deployed models, set the deployment name in the variables below. 
Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -50,7 +50,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py index 0d3cf82f4fbe..ff307a66cbb9 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py @@ -22,7 +22,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -43,7 +43,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py index 6999a6c154ca..fe12d1db07ee 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py @@ -23,7 +23,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -48,7 +48,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py index 94d1abdab835..cf116e910d08 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py @@ -20,7 +20,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -37,7 +37,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py index 730dd3a53534..23fc4782e98e 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py @@ -20,7 +20,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -41,7 +41,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py index 918ae00569e5..bdf4936f8f6c 100644 --- a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py +++ b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. 
The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. + 2) MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. 3) MODEL_ENDPOINT - Required. The Azure AI Model endpoint, as found in the overview page of your Microsoft Foundry project. Example: https://.services.ai.azure.com 4) MODEL_API_KEY - Required. The API key for your Azure AI Model. @@ -38,11 +38,11 @@ load_dotenv() endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com model_api_key = os.environ["MODEL_API_KEY"] -model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py index 261b0f9aaedc..54170b0d3492 100644 --- a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py +++ b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. + 2) MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. 3) MODEL_ENDPOINT - Required. 
The Azure AI Model endpoint, as found in the overview page of your Microsoft Foundry project. Example: https://.services.ai.azure.com 4) MODEL_API_KEY - Required. The API key for your Azure AI Model. @@ -43,11 +43,11 @@ async def sample_red_team_async() -> None: """Demonstrates how to perform Red Team operations using the AIProjectClient.""" endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com model_api_key = os.environ["MODEL_API_KEY"] - model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini + model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py index 436237b03c46..062aef48577e 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -40,13 +40,13 @@ # [START responses] with project_client.get_openai_client() as openai_client: response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py index b68ab13eddd7..0501cc142cbe 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] async def main() -> None: @@ -43,13 +43,13 @@ async def main() -> None: project_client.get_openai_client() as openai_client, ): response = await openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = await openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py index 9c5d8b656bd7..39375d948e2d 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py @@ -19,9 +19,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,11 +34,11 @@ openai = OpenAI( api_key=get_bearer_token_provider(DefaultAzureCredential(), "https://ai.azure.com/.default"), - base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) response = openai.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input="How many feet are in a mile?", ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py index 8a2934ff7418..47b46923f9b3 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py @@ -19,9 +19,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,13 +42,13 @@ async def main() -> None: openai = AsyncOpenAI( api_key=get_bearer_token_provider(credential, "https://ai.azure.com/.default"), - base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) async with openai: response = await openai.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input="How many feet are in a mile?", ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py index 542d956b1cf0..1299f15d638c 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] def image_to_base64(image_path: str) -> str: @@ -71,6 +71,6 @@ def image_to_base64(image_path: str) -> str: ], } ], - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py index 86f3b2d1fa47..8d7f60673c2b 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -43,7 +43,7 @@ ): with openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input=[ {"role": "user", "content": "Tell me about the capital city of France"}, ], diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py index 110bb0b1c4be..6508bd40deb4 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -42,7 +42,7 @@ ): with openai_client.responses.stream( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], input=[ {"role": "user", "content": "Tell me about the capital city of France"}, ], diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py index d1c75654083a..a781b8152edc 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,7 +42,7 @@ class CalendarEvent(BaseModel): participants: list[str] -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -50,7 +50,7 @@ class CalendarEvent(BaseModel): project_client.get_openai_client() as openai_client, ): response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["MODEL_DEPLOYMENT_NAME"], instructions=""" Extracts calendar event information from the input messages, and return it in the desired structured output format. diff --git a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py index 61f11436620b..b5ef4f48234a 100644 --- a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py +++ b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 
""" @@ -28,7 +28,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py index 7554370401f4..4e98aff356bd 100644 --- a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py +++ b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. """ @@ -32,7 +32,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py index d7aede04fb65..c8a6ebc74921 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py @@ -273,7 +273,7 @@ def _test_agent_creation_with_tracing_content_recording_enabled_impl(self, use_e with self.create_client(operation_group="tracing", **kwargs) as project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") print(f"Using model deployment: {model}") agent_definition = PromptAgentDefinition( @@ -387,7 +387,7 @@ def _test_agent_creation_with_tracing_content_recording_disabled_impl(self, use_ with self.create_client(operation_group="agents", **kwargs) as 
project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") agent_definition = PromptAgentDefinition( model=model, instructions="You are a helpful AI assistant. Always be polite and provide accurate information.", @@ -609,7 +609,7 @@ def _test_agent_with_structured_output_with_instructions_impl( operation_group = "tracing" if content_recording_enabled else "agents" with self.create_client(operation_group=operation_group, **kwargs) as project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") test_schema = { "type": "object", @@ -797,7 +797,7 @@ def _test_agent_with_structured_output_without_instructions_impl( operation_group = "tracing" if content_recording_enabled else "agents" with self.create_client(operation_group=operation_group, **kwargs) as project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") test_schema = { "type": "object", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py index 96184005a350..80f20632fd98 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py @@ -75,7 +75,7 @@ async def _test_create_agent_with_tracing_content_recording_enabled_impl(self, u assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") async with project_client: agent_definition = PromptAgentDefinition( @@ -186,7 +186,7 @@ async def _test_agent_creation_with_tracing_content_recording_disabled_impl(self assert True == AIProjectInstrumentor().is_instrumented() 
project_client = self.create_async_client(operation_group="agents", **kwargs) - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") async with project_client: agent_definition = PromptAgentDefinition( @@ -406,7 +406,7 @@ async def _test_agent_with_structured_output_with_instructions_impl( project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") test_schema = { "type": "object", @@ -591,7 +591,7 @@ async def _test_agent_with_structured_output_without_instructions_impl( project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") test_schema = { "type": "object", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py index 7b8471717f2b..23a9116edd60 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py @@ -78,7 +78,7 @@ def _get_openai_client_and_deployment(self, **kwargs) -> Tuple[OpenAI, str]: openai_client = project_client.get_openai_client() # Get the model deployment name from test parameters - model_deployment_name = kwargs.get("azure_ai_model_deployment_name") + model_deployment_name = kwargs.get("model_deployment_name") return openai_client, model_deployment_name @@ -234,7 +234,7 @@ def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwa with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = 
kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -356,7 +356,7 @@ def _test_sync_non_streaming_without_content_recording_impl(self, use_events, ** with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -479,7 +479,7 @@ def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -656,7 +656,7 @@ def test_sync_conversations_create(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -701,7 +701,7 @@ def test_sync_list_conversation_items_with_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -778,7 
+778,7 @@ def test_sync_list_conversation_items_without_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -887,7 +887,7 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") with project_client: # Get the OpenAI client from the project client @@ -994,7 +994,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -1253,7 +1253,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -1577,7 +1577,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = 
project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -1813,7 +1813,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -2131,7 +2131,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -2281,7 +2281,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -2429,7 +2429,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = 
client.conversations.create() @@ -2526,7 +2526,7 @@ def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -2631,7 +2631,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -2723,7 +2723,7 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -2824,7 +2824,7 @@ def _test_image_only_content_off_binary_off_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -2929,7 +2929,7 @@ def _test_image_only_content_off_binary_on_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: 
client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3033,7 +3033,7 @@ def _test_image_only_content_on_binary_off_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3137,7 +3137,7 @@ def _test_image_only_content_on_binary_on_non_streaming_impl(self, use_events, * with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3245,7 +3245,7 @@ def _test_text_and_image_content_off_binary_off_non_streaming_impl(self, use_eve with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3354,7 +3354,7 @@ def _test_text_and_image_content_off_binary_on_non_streaming_impl(self, use_even with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3463,7 +3463,7 @@ def _test_text_and_image_content_on_binary_off_non_streaming_impl(self, use_even with self.create_client(operation_group="tracing", **kwargs) as project_client: client = 
project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3571,7 +3571,7 @@ def _test_text_and_image_content_on_binary_on_non_streaming_impl(self, use_event with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3683,7 +3683,7 @@ def _test_image_only_content_off_binary_off_streaming_impl(self, use_events, **k with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3796,7 +3796,7 @@ def _test_image_only_content_off_binary_on_streaming_impl(self, use_events, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -3908,7 +3908,7 @@ def _test_image_only_content_on_binary_off_streaming_impl(self, use_events, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -4020,7 +4020,7 @@ def _test_image_only_content_on_binary_on_streaming_impl(self, use_events, **kwa with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() 
- deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -4136,7 +4136,7 @@ def _test_text_and_image_content_off_binary_off_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -4253,7 +4253,7 @@ def _test_text_and_image_content_off_binary_on_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -4370,7 +4370,7 @@ def _test_text_and_image_content_on_binary_off_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -4486,7 +4486,7 @@ def _test_text_and_image_content_on_binary_on_streaming_impl(self, use_events, * with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -4601,7 +4601,7 @@ def test_responses_stream_method_with_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = 
kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -4654,7 +4654,7 @@ def test_responses_stream_method_without_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = client.conversations.create() @@ -4709,7 +4709,7 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool function_tool = FunctionTool( @@ -4823,7 +4823,7 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool function_tool = FunctionTool( @@ -4960,7 +4960,7 @@ def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): assert True == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") openai_client = project_client.get_openai_client() # Create Teacher Agent @@ -5159,7 +5159,7 @@ def test_workflow_agent_non_streaming_without_content_recording(self, **kwargs): assert False == AIProjectInstrumentor().is_content_recording_enabled() with 
self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") openai_client = project_client.get_openai_client() workflow_yaml = """ @@ -5276,7 +5276,7 @@ def test_workflow_agent_streaming_with_content_recording(self, **kwargs): assert True == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") openai_client = project_client.get_openai_client() # Create Teacher Agent @@ -5478,7 +5478,7 @@ def test_workflow_agent_streaming_without_content_recording(self, **kwargs): assert False == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") openai_client = project_client.get_openai_client() workflow_yaml = """ diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py index 8f2b18126533..490715fa0333 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py @@ -59,7 +59,7 @@ async def _test_async_non_streaming_with_content_recording_impl(self, use_events assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: # Get the OpenAI client from the project client @@ -165,7 
+165,7 @@ async def _test_async_streaming_with_content_recording_impl(self, use_events, ** assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: # Get the OpenAI client from the project client @@ -277,7 +277,7 @@ async def test_async_conversations_create(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: # Get the OpenAI client from the project client @@ -325,7 +325,7 @@ async def test_async_list_conversation_items_with_content_recording(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: # Get the OpenAI client from the project client @@ -417,7 +417,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl( async with project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -678,7 +678,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl( async with project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define 
a function tool func_tool = FunctionTool( @@ -924,7 +924,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = await client.conversations.create() @@ -1021,7 +1021,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = await client.conversations.create() @@ -1126,7 +1126,7 @@ async def test_async_multiple_text_inputs_without_content_recording_non_streamin async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = await client.conversations.create() @@ -1225,7 +1225,7 @@ async def test_async_image_only_content_off_binary_off_non_streaming(self, **kwa assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1297,7 +1297,7 @@ async def 
test_async_image_only_content_off_binary_on_non_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1368,7 +1368,7 @@ async def test_async_image_only_content_on_binary_off_non_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1439,7 +1439,7 @@ async def test_async_image_only_content_on_binary_on_non_streaming(self, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1514,7 +1514,7 @@ async def test_async_text_and_image_content_off_binary_off_non_streaming(self, * assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1590,7 +1590,7 @@ async def test_async_text_and_image_content_off_binary_on_non_streaming(self, ** assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + 
deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1665,7 +1665,7 @@ async def test_async_text_and_image_content_on_binary_off_non_streaming(self, ** assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1740,7 +1740,7 @@ async def test_async_text_and_image_content_on_binary_on_non_streaming(self, **k assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1819,7 +1819,7 @@ async def test_async_image_only_content_off_binary_off_streaming(self, **kwargs) assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1899,7 +1899,7 @@ async def test_async_image_only_content_off_binary_on_streaming(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1978,7 +1978,7 @@ async def test_async_image_only_content_on_binary_off_streaming(self, **kwargs): assert True == 
AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2057,7 +2057,7 @@ async def test_async_image_only_content_on_binary_on_streaming(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2140,7 +2140,7 @@ async def test_async_text_and_image_content_off_binary_off_streaming(self, **kwa assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2223,7 +2223,7 @@ async def test_async_text_and_image_content_off_binary_on_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2306,7 +2306,7 @@ async def test_async_text_and_image_content_on_binary_off_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client 
= project_client.get_openai_client() @@ -2389,7 +2389,7 @@ async def test_async_text_and_image_content_on_binary_on_streaming(self, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2474,7 +2474,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Create a conversation conversation = await client.conversations.create() @@ -2582,7 +2582,7 @@ async def test_async_responses_stream_method_with_content_recording(self, **kwar async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = await client.conversations.create() @@ -2662,7 +2662,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") conversation = await client.conversations.create() @@ -2750,7 +2750,7 @@ async def _test_async_responses_stream_method_with_tools_with_content_recording_ async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = 
project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool function_tool = FunctionTool( @@ -2983,7 +2983,7 @@ async def _test_async_responses_stream_method_with_tools_without_content_recordi async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") # Define a function tool function_tool = FunctionTool( @@ -3214,7 +3214,7 @@ async def test_async_workflow_agent_non_streaming_with_content_recording(self, * assert True == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: # Create a simple workflow agent @@ -3331,7 +3331,7 @@ async def test_async_workflow_agent_non_streaming_without_content_recording(self assert False == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: workflow_yaml = """ @@ -3454,7 +3454,7 @@ async def test_async_workflow_agent_streaming_with_content_recording(self, **kwa assert True == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: workflow_yaml = """ @@ -3575,7 +3575,7 @@ async def test_async_workflow_agent_streaming_without_content_recording(self, 
** assert False == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: workflow_yaml = """ @@ -3708,7 +3708,7 @@ async def _test_async_prompt_agent_with_responses_non_streaming_impl( assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -3846,7 +3846,7 @@ async def _test_async_prompt_agent_with_responses_streaming_impl( assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") async with project_client: client = project_client.get_openai_client() diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py index f68d4fdae952..7cb41a48cd3f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py @@ -59,7 +59,7 @@ def test_sync_browser_automation_non_streaming_with_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") browser_automation_connection_id = 
kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -194,7 +194,7 @@ def test_sync_browser_automation_non_streaming_without_content_recording(self, * assert not AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -320,7 +320,7 @@ def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs self.setup_telemetry() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -448,7 +448,7 @@ def test_sync_browser_automation_streaming_without_content_recording(self, **kwa self.setup_telemetry() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py index 46918894c2fb..4b2454c510cb 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py @@ -62,7 +62,7 @@ async def test_async_browser_automation_non_streaming_with_content_recording(sel assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -193,7 +193,7 @@ async def test_async_browser_automation_non_streaming_without_content_recording( assert not AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -315,7 +315,7 @@ async def test_async_browser_automation_streaming_with_content_recording(self, * self.setup_telemetry() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -440,7 +440,7 @@ async def test_async_browser_automation_streaming_without_content_recording(self self.setup_telemetry() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = 
kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py index 331d64b9aaa8..b23ea2b0e5e6 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py @@ -66,7 +66,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -254,7 +254,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -445,7 +445,7 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -636,7 +636,7 @@ def 
test_sync_code_interpreter_streaming_without_content_recording(self, **kwarg assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py index 7e5512b6fbbe..007b78ce5bd1 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py @@ -67,7 +67,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -254,7 +254,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -445,7 +445,7 @@ async def test_async_code_interpreter_streaming_with_content_recording(self, **k assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = 
kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -636,7 +636,7 @@ async def test_async_code_interpreter_streaming_without_content_recording(self, assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py index ca4f301212f3..76faa8c49ff4 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py @@ -53,7 +53,7 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -262,7 +262,7 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -469,7 +469,7 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = 
kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -674,7 +674,7 @@ def test_sync_file_search_streaming_without_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py index fd6c36261449..647d24530f54 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py @@ -54,7 +54,7 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -263,7 +263,7 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -470,7 +470,7 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = 
self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -675,7 +675,7 @@ async def test_async_file_search_streaming_without_content_recording(self, **kwa assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index 4c5c453a3e23..3ef49deac892 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -59,7 +59,7 @@ def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, * assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -389,7 +389,7 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -706,7 +706,7 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa assert 
AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -982,7 +982,7 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py index d9e82e2951e8..c218a653295c 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -60,7 +60,7 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -389,7 +389,7 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -708,7 +708,7 @@ async def 
_test_async_mcp_streaming_with_content_recording_impl(self, use_events assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -987,7 +987,7 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py index a198327679c3..f75e8d9694e7 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py @@ -41,7 +41,7 @@ def _get_openai_client_and_deployment(self, **kwargs) -> Tuple[OpenAI, str]: openai_client = project_client.get_openai_client() # Get the model deployment name from test parameters - model_deployment_name = kwargs.get("azure_ai_model_deployment_name") + model_deployment_name = kwargs.get("model_deployment_name") return openai_client, model_deployment_name diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py index bec6cfa9f2be..492c49c9d7af 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py +++ 
b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py @@ -206,7 +206,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -371,7 +371,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -538,7 +538,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: @@ -706,7 +706,7 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index e366e1ec3ef5..1ce2aa436e88 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -205,7 +205,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -366,7 +366,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -531,7 +531,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: @@ -697,7 +697,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("model_deployment_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py index 
1cc36a6b0455..92fca677d439 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py @@ -48,7 +48,7 @@ def test_agent_responses_crud(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -158,7 +158,7 @@ def test_agent_responses_crud(self, **kwargs): @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) def test_agent_responses_with_structured_output(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py index b710851c366f..449dead94abc 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py @@ -23,7 +23,7 @@ class TestAgentResponsesCrudAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_responses_crud_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) @@ -129,7 +129,7 @@ async def test_agent_responses_crud_async(self, **kwargs): @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_responses_with_structured_output_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = 
kwargs.get("model_deployment_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py index 10414b7a59d1..724baf8c022b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py @@ -39,7 +39,7 @@ def test_agents_crud(self, **kwargs): GET /agents/{agent_name}/versions/{agent_version} project_client.agents.get_version() """ print("\n") - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") project_client = self.create_client(operation_group="agents", **kwargs) first_agent_name = "MyAgent1" second_agent_name = "MyAgent2" diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py index e9776b7e6257..9853f973e098 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py @@ -24,7 +24,7 @@ async def test_agents_crud_async(self, **kwargs): It then gets, lists, and deletes them, validating at each step. It uses different ways of creating agents: strongly typed, dictionary, and IO[bytes]. 
""" - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") project_client = self.create_async_client(operation_group="agents", **kwargs) first_agent_name = "MyAgent1" second_agent_name = "MyAgent2" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py index 3953bf1c76d2..e835e18936b3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py @@ -40,7 +40,7 @@ def test_calculate_and_save(self, **kwargs): 2. Function Tool: Saves the computed result """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -100,7 +100,7 @@ def test_generate_data_and_report(self, **kwargs): 2. Function Tool: Creates a report with the computed statistics """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py index 16f2c2c1ba41..a6da388ec0b9 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py @@ -39,7 +39,7 @@ def test_find_and_analyze_data(self, **kwargs): 2. 
Code Interpreter: Agent calculates the average of those numbers """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -121,7 +121,7 @@ def test_analyze_code_file(self, **kwargs): 2. Code Interpreter: Agent executes the code and returns the computed result """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py index f67e95c020a8..b127a14bc89d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py @@ -32,7 +32,7 @@ def test_data_analysis_workflow(self, **kwargs): Test data analysis workflow: upload data, search, save results. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -163,7 +163,7 @@ def test_empty_vector_store_handling(self, **kwargs): Test how agent handles empty vector store (no files uploaded). """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -242,7 +242,7 @@ def test_python_code_file_search(self, **kwargs): 2. 
Function Tool: Agent saves the code review findings """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -372,7 +372,7 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs): - Context retention across searches and function calls """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py index 61d572fa0a37..3d2d12353dfb 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py @@ -43,7 +43,7 @@ def test_complete_analysis_workflow(self, **kwargs): 3. 
Function Tool: Agent saves the computed results """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py index 1ae26a32a1a4..1bc2d553caa2 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py @@ -39,7 +39,7 @@ def test_file_search_and_function_with_conversation(self, **kwargs): - Verifying conversation state preserves all tool interactions """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py index 02d341051ec8..d2a9add36b0a 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py @@ -81,7 +81,7 @@ def test_agent_ai_search_question_answering(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Get AI Search connection and index from environment ai_search_connection_id = kwargs.get("ai_search_project_connection_id") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py index 5bc67d9a2833..f7b55542a649 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py @@ -148,7 +148,7 @@ async def test_agent_ai_search_question_answering_async_parallel(self, **kwargs) DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py index 78ec18081aa0..300914195e05 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py @@ -45,7 +45,7 @@ def test_agent_bing_grounding(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Note: This test requires bing_project_connection_id environment variable # to be set with a valid Bing connection ID from the project @@ -145,7 +145,7 @@ def test_agent_bing_grounding_multiple_queries(self, **kwargs): Bing grounding and provide accurate responses with citations. 
""" - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") bing_connection_id = kwargs.get("bing_project_connection_id") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py index 7b5d1ea27680..f6531d9a1c6b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py @@ -41,7 +41,7 @@ def test_agent_code_interpreter_simple_math(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") agent_name = "code-interpreter-simple-agent" with ( @@ -125,7 +125,7 @@ def test_agent_code_interpreter_file_generation(self, **kwargs): DELETE /files/{file_id} openai_client.files.delete() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py index d38d15b2bd0e..84f722173fa7 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py @@ -28,7 +28,7 @@ async def test_agent_code_interpreter_simple_math_async(self, **kwargs): without any file uploads or downloads - just pure code execution. 
""" - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") agent_name = "code-interpreter-simple-agent-async" async with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py index e7408afe97fa..c398c3d6a1c4 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py @@ -45,7 +45,7 @@ def test_agent_file_search(self, **kwargs): DELETE /vector_stores/{id} openai_client.vector_stores.delete() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -203,7 +203,7 @@ def test_agent_file_search_multi_turn_conversation(self, **kwargs): while using File Search to answer follow-up questions. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py index e3d96f5a4733..420b43ffca2b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py @@ -20,7 +20,7 @@ class TestAgentFileSearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_file_search_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, @@ -106,7 +106,7 @@ async def 
test_agent_file_search_multi_turn_conversation_async(self, **kwargs): while using File Search to answer follow-up questions. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py index e97814456771..861f3602d450 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py @@ -42,7 +42,7 @@ def test_agent_file_search_stream(self, **kwargs): DELETE /vector_stores/{id} openai_client.vector_stores.delete() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py index fb4e627df2de..9bcad6d77fc2 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py @@ -19,7 +19,7 @@ class TestAgentFileSearchStreamAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_file_search_stream_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py index 
264bf97ebf73..51216e5c6300 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py @@ -41,7 +41,7 @@ def test_agent_function_tool(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") agent_name = "function-tool-agent" with ( @@ -172,7 +172,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs): - Ability to use previous function results in subsequent queries """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -381,7 +381,7 @@ def test_agent_function_tool_context_dependent_followup(self, **kwargs): remembering parameters from the first query. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py index f4388b1ccfe9..92b806f0935c 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py @@ -28,7 +28,7 @@ async def test_agent_function_tool_async(self, **kwargs): 3. 
Receive function results and incorporate them into responses """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") agent_name = "function-tool-agent-async" # Setup @@ -160,7 +160,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, ** - Ability to use previous function results in subsequent queries """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) @@ -370,7 +370,7 @@ async def test_agent_function_tool_context_dependent_followup_async(self, **kwar remembering parameters from the first query. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") # Setup async with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py index c0c515839aaf..be27c5685b53 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py @@ -41,7 +41,7 @@ def test_agent_image_generation(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") image_model = kwargs.get("image_generation_model_deployment_name") agent_name = "image-gen-agent" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py index a4775afb16b9..4450f75fd103 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py @@ -21,7 +21,7 @@ class 
TestAgentImageGenerationAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_image_generation_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") image_model = kwargs.get("image_generation_model_deployment_name") agent_name = "image-gen-agent" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py index 5723478f7569..243b8a62c71e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py @@ -48,7 +48,7 @@ def test_agent_mcp_basic(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -179,7 +179,7 @@ def test_agent_mcp_with_project_connection(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py index 36a951e79183..dfc0df69188b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py @@ -21,7 +21,7 @@ class TestAgentMCPAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_mcp_basic_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = 
kwargs.get("model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py index 3a1bc4e44d0d..2cc6b1023295 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py @@ -54,7 +54,7 @@ def test_agent_memory_search(self, **kwargs): DELETE /memory_stores/{memory_store_name} project_client.beta.memory_stores.delete() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") chat_model = kwargs.get("memory_store_chat_model_deployment_name") embedding_model = kwargs.get("memory_store_embedding_model_deployment_name") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py index dc6b69d22354..e1a888787053 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py @@ -29,7 +29,7 @@ class TestAgentMemorySearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_memory_search_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") chat_model = kwargs.get("memory_store_chat_model_deployment_name") embedding_model = kwargs.get("memory_store_embedding_model_deployment_name") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py index de8b85f19723..6905e4f27b6d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py +++ 
b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py @@ -51,7 +51,7 @@ def test_agent_openapi(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py index 1b3e87ef063a..a2da6989196e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py @@ -31,7 +31,7 @@ class TestAgentOpenApiAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_openapi_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py index 00fdf016e79d..110241a76862 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py @@ -40,7 +40,7 @@ def test_function_tool_with_conversation(self, **kwargs): - Using conversation_id parameter """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -201,7 +201,7 @@ def test_file_search_with_conversation(self, **kwargs): - Conversation context retention """ - model = 
kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -318,7 +318,7 @@ def test_code_interpreter_with_conversation(self, **kwargs): - Variables/state persistence across turns """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -403,7 +403,7 @@ def test_code_interpreter_with_file_in_conversation(self, **kwargs): - Server-side code execution with file access and chart generation """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") import os with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py index 9a8f616e9d7f..d242c7bb6c17 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py @@ -38,7 +38,7 @@ def test_agent_web_search(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py index e11732ca4cac..2a9bc17b5ee9 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py @@ -18,7 +18,7 @@ class TestAgentWebSearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_web_search_async(self, 
**kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py index 9e790b1f37c7..c29bbf42edcc 100644 --- a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py +++ b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py @@ -137,7 +137,7 @@ def test_datasets_upload_file(self, **kwargs): @recorded_by_proxy def test_datasets_upload_folder(self, **kwargs): - endpoint = kwargs.pop("azure_ai_project_endpoint") + endpoint = kwargs.pop("project_endpoint") print("\n=====> Endpoint:", endpoint) dataset_name = self.test_datasets_params["dataset_name_2"] diff --git a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py index 724b6318b938..678bd1bedae2 100644 --- a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py +++ b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py @@ -138,7 +138,7 @@ async def test_datasets_upload_file(self, **kwargs): @recorded_by_proxy_async async def test_datasets_upload_folder_async(self, **kwargs): - endpoint = kwargs.pop("azure_ai_project_endpoint") + endpoint = kwargs.pop("project_endpoint") print("\n=====> Endpoint:", endpoint) dataset_name = self.test_datasets_params["dataset_name_4"] diff --git a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py index 53132a89a396..a4dc057d9765 100644 --- a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py +++ b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py @@ -18,8 +18,8 @@ class TestDeployments(TestBase): def test_deployments(self, **kwargs): model_publisher = "OpenAI" - model_name = kwargs.get("azure_ai_model_deployment_name") - 
model_deployment_name = kwargs.get("azure_ai_model_deployment_name") + model_name = kwargs.get("model_deployment_name") + model_deployment_name = kwargs.get("model_deployment_name") with self.create_client(**kwargs) as project_client: diff --git a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py index 06f229c1e15b..dba5495b0038 100644 --- a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py +++ b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py @@ -18,8 +18,8 @@ class TestDeploymentsAsync(TestBase): async def test_deployments_async(self, **kwargs): model_publisher = "OpenAI" - model_name = kwargs.get("azure_ai_model_deployment_name") - model_deployment_name = kwargs.get("azure_ai_model_deployment_name") + model_name = kwargs.get("model_deployment_name") + model_deployment_name = kwargs.get("model_deployment_name") async with self.create_async_client(**kwargs) as project_client: diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py index d8e28452557f..51ed46e70849 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py @@ -334,11 +334,11 @@ def _test_deploy_and_infer_helper( subscription_id = kwargs.get("azure_subscription_id") resource_group = kwargs.get("azure_resource_group") - project_endpoint = kwargs.get("azure_ai_project_endpoint") + project_endpoint = kwargs.get("project_endpoint") if not all([subscription_id, resource_group, project_endpoint]): pytest.skip( - f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, azure_ai_project_endpoint) - skipping {test_prefix} deploy and infer test" + f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, project_endpoint) - skipping 
{test_prefix} deploy and infer test" ) account_name = self._extract_account_name_from_endpoint(project_endpoint, test_prefix) diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py index be0cc2de95dc..117c06684914 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py @@ -345,11 +345,11 @@ async def _test_deploy_and_infer_helper_async( subscription_id = kwargs.get("azure_subscription_id") resource_group = kwargs.get("azure_resource_group") - project_endpoint = kwargs.get("azure_ai_project_endpoint") + project_endpoint = kwargs.get("project_endpoint") if not all([subscription_id, resource_group, project_endpoint]): pytest.skip( - f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, azure_ai_project_endpoint) - skipping {test_prefix} deploy and infer test" + f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, project_endpoint) - skipping {test_prefix} deploy and infer test" ) account_name = self._extract_account_name_from_endpoint(project_endpoint, test_prefix) diff --git a/sdk/ai/azure-ai-projects/tests/responses/test_responses.py b/sdk/ai/azure-ai-projects/tests/responses/test_responses.py index 5165f37ddb03..77f354335d4a 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_responses.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_responses.py @@ -57,7 +57,7 @@ def test_responses(self, **kwargs): ------+---------------------------------------------+----------------------------------- POST /openai/responses client.responses.create() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") client = self.create_client(operation_group="agents", **kwargs).get_openai_client() diff --git 
a/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py b/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py index bf7252962dad..187bbde4a259 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py @@ -45,7 +45,7 @@ class TestResponsesAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.HTTPX) async def test_responses_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("model_deployment_name") client = self.create_async_client(operation_group="agents", **kwargs).get_openai_client() diff --git a/sdk/ai/azure-ai-projects/tests/samples/README.md b/sdk/ai/azure-ai-projects/tests/samples/README.md index 3296cb7c58ea..4f2547678cbe 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/README.md +++ b/sdk/ai/azure-ai-projects/tests/samples/README.md @@ -67,7 +67,7 @@ class TestSamples(AzureRecordedTestCase): executor.execute() executor.validate_print_calls_by_llm( instructions=agent_tools_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], + project_endpoint=kwargs["project_endpoint"], ) ``` @@ -106,7 +106,7 @@ class TestSamplesAsync(AzureRecordedTestCase): await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agent_tools_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], + project_endpoint=kwargs["project_endpoint"], ) ``` @@ -122,8 +122,8 @@ from devtools_testutils import EnvironmentVariableLoader servicePreparer = functools.partial( EnvironmentVariableLoader, "", - azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - azure_ai_model_deployment_name="gpt-4o", + project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + model_deployment_name="gpt-4o", # add other sanitized vars here ) ``` @@ -154,8 +154,8 @@ If 
you need to remap the values provided by your fixtures to the environment-var ```python env_vars = { - "AZURE_AI_PROJECT_ENDPOINT": kwargs["TEST_AZURE_AI_PROJECT_ENDPOINT"], - "AZURE_AI_MODEL_DEPLOYMENT_NAME": kwargs["TEST_AZURE_AI_MODEL_DEPLOYMENT_NAME"], + "PROJECT_ENDPOINT": kwargs["TEST_AZURE_AI_PROJECT_ENDPOINT"], + "MODEL_DEPLOYMENT_NAME": kwargs["TEST_AZURE_AI_MODEL_DEPLOYMENT_NAME"], } executor = SyncSampleExecutor(self, sample_path, env_vars=env_vars, **kwargs) ``` diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py index 2bd4594c843d..0dac34211592 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py @@ -62,8 +62,8 @@ def test_agent_tools_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -86,8 +86,8 @@ def test_memory_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=memories_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -106,8 +106,8 @@ def test_agents_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=agents_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -128,8 +128,8 
@@ def test_connections_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -148,8 +148,8 @@ def test_files_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -168,8 +168,8 @@ def test_deployments_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -188,8 +188,8 @@ def test_datasets_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -211,6 +211,6 @@ def test_finetuning_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=fine_tuning_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + 
model=kwargs["model_deployment_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py index 8eccef50195f..fc6fecc417d6 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py @@ -50,8 +50,8 @@ async def test_agent_tools_samples_async(self, sample_path: str, **kwargs) -> No await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agent_tools_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -75,8 +75,8 @@ async def test_memory_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=memories_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -95,8 +95,8 @@ async def test_agents_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agents_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -117,8 +117,8 @@ async def test_connections_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + 
project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -139,8 +139,8 @@ async def test_files_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -159,8 +159,8 @@ async def test_deployments_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) @pytest.mark.parametrize( @@ -184,6 +184,6 @@ async def test_datasets_samples(self, sample_path: str, **kwargs) -> None: # Proxy server probably not able to parse the captured print content await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py index bcea91df0eb5..9390fdf2d895 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py @@ -19,8 +19,8 @@ evaluationsPreparer = functools.partial( EnvironmentVariableLoader, "", - 
azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - azure_ai_model_deployment_name="sanitized-model-deployment-name", + project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + model_deployment_name="sanitized-model-deployment-name", azure_ai_agent_name="sanitized-agent-name", ) @@ -184,8 +184,8 @@ def test_evaluation_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) # To run this test with a specific sample, use: @@ -216,8 +216,8 @@ def test_agentic_evaluator_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) # To run this test, use: @@ -247,6 +247,6 @@ def test_generic_agentic_evaluator_sample(self, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["project_endpoint"], + model=kwargs["model_deployment_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/test_base.py b/sdk/ai/azure-ai-projects/tests/test_base.py index a0deb29a2cfd..afcbe80e2317 100644 --- a/sdk/ai/azure-ai-projects/tests/test_base.py +++ b/sdk/ai/azure-ai-projects/tests/test_base.py @@ -41,8 +41,8 @@ servicePreparer = functools.partial( EnvironmentVariableLoader, "", - 
azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - azure_ai_model_deployment_name="sanitized-model-deployment-name", + project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + model_deployment_name="sanitized-model-deployment-name", image_generation_model_deployment_name="sanitized-gpt-image", container_app_resource_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.App/containerApps/00000", container_ingress_subdomain_suffix="00000", @@ -77,8 +77,8 @@ fineTuningServicePreparer = functools.partial( EnvironmentVariableLoader, "", - azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - azure_ai_model_deployment_name="sanitized-model-deployment-name", + project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + model_deployment_name="sanitized-model-deployment-name", azure_ai_projects_azure_subscription_id="00000000-0000-0000-0000-000000000000", azure_ai_projects_azure_resource_group="sanitized-resource-group", azure_ai_projects_azure_aoai_account="sanitized-aoai-account", @@ -299,7 +299,7 @@ def open_with_lf( # helper function: create projects client using environment variables def create_client(self, *, operation_group: Optional[str] = None, **kwargs) -> AIProjectClient: # fetch environment variables - endpoint = kwargs.pop("azure_ai_project_endpoint") + endpoint = kwargs.pop("project_endpoint") credential = self.get_credential(AIProjectClient, is_async=False) allow_preview = kwargs.pop("allow_preview", operation_group in {"agents", "tracing"}) @@ -317,7 +317,7 @@ def create_client(self, *, operation_group: Optional[str] = None, **kwargs) -> A # helper function: create async projects client using environment variables def create_async_client(self, *, operation_group: Optional[str] = 
None, **kwargs) -> AsyncAIProjectClient: # fetch environment variables - endpoint = kwargs.pop("azure_ai_project_endpoint") + endpoint = kwargs.pop("project_endpoint") credential = self.get_credential(AsyncAIProjectClient, is_async=True) allow_preview = kwargs.pop("allow_preview", operation_group in {"agents", "tracing"}) From 67a8d7949111c946492f4b543b545ede95c0f202 Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Mon, 9 Mar 2026 20:28:30 -0700 Subject: [PATCH 07/16] Revert "Rename env varrs name" This reverts commit 86d53c53e8ffa1fac49c9f7388ee1b3f9e9f321f. --- sdk/ai/azure-ai-projects/.env.template | 4 +- sdk/ai/azure-ai-projects/CHANGELOG.md | 4 +- sdk/ai/azure-ai-projects/README.md | 18 ++-- .../samples/agents/sample_agent_basic.py | 8 +- .../agents/sample_agent_basic_async.py | 8 +- .../agents/sample_agent_retrieve_basic.py | 8 +- .../sample_agent_retrieve_basic_async.py | 8 +- .../agents/sample_agent_stream_events.py | 8 +- .../agents/sample_agent_structured_output.py | 8 +- .../sample_agent_structured_output_async.py | 8 +- .../agents/sample_workflow_multi_agent.py | 10 +-- .../sample_workflow_multi_agent_async.py | 10 +-- ..._agent_basic_with_azure_monitor_tracing.py | 8 +- ...sample_agent_basic_with_console_tracing.py | 8 +- ..._with_console_tracing_custom_attributes.py | 8 +- .../agents/tools/sample_agent_ai_search.py | 8 +- .../tools/sample_agent_azure_function.py | 8 +- .../tools/sample_agent_bing_custom_search.py | 8 +- .../tools/sample_agent_bing_grounding.py | 8 +- .../tools/sample_agent_browser_automation.py | 8 +- .../tools/sample_agent_code_interpreter.py | 8 +- .../sample_agent_code_interpreter_async.py | 8 +- ...ample_agent_code_interpreter_with_files.py | 8 +- ...agent_code_interpreter_with_files_async.py | 8 +- .../agents/tools/sample_agent_computer_use.py | 4 +- .../tools/sample_agent_computer_use_async.py | 4 +- .../agents/tools/sample_agent_fabric.py | 8 +- .../agents/tools/sample_agent_file_search.py | 8 +- 
.../sample_agent_file_search_in_stream.py | 8 +- ...ample_agent_file_search_in_stream_async.py | 8 +- .../tools/sample_agent_function_tool.py | 8 +- .../tools/sample_agent_function_tool_async.py | 8 +- .../tools/sample_agent_image_generation.py | 8 +- .../sample_agent_image_generation_async.py | 8 +- .../samples/agents/tools/sample_agent_mcp.py | 8 +- .../agents/tools/sample_agent_mcp_async.py | 8 +- ...ample_agent_mcp_with_project_connection.py | 8 +- ...agent_mcp_with_project_connection_async.py | 8 +- .../tools/sample_agent_memory_search.py | 8 +- .../tools/sample_agent_memory_search_async.py | 8 +- .../agents/tools/sample_agent_openapi.py | 8 +- ...e_agent_openapi_with_project_connection.py | 8 +- .../agents/tools/sample_agent_sharepoint.py | 8 +- .../agents/tools/sample_agent_to_agent.py | 8 +- .../agents/tools/sample_agent_web_search.py | 8 +- .../tools/sample_agent_web_search_preview.py | 8 +- ...ple_agent_web_search_with_custom_search.py | 8 +- .../samples/connections/sample_connections.py | 4 +- .../connections/sample_connections_async.py | 4 +- .../samples/datasets/sample_datasets.py | 4 +- .../samples/datasets/sample_datasets_async.py | 4 +- .../datasets/sample_datasets_download.py | 4 +- .../samples/deployments/sample_deployments.py | 8 +- .../deployments/sample_deployments_async.py | 8 +- .../samples/evaluations/README.md | 8 +- .../agentic_evaluators/sample_coherence.py | 8 +- .../agentic_evaluators/sample_fluency.py | 8 +- .../agent_utils.py | 2 +- .../sample_generic_agentic_evaluator.py | 6 +- .../agentic_evaluators/sample_groundedness.py | 8 +- .../sample_intent_resolution.py | 8 +- .../agentic_evaluators/sample_relevance.py | 8 +- .../sample_response_completeness.py | 8 +- .../sample_task_adherence.py | 8 +- .../sample_task_completion.py | 8 +- .../sample_task_navigation_efficiency.py | 4 +- .../sample_tool_call_accuracy.py | 8 +- .../sample_tool_call_success.py | 8 +- .../sample_tool_input_accuracy.py | 8 +- .../sample_tool_output_utilization.py 
| 8 +- .../sample_tool_selection.py | 8 +- .../evaluations/sample_agent_evaluation.py | 8 +- .../sample_agent_response_evaluation.py | 8 +- ..._response_evaluation_with_function_tool.py | 8 +- .../sample_continuous_evaluation_rule.py | 8 +- .../evaluations/sample_eval_catalog.py | 4 +- ...mple_eval_catalog_code_based_evaluators.py | 8 +- ...le_eval_catalog_prompt_based_evaluators.py | 8 +- .../sample_evaluation_cluster_insight.py | 10 +-- .../sample_evaluation_compare_insight.py | 8 +- .../sample_evaluations_ai_assisted.py | 8 +- ...ple_evaluations_builtin_with_dataset_id.py | 8 +- ...le_evaluations_builtin_with_inline_data.py | 8 +- ...valuations_builtin_with_inline_data_oai.py | 8 +- .../sample_evaluations_builtin_with_traces.py | 8 +- .../evaluations/sample_evaluations_graders.py | 8 +- ...aluations_score_model_grader_with_image.py | 8 +- .../evaluations/sample_model_evaluation.py | 8 +- .../evaluations/sample_redteam_evaluations.py | 6 +- .../sample_scheduled_evaluations.py | 12 +-- .../samples/files/sample_files.py | 4 +- .../samples/files/sample_files_async.py | 4 +- .../finetuning/sample_finetuning_dpo_job.py | 4 +- .../sample_finetuning_dpo_job_async.py | 4 +- ...le_finetuning_oss_models_supervised_job.py | 4 +- ...etuning_oss_models_supervised_job_async.py | 4 +- .../sample_finetuning_reinforcement_job.py | 4 +- ...mple_finetuning_reinforcement_job_async.py | 4 +- .../sample_finetuning_supervised_job.py | 4 +- .../sample_finetuning_supervised_job_async.py | 4 +- .../samples/indexes/sample_indexes.py | 4 +- .../samples/indexes/sample_indexes_async.py | 4 +- .../mcp_client/sample_mcp_tool_async.py | 6 +- .../memories/sample_memory_advanced.py | 4 +- .../memories/sample_memory_advanced_async.py | 4 +- .../samples/memories/sample_memory_basic.py | 4 +- .../memories/sample_memory_basic_async.py | 4 +- .../samples/memories/sample_memory_crud.py | 4 +- .../memories/sample_memory_crud_async.py | 4 +- .../samples/red_team/sample_red_team.py | 8 +- 
.../samples/red_team/sample_red_team_async.py | 8 +- .../responses/sample_responses_basic.py | 10 +-- .../responses/sample_responses_basic_async.py | 10 +-- ...responses_basic_without_aiprojectclient.py | 8 +- ...ses_basic_without_aiprojectclient_async.py | 8 +- .../responses/sample_responses_image_input.py | 8 +- .../sample_responses_stream_events.py | 8 +- .../sample_responses_stream_manager.py | 8 +- .../sample_responses_structured_output.py | 8 +- .../samples/telemetry/sample_telemetry.py | 4 +- .../telemetry/sample_telemetry_async.py | 4 +- .../telemetry/test_ai_agents_instrumentor.py | 8 +- .../test_ai_agents_instrumentor_async.py | 8 +- .../telemetry/test_responses_instrumentor.py | 84 +++++++++---------- .../test_responses_instrumentor_async.py | 72 ++++++++-------- ...sponses_instrumentor_browser_automation.py | 8 +- ...s_instrumentor_browser_automation_async.py | 8 +- ...responses_instrumentor_code_interpreter.py | 8 +- ...ses_instrumentor_code_interpreter_async.py | 8 +- ...test_responses_instrumentor_file_search.py | 8 +- ...esponses_instrumentor_file_search_async.py | 8 +- .../test_responses_instrumentor_mcp.py | 8 +- .../test_responses_instrumentor_mcp_async.py | 8 +- .../test_responses_instrumentor_metrics.py | 2 +- .../test_responses_instrumentor_workflow.py | 8 +- ...t_responses_instrumentor_workflow_async.py | 8 +- .../tests/agents/test_agent_responses_crud.py | 4 +- .../agents/test_agent_responses_crud_async.py | 4 +- .../tests/agents/test_agents_crud.py | 2 +- .../tests/agents/test_agents_crud_async.py | 2 +- ...est_agent_code_interpreter_and_function.py | 4 +- ..._agent_file_search_and_code_interpreter.py | 4 +- .../test_agent_file_search_and_function.py | 8 +- ...t_file_search_code_interpreter_function.py | 2 +- .../test_multitool_with_conversations.py | 2 +- .../agents/tools/test_agent_ai_search.py | 2 +- .../tools/test_agent_ai_search_async.py | 2 +- .../agents/tools/test_agent_bing_grounding.py | 4 +- 
.../tools/test_agent_code_interpreter.py | 4 +- .../test_agent_code_interpreter_async.py | 2 +- .../agents/tools/test_agent_file_search.py | 4 +- .../tools/test_agent_file_search_async.py | 4 +- .../tools/test_agent_file_search_stream.py | 2 +- .../test_agent_file_search_stream_async.py | 2 +- .../agents/tools/test_agent_function_tool.py | 6 +- .../tools/test_agent_function_tool_async.py | 6 +- .../tools/test_agent_image_generation.py | 2 +- .../test_agent_image_generation_async.py | 2 +- .../tests/agents/tools/test_agent_mcp.py | 4 +- .../agents/tools/test_agent_mcp_async.py | 2 +- .../agents/tools/test_agent_memory_search.py | 2 +- .../tools/test_agent_memory_search_async.py | 2 +- .../tests/agents/tools/test_agent_openapi.py | 2 +- .../agents/tools/test_agent_openapi_async.py | 2 +- .../test_agent_tools_with_conversations.py | 8 +- .../agents/tools/test_agent_web_search.py | 2 +- .../tools/test_agent_web_search_async.py | 2 +- .../tests/datasets/test_datasets.py | 2 +- .../tests/datasets/test_datasets_async.py | 2 +- .../tests/deployments/test_deployments.py | 4 +- .../deployments/test_deployments_async.py | 4 +- .../tests/finetuning/test_finetuning.py | 4 +- .../tests/finetuning/test_finetuning_async.py | 4 +- .../tests/responses/test_responses.py | 2 +- .../tests/responses/test_responses_async.py | 2 +- .../azure-ai-projects/tests/samples/README.md | 12 +-- .../tests/samples/test_samples.py | 32 +++---- .../tests/samples/test_samples_async.py | 28 +++---- .../tests/samples/test_samples_evaluations.py | 16 ++-- sdk/ai/azure-ai-projects/tests/test_base.py | 12 +-- 180 files changed, 666 insertions(+), 668 deletions(-) diff --git a/sdk/ai/azure-ai-projects/.env.template b/sdk/ai/azure-ai-projects/.env.template index 89e81956f322..df66f2a71199 100644 --- a/sdk/ai/azure-ai-projects/.env.template +++ b/sdk/ai/azure-ai-projects/.env.template @@ -20,8 +20,8 @@ AZURE_AI_PROJECTS_CONSOLE_LOGGING= # Project endpoint has the format: # 
`https://.services.ai.azure.com/api/projects/` -PROJECT_ENDPOINT= -MODEL_DEPLOYMENT_NAME= +AZURE_AI_PROJECT_ENDPOINT= +AZURE_AI_MODEL_DEPLOYMENT_NAME= AZURE_AI_AGENT_NAME= CONVERSATION_ID= CONNECTION_NAME= diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 80b7bcd0be13..34c72e0a6fa5 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -20,9 +20,7 @@ ### Other Changes -* Replace environment variable name, `AZURE_AI_PROJECT_ENDPOINT` TO `PROJECT_ENDPOINT`. -* Replace environment variable name, `AZURE_AI_MODEL_DEPLOYMENT_NAME` TO `MODEL_DEPLOYMENT_NAME`. - +* Placeholder ## 2.0.0 (2026-03-06) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index a9b17e078b94..8c741a297add 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -54,7 +54,7 @@ To report an issue with the client library, or request additional features, plea * Python 3.9 or later. * An [Azure subscription][azure_sub]. * A [project in Microsoft Foundry](https://learn.microsoft.com/azure/foundry/how-to/create-projects). -* A Foundry project endpoint URL of the form `https://your-ai-services-account-name.services.ai.azure.com/api/projects/your-project-name`. It can be found in your Microsoft Foundry Project home page. Below we will assume the environment variable `PROJECT_ENDPOINT` was defined to hold this value. +* A Foundry project endpoint URL of the form `https://your-ai-services-account-name.services.ai.azure.com/api/projects/your-project-name`. It can be found in your Microsoft Foundry Project home page. Below we will assume the environment variable `AZURE_AI_PROJECT_ENDPOINT` was defined to hold this value. * An Entra ID token for authentication. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. 
Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: * An appropriate role assignment. See [Role-based access control in Microsoft Foundry portal](https://learn.microsoft.com/azure/foundry/concepts/rbac-foundry). Role assignment can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed. @@ -87,7 +87,7 @@ from azure.identity import DefaultAzureCredential with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, ): ``` @@ -107,7 +107,7 @@ from azure.identity.aio import DefaultAzureCredential async with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, ): ``` @@ -117,20 +117,20 @@ async with ( Your Microsoft Foundry project may have one or more AI models deployed. These could be OpenAI models, Microsoft models, or models from other providers. Use the code below to get an authenticated [OpenAI](https://github.com/openai/openai-python?tab=readme-ov-file#usage) client from the [openai](https://pypi.org/project/openai/) package, and execute an example multi-turn "Responses" calls. -The code below assumes the environment variable `MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). +The code below assumes the environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` is defined. 
It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). ```python with project_client.get_openai_client() as openai_client: response = openai_client.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = openai_client.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) @@ -145,7 +145,7 @@ See the "responses" folder in the [package samples][samples] for additional samp The `.agents` property on the `AIProjectClient` gives you access to all Agent operations. Agents use an extension of the OpenAI Responses protocol, so you will need to get an `OpenAI` client to do Agent operations, as shown in the example below. -The code below assumes environment variable `MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). +The code below assumes environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). See the "agents" folder in the [package samples][samples] for an extensive set of samples, including streaming, tool usage and memory store usage. 
@@ -156,7 +156,7 @@ with project_client.get_openai_client() as openai_client: agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) @@ -1357,7 +1357,7 @@ By default logs redact the values of URL query strings, the values of some HTTP ```python project_client = AIProjectClient( credential=DefaultAzureCredential(), - endpoint=os.environ["PROJECT_ENDPOINT"], + endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], logging_enable=True ) ``` diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py index a494cd33cd1e..1b57d0bcd29c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py index c4ddae1518c3..69404d31ae35 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main() -> None: @@ -48,7 +48,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions.", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py index 89d6192b6c7b..876dc8daebcd 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py @@ -22,9 +22,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -36,8 +36,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model = os.environ["MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py index 89320902dd05..8baa7034c139 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py @@ -22,9 +22,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -37,8 +37,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model = os.environ["MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py index cfb11d16d92b..5ab97c36b587 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py index 83f7e7803384..dfeb5c961720 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py @@ -24,9 +24,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv pydantic Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -51,7 +51,7 @@ class CalendarEvent(BaseModel): participants: list[str] -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -62,7 +62,7 @@ class CalendarEvent(BaseModel): agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], text=PromptAgentDefinitionTextOptions( format=TextResponseFormatJsonSchema(name="CalendarEvent", schema=CalendarEvent.model_json_schema()) ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py index a21d54e8426d..12bdaf31231a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py @@ -24,9 +24,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp pydantic Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] class CalendarEvent(BaseModel): @@ -63,7 +63,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], text=PromptAgentDefinitionTextOptions( format=TextResponseFormatJsonSchema(name="CalendarEvent", schema=CalendarEvent.model_json_schema()) ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py index 5def14deed3e..b8674dc5c146 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ teacher_agent = project_client.agents.create_version( agent_name="teacher-agent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="""You are a teacher that create pre-school math question for student and check answer. If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", @@ -58,7 +58,7 @@ student_agent = project_client.agents.create_version( agent_name="student-agent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py index 160e059dbd39..b565a04be074 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
- 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. """ @@ -36,7 +36,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main(): @@ -50,7 +50,7 @@ async def main(): teacher_agent = await project_client.agents.create_version( agent_name="teacher-agent-async", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="""You are a teacher that create pre-school math question for student and check answer. If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", @@ -61,7 +61,7 @@ async def main(): student_agent = await project_client.agents.create_version( agent_name="student-agent-async", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py index cdf10921d9a0..0f318459182f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-monitor-opentelemetry Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -46,7 +46,7 @@ with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, ): # [START setup_azure_monitor_tracing] # Enable Azure Monitor tracing @@ -62,7 +62,7 @@ # [END create_span_for_scenario] with project_client.get_openai_client() as openai_client: agent_definition = PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py index 5495550a2131..e5aa0582e9a6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv opentelemetry-sdk azure-core-tracing-opentelemetry Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -90,11 +90,11 @@ def display_conversation_item(item: Any) -> None: # [END create_span_for_scenario] with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, project_client.get_openai_client() as openai_client, ): agent_definition = PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py index c35bc71b8823..64786e3a00e0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv opentelemetry-sdk azure-core-tracing-opentelemetry Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] # Define the custom span processor that is used for adding the custom @@ -94,7 +94,7 @@ def on_end(self, span: ReadableSpan): ): agent_definition = PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py index e0e5cc4267ea..6f082269687a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AI_SEARCH_PROJECT_CONNECTION_ID - The AI Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -66,7 +66,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="""You are a helpful assistant. You must always provide citations for answers using the tool and render them as: `\u3010message_idx:search_idx\u2020source\u3011`.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py index d6f5aed57571..242ee742229b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0b1" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) STORAGE_INPUT_QUEUE_NAME - The name of the Azure Storage Queue to use for input and output in the Azure Function tool. 4) STORAGE_OUTPUT_QUEUE_NAME - The name of the Azure Storage Queue to use for output in the Azure Function tool. 
@@ -44,7 +44,7 @@ agent = None -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -82,7 +82,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py index 43c3246fb6f9..1ebf4c6d213b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py @@ -27,9 +27,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID - The Bing Custom Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -50,7 +50,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -74,7 +74,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="""You are a helpful agent that can use Bing Custom Search tools to assist users. Use the available Bing Custom Search tools to answer questions and perform tasks.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py index 7ca03afa983a..22fb479f0109 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py @@ -35,9 +35,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_PROJECT_CONNECTION_ID - The Bing project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
""" @@ -55,7 +55,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -76,7 +76,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py index 38129dea7b78..aa660411a6cc 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BROWSER_AUTOMATION_PROJECT_CONNECTION_ID - The browser automation project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="""You are an Agent helping with browser automation tasks. You can answer questions, provide information, and assist with various tasks related to web browsing using the Browser Automation tool available to you.""", diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py index 72f9d84a4aa9..6007468f8439 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -47,7 +47,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py index de936fcfff6b..edde775f47aa 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main() -> None: @@ -46,7 +46,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[CodeInterpreterTool()], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py index 972d9ad84447..3cb511cfcc3b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -57,7 +57,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py index e275ab430db7..69ddd048bcc8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main() -> None: @@ -59,7 +59,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[CodeInterpreterTool(container=AutoCodeInterpreterToolParam(file_ids=[file.id]))], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py index ec91666a1fa2..3b1ec849257c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py @@ -23,7 +23,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) (Optional) COMPUTER_USE_MODEL_DEPLOYMENT_NAME - The deployment name of the computer-use-preview model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -45,7 +45,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py index 65b027d6e281..c2c85a6c9906 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py @@ -23,7 +23,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) (Optional) COMPUTER_USE_MODEL_DEPLOYMENT_NAME - The deployment name of the computer-use-preview model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py index c780ff71b510..20bcd70f597d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
- 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) FABRIC_PROJECT_CONNECTION_ID - The Fabric project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -59,7 +59,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py index c92e0cfc1e0f..9e14fdd34461 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search through product information.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py index c8ce27101fc2..4bac22ce828c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -63,7 +63,7 @@ agent = project_client.agents.create_version( agent_name="StreamingFileSearchAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search through product information and provide detailed responses. Use the file search tool to find relevant information before answering.", tools=[FileSearchTool(vector_store_ids=[vector_store.id])], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py index 1ca1c6ec2ec5..7f02ea13a7d6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main() -> None: @@ -63,7 +63,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="StreamingFileSearchAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search through product information and provide detailed responses. Use the file search tool to find relevant information before answering.", tools=[FileSearchTool(vector_store_ids=[vector_store.id])], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py index a8ee3ce16447..701201d55d99 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. """ @@ -39,7 +39,7 @@ def get_horoscope(sign: str) -> str: return f"{sign}: Next Tuesday you will befriend a baby otter." 
-endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -69,7 +69,7 @@ def get_horoscope(sign: str) -> str: agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can use function tools.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py index 8f7a416abeb1..ccbc373e6585 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def get_horoscope(sign: str) -> str: @@ -70,7 +70,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can use function tools.", tools=[func_tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py index db372ec52647..5b9ea7011b3c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py @@ -28,9 +28,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) used by the Agent for understanding and responding to prompts. This is NOT the image generation model. 3) IMAGE_GENERATION_MODEL_DEPLOYMENT_NAME - The deployment name of the image generation model (e.g. gpt-image-1) used by the ImageGenTool. 
@@ -53,7 +53,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -73,7 +73,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="Generate images based on user prompts", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py index 34010e739490..b7859d6483eb 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py @@ -28,9 +28,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) used by the Agent for understanding and responding to prompts. This is NOT the image generation model. 3) IMAGE_GENERATION_MODEL_DEPLOYMENT_NAME - The deployment name of the image generation model (e.g. gpt-image-1) used by the ImageGenTool. 
@@ -53,7 +53,7 @@ from azure.ai.projects.models import PromptAgentDefinition, ImageGenTool load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main(): @@ -68,7 +68,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="Generate images based on user prompts", tools=[ImageGenTool(model=image_generation_model, quality="low", size="1024x1024")], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py index 95a4010ced59..c318f9004f1e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -50,7 +50,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful agent that can use MCP tools to assist users. Use the available MCP tools to answer questions and perform tasks.", tools=[mcp_tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py index 0e980be777ad..cc12b02a8fdd 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main(): @@ -55,7 +55,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful agent that can use MCP tools to assist users. Use the available MCP tools to answer questions and perform tasks.", tools=tools, ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py index 27af3ffcd510..be1036a9fde8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MCP_PROJECT_CONNECTION_ID - The connection resource ID in Custom keys with key equals to "Authorization" and value to be "Bearer ". 
@@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -55,7 +55,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent7", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="Use MCP tools as needed", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py index 7d25ec8836ae..71ec422987a4 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MCP_PROJECT_CONNECTION_ID - The connection resource ID in Custom keys with key equals to "Authorization" and value to be "Bearer ". 
@@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main(): @@ -58,7 +58,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="Use MCP tools as needed", tools=tools, ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py index 6888ad8263a5..26f9221631f1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py @@ -23,9 +23,9 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model for memory, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -48,7 +48,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -96,7 +96,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py index dd3792a89249..6e959c0c3a88 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py @@ -23,9 +23,9 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model for memory, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -51,7 +51,7 @@ async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, @@ -90,7 +90,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", tools=[ MemorySearchPreviewTool( diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py index 33080b31243c..aba7820c69d6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv jsonref Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -38,7 +38,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -65,7 +65,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py index e4d56f1ba0f6..886288df69de 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv jsonref Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) OPENAPI_PROJECT_CONNECTION_ID - The OpenAPI project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -42,7 +42,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -75,7 +75,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py index 273b6c18a73e..6a108dfe83a9 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) SHAREPOINT_PROJECT_CONNECTION_ID - The SharePoint project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -59,7 +59,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="""You are a helpful agent that can use SharePoint tools to assist users. Use the available SharePoint tools to answer questions and perform tasks.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py index 2a0475de9bfa..133b99d589e0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) A2A_PROJECT_CONNECTION_ID - The A2A project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -61,7 +61,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py index 6837cb0b13a2..01729d031854 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py @@ -26,9 +26,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -46,7 +46,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search the web", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py index d433fb8cf8e7..bf09f23786ff 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py @@ -26,9 +26,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,7 +42,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -56,7 +56,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent105", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search the web", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py index f086c37c2517..41d8d6f75aa2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py @@ -27,9 +27,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID - The Bing Custom Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -51,7 +51,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -70,7 +70,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that can search the web and bing", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index f970221e5068..a867acc2106e 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - The name of a connection, as found in the "Connected resources" tab in the Management Center of your Microsoft Foundry project. 
@@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] connection_name = os.environ["CONNECTION_NAME"] with ( diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py index fce1328c7d36..e5814abf27fc 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - The name of a connection, as found in the "Connected resources" tab in the Management Center of your Microsoft Foundry project. @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] connection_name = os.environ["CONNECTION_NAME"] diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py index c0ef2fb68bf9..eb664060a12c 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. 
The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. @@ -36,7 +36,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version_1 = os.environ.get("DATASET_VERSION_1", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py index ebf84028bbc8..7740507124d9 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 
@@ -45,7 +45,7 @@ async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version_1 = os.environ.get("DATASET_VERSION_1", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py index 138431174b93..2a9aa4fc73a2 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py @@ -19,7 +19,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version = os.environ.get("DATASET_VERSION", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py index f6faeb5037bd..cee409590d7a 100644 --- a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py +++ b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. 3) MODEL_PUBLISHER - Optional. The publisher of the model to filter by. 4) MODEL_NAME - Optional. The name of the model to filter by. 
""" @@ -31,8 +31,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] model_publisher = os.environ.get("MODEL_PUBLISHER", "Microsoft") model_name = os.environ.get("MODEL_NAME", "Phi-4") diff --git a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py index 00c7d1a3e812..ae2a1151ba17 100644 --- a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py +++ b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. 3) MODEL_PUBLISHER - Optional. The publisher of the model to filter by. 4) MODEL_NAME - Optional. The name of the model to filter by. 
""" @@ -35,8 +35,8 @@ async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] - model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] model_publisher = os.environ.get("MODEL_PUBLISHER", "Microsoft") model_name = os.environ.get("MODEL_NAME", "Phi-4") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/README.md b/sdk/ai/azure-ai-projects/samples/evaluations/README.md index 078a9edc6cfc..628468aa20e4 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/README.md +++ b/sdk/ai/azure-ai-projects/samples/evaluations/README.md @@ -11,8 +11,8 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv ``` Set these environment variables: -- `PROJECT_ENDPOINT` - Your Azure AI Project endpoint (e.g., `https://.services.ai.azure.com/api/projects/`) -- `MODEL_DEPLOYMENT_NAME` - The model deployment name (e.g., `gpt-4o-mini`) +- `AZURE_AI_PROJECT_ENDPOINT` - Your Azure AI Project endpoint (e.g., `https://.services.ai.azure.com/api/projects/`) +- `AZURE_AI_MODEL_DEPLOYMENT_NAME` - The model deployment name (e.g., `gpt-4o-mini`) ## Sample Index @@ -94,8 +94,8 @@ Located in the [agentic_evaluators](https://github.com/Azure/azure-sdk-for-pytho ```bash # Set environment variables -export PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" -export MODEL_DEPLOYMENT_NAME="gpt-4o-mini" # Replace with your model +export AZURE_AI_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini" # Replace with your model # Run a sample python sample_evaluations_builtin_with_inline_data.py diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py index 19ebfbc845fa..13bbaf3726bc 100644 --- 
a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py index 0e747921b2dd..dc0a7199df78 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. 
The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py index 49f206917552..1650b0d1cc5d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py @@ -28,7 +28,7 @@ def run_evaluator( data_mapping: dict[str, str], ) -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ with ( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py index d54b7e664ed0..df15b21b6699 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py 
+++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -36,7 +36,7 @@ def _get_evaluator_initialization_parameters(evaluator_name: str) -> dict[str, s if evaluator_name == "task_navigation_efficiency": return {"matching_mode": "exact_match"} # Can be "exact_match", "in_order_match", or "any_order_match" else: - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini return {"deployment_name": model_deployment_name} diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py index 4832d3f1d926..654adfb1faec 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. 
The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py index 06861dcb05ea..e434a78db732 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py index 0ca65a06c49c..1d3cc28dc6cb 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py index a73734676532..cd6ce53055ea 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py index c884df5ecf08..b2f5381c54e4 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py index b5439678f8eb..92d39f2ddddc 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py index 9383602a6194..6c9bc015d529 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 
""" @@ -42,7 +42,7 @@ def main() -> None: endpoint = os.environ.get( - "PROJECT_ENDPOINT", "" + "AZURE_AI_PROJECT_ENDPOINT", "" ) # Sample : https://.services.ai.azure.com/api/projects/ with ( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py index c5384ce3c16c..c2f8980e503e 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py index ef82d24014cd..5404c8bb1183 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -44,9 +44,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py index 22dcc2c0f242..5809882fb1ee 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py index 287a36a417f5..3437f8fd674f 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py index eb21c7c1042e..e7646aeaabe8 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py index b3c6d2a45a27..7cafd28186f2 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py @@ -19,10 +19,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,8 +39,8 @@ from openai.types.evals.run_retrieve_response import RunRetrieveResponse load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini # [START agent_evaluation_basic] with ( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py index dae8f5f97596..9c81014daa52 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py @@ -19,10 +19,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -50,7 +50,7 @@ agent = project_client.agents.create_version( agent_name=os.environ["AZURE_AI_AGENT_NAME"], definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py index b69f22681c64..37d7244c3808 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -40,8 +40,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] # Define a function tool for the model to use func_tool = FunctionTool( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py index 7a37829a1110..7c38cb9a2d5d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py @@ -28,10 +28,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -50,7 +50,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -63,7 +63,7 @@ agent = project_client.agents.create_version( agent_name=os.environ["AZURE_AI_AGENT_NAME"], definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py index ff0d08897d8d..255ea0c2e660 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 
""" @@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py index abd5526eb45b..c3227497204d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. 
""" @@ -42,8 +42,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py index fc00c7831b57..b3dfb581b3ce 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. 
For Custom Prompt Based Evaluators: @@ -75,8 +75,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py index d7a7886e5900..77093c5d704b 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -51,11 +51,11 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") if not model_deployment_name: - raise ValueError("MODEL_DEPLOYMENT_NAME environment variable is not set") + raise ValueError("AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py index b830d1111548..c0fe4424bc85 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -47,7 +47,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -64,7 +64,7 @@ TestingCriterionLabelModel( type="label_model", name="sentiment_analysis", - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input=[ { "role": "developer", diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py index 4d1f0b66289c..8ad0899d2a01 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -39,8 +39,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py index 0aebda63159e..c113c8b67e06 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 4) DATASET_VERSION - Optional. The version of the Dataset to create and use in this sample. 5) DATA_FOLDER - Optional. The folder path where the data files for upload are located. 
@@ -44,8 +44,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") dataset_name = os.environ.get("DATASET_NAME", "") dataset_version = os.environ.get("DATASET_VERSION", "1") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py index 1cf2e9328207..fa4ec52105a3 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -41,9 +41,9 @@ endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini # Construct the paths to the data folder and data file used in this sample script_dir = os.path.dirname(os.path.abspath(__file__)) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py index b2d9b79453b6..9d968bbbf296 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py @@ -18,9 +18,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -42,10 +42,10 @@ client = OpenAI( api_key=get_bearer_token_provider(DefaultAzureCredential(), "https://ai.azure.com/.default"), - base_url=os.environ["PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini data_source_config = DataSourceConfigCustom( { diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py index e24649f8fc78..33aff799eb4f 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py @@ -19,12 +19,12 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-monitor-query Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 2) APPINSIGHTS_RESOURCE_ID - Required. The Azure Application Insights resource ID that stores agent traces. It has the form: /subscriptions//resourceGroups//providers/Microsoft.Insights/components/. 3) AGENT_ID - Required. The agent identifier emitted by the Azure tracing integration, used to filter traces. - 4) MODEL_DEPLOYMENT_NAME - Required. The Azure OpenAI deployment name to use with the built-in evaluators. + 4) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The Azure OpenAI deployment name to use with the built-in evaluators. 5) TRACE_LOOKBACK_HOURS - Optional. 
Number of hours to look back when querying traces and in the evaluation run. Defaults to 1. """ @@ -44,12 +44,12 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] appinsights_resource_id = os.environ[ "APPINSIGHTS_RESOURCE_ID" ] # Sample : /subscriptions//resourceGroups//providers/Microsoft.Insights/components/ agent_id = os.environ["AGENT_ID"] -model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] +model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] trace_query_hours = int(os.environ.get("TRACE_LOOKBACK_HOURS", "1")) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py index e382f1ce7c3d..ecb0f7fc86c7 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -39,8 +39,8 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py index 7f14453d4e26..0f9377ef9fbf 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv pillow Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -47,8 +47,8 @@ file_path = os.path.abspath(__file__) folder_path = os.path.dirname(file_path) -endpoint = os.environ["PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") def image_to_data_uri(image_path: str) -> str: diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py index c66866f7ed27..8b0170031dd3 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -68,7 +68,7 @@ ) print(f"Evaluation created (id: {eval_object.id}, name: {eval_object.name})") - model = os.environ["MODEL_DEPLOYMENT_NAME"] + model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] data_source = { "type": "azure_ai_target_completions", "source": { diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py index 6f75a6bad70f..6516c637796e 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 2) AZURE_AI_AGENT_NAME - Required. The name of the Agent to perform red teaming evaluation on. 
""" @@ -45,7 +45,7 @@ def main() -> None: load_dotenv() # - endpoint = os.environ.get("PROJECT_ENDPOINT", "") + endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") agent_name = os.environ.get("AZURE_AI_AGENT_NAME", "") with ( @@ -56,7 +56,7 @@ def main() -> None: agent_version = project_client.agents.create_version( agent_name=agent_name, definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py index fcde3de826ca..29397e006414 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-mgmt-authorization azure-mgmt-resource Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 2) AZURE_SUBSCRIPTION_ID - Required for RBAC assignment. The Azure subscription ID where the project is located. 3) AZURE_RESOURCE_GROUP_NAME - Required for RBAC assignment. The resource group name where the project is located. 
@@ -75,13 +75,13 @@ def assign_rbac(): """ load_dotenv() - endpoint = os.environ.get("PROJECT_ENDPOINT", "") + endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", "") resource_group_name = os.environ.get("AZURE_RESOURCE_GROUP_NAME", "") if not endpoint or not subscription_id or not resource_group_name: print( - "Error: PROJECT_ENDPOINT, AZURE_SUBSCRIPTION_ID, and AZURE_RESOURCE_GROUP_NAME environment variables are required" + "Error: AZURE_AI_PROJECT_ENDPOINT, AZURE_SUBSCRIPTION_ID, and AZURE_RESOURCE_GROUP_NAME environment variables are required" ) return @@ -214,7 +214,7 @@ def assign_rbac(): def schedule_dataset_evaluation() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] dataset_name = os.environ.get("DATASET_NAME", "") dataset_version = os.environ.get("DATASET_VERSION", "1") # Construct the paths to the data folder and data file used in this sample @@ -327,7 +327,7 @@ def schedule_dataset_evaluation() -> None: def schedule_redteam_evaluation() -> None: load_dotenv() # - endpoint = os.environ.get("PROJECT_ENDPOINT", "") + endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") agent_name = os.environ.get("AZURE_AI_AGENT_NAME", "") # Construct the paths to the data folder and data file used in this sample @@ -343,7 +343,7 @@ def schedule_redteam_evaluation() -> None: agent_version = project_client.agents.create_version( agent_name=agent_name, definition=PromptAgentDefinition( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/files/sample_files.py b/sdk/ai/azure-ai-projects/samples/files/sample_files.py index ba156aff897e..57f8e3fb3187 100644 --- a/sdk/ai/azure-ai-projects/samples/files/sample_files.py +++ b/sdk/ai/azure-ai-projects/samples/files/sample_files.py @@ 
-16,7 +16,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) FILE_PATH - Optional. Path to the file to upload. Defaults to the `data` folder. """ @@ -29,7 +29,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] script_dir = Path(__file__).parent file_path = os.environ.get("FILE_PATH", os.path.join(script_dir, "data", "test_file.jsonl")) diff --git a/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py b/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py index 0fbdcb80223c..d6bb1491a678 100644 --- a/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py +++ b/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py @@ -16,7 +16,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) FILE_PATH - Optional. Path to the file to upload. Defaults to the `data` folder. 
""" @@ -30,7 +30,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] script_dir = Path(__file__).parent file_path = os.environ.get("FILE_PATH", os.path.join(script_dir, "data", "test_file.jsonl")) diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py index 2e66c9edfb27..b8fe46419490 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4o` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "dpo_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "dpo_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py index 340159571043..4e7b5dc91ec0 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4o` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "dpo_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "dpo_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py index 5918ff361cdf..c4a44e2d727f 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "Ministral-3B") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py index 01267d2d3ecf..2fcd5ee8e93c 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `Ministral-3B` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "Ministral-3B") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py index 576587bfb4cc..6b1bed171863 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `o4-mini` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "o4-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "rft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "rft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py index 0034551f6005..701bf384731f 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `o4-mini` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "o4-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "rft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "rft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py index 4edd2d93f1ee..626af3c4bedd 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py @@ -21,7 +21,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-mgmt-cognitiveservices Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -43,7 +43,7 @@ load_dotenv() # For fine-tuning -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4.1") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py index 47d3136f2ced..cd967034fd92 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py @@ -21,7 +21,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp azure-mgmt-cognitiveservices Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -43,7 +43,7 @@ load_dotenv() # For fine-tuning -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4.1") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py index 594e651a917d..fd865b8d8ca1 100644 --- a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py +++ b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) INDEX_NAME - Optional. The name of the Index to create and use in this sample. 3) INDEX_VERSION - Optional. The version of the Index to create and use in this sample. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] index_name = os.environ.get("INDEX_NAME", "index-test") index_version = os.environ.get("INDEX_VERSION", "1.0") ai_search_connection_name = os.environ.get("AI_SEARCH_CONNECTION_NAME", "my-ai-search-connection-name") diff --git a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py index 1cd9c00f1328..9ace80a10df8 100644 --- a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py +++ b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) INDEX_NAME - Optional. The name of the Index to create and use in this sample. 3) INDEX_VERSION - Optional. The version of the Index to create and use in this sample. 
@@ -37,7 +37,7 @@ async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] index_name = os.environ.get("INDEX_NAME", "index-test") index_version = os.environ.get("INDEX_VERSION", "1.0") ai_search_connection_name = os.environ.get("AI_SEARCH_CONNECTION_NAME", "my-ai-search-connection-name") diff --git a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py index 4623807d024d..d09bb48a72ad 100644 --- a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py @@ -8,7 +8,7 @@ DESCRIPTION: This sample demonstrates how to directly interact with MCP (Model Context Protocol) tools using the low-level MCP client library to connect to the Foundry Project's MCP tools API: - {PROJECT_ENDPOINT}/mcp_tools?api-version=2025-05-15-preview + {AZURE_AI_PROJECT_ENDPOINT}/mcp_tools?api-version=2025-05-15-preview For agent-based MCP tool usage, see samples in samples/agents/tools/sample_agent_mcp.py and related files in that directory. @@ -29,7 +29,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv mcp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) IMAGE_GEN_DEPLOYMENT_NAME - The deployment name of the image generation model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -64,7 +64,7 @@ # Enable httpx logging to see HTTP requests at the same level logging.getLogger("httpx").setLevel(getattr(logging, log_level, logging.CRITICAL)) -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py index 85f8e3e5aec9..6af7c9c90f3c 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py @@ -24,7 +24,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -46,7 +46,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py index bde8a22a1c3e..f8b9643f7547 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py @@ -24,7 +24,7 @@ Once you have deployed models, set the deployment name in the variables below. 
Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -50,7 +50,7 @@ async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py index ff307a66cbb9..0d3cf82f4fbe 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py @@ -22,7 +22,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -43,7 +43,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py index fe12d1db07ee..6999a6c154ca 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py @@ -23,7 +23,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -48,7 +48,7 @@ async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py index cf116e910d08..94d1abdab835 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py @@ -20,7 +20,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -37,7 +37,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py index 23fc4782e98e..730dd3a53534 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py @@ -20,7 +20,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -41,7 +41,7 @@ async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py index bdf4936f8f6c..918ae00569e5 100644 --- a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py +++ b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. 
The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. 3) MODEL_ENDPOINT - Required. The Azure AI Model endpoint, as found in the overview page of your Microsoft Foundry project. Example: https://.services.ai.azure.com 4) MODEL_API_KEY - Required. The API key for your Azure AI Model. @@ -38,11 +38,11 @@ load_dotenv() endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com model_api_key = os.environ["MODEL_API_KEY"] -model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini +model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py index 54170b0d3492..261b0f9aaedc 100644 --- a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py +++ b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. 3) MODEL_ENDPOINT - Required. 
The Azure AI Model endpoint, as found in the overview page of your Microsoft Foundry project. Example: https://.services.ai.azure.com 4) MODEL_API_KEY - Required. The API key for your Azure AI Model. @@ -43,11 +43,11 @@ async def sample_red_team_async() -> None: """Demonstrates how to perform Red Team operations using the AIProjectClient.""" endpoint = os.environ[ - "PROJECT_ENDPOINT" + "AZURE_AI_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com model_api_key = os.environ["MODEL_API_KEY"] - model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini + model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py index 062aef48577e..436237b03c46 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -40,13 +40,13 @@ # [START responses] with project_client.get_openai_client() as openai_client: response = openai_client.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = openai_client.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py index 0501cc142cbe..b68ab13eddd7 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async def main() -> None: @@ -43,13 +43,13 @@ async def main() -> None: project_client.get_openai_client() as openai_client, ): response = await openai_client.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = await openai_client.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py index 39375d948e2d..9c5d8b656bd7 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py @@ -19,9 +19,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,11 +34,11 @@ openai = OpenAI( api_key=get_bearer_token_provider(DefaultAzureCredential(), "https://ai.azure.com/.default"), - base_url=os.environ["PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) response = openai.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input="How many feet are in a mile?", ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py index 47b46923f9b3..8a2934ff7418 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py @@ -19,9 +19,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,13 +42,13 @@ async def main() -> None: openai = AsyncOpenAI( api_key=get_bearer_token_provider(credential, "https://ai.azure.com/.default"), - base_url=os.environ["PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) async with openai: response = await openai.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input="How many feet are in a mile?", ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py index 1299f15d638c..542d956b1cf0 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] def image_to_base64(image_path: str) -> str: @@ -71,6 +71,6 @@ def image_to_base64(image_path: str) -> str: ], } ], - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py index 8d7f60673c2b..86f3b2d1fa47 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -43,7 +43,7 @@ ): with openai_client.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input=[ {"role": "user", "content": "Tell me about the capital city of France"}, ], diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py index 6508bd40deb4..110bb0b1c4be 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -42,7 +42,7 @@ ): with openai_client.responses.stream( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], input=[ {"role": "user", "content": "Tell me about the capital city of France"}, ], diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py index a781b8152edc..d1c75654083a 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,7 +42,7 @@ class CalendarEvent(BaseModel): participants: list[str] -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -50,7 +50,7 @@ class CalendarEvent(BaseModel): project_client.get_openai_client() as openai_client, ): response = openai_client.responses.create( - model=os.environ["MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], instructions=""" Extracts calendar event information from the input messages, and return it in the desired structured output format. diff --git a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py index b5ef4f48234a..61f11436620b 100644 --- a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py +++ b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 
""" @@ -28,7 +28,7 @@ load_dotenv() -endpoint = os.environ["PROJECT_ENDPOINT"] +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py index 4e98aff356bd..7554370401f4 100644 --- a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py +++ b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. """ @@ -32,7 +32,7 @@ async def main() -> None: - endpoint = os.environ["PROJECT_ENDPOINT"] + endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py index c8a6ebc74921..d7aede04fb65 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py @@ -273,7 +273,7 @@ def _test_agent_creation_with_tracing_content_recording_enabled_impl(self, use_e with self.create_client(operation_group="tracing", **kwargs) as project_client: - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") print(f"Using model deployment: {model}") agent_definition = PromptAgentDefinition( @@ -387,7 +387,7 @@ def _test_agent_creation_with_tracing_content_recording_disabled_impl(self, use_ with self.create_client(operation_group="agents", **kwargs) as 
project_client: - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") agent_definition = PromptAgentDefinition( model=model, instructions="You are a helpful AI assistant. Always be polite and provide accurate information.", @@ -609,7 +609,7 @@ def _test_agent_with_structured_output_with_instructions_impl( operation_group = "tracing" if content_recording_enabled else "agents" with self.create_client(operation_group=operation_group, **kwargs) as project_client: - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") test_schema = { "type": "object", @@ -797,7 +797,7 @@ def _test_agent_with_structured_output_without_instructions_impl( operation_group = "tracing" if content_recording_enabled else "agents" with self.create_client(operation_group=operation_group, **kwargs) as project_client: - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") test_schema = { "type": "object", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py index 80f20632fd98..96184005a350 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py @@ -75,7 +75,7 @@ async def _test_create_agent_with_tracing_content_recording_enabled_impl(self, u assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") async with project_client: agent_definition = PromptAgentDefinition( @@ -186,7 +186,7 @@ async def _test_agent_creation_with_tracing_content_recording_disabled_impl(self assert True == AIProjectInstrumentor().is_instrumented() 
project_client = self.create_async_client(operation_group="agents", **kwargs) - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") async with project_client: agent_definition = PromptAgentDefinition( @@ -406,7 +406,7 @@ async def _test_agent_with_structured_output_with_instructions_impl( project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") test_schema = { "type": "object", @@ -591,7 +591,7 @@ async def _test_agent_with_structured_output_without_instructions_impl( project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") test_schema = { "type": "object", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py index 23a9116edd60..7b8471717f2b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py @@ -78,7 +78,7 @@ def _get_openai_client_and_deployment(self, **kwargs) -> Tuple[OpenAI, str]: openai_client = project_client.get_openai_client() # Get the model deployment name from test parameters - model_deployment_name = kwargs.get("model_deployment_name") + model_deployment_name = kwargs.get("azure_ai_model_deployment_name") return openai_client, model_deployment_name @@ -234,7 +234,7 @@ def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwa with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = 
kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -356,7 +356,7 @@ def _test_sync_non_streaming_without_content_recording_impl(self, use_events, ** with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -479,7 +479,7 @@ def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -656,7 +656,7 @@ def test_sync_conversations_create(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -701,7 +701,7 @@ def test_sync_list_conversation_items_with_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -778,7 
+778,7 @@ def test_sync_list_conversation_items_without_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -887,7 +887,7 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") with project_client: # Get the OpenAI client from the project client @@ -994,7 +994,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -1253,7 +1253,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -1577,7 +1577,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = 
project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -1813,7 +1813,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -2131,7 +2131,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -2281,7 +2281,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -2429,7 +2429,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = 
client.conversations.create() @@ -2526,7 +2526,7 @@ def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -2631,7 +2631,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -2723,7 +2723,7 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = client.conversations.create() @@ -2824,7 +2824,7 @@ def _test_image_only_content_off_binary_off_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -2929,7 +2929,7 @@ def _test_image_only_content_off_binary_on_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: 
client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3033,7 +3033,7 @@ def _test_image_only_content_on_binary_off_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3137,7 +3137,7 @@ def _test_image_only_content_on_binary_on_non_streaming_impl(self, use_events, * with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3245,7 +3245,7 @@ def _test_text_and_image_content_off_binary_off_non_streaming_impl(self, use_eve with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3354,7 +3354,7 @@ def _test_text_and_image_content_off_binary_on_non_streaming_impl(self, use_even with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3463,7 +3463,7 @@ def _test_text_and_image_content_on_binary_off_non_streaming_impl(self, use_even with self.create_client(operation_group="tracing", **kwargs) as project_client: client = 
project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3571,7 +3571,7 @@ def _test_text_and_image_content_on_binary_on_non_streaming_impl(self, use_event with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3683,7 +3683,7 @@ def _test_image_only_content_off_binary_off_streaming_impl(self, use_events, **k with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3796,7 +3796,7 @@ def _test_image_only_content_off_binary_on_streaming_impl(self, use_events, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -3908,7 +3908,7 @@ def _test_image_only_content_on_binary_off_streaming_impl(self, use_events, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -4020,7 +4020,7 @@ def _test_image_only_content_on_binary_on_streaming_impl(self, use_events, **kwa with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() 
- deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -4136,7 +4136,7 @@ def _test_text_and_image_content_off_binary_off_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -4253,7 +4253,7 @@ def _test_text_and_image_content_off_binary_on_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -4370,7 +4370,7 @@ def _test_text_and_image_content_on_binary_off_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -4486,7 +4486,7 @@ def _test_text_and_image_content_on_binary_on_streaming_impl(self, use_events, * with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -4601,7 +4601,7 @@ def test_responses_stream_method_with_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = 
kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -4654,7 +4654,7 @@ def test_responses_stream_method_without_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = client.conversations.create() @@ -4709,7 +4709,7 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool function_tool = FunctionTool( @@ -4823,7 +4823,7 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool function_tool = FunctionTool( @@ -4960,7 +4960,7 @@ def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): assert True == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") openai_client = project_client.get_openai_client() # Create Teacher Agent @@ -5159,7 +5159,7 @@ def test_workflow_agent_non_streaming_without_content_recording(self, **kwargs): assert False == AIProjectInstrumentor().is_content_recording_enabled() with 
self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") openai_client = project_client.get_openai_client() workflow_yaml = """ @@ -5276,7 +5276,7 @@ def test_workflow_agent_streaming_with_content_recording(self, **kwargs): assert True == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") openai_client = project_client.get_openai_client() # Create Teacher Agent @@ -5478,7 +5478,7 @@ def test_workflow_agent_streaming_without_content_recording(self, **kwargs): assert False == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") openai_client = project_client.get_openai_client() workflow_yaml = """ diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py index 490715fa0333..8f2b18126533 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py @@ -59,7 +59,7 @@ async def _test_async_non_streaming_with_content_recording_impl(self, use_events assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: # Get the OpenAI client from the project client @@ -165,7 
+165,7 @@ async def _test_async_streaming_with_content_recording_impl(self, use_events, ** assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: # Get the OpenAI client from the project client @@ -277,7 +277,7 @@ async def test_async_conversations_create(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: # Get the OpenAI client from the project client @@ -325,7 +325,7 @@ async def test_async_list_conversation_items_with_content_recording(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: # Get the OpenAI client from the project client @@ -417,7 +417,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl( async with project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool func_tool = FunctionTool( @@ -678,7 +678,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl( async with project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define 
a function tool func_tool = FunctionTool( @@ -924,7 +924,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = await client.conversations.create() @@ -1021,7 +1021,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = await client.conversations.create() @@ -1126,7 +1126,7 @@ async def test_async_multiple_text_inputs_without_content_recording_non_streamin async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = await client.conversations.create() @@ -1225,7 +1225,7 @@ async def test_async_image_only_content_off_binary_off_non_streaming(self, **kwa assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1297,7 +1297,7 @@ async def 
test_async_image_only_content_off_binary_on_non_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1368,7 +1368,7 @@ async def test_async_image_only_content_on_binary_off_non_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1439,7 +1439,7 @@ async def test_async_image_only_content_on_binary_on_non_streaming(self, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1514,7 +1514,7 @@ async def test_async_text_and_image_content_off_binary_off_non_streaming(self, * assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1590,7 +1590,7 @@ async def test_async_text_and_image_content_off_binary_on_non_streaming(self, ** assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + 
deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1665,7 +1665,7 @@ async def test_async_text_and_image_content_on_binary_off_non_streaming(self, ** assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1740,7 +1740,7 @@ async def test_async_text_and_image_content_on_binary_on_non_streaming(self, **k assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1819,7 +1819,7 @@ async def test_async_image_only_content_off_binary_off_streaming(self, **kwargs) assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1899,7 +1899,7 @@ async def test_async_image_only_content_off_binary_on_streaming(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -1978,7 +1978,7 @@ async def test_async_image_only_content_on_binary_off_streaming(self, **kwargs): assert True == 
AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2057,7 +2057,7 @@ async def test_async_image_only_content_on_binary_on_streaming(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2140,7 +2140,7 @@ async def test_async_text_and_image_content_off_binary_off_streaming(self, **kwa assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2223,7 +2223,7 @@ async def test_async_text_and_image_content_off_binary_on_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2306,7 +2306,7 @@ async def test_async_text_and_image_content_on_binary_off_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client 
= project_client.get_openai_client() @@ -2389,7 +2389,7 @@ async def test_async_text_and_image_content_on_binary_on_streaming(self, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -2474,7 +2474,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Create a conversation conversation = await client.conversations.create() @@ -2582,7 +2582,7 @@ async def test_async_responses_stream_method_with_content_recording(self, **kwar async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = await client.conversations.create() @@ -2662,7 +2662,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") conversation = await client.conversations.create() @@ -2750,7 +2750,7 @@ async def _test_async_responses_stream_method_with_tools_with_content_recording_ async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = 
project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool function_tool = FunctionTool( @@ -2983,7 +2983,7 @@ async def _test_async_responses_stream_method_with_tools_without_content_recordi async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") # Define a function tool function_tool = FunctionTool( @@ -3214,7 +3214,7 @@ async def test_async_workflow_agent_non_streaming_with_content_recording(self, * assert True == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: # Create a simple workflow agent @@ -3331,7 +3331,7 @@ async def test_async_workflow_agent_non_streaming_without_content_recording(self assert False == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: workflow_yaml = """ @@ -3454,7 +3454,7 @@ async def test_async_workflow_agent_streaming_with_content_recording(self, **kwa assert True == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: workflow_yaml = """ @@ -3575,7 +3575,7 @@ async def test_async_workflow_agent_streaming_without_content_recording(self, 
** assert False == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: workflow_yaml = """ @@ -3708,7 +3708,7 @@ async def _test_async_prompt_agent_with_responses_non_streaming_impl( assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() @@ -3846,7 +3846,7 @@ async def _test_async_prompt_agent_with_responses_streaming_impl( assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") async with project_client: client = project_client.get_openai_client() diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py index 7cb41a48cd3f..f68d4fdae952 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py @@ -59,7 +59,7 @@ def test_sync_browser_automation_non_streaming_with_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") browser_automation_connection_id = 
kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -194,7 +194,7 @@ def test_sync_browser_automation_non_streaming_without_content_recording(self, * assert not AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -320,7 +320,7 @@ def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs self.setup_telemetry() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -448,7 +448,7 @@ def test_sync_browser_automation_streaming_without_content_recording(self, **kwa self.setup_telemetry() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py index 4b2454c510cb..46918894c2fb 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py @@ -62,7 +62,7 @@ async def test_async_browser_automation_non_streaming_with_content_recording(sel assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -193,7 +193,7 @@ async def test_async_browser_automation_non_streaming_without_content_recording( assert not AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -315,7 +315,7 @@ async def test_async_browser_automation_streaming_with_content_recording(self, * self.setup_telemetry() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -440,7 +440,7 @@ async def test_async_browser_automation_streaming_without_content_recording(self self.setup_telemetry() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = 
kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py index b23ea2b0e5e6..331d64b9aaa8 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py @@ -66,7 +66,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -254,7 +254,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -445,7 +445,7 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -636,7 +636,7 @@ def 
test_sync_code_interpreter_streaming_without_content_recording(self, **kwarg assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py index 007b78ce5bd1..7e5512b6fbbe 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py @@ -67,7 +67,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -254,7 +254,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -445,7 +445,7 @@ async def test_async_code_interpreter_streaming_with_content_recording(self, **k assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = 
kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -636,7 +636,7 @@ async def test_async_code_interpreter_streaming_without_content_recording(self, assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py index 76faa8c49ff4..ca4f301212f3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py @@ -53,7 +53,7 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -262,7 +262,7 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -469,7 +469,7 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = 
kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -674,7 +674,7 @@ def test_sync_file_search_streaming_without_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py index 647d24530f54..fd6c36261449 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py @@ -54,7 +54,7 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -263,7 +263,7 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -470,7 +470,7 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = 
self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -675,7 +675,7 @@ async def test_async_file_search_streaming_without_content_recording(self, **kwa assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index 3ef49deac892..4c5c453a3e23 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -59,7 +59,7 @@ def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, * assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -389,7 +389,7 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -706,7 +706,7 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa assert 
AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -982,7 +982,7 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py index c218a653295c..d9e82e2951e8 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -60,7 +60,7 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -389,7 +389,7 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -708,7 +708,7 @@ async def 
_test_async_mcp_streaming_with_content_recording_impl(self, use_events assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -987,7 +987,7 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py index f75e8d9694e7..a198327679c3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py @@ -41,7 +41,7 @@ def _get_openai_client_and_deployment(self, **kwargs) -> Tuple[OpenAI, str]: openai_client = project_client.get_openai_client() # Get the model deployment name from test parameters - model_deployment_name = kwargs.get("model_deployment_name") + model_deployment_name = kwargs.get("azure_ai_model_deployment_name") return openai_client, model_deployment_name diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py index 492c49c9d7af..bec6cfa9f2be 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py +++ 
b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py @@ -206,7 +206,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -371,7 +371,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -538,7 +538,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: @@ -706,7 +706,7 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index 1ce2aa436e88..e366e1ec3ef5 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -205,7 +205,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -366,7 +366,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -531,7 +531,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: @@ -697,7 +697,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("model_deployment_name") + deployment_name = kwargs.get("azure_ai_model_deployment_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py index 
92fca677d439..1cc36a6b0455 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py @@ -48,7 +48,7 @@ def test_agent_responses_crud(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -158,7 +158,7 @@ def test_agent_responses_crud(self, **kwargs): @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) def test_agent_responses_with_structured_output(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py index 449dead94abc..b710851c366f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py @@ -23,7 +23,7 @@ class TestAgentResponsesCrudAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_responses_crud_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) @@ -129,7 +129,7 @@ async def test_agent_responses_crud_async(self, **kwargs): @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_responses_with_structured_output_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = 
kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py index 724baf8c022b..10414b7a59d1 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py @@ -39,7 +39,7 @@ def test_agents_crud(self, **kwargs): GET /agents/{agent_name}/versions/{agent_version} project_client.agents.get_version() """ print("\n") - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") project_client = self.create_client(operation_group="agents", **kwargs) first_agent_name = "MyAgent1" second_agent_name = "MyAgent2" diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py index 9853f973e098..e9776b7e6257 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py @@ -24,7 +24,7 @@ async def test_agents_crud_async(self, **kwargs): It then gets, lists, and deletes them, validating at each step. It uses different ways of creating agents: strongly typed, dictionary, and IO[bytes]. 
""" - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") project_client = self.create_async_client(operation_group="agents", **kwargs) first_agent_name = "MyAgent1" second_agent_name = "MyAgent2" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py index e835e18936b3..3953bf1c76d2 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py @@ -40,7 +40,7 @@ def test_calculate_and_save(self, **kwargs): 2. Function Tool: Saves the computed result """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -100,7 +100,7 @@ def test_generate_data_and_report(self, **kwargs): 2. Function Tool: Creates a report with the computed statistics """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py index a6da388ec0b9..16f2c2c1ba41 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py @@ -39,7 +39,7 @@ def test_find_and_analyze_data(self, **kwargs): 2. 
Code Interpreter: Agent calculates the average of those numbers """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -121,7 +121,7 @@ def test_analyze_code_file(self, **kwargs): 2. Code Interpreter: Agent executes the code and returns the computed result """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py index b127a14bc89d..f67e95c020a8 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py @@ -32,7 +32,7 @@ def test_data_analysis_workflow(self, **kwargs): Test data analysis workflow: upload data, search, save results. """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -163,7 +163,7 @@ def test_empty_vector_store_handling(self, **kwargs): Test how agent handles empty vector store (no files uploaded). """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -242,7 +242,7 @@ def test_python_code_file_search(self, **kwargs): 2. 
Function Tool: Agent saves the code review findings """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -372,7 +372,7 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs): - Context retention across searches and function calls """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py index 3d2d12353dfb..61d572fa0a37 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py @@ -43,7 +43,7 @@ def test_complete_analysis_workflow(self, **kwargs): 3. 
Function Tool: Agent saves the computed results """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py index 1bc2d553caa2..1ae26a32a1a4 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py @@ -39,7 +39,7 @@ def test_file_search_and_function_with_conversation(self, **kwargs): - Verifying conversation state preserves all tool interactions """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py index d2a9add36b0a..02d341051ec8 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py @@ -81,7 +81,7 @@ def test_agent_ai_search_question_answering(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Get AI Search connection and index from environment ai_search_connection_id = kwargs.get("ai_search_project_connection_id") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py index f7b55542a649..5bc67d9a2833 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py @@ -148,7 +148,7 @@ async def test_agent_ai_search_question_answering_async_parallel(self, **kwargs) DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py index 300914195e05..78ec18081aa0 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py @@ -45,7 +45,7 @@ def test_agent_bing_grounding(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Note: This test requires bing_project_connection_id environment variable # to be set with a valid Bing connection ID from the project @@ -145,7 +145,7 @@ def test_agent_bing_grounding_multiple_queries(self, **kwargs): Bing grounding and provide accurate responses with citations. 
""" - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") bing_connection_id = kwargs.get("bing_project_connection_id") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py index f6531d9a1c6b..7b5d1ea27680 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py @@ -41,7 +41,7 @@ def test_agent_code_interpreter_simple_math(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") agent_name = "code-interpreter-simple-agent" with ( @@ -125,7 +125,7 @@ def test_agent_code_interpreter_file_generation(self, **kwargs): DELETE /files/{file_id} openai_client.files.delete() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py index 84f722173fa7..d38d15b2bd0e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py @@ -28,7 +28,7 @@ async def test_agent_code_interpreter_simple_math_async(self, **kwargs): without any file uploads or downloads - just pure code execution. 
""" - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") agent_name = "code-interpreter-simple-agent-async" async with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py index c398c3d6a1c4..e7408afe97fa 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py @@ -45,7 +45,7 @@ def test_agent_file_search(self, **kwargs): DELETE /vector_stores/{id} openai_client.vector_stores.delete() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -203,7 +203,7 @@ def test_agent_file_search_multi_turn_conversation(self, **kwargs): while using File Search to answer follow-up questions. """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py index 420b43ffca2b..e3d96f5a4733 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py @@ -20,7 +20,7 @@ class TestAgentFileSearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_file_search_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, @@ -106,7 +106,7 @@ async def 
test_agent_file_search_multi_turn_conversation_async(self, **kwargs): while using File Search to answer follow-up questions. """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py index 861f3602d450..e97814456771 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py @@ -42,7 +42,7 @@ def test_agent_file_search_stream(self, **kwargs): DELETE /vector_stores/{id} openai_client.vector_stores.delete() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py index 9bcad6d77fc2..fb4e627df2de 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py @@ -19,7 +19,7 @@ class TestAgentFileSearchStreamAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_file_search_stream_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py index 
51216e5c6300..264bf97ebf73 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py @@ -41,7 +41,7 @@ def test_agent_function_tool(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") agent_name = "function-tool-agent" with ( @@ -172,7 +172,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs): - Ability to use previous function results in subsequent queries """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -381,7 +381,7 @@ def test_agent_function_tool_context_dependent_followup(self, **kwargs): remembering parameters from the first query. """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py index 92b806f0935c..f4388b1ccfe9 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py @@ -28,7 +28,7 @@ async def test_agent_function_tool_async(self, **kwargs): 3. 
Receive function results and incorporate them into responses """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") agent_name = "function-tool-agent-async" # Setup @@ -160,7 +160,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, ** - Ability to use previous function results in subsequent queries """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) @@ -370,7 +370,7 @@ async def test_agent_function_tool_context_dependent_followup_async(self, **kwar remembering parameters from the first query. """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") # Setup async with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py index be27c5685b53..c0c515839aaf 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py @@ -41,7 +41,7 @@ def test_agent_image_generation(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") image_model = kwargs.get("image_generation_model_deployment_name") agent_name = "image-gen-agent" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py index 4450f75fd103..a4775afb16b9 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py @@ -21,7 +21,7 @@ class 
TestAgentImageGenerationAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_image_generation_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") image_model = kwargs.get("image_generation_model_deployment_name") agent_name = "image-gen-agent" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py index 243b8a62c71e..5723478f7569 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py @@ -48,7 +48,7 @@ def test_agent_mcp_basic(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -179,7 +179,7 @@ def test_agent_mcp_with_project_connection(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py index dfc0df69188b..36a951e79183 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py @@ -21,7 +21,7 @@ class TestAgentMCPAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_mcp_basic_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = 
kwargs.get("azure_ai_model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py index 2cc6b1023295..3a1bc4e44d0d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py @@ -54,7 +54,7 @@ def test_agent_memory_search(self, **kwargs): DELETE /memory_stores/{memory_store_name} project_client.beta.memory_stores.delete() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") chat_model = kwargs.get("memory_store_chat_model_deployment_name") embedding_model = kwargs.get("memory_store_embedding_model_deployment_name") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py index e1a888787053..dc6b69d22354 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py @@ -29,7 +29,7 @@ class TestAgentMemorySearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_memory_search_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") chat_model = kwargs.get("memory_store_chat_model_deployment_name") embedding_model = kwargs.get("memory_store_embedding_model_deployment_name") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py index 6905e4f27b6d..de8b85f19723 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py +++ 
b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py @@ -51,7 +51,7 @@ def test_agent_openapi(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py index a2da6989196e..1b3e87ef063a 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py @@ -31,7 +31,7 @@ class TestAgentOpenApiAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_openapi_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py index 110241a76862..00fdf016e79d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py @@ -40,7 +40,7 @@ def test_function_tool_with_conversation(self, **kwargs): - Using conversation_id parameter """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -201,7 +201,7 @@ def test_file_search_with_conversation(self, **kwargs): - Conversation context retention """ - model = kwargs.get("model_deployment_name") + 
model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -318,7 +318,7 @@ def test_code_interpreter_with_conversation(self, **kwargs): - Variables/state persistence across turns """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -403,7 +403,7 @@ def test_code_interpreter_with_file_in_conversation(self, **kwargs): - Server-side code execution with file access and chart generation """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") import os with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py index d242c7bb6c17..9a8f616e9d7f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py @@ -38,7 +38,7 @@ def test_agent_web_search(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py index 2a9bc17b5ee9..e11732ca4cac 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py @@ -18,7 +18,7 @@ class TestAgentWebSearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_web_search_async(self, **kwargs): - model = 
kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py index c29bbf42edcc..9e790b1f37c7 100644 --- a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py +++ b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py @@ -137,7 +137,7 @@ def test_datasets_upload_file(self, **kwargs): @recorded_by_proxy def test_datasets_upload_folder(self, **kwargs): - endpoint = kwargs.pop("project_endpoint") + endpoint = kwargs.pop("azure_ai_project_endpoint") print("\n=====> Endpoint:", endpoint) dataset_name = self.test_datasets_params["dataset_name_2"] diff --git a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py index 678bd1bedae2..724b6318b938 100644 --- a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py +++ b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py @@ -138,7 +138,7 @@ async def test_datasets_upload_file(self, **kwargs): @recorded_by_proxy_async async def test_datasets_upload_folder_async(self, **kwargs): - endpoint = kwargs.pop("project_endpoint") + endpoint = kwargs.pop("azure_ai_project_endpoint") print("\n=====> Endpoint:", endpoint) dataset_name = self.test_datasets_params["dataset_name_4"] diff --git a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py index a4dc057d9765..53132a89a396 100644 --- a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py +++ b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py @@ -18,8 +18,8 @@ class TestDeployments(TestBase): def test_deployments(self, **kwargs): model_publisher = "OpenAI" - model_name = kwargs.get("model_deployment_name") - model_deployment_name = 
kwargs.get("model_deployment_name") + model_name = kwargs.get("azure_ai_model_deployment_name") + model_deployment_name = kwargs.get("azure_ai_model_deployment_name") with self.create_client(**kwargs) as project_client: diff --git a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py index dba5495b0038..06f229c1e15b 100644 --- a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py +++ b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py @@ -18,8 +18,8 @@ class TestDeploymentsAsync(TestBase): async def test_deployments_async(self, **kwargs): model_publisher = "OpenAI" - model_name = kwargs.get("model_deployment_name") - model_deployment_name = kwargs.get("model_deployment_name") + model_name = kwargs.get("azure_ai_model_deployment_name") + model_deployment_name = kwargs.get("azure_ai_model_deployment_name") async with self.create_async_client(**kwargs) as project_client: diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py index 51ed46e70849..d8e28452557f 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py @@ -334,11 +334,11 @@ def _test_deploy_and_infer_helper( subscription_id = kwargs.get("azure_subscription_id") resource_group = kwargs.get("azure_resource_group") - project_endpoint = kwargs.get("project_endpoint") + project_endpoint = kwargs.get("azure_ai_project_endpoint") if not all([subscription_id, resource_group, project_endpoint]): pytest.skip( - f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, project_endpoint) - skipping {test_prefix} deploy and infer test" + f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, azure_ai_project_endpoint) - skipping {test_prefix} 
deploy and infer test" ) account_name = self._extract_account_name_from_endpoint(project_endpoint, test_prefix) diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py index 117c06684914..be0cc2de95dc 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py @@ -345,11 +345,11 @@ async def _test_deploy_and_infer_helper_async( subscription_id = kwargs.get("azure_subscription_id") resource_group = kwargs.get("azure_resource_group") - project_endpoint = kwargs.get("project_endpoint") + project_endpoint = kwargs.get("azure_ai_project_endpoint") if not all([subscription_id, resource_group, project_endpoint]): pytest.skip( - f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, project_endpoint) - skipping {test_prefix} deploy and infer test" + f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, azure_ai_project_endpoint) - skipping {test_prefix} deploy and infer test" ) account_name = self._extract_account_name_from_endpoint(project_endpoint, test_prefix) diff --git a/sdk/ai/azure-ai-projects/tests/responses/test_responses.py b/sdk/ai/azure-ai-projects/tests/responses/test_responses.py index 77f354335d4a..5165f37ddb03 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_responses.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_responses.py @@ -57,7 +57,7 @@ def test_responses(self, **kwargs): ------+---------------------------------------------+----------------------------------- POST /openai/responses client.responses.create() """ - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") client = self.create_client(operation_group="agents", **kwargs).get_openai_client() diff --git 
a/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py b/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py index 187bbde4a259..bf7252962dad 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py @@ -45,7 +45,7 @@ class TestResponsesAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.HTTPX) async def test_responses_async(self, **kwargs): - model = kwargs.get("model_deployment_name") + model = kwargs.get("azure_ai_model_deployment_name") client = self.create_async_client(operation_group="agents", **kwargs).get_openai_client() diff --git a/sdk/ai/azure-ai-projects/tests/samples/README.md b/sdk/ai/azure-ai-projects/tests/samples/README.md index 4f2547678cbe..3296cb7c58ea 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/README.md +++ b/sdk/ai/azure-ai-projects/tests/samples/README.md @@ -67,7 +67,7 @@ class TestSamples(AzureRecordedTestCase): executor.execute() executor.validate_print_calls_by_llm( instructions=agent_tools_instructions, - project_endpoint=kwargs["project_endpoint"], + project_endpoint=kwargs["azure_ai_project_endpoint"], ) ``` @@ -106,7 +106,7 @@ class TestSamplesAsync(AzureRecordedTestCase): await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agent_tools_instructions, - project_endpoint=kwargs["project_endpoint"], + project_endpoint=kwargs["azure_ai_project_endpoint"], ) ``` @@ -122,8 +122,8 @@ from devtools_testutils import EnvironmentVariableLoader servicePreparer = functools.partial( EnvironmentVariableLoader, "", - project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - model_deployment_name="gpt-4o", + azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + azure_ai_model_deployment_name="gpt-4o", # add other sanitized vars here ) ``` @@ -154,8 +154,8 @@ If 
you need to remap the values provided by your fixtures to the environment-var ```python env_vars = { - "PROJECT_ENDPOINT": kwargs["TEST_AZURE_AI_PROJECT_ENDPOINT"], - "MODEL_DEPLOYMENT_NAME": kwargs["TEST_AZURE_AI_MODEL_DEPLOYMENT_NAME"], + "AZURE_AI_PROJECT_ENDPOINT": kwargs["TEST_AZURE_AI_PROJECT_ENDPOINT"], + "AZURE_AI_MODEL_DEPLOYMENT_NAME": kwargs["TEST_AZURE_AI_MODEL_DEPLOYMENT_NAME"], } executor = SyncSampleExecutor(self, sample_path, env_vars=env_vars, **kwargs) ``` diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py index 0dac34211592..2bd4594c843d 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py @@ -62,8 +62,8 @@ def test_agent_tools_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -86,8 +86,8 @@ def test_memory_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=memories_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -106,8 +106,8 @@ def test_agents_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=agents_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -128,8 +128,8 
@@ def test_connections_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -148,8 +148,8 @@ def test_files_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -168,8 +168,8 @@ def test_deployments_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -188,8 +188,8 @@ def test_datasets_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -211,6 +211,6 @@ def test_finetuning_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=fine_tuning_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + 
model=kwargs["azure_ai_model_deployment_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py index fc6fecc417d6..8eccef50195f 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py @@ -50,8 +50,8 @@ async def test_agent_tools_samples_async(self, sample_path: str, **kwargs) -> No await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agent_tools_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -75,8 +75,8 @@ async def test_memory_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=memories_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -95,8 +95,8 @@ async def test_agents_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agents_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -117,8 +117,8 @@ async def test_connections_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + 
project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -139,8 +139,8 @@ async def test_files_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -159,8 +159,8 @@ async def test_deployments_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) @pytest.mark.parametrize( @@ -184,6 +184,6 @@ async def test_datasets_samples(self, sample_path: str, **kwargs) -> None: # Proxy server probably not able to parse the captured print content await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py index 9390fdf2d895..bcea91df0eb5 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py @@ -19,8 +19,8 @@ evaluationsPreparer = functools.partial( EnvironmentVariableLoader, "", - 
project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - model_deployment_name="sanitized-model-deployment-name", + azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + azure_ai_model_deployment_name="sanitized-model-deployment-name", azure_ai_agent_name="sanitized-agent-name", ) @@ -184,8 +184,8 @@ def test_evaluation_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) # To run this test with a specific sample, use: @@ -216,8 +216,8 @@ def test_agentic_evaluator_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) # To run this test, use: @@ -247,6 +247,6 @@ def test_generic_agentic_evaluator_sample(self, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["project_endpoint"], - model=kwargs["model_deployment_name"], + project_endpoint=kwargs["azure_ai_project_endpoint"], + model=kwargs["azure_ai_model_deployment_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/test_base.py b/sdk/ai/azure-ai-projects/tests/test_base.py index afcbe80e2317..a0deb29a2cfd 100644 --- a/sdk/ai/azure-ai-projects/tests/test_base.py +++ b/sdk/ai/azure-ai-projects/tests/test_base.py @@ -41,8 +41,8 @@ servicePreparer = functools.partial( EnvironmentVariableLoader, "", - 
project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - model_deployment_name="sanitized-model-deployment-name", + azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + azure_ai_model_deployment_name="sanitized-model-deployment-name", image_generation_model_deployment_name="sanitized-gpt-image", container_app_resource_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.App/containerApps/00000", container_ingress_subdomain_suffix="00000", @@ -77,8 +77,8 @@ fineTuningServicePreparer = functools.partial( EnvironmentVariableLoader, "", - project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - model_deployment_name="sanitized-model-deployment-name", + azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + azure_ai_model_deployment_name="sanitized-model-deployment-name", azure_ai_projects_azure_subscription_id="00000000-0000-0000-0000-000000000000", azure_ai_projects_azure_resource_group="sanitized-resource-group", azure_ai_projects_azure_aoai_account="sanitized-aoai-account", @@ -299,7 +299,7 @@ def open_with_lf( # helper function: create projects client using environment variables def create_client(self, *, operation_group: Optional[str] = None, **kwargs) -> AIProjectClient: # fetch environment variables - endpoint = kwargs.pop("project_endpoint") + endpoint = kwargs.pop("azure_ai_project_endpoint") credential = self.get_credential(AIProjectClient, is_async=False) allow_preview = kwargs.pop("allow_preview", operation_group in {"agents", "tracing"}) @@ -317,7 +317,7 @@ def create_client(self, *, operation_group: Optional[str] = None, **kwargs) -> A # helper function: create async projects client using environment variables def create_async_client(self, *, operation_group: Optional[str] = 
None, **kwargs) -> AsyncAIProjectClient: # fetch environment variables - endpoint = kwargs.pop("project_endpoint") + endpoint = kwargs.pop("azure_ai_project_endpoint") credential = self.get_credential(AsyncAIProjectClient, is_async=True) allow_preview = kwargs.pop("allow_preview", operation_group in {"agents", "tracing"}) From c357ed8617638b886d43983d28577e5fe8103059 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 10 Mar 2026 06:25:31 -0700 Subject: [PATCH 08/16] Better Exception messages when you try to use preview features, and "allow_preview=True" is not specified (#45600) --- sdk/ai/azure-ai-projects/assets.json | 2 +- .../ai/projects/aio/operations/_operations.py | 1 + .../ai/projects/aio/operations/_patch.py | 4 + .../aio/operations/_patch_agents_async.py | 176 ++++++++++++++++ .../_patch_evaluation_rules_async.py | 114 ++++++++++ .../azure/ai/projects/operations/_patch.py | 4 + .../ai/projects/operations/_patch_agents.py | 197 ++++++++++++++++++ .../operations/_patch_evaluation_rules.py | 114 ++++++++++ .../test_agent_create_version_exception.py | 51 +++++ ...st_agent_create_version_exception_async.py | 52 +++++ sdk/ai/azure-ai-projects/tests/test_base.py | 8 +- 11 files changed, 716 insertions(+), 7 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_agents_async.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_evaluation_rules_async.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_agents.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_evaluation_rules.py create mode 100644 sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception.py create mode 100644 sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception_async.py diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json index 
1d1c8afa0323..5d326c8c0af9 100644 --- a/sdk/ai/azure-ai-projects/assets.json +++ b/sdk/ai/azure-ai-projects/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-projects", - "Tag": "python/ai/azure-ai-projects_e907ded382" + "Tag": "python/ai/azure-ai-projects_a7df068ea3" } diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 6f2e077a1e91..3499ba5338e5 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -115,6 +115,7 @@ _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False + class BetaOperations: """ .. warning:: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 3d103d797c20..fd4e68774f3b 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -9,7 +9,9 @@ """ from typing import Any, List +from ._patch_agents_async import AgentsOperations from ._patch_datasets_async import DatasetsOperations +from ._patch_evaluation_rules_async import EvaluationRulesOperations from ._patch_telemetry_async import TelemetryOperations from ._patch_connections_async import ConnectionsOperations from ._patch_memories_async import BetaMemoryStoresOperations @@ -53,7 +55,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: __all__: List[str] = [ + "AgentsOperations", "BetaEvaluationTaxonomiesOperations", + "EvaluationRulesOperations", "BetaEvaluatorsOperations", "BetaInsightsOperations", "BetaMemoryStoresOperations", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_agents_async.py 
b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_agents_async.py new file mode 100644 index 000000000000..0ed177f12ab9 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_agents_async.py @@ -0,0 +1,176 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + +from typing import Union, Optional, Any, IO, overload +from azure.core.exceptions import HttpResponseError +from ._operations import AgentsOperations as GeneratedAgentsOperations, JSON, _Unset +from ... import models as _models +from ...operations._patch_agents import _PREVIEW_FEATURE_REQUIRED_CODE, _PREVIEW_FEATURE_ADDED_ERROR_MESSAGE + + +class AgentsOperations(GeneratedAgentsOperations): + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`agents` attribute. + """ + + @overload + async def create_version( + self, + agent_name: str, + *, + definition: _models.AgentDefinition, + content_type: str = "application/json", + metadata: Optional[dict[str, str]] = None, + description: Optional[str] = None, + **kwargs: Any, + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple + agent definition. Required. 
+ :paramtype definition: ~azure.ai.projects.models.AgentDefinition + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be + useful for storing additional information about the object in a structured + format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings + with a maximum length of 512 characters. Default value is None. + :paramtype metadata: dict[str, str] + :keyword description: A human-readable description of the agent. Default value is None. + :paramtype description: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + @overload + async def create_version( + self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... 
+ + @overload + async def create_version( + self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + async def create_version( + self, + agent_name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + definition: _models.AgentDefinition = _Unset, + metadata: Optional[dict[str, str]] = None, + description: Optional[str] = None, + **kwargs: Any, + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple + agent definition. Required. + :paramtype definition: ~azure.ai.projects.models.AgentDefinition + :keyword metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be + useful for storing additional information about the object in a structured + format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings + with a maximum length of 512 characters. Default value is None. + :paramtype metadata: dict[str, str] + :keyword description: A human-readable description of the agent. Default value is None. + :paramtype description: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + try: + return await super().create_version( + agent_name, + body, + definition=definition, + metadata=metadata, + description=description, + **kwargs, + ) + except HttpResponseError as exc: + if exc.status_code == 403 and not self._config.allow_preview and exc.model is not None: + api_error_response = exc.model + if hasattr(api_error_response, "error") and api_error_response.error is not None: + if api_error_response.error.code == _PREVIEW_FEATURE_REQUIRED_CODE: + new_exc = HttpResponseError( + message=f"{exc.message} {_PREVIEW_FEATURE_ADDED_ERROR_MESSAGE}", + ) + new_exc.status_code = exc.status_code + new_exc.reason = exc.reason + new_exc.response = exc.response + new_exc.model = exc.model + raise new_exc from exc + raise diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_evaluation_rules_async.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_evaluation_rules_async.py new file mode 100644 index 000000000000..08ab156a9bbe --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_evaluation_rules_async.py @@ -0,0 +1,114 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + +from typing import Union, Any, IO, overload +from azure.core.exceptions import HttpResponseError +from ._operations import EvaluationRulesOperations as GeneratedEvaluationRulesOperations, JSON +from ... import models as _models +from ...operations._patch_agents import _PREVIEW_FEATURE_REQUIRED_CODE, _PREVIEW_FEATURE_ADDED_ERROR_MESSAGE + + +class EvaluationRulesOperations(GeneratedEvaluationRulesOperations): + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`evaluation_rules` attribute. + """ + + @overload + async def create_or_update( + self, id: str, evaluation_rule: _models.EvaluationRule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + @overload + async def create_or_update( + self, id: str, evaluation_rule: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. 
+ :type evaluation_rule: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + @overload + async def create_or_update( + self, id: str, evaluation_rule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + async def create_or_update( + self, id: str, evaluation_rule: Union[_models.EvaluationRule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Is one of the following types: + EvaluationRule, JSON, IO[bytes] Required. + :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule or JSON or IO[bytes] + :return: EvaluationRule. 
The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + try: + return await super().create_or_update(id, evaluation_rule, **kwargs) + except HttpResponseError as exc: + if exc.status_code == 403 and not self._config.allow_preview and exc.model is not None: + api_error_response = exc.model + if hasattr(api_error_response, "error") and api_error_response.error is not None: + if api_error_response.error.code == _PREVIEW_FEATURE_REQUIRED_CODE: + new_exc = HttpResponseError( + message=f"{exc.message} {_PREVIEW_FEATURE_ADDED_ERROR_MESSAGE}", + ) + new_exc.status_code = exc.status_code + new_exc.reason = exc.reason + new_exc.response = exc.response + new_exc.model = exc.model + raise new_exc from exc + raise diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 5f66db634e9d..bc78f4d6baf8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -9,7 +9,9 @@ """ from typing import Any, List +from ._patch_agents import AgentsOperations from ._patch_datasets import DatasetsOperations +from ._patch_evaluation_rules import EvaluationRulesOperations from ._patch_telemetry import TelemetryOperations from ._patch_connections import ConnectionsOperations from ._patch_memories import BetaMemoryStoresOperations @@ -53,7 +55,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: __all__: List[str] = [ + "AgentsOperations", "BetaEvaluationTaxonomiesOperations", + "EvaluationRulesOperations", "BetaEvaluatorsOperations", "BetaInsightsOperations", "BetaMemoryStoresOperations", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_agents.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_agents.py new file mode 100644 index 000000000000..6f2f3374d3f9 --- 
/dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_agents.py @@ -0,0 +1,197 @@ +# pylint: disable=line-too-long,useless-suppression,pointless-string-statement +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + +from typing import Union, Optional, Any, IO, overload, Final +from azure.core.exceptions import HttpResponseError +from ._operations import AgentsOperations as GeneratedAgentsOperations, JSON, _Unset +from .. import models as _models + +""" +Example service response payload when the caller is trying to use a feature preview without opt-in flag (service error 403 (Forbidden)): + +"error": { + "code": "preview_feature_required", + "message": "Workflow agents is in preview. This operation requires the following opt-in preview feature(s): WorkflowAgents=V1Preview. Include the 'Foundry-Features: WorkflowAgents=V1Preview' header in your request.", + "param": "Foundry-Features", + "type": "invalid_request_error", + "details": [], + "additionalInfo": { + "request_id": "fdbc95804b7599404973026cd9ec732a" + } + } + +""" +_PREVIEW_FEATURE_REQUIRED_CODE: Final = "preview_feature_required" +_PREVIEW_FEATURE_ADDED_ERROR_MESSAGE: Final = ( + '\n**Python SDK users**: This operation requires you to set "allow_preview=True" ' + "when calling the AIProjectClient constructor. " + "\nNote that preview features are under development and subject to change." +) + + +class AgentsOperations(GeneratedAgentsOperations): + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`agents` attribute. 
+ """ + + @overload + def create_version( + self, + agent_name: str, + *, + definition: _models.AgentDefinition, + content_type: str = "application/json", + metadata: Optional[dict[str, str]] = None, + description: Optional[str] = None, + **kwargs: Any, + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple + agent definition. Required. + :paramtype definition: ~azure.ai.projects.models.AgentDefinition + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be + useful for storing additional information about the object in a structured + format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings + with a maximum length of 512 characters. Default value is None. + :paramtype metadata: dict[str, str] + :keyword description: A human-readable description of the agent. Default value is None. + :paramtype description: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + @overload + def create_version( + self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. 
Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + @overload + def create_version( + self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + def create_version( + self, + agent_name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + definition: _models.AgentDefinition = _Unset, + metadata: Optional[dict[str, str]] = None, + description: Optional[str] = None, + **kwargs: Any, + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. 
+ + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple + agent definition. Required. + :paramtype definition: ~azure.ai.projects.models.AgentDefinition + :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be + useful for storing additional information about the object in a structured + format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings + with a maximum length of 512 characters. Default value is None. + :paramtype metadata: dict[str, str] + :keyword description: A human-readable description of the agent. Default value is None. + :paramtype description: str + :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + try: + return super().create_version( + agent_name, + body, + definition=definition, + metadata=metadata, + description=description, + **kwargs, + ) + except HttpResponseError as exc: + if exc.status_code == 403 and not self._config.allow_preview and exc.model is not None: + api_error_response = exc.model + if hasattr(api_error_response, "error") and api_error_response.error is not None: + if api_error_response.error.code == _PREVIEW_FEATURE_REQUIRED_CODE: + new_exc = HttpResponseError( + message=f"{exc.message} {_PREVIEW_FEATURE_ADDED_ERROR_MESSAGE}", + ) + new_exc.status_code = exc.status_code + new_exc.reason = exc.reason + new_exc.response = exc.response + new_exc.model = exc.model + raise new_exc from exc + raise diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_evaluation_rules.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_evaluation_rules.py new file mode 100644 index 000000000000..5c2ca412a468 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_evaluation_rules.py @@ -0,0 +1,114 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + +from typing import Union, Any, IO, overload +from azure.core.exceptions import HttpResponseError +from ._operations import EvaluationRulesOperations as GeneratedEvaluationRulesOperations, JSON +from ._patch_agents import _PREVIEW_FEATURE_REQUIRED_CODE, _PREVIEW_FEATURE_ADDED_ERROR_MESSAGE +from .. 
import models as _models + + +class EvaluationRulesOperations(GeneratedEvaluationRulesOperations): + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`evaluation_rules` attribute. + """ + + @overload + def create_or_update( + self, id: str, evaluation_rule: _models.EvaluationRule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + @overload + def create_or_update( + self, id: str, evaluation_rule: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... 
+ + @overload + def create_or_update( + self, id: str, evaluation_rule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + ... + + def create_or_update( + self, id: str, evaluation_rule: Union[_models.EvaluationRule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Is one of the following types: + EvaluationRule, JSON, IO[bytes] Required. + :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule or JSON or IO[bytes] + :return: EvaluationRule. 
The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + try: + return super().create_or_update(id, evaluation_rule, **kwargs) + except HttpResponseError as exc: + if exc.status_code == 403 and not self._config.allow_preview and exc.model is not None: + api_error_response = exc.model + if hasattr(api_error_response, "error") and api_error_response.error is not None: + if api_error_response.error.code == _PREVIEW_FEATURE_REQUIRED_CODE: + new_exc = HttpResponseError( + message=f"{exc.message} {_PREVIEW_FEATURE_ADDED_ERROR_MESSAGE}", + ) + new_exc.status_code = exc.status_code + new_exc.reason = exc.reason + new_exc.response = exc.response + new_exc.model = exc.model + raise new_exc from exc + raise diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception.py new file mode 100644 index 000000000000..6cc81e4f5553 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception.py @@ -0,0 +1,51 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +# cSpell:disable + +import functools +import pytest +from test_base import TestBase, servicePreparer +from devtools_testutils import recorded_by_proxy, RecordedTransport +from azure.core.exceptions import HttpResponseError +from azure.ai.projects.models import WorkflowAgentDefinition +from azure.ai.projects.operations._patch_agents import _PREVIEW_FEATURE_ADDED_ERROR_MESSAGE + +# Minimal workflow YAML — the service rejects the request before validating the +# definition, so the content only needs to be a non-empty string. 
+_MINIMAL_WORKFLOW_YAML = """\ +kind: workflow +trigger: + kind: OnConversationStart + id: my_workflow + actions: [] +""" + + +# To run this test: +# pytest tests\agents\test_agent_create_version_exception.py -s +class TestAgentCreateVersionException(TestBase): + + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE) + def test_create_version_raises_exception_when_allow_preview_not_set(self, **kwargs): + """ + Verify that calling agents.create_version() with a WorkflowAgentDefinition when + AIProjectClient was constructed WITHOUT allow_preview=True raises an HttpResponseError + (HTTP 403) whose message contains the SDK-specific hint pointing users to set + allow_preview=True. + """ + # Deliberately create client WITHOUT allow_preview=True + project_client = self.create_client(**kwargs) + + with pytest.raises(HttpResponseError) as exc_info: + project_client.agents.create_version( + agent_name="workflow-agent-preview-test", + definition=WorkflowAgentDefinition(workflow=_MINIMAL_WORKFLOW_YAML), + ) + + raised = exc_info.value + assert raised.status_code == 403 + assert _PREVIEW_FEATURE_ADDED_ERROR_MESSAGE in raised.message diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception_async.py new file mode 100644 index 000000000000..a4ebac76ffa6 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception_async.py @@ -0,0 +1,52 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +# cSpell:disable + +import pytest +from test_base import TestBase, servicePreparer +from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import RecordedTransport +from azure.core.exceptions import HttpResponseError +from azure.ai.projects.models import WorkflowAgentDefinition +from azure.ai.projects.operations._patch_agents import _PREVIEW_FEATURE_ADDED_ERROR_MESSAGE + +# Minimal workflow YAML — the service rejects the request before validating the +# definition, so the content only needs to be a non-empty string. +_MINIMAL_WORKFLOW_YAML = """\ +kind: workflow +trigger: + kind: OnConversationStart + id: my_workflow + actions: [] +""" + + +# To run this test: +# pytest tests\agents\test_agent_create_version_exception_async.py -s +class TestAgentCreateVersionExceptionAsync(TestBase): + + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE) + async def test_create_version_raises_exception_when_allow_preview_not_set_async(self, **kwargs): + """ + Verify that calling agents.create_version() with a WorkflowAgentDefinition when + AsyncAIProjectClient was constructed WITHOUT allow_preview=True raises an HttpResponseError + (HTTP 403) whose message contains the SDK-specific hint pointing users to set + allow_preview=True. 
+ """ + # Deliberately create client WITHOUT allow_preview=True + project_client = self.create_async_client(**kwargs) + + async with project_client: + with pytest.raises(HttpResponseError) as exc_info: + await project_client.agents.create_version( + agent_name="workflow-agent-preview-test", + definition=WorkflowAgentDefinition(workflow=_MINIMAL_WORKFLOW_YAML), + ) + + raised = exc_info.value + assert raised.status_code == 403 + assert _PREVIEW_FEATURE_ADDED_ERROR_MESSAGE in raised.message diff --git a/sdk/ai/azure-ai-projects/tests/test_base.py b/sdk/ai/azure-ai-projects/tests/test_base.py index a0deb29a2cfd..ac072e7f5844 100644 --- a/sdk/ai/azure-ai-projects/tests/test_base.py +++ b/sdk/ai/azure-ai-projects/tests/test_base.py @@ -44,8 +44,6 @@ azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", azure_ai_model_deployment_name="sanitized-model-deployment-name", image_generation_model_deployment_name="sanitized-gpt-image", - container_app_resource_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.App/containerApps/00000", - container_ingress_subdomain_suffix="00000", bing_project_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sanitized-resource-group/providers/Microsoft.CognitiveServices/accounts/sanitized-account/projects/sanitized-project/connections/sanitized-bing-connection", ai_search_project_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sanitized-resource-group/providers/Microsoft.CognitiveServices/accounts/sanitized-account/projects/sanitized-project/connections/sanitized-ai-search-connection", bing_custom_search_project_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sanitized-resource-group/providers/Microsoft.CognitiveServices/accounts/sanitized-account/projects/sanitized-project/connections/sanitized-bing-custom-search-connection", @@ -297,11 
+295,10 @@ def open_with_lf( return patched_open_crlf_to_lf(file, mode, buffering, encoding, errors, newline, closefd, opener) # helper function: create projects client using environment variables - def create_client(self, *, operation_group: Optional[str] = None, **kwargs) -> AIProjectClient: + def create_client(self, *, allow_preview: bool = False, **kwargs) -> AIProjectClient: # fetch environment variables endpoint = kwargs.pop("azure_ai_project_endpoint") credential = self.get_credential(AIProjectClient, is_async=False) - allow_preview = kwargs.pop("allow_preview", operation_group in {"agents", "tracing"}) print(f"Creating AIProjectClient with endpoint: {endpoint}") @@ -315,11 +312,10 @@ def create_client(self, *, operation_group: Optional[str] = None, **kwargs) -> A return client # helper function: create async projects client using environment variables - def create_async_client(self, *, operation_group: Optional[str] = None, **kwargs) -> AsyncAIProjectClient: + def create_async_client(self, *, allow_preview: bool = False, **kwargs) -> AsyncAIProjectClient: # fetch environment variables endpoint = kwargs.pop("azure_ai_project_endpoint") credential = self.get_credential(AsyncAIProjectClient, is_async=True) - allow_preview = kwargs.pop("allow_preview", operation_group in {"agents", "tracing"}) print(f"Creating AsyncAIProjectClient with endpoint: {endpoint}") From 7602ddb547e7095e4db5b1b12c8a59bc61ce455c Mon Sep 17 00:00:00 2001 From: Jayesh Tanna Date: Tue, 10 Mar 2026 20:57:07 +0530 Subject: [PATCH 09/16] marking finetuning pause and resume operations as live extended tests (#45611) * marking finetuning pause and resume operations as live extended tests * updating recording --------- Co-authored-by: Jayesh Tanna --- sdk/ai/azure-ai-projects/assets.json | 2 +- .../azure-ai-projects/tests/finetuning/test_finetuning.py | 8 ++++++++ .../tests/finetuning/test_finetuning_async.py | 8 ++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git 
a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json index 5d326c8c0af9..3d798e3359e9 100644 --- a/sdk/ai/azure-ai-projects/assets.json +++ b/sdk/ai/azure-ai-projects/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-projects", - "Tag": "python/ai/azure-ai-projects_a7df068ea3" + "Tag": "python/ai/azure-ai-projects_f6ff7973d2" } diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py index d8e28452557f..aeb407b0e5f0 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py @@ -653,6 +653,10 @@ def test_finetuning_list_events(self, **kwargs): self._cleanup_test_file(openai_client, train_file.id) self._cleanup_test_file(openai_client, validation_file.id) + @pytest.mark.skipif( + not is_live_and_not_recording() or os.getenv("RUN_EXTENDED_FINE_TUNING_LIVE_TESTS", "false").lower() != "true", + reason="Skipped extended FT live tests. Those only run live, without recordings, when RUN_EXTENDED_FINE_TUNING_LIVE_TESTS=true", + ) @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) def test_finetuning_pause_job(self, **kwargs): @@ -683,6 +687,10 @@ def test_finetuning_pause_job(self, **kwargs): print(f"[test_finetuning_pause_job] Successfully paused and verified job: {running_job_id}") + @pytest.mark.skipif( + not is_live_and_not_recording() or os.getenv("RUN_EXTENDED_FINE_TUNING_LIVE_TESTS", "false").lower() != "true", + reason="Skipped extended FT live tests. 
Those only run live, without recordings, when RUN_EXTENDED_FINE_TUNING_LIVE_TESTS=true", + ) @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) def test_finetuning_resume_job(self, **kwargs): diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py index be0cc2de95dc..b9789e077e92 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py @@ -679,6 +679,10 @@ async def test_finetuning_list_events_async(self, **kwargs): await self._cleanup_test_file_async(openai_client, train_file.id) await self._cleanup_test_file_async(openai_client, validation_file.id) + @pytest.mark.skipif( + not is_live_and_not_recording() or os.getenv("RUN_EXTENDED_FINE_TUNING_LIVE_TESTS", "false").lower() != "true", + reason="Skipped extended FT live tests. Those only run live, without recordings, when RUN_EXTENDED_FINE_TUNING_LIVE_TESTS=true", + ) @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_finetuning_pause_job_async(self, **kwargs): @@ -710,6 +714,10 @@ async def test_finetuning_pause_job_async(self, **kwargs): print(f"[test_finetuning_pause_job] Job status after pause: {paused_job.status}") print(f"[test_finetuning_pause_job] Successfully paused and verified job: {running_job_id}") + @pytest.mark.skipif( + not is_live_and_not_recording() or os.getenv("RUN_EXTENDED_FINE_TUNING_LIVE_TESTS", "false").lower() != "true", + reason="Skipped extended FT live tests. 
Those only run live, without recordings, when RUN_EXTENDED_FINE_TUNING_LIVE_TESTS=true", + ) @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_finetuning_resume_job_async(self, **kwargs): From c2e7e579920942fbd9efedb2ea656cc8d867b749 Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Tue, 10 Mar 2026 15:27:51 -0700 Subject: [PATCH 10/16] Rename env vars (#45599) * rename env vars * rename env var * resolved comments * remove chat completion * resolved comment --- sdk/ai/azure-ai-projects/.env.template | 6 +- sdk/ai/azure-ai-projects/CHANGELOG.md | 4 +- sdk/ai/azure-ai-projects/README.md | 20 ++--- .../samples/agents/sample_agent_basic.py | 8 +- .../agents/sample_agent_basic_async.py | 8 +- .../agents/sample_agent_retrieve_basic.py | 8 +- .../sample_agent_retrieve_basic_async.py | 8 +- .../agents/sample_agent_stream_events.py | 8 +- .../agents/sample_agent_structured_output.py | 8 +- .../sample_agent_structured_output_async.py | 8 +- .../agents/sample_workflow_multi_agent.py | 10 +-- .../sample_workflow_multi_agent_async.py | 10 +-- ..._agent_basic_with_azure_monitor_tracing.py | 8 +- ...sample_agent_basic_with_console_tracing.py | 8 +- ..._with_console_tracing_custom_attributes.py | 8 +- .../agents/tools/sample_agent_ai_search.py | 8 +- .../tools/sample_agent_azure_function.py | 8 +- .../tools/sample_agent_bing_custom_search.py | 8 +- .../tools/sample_agent_bing_grounding.py | 8 +- .../tools/sample_agent_browser_automation.py | 8 +- .../tools/sample_agent_code_interpreter.py | 8 +- .../sample_agent_code_interpreter_async.py | 8 +- ...ample_agent_code_interpreter_with_files.py | 8 +- ...agent_code_interpreter_with_files_async.py | 8 +- .../agents/tools/sample_agent_computer_use.py | 4 +- .../tools/sample_agent_computer_use_async.py | 4 +- .../agents/tools/sample_agent_fabric.py | 8 +- .../agents/tools/sample_agent_file_search.py | 8 +- .../sample_agent_file_search_in_stream.py | 8 +- 
...ample_agent_file_search_in_stream_async.py | 8 +- .../tools/sample_agent_function_tool.py | 8 +- .../tools/sample_agent_function_tool_async.py | 8 +- .../tools/sample_agent_image_generation.py | 8 +- .../sample_agent_image_generation_async.py | 8 +- .../samples/agents/tools/sample_agent_mcp.py | 8 +- .../agents/tools/sample_agent_mcp_async.py | 8 +- ...ample_agent_mcp_with_project_connection.py | 8 +- ...agent_mcp_with_project_connection_async.py | 8 +- .../tools/sample_agent_memory_search.py | 8 +- .../tools/sample_agent_memory_search_async.py | 8 +- .../agents/tools/sample_agent_openapi.py | 8 +- ...e_agent_openapi_with_project_connection.py | 8 +- .../agents/tools/sample_agent_sharepoint.py | 8 +- .../agents/tools/sample_agent_to_agent.py | 8 +- .../agents/tools/sample_agent_web_search.py | 8 +- .../tools/sample_agent_web_search_preview.py | 8 +- ...ple_agent_web_search_with_custom_search.py | 8 +- .../samples/connections/sample_connections.py | 4 +- .../connections/sample_connections_async.py | 4 +- .../samples/datasets/sample_datasets.py | 4 +- .../samples/datasets/sample_datasets_async.py | 4 +- .../datasets/sample_datasets_download.py | 4 +- .../samples/deployments/sample_deployments.py | 8 +- .../deployments/sample_deployments_async.py | 8 +- .../samples/evaluations/README.md | 8 +- .../agentic_evaluators/sample_coherence.py | 8 +- .../agentic_evaluators/sample_fluency.py | 8 +- .../agent_utils.py | 2 +- .../sample_generic_agentic_evaluator.py | 6 +- .../agentic_evaluators/sample_groundedness.py | 8 +- .../sample_intent_resolution.py | 8 +- .../agentic_evaluators/sample_relevance.py | 8 +- .../sample_response_completeness.py | 8 +- .../sample_task_adherence.py | 8 +- .../sample_task_completion.py | 8 +- .../sample_task_navigation_efficiency.py | 4 +- .../sample_tool_call_accuracy.py | 8 +- .../sample_tool_call_success.py | 8 +- .../sample_tool_input_accuracy.py | 8 +- .../sample_tool_output_utilization.py | 8 +- .../sample_tool_selection.py | 8 +- 
.../evaluations/sample_agent_evaluation.py | 12 +-- .../sample_agent_response_evaluation.py | 12 +-- ..._response_evaluation_with_function_tool.py | 8 +- .../sample_continuous_evaluation_rule.py | 12 +-- .../evaluations/sample_eval_catalog.py | 4 +- ...mple_eval_catalog_code_based_evaluators.py | 8 +- ...le_eval_catalog_prompt_based_evaluators.py | 8 +- .../sample_evaluation_cluster_insight.py | 10 +-- .../sample_evaluation_compare_insight.py | 8 +- .../sample_evaluations_ai_assisted.py | 8 +- ...ple_evaluations_builtin_with_dataset_id.py | 8 +- ...le_evaluations_builtin_with_inline_data.py | 8 +- ...valuations_builtin_with_inline_data_oai.py | 8 +- .../sample_evaluations_builtin_with_traces.py | 8 +- .../evaluations/sample_evaluations_graders.py | 8 +- ...aluations_score_model_grader_with_image.py | 8 +- .../evaluations/sample_model_evaluation.py | 8 +- .../evaluations/sample_redteam_evaluations.py | 10 +-- .../sample_scheduled_evaluations.py | 16 ++-- .../samples/files/sample_files.py | 4 +- .../samples/files/sample_files_async.py | 4 +- .../finetuning/sample_finetuning_dpo_job.py | 4 +- .../sample_finetuning_dpo_job_async.py | 4 +- ...le_finetuning_oss_models_supervised_job.py | 4 +- ...etuning_oss_models_supervised_job_async.py | 4 +- .../sample_finetuning_reinforcement_job.py | 4 +- ...mple_finetuning_reinforcement_job_async.py | 4 +- .../sample_finetuning_supervised_job.py | 4 +- .../sample_finetuning_supervised_job_async.py | 4 +- .../samples/indexes/sample_indexes.py | 4 +- .../samples/indexes/sample_indexes_async.py | 4 +- .../mcp_client/sample_mcp_tool_async.py | 6 +- .../memories/sample_memory_advanced.py | 4 +- .../memories/sample_memory_advanced_async.py | 4 +- .../samples/memories/sample_memory_basic.py | 4 +- .../memories/sample_memory_basic_async.py | 4 +- .../samples/memories/sample_memory_crud.py | 4 +- .../memories/sample_memory_crud_async.py | 4 +- .../samples/red_team/sample_red_team.py | 8 +- .../samples/red_team/sample_red_team_async.py | 8 
+- .../responses/sample_responses_basic.py | 10 +-- .../responses/sample_responses_basic_async.py | 10 +-- ...responses_basic_without_aiprojectclient.py | 8 +- ...ses_basic_without_aiprojectclient_async.py | 8 +- .../responses/sample_responses_image_input.py | 8 +- .../sample_responses_stream_events.py | 8 +- .../sample_responses_stream_manager.py | 8 +- .../sample_responses_structured_output.py | 8 +- .../samples/telemetry/sample_telemetry.py | 4 +- .../telemetry/sample_telemetry_async.py | 4 +- .../telemetry/test_ai_agents_instrumentor.py | 8 +- .../test_ai_agents_instrumentor_async.py | 8 +- .../telemetry/test_responses_instrumentor.py | 84 +++++++++---------- .../test_responses_instrumentor_async.py | 72 ++++++++-------- ...sponses_instrumentor_browser_automation.py | 8 +- ...s_instrumentor_browser_automation_async.py | 8 +- ...responses_instrumentor_code_interpreter.py | 8 +- ...ses_instrumentor_code_interpreter_async.py | 8 +- ...test_responses_instrumentor_file_search.py | 8 +- ...esponses_instrumentor_file_search_async.py | 8 +- .../test_responses_instrumentor_mcp.py | 8 +- .../test_responses_instrumentor_mcp_async.py | 8 +- .../test_responses_instrumentor_metrics.py | 2 +- .../test_responses_instrumentor_workflow.py | 8 +- ...t_responses_instrumentor_workflow_async.py | 8 +- .../tests/agents/test_agent_responses_crud.py | 4 +- .../agents/test_agent_responses_crud_async.py | 4 +- .../tests/agents/test_agents_crud.py | 2 +- .../tests/agents/test_agents_crud_async.py | 2 +- ...est_agent_code_interpreter_and_function.py | 4 +- ..._agent_file_search_and_code_interpreter.py | 4 +- .../test_agent_file_search_and_function.py | 8 +- ...t_file_search_code_interpreter_function.py | 2 +- .../test_multitool_with_conversations.py | 2 +- .../agents/tools/test_agent_ai_search.py | 2 +- .../tools/test_agent_ai_search_async.py | 2 +- .../agents/tools/test_agent_bing_grounding.py | 4 +- .../tools/test_agent_code_interpreter.py | 4 +- .../test_agent_code_interpreter_async.py 
| 2 +- .../agents/tools/test_agent_file_search.py | 4 +- .../tools/test_agent_file_search_async.py | 4 +- .../tools/test_agent_file_search_stream.py | 2 +- .../test_agent_file_search_stream_async.py | 2 +- .../agents/tools/test_agent_function_tool.py | 6 +- .../tools/test_agent_function_tool_async.py | 6 +- .../tools/test_agent_image_generation.py | 2 +- .../test_agent_image_generation_async.py | 2 +- .../tests/agents/tools/test_agent_mcp.py | 4 +- .../agents/tools/test_agent_mcp_async.py | 2 +- .../agents/tools/test_agent_memory_search.py | 2 +- .../tools/test_agent_memory_search_async.py | 2 +- .../tests/agents/tools/test_agent_openapi.py | 2 +- .../agents/tools/test_agent_openapi_async.py | 2 +- .../test_agent_tools_with_conversations.py | 8 +- .../agents/tools/test_agent_web_search.py | 2 +- .../tools/test_agent_web_search_async.py | 2 +- .../tests/datasets/test_datasets.py | 2 +- .../tests/datasets/test_datasets_async.py | 2 +- .../tests/deployments/test_deployments.py | 4 +- .../deployments/test_deployments_async.py | 4 +- .../tests/finetuning/test_finetuning.py | 4 +- .../tests/finetuning/test_finetuning_async.py | 4 +- .../tests/responses/test_responses.py | 2 +- .../tests/responses/test_responses_async.py | 2 +- .../azure-ai-projects/tests/samples/README.md | 12 +-- .../tests/samples/test_samples.py | 32 +++---- .../tests/samples/test_samples_async.py | 28 +++---- .../tests/samples/test_samples_evaluations.py | 18 ++-- sdk/ai/azure-ai-projects/tests/test_base.py | 12 +-- 180 files changed, 681 insertions(+), 679 deletions(-) diff --git a/sdk/ai/azure-ai-projects/.env.template b/sdk/ai/azure-ai-projects/.env.template index df66f2a71199..620367d29578 100644 --- a/sdk/ai/azure-ai-projects/.env.template +++ b/sdk/ai/azure-ai-projects/.env.template @@ -20,9 +20,9 @@ AZURE_AI_PROJECTS_CONSOLE_LOGGING= # Project endpoint has the format: # `https://.services.ai.azure.com/api/projects/` -AZURE_AI_PROJECT_ENDPOINT= -AZURE_AI_MODEL_DEPLOYMENT_NAME= 
-AZURE_AI_AGENT_NAME= +FOUNDRY_PROJECT_ENDPOINT= +FOUNDRY_MODEL_NAME= +FOUNDRY_AGENT_NAME= CONVERSATION_ID= CONNECTION_NAME= MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME= diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 34c72e0a6fa5..44dfff20303b 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -16,7 +16,9 @@ ### Sample updates -* Placeholder +* Renamed environment variable `AZURE_AI_PROJECT_ENDPOINT` to `FOUNDRY_PROJECT_ENDPOINT` in all samples. +* Renamed environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` to `FOUNDRY_MODEL_NAME` in all samples. +* Renamed environment variable `AZURE_AI_MODEL_AGENT_NAME` to `FOUNDRY_AGENT_NAME` in all samples. ### Other Changes diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 8c741a297add..15e6b44c62ef 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -54,7 +54,7 @@ To report an issue with the client library, or request additional features, plea * Python 3.9 or later. * An [Azure subscription][azure_sub]. * A [project in Microsoft Foundry](https://learn.microsoft.com/azure/foundry/how-to/create-projects). -* A Foundry project endpoint URL of the form `https://your-ai-services-account-name.services.ai.azure.com/api/projects/your-project-name`. It can be found in your Microsoft Foundry Project home page. Below we will assume the environment variable `AZURE_AI_PROJECT_ENDPOINT` was defined to hold this value. +* A Foundry project endpoint URL of the form `https://your-ai-services-account-name.services.ai.azure.com/api/projects/your-project-name`. It can be found in your Microsoft Foundry Project home page. Below we will assume the environment variable `FOUNDRY_PROJECT_ENDPOINT` was defined to hold this value. * An Entra ID token for authentication. 
Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: * An appropriate role assignment. See [Role-based access control in Microsoft Foundry portal](https://learn.microsoft.com/azure/foundry/concepts/rbac-foundry). Role assignment can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed. @@ -87,7 +87,7 @@ from azure.identity import DefaultAzureCredential with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], credential=credential) as project_client, ): ``` @@ -107,7 +107,7 @@ from azure.identity.aio import DefaultAzureCredential async with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], credential=credential) as project_client, ): ``` @@ -117,20 +117,20 @@ async with ( Your Microsoft Foundry project may have one or more AI models deployed. These could be OpenAI models, Microsoft models, or models from other providers. Use the code below to get an authenticated [OpenAI](https://github.com/openai/openai-python?tab=readme-ov-file#usage) client from the [openai](https://pypi.org/project/openai/) package, and execute an example multi-turn "Responses" calls. -The code below assumes the environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. 
See "Build" menu, under "Models" (First column of the "Deployments" table). +The code below assumes the environment variable `FOUNDRY_MODEL_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). ```python with project_client.get_openai_client() as openai_client: response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) @@ -145,7 +145,7 @@ See the "responses" folder in the [package samples][samples] for additional samp The `.agents` property on the `AIProjectClient` gives you access to all Agent operations. Agents use an extension of the OpenAI Responses protocol, so you will need to get an `OpenAI` client to do Agent operations, as shown in the example below. -The code below assumes environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). +The code below assumes environment variable `FOUNDRY_MODEL_NAME` is defined. It's the deployment name of an AI model in your Foundry Project. See "Build" menu, under "Models" (First column of the "Deployments" table). See the "agents" folder in the [package samples][samples] for an extensive set of samples, including streaming, tool usage and memory store usage. 
@@ -156,7 +156,7 @@ with project_client.get_openai_client() as openai_client: agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) @@ -765,7 +765,7 @@ with ( project_client.get_openai_client() as openai_client, ): agent = project_client.agents.create_version( - agent_name=os.environ["AZURE_AI_AGENT_NAME"], + agent_name=os.environ["FOUNDRY_AGENT_NAME"], definition=PromptAgentDefinition( model=model_deployment_name, instructions="You are a helpful assistant that answers general questions", @@ -1357,7 +1357,7 @@ By default logs redact the values of URL query strings, the values of some HTTP ```python project_client = AIProjectClient( credential=DefaultAzureCredential(), - endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], logging_enable=True ) ``` diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py index 1b57d0bcd29c..e14dbbcb6148 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py index 69404d31ae35..009171acdee9 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main() -> None: @@ -48,7 +48,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions.", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py index 876dc8daebcd..07d039779a94 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py @@ -22,9 +22,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -36,8 +36,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model = os.environ["FOUNDRY_MODEL_NAME"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py index 8baa7034c139..3f2c79647935 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py @@ -22,9 +22,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -37,8 +37,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model = os.environ["FOUNDRY_MODEL_NAME"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py index 5ab97c36b587..990fb833b7e4 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py index dfeb5c961720..b7bd9155561b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py @@ -24,9 +24,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv pydantic Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -51,7 +51,7 @@ class CalendarEvent(BaseModel): participants: list[str] -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -62,7 +62,7 @@ class CalendarEvent(BaseModel): agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], text=PromptAgentDefinitionTextOptions( format=TextResponseFormatJsonSchema(name="CalendarEvent", schema=CalendarEvent.model_json_schema()) ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py index 12bdaf31231a..48b58d57633c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py @@ -24,9 +24,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp pydantic Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] class CalendarEvent(BaseModel): @@ -63,7 +63,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], text=PromptAgentDefinitionTextOptions( format=TextResponseFormatJsonSchema(name="CalendarEvent", schema=CalendarEvent.model_json_schema()) ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py index b8674dc5c146..6f3d21766939 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -46,7 +46,7 @@ teacher_agent = project_client.agents.create_version( agent_name="teacher-agent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="""You are a teacher that create pre-school math question for student and check answer. If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", @@ -58,7 +58,7 @@ student_agent = project_client.agents.create_version( agent_name="student-agent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py index b565a04be074..1d971d0ce3b3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
- 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. """ @@ -36,7 +36,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main(): @@ -50,7 +50,7 @@ async def main(): teacher_agent = await project_client.agents.create_version( agent_name="teacher-agent-async", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="""You are a teacher that create pre-school math question for student and check answer. If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", @@ -61,7 +61,7 @@ async def main(): student_agent = await project_client.agents.create_version( agent_name="student-agent-async", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py index 0f318459182f..a9cd0f9907b2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-monitor-opentelemetry Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -46,7 +46,7 @@ with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], credential=credential) as project_client, ): # [START setup_azure_monitor_tracing] # Enable Azure Monitor tracing @@ -62,7 +62,7 @@ # [END create_span_for_scenario] with project_client.get_openai_client() as openai_client: agent_definition = PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py index e5aa0582e9a6..82d04285785b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv opentelemetry-sdk azure-core-tracing-opentelemetry Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -90,11 +90,11 @@ def display_conversation_item(item: Any) -> None: # [END create_span_for_scenario] with ( DefaultAzureCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AIProjectClient(endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], credential=credential) as project_client, project_client.get_openai_client() as openai_client, ): agent_definition = PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py index 64786e3a00e0..7fe16305c996 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv opentelemetry-sdk azure-core-tracing-opentelemetry Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING - Set to `true` to enable GenAI telemetry tracing, which is disabled by default. 
@@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] # Define the custom span processor that is used for adding the custom @@ -94,7 +94,7 @@ def on_end(self, span: ReadableSpan): ): agent_definition = PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py index 6f082269687a..75ba32b79d8b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) AI_SEARCH_PROJECT_CONNECTION_ID - The AI Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -66,7 +66,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="""You are a helpful assistant. You must always provide citations for answers using the tool and render them as: `\u3010message_idx:search_idx\u2020source\u3011`.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py index 242ee742229b..5e78c32d6325 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0b1" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) STORAGE_INPUT_QUEUE_NAME - The name of the Azure Storage Queue to use for input and output in the Azure Function tool. 4) STORAGE_OUTPUT_QUEUE_NAME - The name of the Azure Storage Queue to use for output in the Azure Function tool. 
@@ -44,7 +44,7 @@ agent = None -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -82,7 +82,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py index 1ebf4c6d213b..b61867cbe8f2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py @@ -27,9 +27,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID - The Bing Custom Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -50,7 +50,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -74,7 +74,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="""You are a helpful agent that can use Bing Custom Search tools to assist users. Use the available Bing Custom Search tools to answer questions and perform tasks.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py index 22fb479f0109..4adb31cec416 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py @@ -35,9 +35,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_PROJECT_CONNECTION_ID - The Bing project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
""" @@ -55,7 +55,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -76,7 +76,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py index aa660411a6cc..2c0b98a96a5c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BROWSER_AUTOMATION_PROJECT_CONNECTION_ID - The browser automation project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="""You are an Agent helping with browser automation tasks. You can answer questions, provide information, and assist with various tasks related to web browsing using the Browser Automation tool available to you.""", diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py index 6007468f8439..e2c64efb44be 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -47,7 +47,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py index edde775f47aa..5371a781e75a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main() -> None: @@ -46,7 +46,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[CodeInterpreterTool()], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py index 3cb511cfcc3b..336fc0c17944 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -57,7 +57,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py index 69ddd048bcc8..88b59c545e49 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main() -> None: @@ -59,7 +59,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[CodeInterpreterTool(container=AutoCodeInterpreterToolParam(file_ids=[file.id]))], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py index 3b1ec849257c..bf448cee686a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py @@ -23,7 +23,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) (Optional) COMPUTER_USE_MODEL_DEPLOYMENT_NAME - The deployment name of the computer-use-preview model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -45,7 +45,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py index c2c85a6c9906..2720916e1759 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py @@ -23,7 +23,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) (Optional) COMPUTER_USE_MODEL_DEPLOYMENT_NAME - The deployment name of the computer-use-preview model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -44,7 +44,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py index 20bcd70f597d..45a2e29ee4ed 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
- 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) FABRIC_PROJECT_CONNECTION_ID - The Fabric project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -59,7 +59,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py index 9e14fdd34461..c785e1877ade 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that can search through product information.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py index 4bac22ce828c..682176ad690c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -63,7 +63,7 @@ agent = project_client.agents.create_version( agent_name="StreamingFileSearchAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that can search through product information and provide detailed responses. Use the file search tool to find relevant information before answering.", tools=[FileSearchTool(vector_store_ids=[vector_store.id])], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py index 7f02ea13a7d6..8a2fdd22bfc6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main() -> None: @@ -63,7 +63,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="StreamingFileSearchAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that can search through product information and provide detailed responses. Use the file search tool to find relevant information before answering.", tools=[FileSearchTool(vector_store_ids=[vector_store.id])], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py index 701201d55d99..33166f2ad06f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. """ @@ -39,7 +39,7 @@ def get_horoscope(sign: str) -> str: return f"{sign}: Next Tuesday you will befriend a baby otter." 
-endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -69,7 +69,7 @@ def get_horoscope(sign: str) -> str: agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that can use function tools.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py index ccbc373e6585..1ecd17dee095 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def get_horoscope(sign: str) -> str: @@ -70,7 +70,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that can use function tools.", tools=[func_tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py index 5b9ea7011b3c..5232a443ca34 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py @@ -28,9 +28,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) + 2) FOUNDRY_MODEL_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) used by the Agent for understanding and responding to prompts. This is NOT the image generation model. 3) IMAGE_GENERATION_MODEL_DEPLOYMENT_NAME - The deployment name of the image generation model (e.g. gpt-image-1) used by the ImageGenTool. 
@@ -53,7 +53,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -73,7 +73,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="Generate images based on user prompts", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py index b7859d6483eb..37fa57e82a16 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py @@ -28,9 +28,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) + 2) FOUNDRY_MODEL_NAME - The deployment name of the chat model (e.g., gpt-4o, gpt-4o-mini, gpt-5o, gpt-5o-mini) used by the Agent for understanding and responding to prompts. This is NOT the image generation model. 3) IMAGE_GENERATION_MODEL_DEPLOYMENT_NAME - The deployment name of the image generation model (e.g. gpt-image-1) used by the ImageGenTool. 
@@ -53,7 +53,7 @@ from azure.ai.projects.models import PromptAgentDefinition, ImageGenTool load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main(): @@ -68,7 +68,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="Generate images based on user prompts", tools=[ImageGenTool(model=image_generation_model, quality="low", size="1024x1024")], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py index c318f9004f1e..9b86f113f234 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -50,7 +50,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful agent that can use MCP tools to assist users. Use the available MCP tools to answer questions and perform tasks.", tools=[mcp_tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py index cc12b02a8fdd..917caae9da33 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main(): @@ -55,7 +55,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful agent that can use MCP tools to assist users. Use the available MCP tools to answer questions and perform tasks.", tools=tools, ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py index be1036a9fde8..d34313d8651d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MCP_PROJECT_CONNECTION_ID - The connection resource ID in Custom keys with key equals to "Authorization" and value to be "Bearer ". 
@@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -55,7 +55,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent7", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="Use MCP tools as needed", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py index 71ec422987a4..974699e42901 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MCP_PROJECT_CONNECTION_ID - The connection resource ID in Custom keys with key equals to "Authorization" and value to be "Bearer ". 
@@ -35,7 +35,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main(): @@ -58,7 +58,7 @@ async def main(): agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="Use MCP tools as needed", tools=tools, ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py index 26f9221631f1..d9576a243c2d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py @@ -23,9 +23,9 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, + 2) FOUNDRY_MODEL_NAME - The deployment name of the Agent's AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model for memory, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -48,7 +48,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -96,7 +96,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py index 6e959c0c3a88..72fa11db61d7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py @@ -23,9 +23,9 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the Agent's AI model, + 2) FOUNDRY_MODEL_NAME - The deployment name of the Agent's AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model for memory, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -51,7 +51,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, @@ -90,7 +90,7 @@ async def main() -> None: agent = await project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", tools=[ MemorySearchPreviewTool( diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py index aba7820c69d6..5e45e9ec41bf 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv jsonref Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -38,7 +38,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -65,7 +65,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py index 886288df69de..b580ee51eb2a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv jsonref Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) OPENAPI_PROJECT_CONNECTION_ID - The OpenAPI project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -42,7 +42,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -75,7 +75,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py index 6a108dfe83a9..6af21a566a6c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) SHAREPOINT_PROJECT_CONNECTION_ID - The SharePoint project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -59,7 +59,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="""You are a helpful agent that can use SharePoint tools to assist users. Use the available SharePoint tools to answer questions and perform tasks.""", tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py index 133b99d589e0..97ae4365f075 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) A2A_PROJECT_CONNECTION_ID - The A2A project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -61,7 +61,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant.", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py index 01729d031854..5eea41125bf8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py @@ -26,9 +26,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -46,7 +46,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -60,7 +60,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that can search the web", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py index bf09f23786ff..4148bb7fd29e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py @@ -26,9 +26,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,7 +42,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -56,7 +56,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent105", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that can search the web", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py index 41d8d6f75aa2..b9da3e2e856b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py @@ -27,9 +27,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 3) BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID - The Bing Custom Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 
@@ -51,7 +51,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -70,7 +70,7 @@ agent = project_client.agents.create_version( agent_name="MyAgent", definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that can search the web and bing", tools=[tool], ), diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index a867acc2106e..1de80cfdd689 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - The name of a connection, as found in the "Connected resources" tab in the Management Center of your Microsoft Foundry project. 
@@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] connection_name = os.environ["CONNECTION_NAME"] with ( diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py index e5814abf27fc..f23c5f932fbe 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - The name of a connection, as found in the "Connected resources" tab in the Management Center of your Microsoft Foundry project. @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] connection_name = os.environ["CONNECTION_NAME"] diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py index eb664060a12c..791b33754479 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. 
The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. @@ -36,7 +36,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version_1 = os.environ.get("DATASET_VERSION_1", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py index 7740507124d9..6ceacc201426 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_async.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 
@@ -45,7 +45,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version_1 = os.environ.get("DATASET_VERSION_1", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py index 2a9aa4fc73a2..a32d6b1e8368 100644 --- a/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py +++ b/sdk/ai/azure-ai-projects/samples/datasets/sample_datasets_download.py @@ -19,7 +19,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) CONNECTION_NAME - Optional. The name of the Azure Storage Account connection to use for uploading files. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 
@@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] connection_name = os.environ.get("CONNECTION_NAME") dataset_name = os.environ.get("DATASET_NAME", "dataset-test") dataset_version = os.environ.get("DATASET_VERSION", "1.0") diff --git a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py index cee409590d7a..203015acdad7 100644 --- a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py +++ b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. + 2) FOUNDRY_MODEL_NAME - Required. The name of the deployment to retrieve. 3) MODEL_PUBLISHER - Optional. The publisher of the model to filter by. 4) MODEL_NAME - Optional. The name of the model to filter by. 
""" @@ -31,8 +31,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ["FOUNDRY_MODEL_NAME"] model_publisher = os.environ.get("MODEL_PUBLISHER", "Microsoft") model_name = os.environ.get("MODEL_NAME", "Phi-4") diff --git a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py index ae2a1151ba17..bc31782e60f1 100644 --- a/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py +++ b/sdk/ai/azure-ai-projects/samples/deployments/sample_deployments_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the deployment to retrieve. + 2) FOUNDRY_MODEL_NAME - Required. The name of the deployment to retrieve. 3) MODEL_PUBLISHER - Optional. The publisher of the model to filter by. 4) MODEL_NAME - Optional. The name of the model to filter by. 
""" @@ -35,8 +35,8 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] - model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] + model_deployment_name = os.environ["FOUNDRY_MODEL_NAME"] model_publisher = os.environ.get("MODEL_PUBLISHER", "Microsoft") model_name = os.environ.get("MODEL_NAME", "Phi-4") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/README.md b/sdk/ai/azure-ai-projects/samples/evaluations/README.md index 628468aa20e4..b87c5f25d0b5 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/README.md +++ b/sdk/ai/azure-ai-projects/samples/evaluations/README.md @@ -11,8 +11,8 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv ``` Set these environment variables: -- `AZURE_AI_PROJECT_ENDPOINT` - Your Azure AI Project endpoint (e.g., `https://.services.ai.azure.com/api/projects/`) -- `AZURE_AI_MODEL_DEPLOYMENT_NAME` - The model deployment name (e.g., `gpt-4o-mini`) +- `FOUNDRY_PROJECT_ENDPOINT` - Your Azure AI Project endpoint (e.g., `https://.services.ai.azure.com/api/projects/`) +- `FOUNDRY_MODEL_NAME` - The model deployment name (e.g., `gpt-4o-mini`) ## Sample Index @@ -94,8 +94,8 @@ Located in the [agentic_evaluators](https://github.com/Azure/azure-sdk-for-pytho ```bash # Set environment variables -export AZURE_AI_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" -export AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini" # Replace with your model +export FOUNDRY_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +export FOUNDRY_MODEL_NAME="gpt-4o-mini" # Replace with your model # Run a sample python sample_evaluations_builtin_with_inline_data.py diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py index 13bbaf3726bc..e3c2f9291a09 100644 --- 
a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py index dc0a7199df78..5ab4e46690ac 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. 
The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py index 1650b0d1cc5d..68ef5ef7bbc3 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py @@ -28,7 +28,7 @@ def run_evaluator( data_mapping: dict[str, str], ) -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ with ( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py index df15b21b6699..b5be2881ae38 100644 --- 
a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -36,7 +36,7 @@ def _get_evaluator_initialization_parameters(evaluator_name: str) -> dict[str, s if evaluator_name == "task_navigation_efficiency": return {"matching_mode": "exact_match"} # Can be "exact_match", "in_order_match", or "any_order_match" else: - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini return {"deployment_name": model_deployment_name} diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py index 654adfb1faec..8aa48ca726a6 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) 
AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py index e434a78db732..4601d4587925 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. 
The name of the model deployment to use for evaluation. """ from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py index 1d3cc28dc6cb..553770301b91 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py index cd6ce53055ea..c37eafae3d41 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py index b2f5381c54e4..30066c7ff66d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py index 92d39f2ddddc..73dce3ae4070 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py index 6c9bc015d529..47dd01c5eeb0 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 
""" @@ -42,7 +42,7 @@ def main() -> None: endpoint = os.environ.get( - "AZURE_AI_PROJECT_ENDPOINT", "" + "FOUNDRY_PROJECT_ENDPOINT", "" ) # Sample : https://.services.ai.azure.com/api/projects/ with ( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py index c2f8980e503e..d3aea2348416 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py index 5404c8bb1183..12e1746db771 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -44,9 +44,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py index 5809882fb1ee..b08119e4f88c 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py index 3437f8fd674f..72ea0e1f260c 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py index e7646aeaabe8..38f49bc9c582 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" from dotenv import load_dotenv @@ -43,9 +43,9 @@ def main() -> None: endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ - model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py index 7cafd28186f2..bc54ebd5c70d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py @@ -19,10 +19,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_AGENT_NAME - The name of the AI agent to use for evaluation. + 3) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,8 +39,8 @@ from openai.types.evals.run_retrieve_response import RunRetrieveResponse load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini # [START agent_evaluation_basic] with ( @@ -49,7 +49,7 @@ project_client.get_openai_client() as openai_client, ): agent = project_client.agents.create_version( - agent_name=os.environ["AZURE_AI_AGENT_NAME"], + agent_name=os.environ["FOUNDRY_AGENT_NAME"], definition=PromptAgentDefinition( model=model_deployment_name, instructions="You are a helpful assistant that answers general questions", diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py index 9c81014daa52..73b32e3c8f26 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py @@ -19,10 +19,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_AGENT_NAME - The name of the AI agent to use for evaluation. + 3) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -48,9 +48,9 @@ ): agent = project_client.agents.create_version( - agent_name=os.environ["AZURE_AI_AGENT_NAME"], + agent_name=os.environ["FOUNDRY_AGENT_NAME"], definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py index 37d7244c3808..45de7b5d2e6f 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -40,8 +40,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ["FOUNDRY_MODEL_NAME"] # Define a function tool for the model to use func_tool = FunctionTool( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py index 7c38cb9a2d5d..8c8ac624e790 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py @@ -28,10 +28,10 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_AGENT_NAME - The name of the AI agent to use for evaluation. - 3) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_AGENT_NAME - The name of the AI agent to use for evaluation. + 3) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -50,7 +50,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -61,9 +61,9 @@ # Create agent agent = project_client.agents.create_version( - agent_name=os.environ["AZURE_AI_AGENT_NAME"], + agent_name=os.environ["FOUNDRY_AGENT_NAME"], definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py index 255ea0c2e660..c1d788d294cb 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 
""" @@ -41,7 +41,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py index c3227497204d..e00037924f8c 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Optional. The name of the model deployment to use for evaluation. 
""" @@ -42,8 +42,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py index b3dfb581b3ce..e17293f21492 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Optional. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Optional. The name of the model deployment to use for evaluation. 
For Custom Prompt Based Evaluators: @@ -75,8 +75,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py index 77093c5d704b..256bacef18b4 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -51,11 +51,11 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME") +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME") if not model_deployment_name: - raise ValueError("AZURE_AI_MODEL_DEPLOYMENT_NAME environment variable is not set") + raise ValueError("FOUNDRY_MODEL_NAME environment variable is not set") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py index c0fe4424bc85..1cf98af0efce 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py @@ -21,9 +21,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -47,7 +47,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -64,7 +64,7 @@ TestingCriterionLabelModel( type="label_model", name="sentiment_analysis", - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input=[ { "role": "developer", diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py index 8ad0899d2a01..c76c08bf6191 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -39,8 +39,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py index c113c8b67e06..29559c76572a 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 3) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 4) DATASET_VERSION - Optional. The version of the Dataset to create and use in this sample. 5) DATA_FOLDER - Optional. The folder path where the data files for upload are located. 
@@ -44,8 +44,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") dataset_name = os.environ.get("DATASET_NAME", "") dataset_version = os.environ.get("DATASET_VERSION", "1") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py index fa4ec52105a3..f18080047b25 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -41,9 +41,9 @@ endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini # Construct the paths to the data folder and data file used in this sample script_dir = os.path.dirname(os.path.abspath(__file__)) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py index 9d968bbbf296..8c8100efbca3 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py @@ -18,9 +18,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -42,10 +42,10 @@ client = OpenAI( api_key=get_bearer_token_provider(DefaultAzureCredential(), "https://ai.azure.com/.default"), - base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["FOUNDRY_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini data_source_config = DataSourceConfigCustom( { diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py index 33aff799eb4f..57f839a022ee 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py @@ -19,12 +19,12 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-monitor-query Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 2) APPINSIGHTS_RESOURCE_ID - Required. The Azure Application Insights resource ID that stores agent traces. It has the form: /subscriptions//resourceGroups//providers/Microsoft.Insights/components/. 3) AGENT_ID - Required. The agent identifier emitted by the Azure tracing integration, used to filter traces. - 4) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The Azure OpenAI deployment name to use with the built-in evaluators. + 4) FOUNDRY_MODEL_NAME - Required. The Azure OpenAI deployment name to use with the built-in evaluators. 5) TRACE_LOOKBACK_HOURS - Optional. 
Number of hours to look back when querying traces and in the evaluation run. Defaults to 1. """ @@ -44,12 +44,12 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] appinsights_resource_id = os.environ[ "APPINSIGHTS_RESOURCE_ID" ] # Sample : /subscriptions//resourceGroups//providers/Microsoft.Insights/components/ agent_id = os.environ["AGENT_ID"] -model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] +model_deployment_name = os.environ["FOUNDRY_MODEL_NAME"] trace_query_hours = int(os.environ.get("TRACE_LOOKBACK_HOURS", "1")) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py index ecb0f7fc86c7..531924ed51bc 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -39,8 +39,8 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py index 0f9377ef9fbf..8273afaa747e 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv pillow Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. The name of the model deployment to use for evaluation. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" import os @@ -47,8 +47,8 @@ file_path = os.path.abspath(__file__) folder_path = os.path.dirname(file_path) -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") def image_to_data_uri(image_path: str) -> str: diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py index 8b0170031dd3..53066a0fce99 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -39,7 +39,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -68,7 +68,7 @@ ) print(f"Evaluation created (id: {eval_object.id}, name: {eval_object.name})") - model = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + model = os.environ["FOUNDRY_MODEL_NAME"] data_source = { "type": "azure_ai_target_completions", "source": { diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py index 6516c637796e..b9ff12eba463 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py @@ -17,9 +17,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. - 2) AZURE_AI_AGENT_NAME - Required. The name of the Agent to perform red teaming evaluation on. + 2) FOUNDRY_AGENT_NAME - Required. The name of the Agent to perform red teaming evaluation on. 
""" import os @@ -45,8 +45,8 @@ def main() -> None: load_dotenv() # - endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") - agent_name = os.environ.get("AZURE_AI_AGENT_NAME", "") + endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT", "") + agent_name = os.environ.get("FOUNDRY_AGENT_NAME", "") with ( DefaultAzureCredential() as credential, @@ -56,7 +56,7 @@ def main() -> None: agent_version = project_client.agents.create_version( agent_name=agent_name, definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py index 29397e006414..742c6e100070 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py @@ -17,14 +17,14 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-mgmt-authorization azure-mgmt-resource Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. 2) AZURE_SUBSCRIPTION_ID - Required for RBAC assignment. The Azure subscription ID where the project is located. 3) AZURE_RESOURCE_GROUP_NAME - Required for RBAC assignment. The resource group name where the project is located. 4) DATASET_NAME - Optional. The name of the Dataset to create and use in this sample. 5) DATASET_VERSION - Optional. The version of the Dataset to create and use in this sample. 6) DATA_FOLDER - Optional. 
The folder path where the data files for upload are located. - 7) AZURE_AI_AGENT_NAME - Required. The name of the Agent to perform red teaming evaluation on. + 7) FOUNDRY_AGENT_NAME - Required. The name of the Agent to perform red teaming evaluation on. """ from datetime import datetime @@ -75,13 +75,13 @@ def assign_rbac(): """ load_dotenv() - endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") + endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT", "") subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", "") resource_group_name = os.environ.get("AZURE_RESOURCE_GROUP_NAME", "") if not endpoint or not subscription_id or not resource_group_name: print( - "Error: AZURE_AI_PROJECT_ENDPOINT, AZURE_SUBSCRIPTION_ID, and AZURE_RESOURCE_GROUP_NAME environment variables are required" + "Error: FOUNDRY_PROJECT_ENDPOINT, AZURE_SUBSCRIPTION_ID, and AZURE_RESOURCE_GROUP_NAME environment variables are required" ) return @@ -214,7 +214,7 @@ def assign_rbac(): def schedule_dataset_evaluation() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] dataset_name = os.environ.get("DATASET_NAME", "") dataset_version = os.environ.get("DATASET_VERSION", "1") # Construct the paths to the data folder and data file used in this sample @@ -327,8 +327,8 @@ def schedule_dataset_evaluation() -> None: def schedule_redteam_evaluation() -> None: load_dotenv() # - endpoint = os.environ.get("AZURE_AI_PROJECT_ENDPOINT", "") - agent_name = os.environ.get("AZURE_AI_AGENT_NAME", "") + endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT", "") + agent_name = os.environ.get("FOUNDRY_AGENT_NAME", "") # Construct the paths to the data folder and data file used in this sample script_dir = os.path.dirname(os.path.abspath(__file__)) @@ -343,7 +343,7 @@ def schedule_redteam_evaluation() -> None: agent_version = project_client.agents.create_version( agent_name=agent_name, definition=PromptAgentDefinition( - 
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions="You are a helpful assistant that answers general questions", ), ) diff --git a/sdk/ai/azure-ai-projects/samples/files/sample_files.py b/sdk/ai/azure-ai-projects/samples/files/sample_files.py index 57f8e3fb3187..51dac448a43e 100644 --- a/sdk/ai/azure-ai-projects/samples/files/sample_files.py +++ b/sdk/ai/azure-ai-projects/samples/files/sample_files.py @@ -16,7 +16,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) FILE_PATH - Optional. Path to the file to upload. Defaults to the `data` folder. """ @@ -29,7 +29,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] script_dir = Path(__file__).parent file_path = os.environ.get("FILE_PATH", os.path.join(script_dir, "data", "test_file.jsonl")) diff --git a/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py b/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py index d6bb1491a678..953fa2021c1b 100644 --- a/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py +++ b/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py @@ -16,7 +16,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) FILE_PATH - Optional. Path to the file to upload. Defaults to the `data` folder. 
""" @@ -30,7 +30,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] script_dir = Path(__file__).parent file_path = os.environ.get("FILE_PATH", os.path.join(script_dir, "data", "test_file.jsonl")) diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py index b8fe46419490..2a31c816741e 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4o` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "dpo_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "dpo_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py index 4e7b5dc91ec0..a21e9ca10387 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4o` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "dpo_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "dpo_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py index c4a44e2d727f..f3eebc756dfc 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "Ministral-3B") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py index 2fcd5ee8e93c..676a4a030d11 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py @@ -18,7 +18,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `Ministral-3B` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "Ministral-3B") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py index 6b1bed171863..9a222cffa076 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `o4-mini` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "o4-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "rft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "rft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py index 701bf384731f..3d759737fbab 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `o4-mini` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "o4-mini") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "rft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "rft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py index 626af3c4bedd..a871254e90dd 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py @@ -21,7 +21,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv azure-mgmt-cognitiveservices Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -43,7 +43,7 @@ load_dotenv() # For fine-tuning -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4.1") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py index cd967034fd92..94e5d2c94603 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py @@ -21,7 +21,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp azure-mgmt-cognitiveservices Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry portal. 2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Default to the `gpt-4.1` model. 3) TRAINING_FILE_PATH - Optional. Path to the training data file. Default to the `data` folder. 
@@ -43,7 +43,7 @@ load_dotenv() # For fine-tuning -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4.1") training_file_path = resolve_data_file_path(__file__, "TRAINING_FILE_PATH", "sft_training_set.jsonl") validation_file_path = resolve_data_file_path(__file__, "VALIDATION_FILE_PATH", "sft_validation_set.jsonl") diff --git a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py index fd865b8d8ca1..04ac684cb8d3 100644 --- a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py +++ b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) INDEX_NAME - Optional. The name of the Index to create and use in this sample. 3) INDEX_VERSION - Optional. The version of the Index to create and use in this sample. 
@@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] index_name = os.environ.get("INDEX_NAME", "index-test") index_version = os.environ.get("INDEX_VERSION", "1.0") ai_search_connection_name = os.environ.get("AI_SEARCH_CONNECTION_NAME", "my-ai-search-connection-name") diff --git a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py index 9ace80a10df8..d83267929313 100644 --- a/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py +++ b/sdk/ai/azure-ai-projects/samples/indexes/sample_indexes_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 2) INDEX_NAME - Optional. The name of the Index to create and use in this sample. 3) INDEX_VERSION - Optional. The version of the Index to create and use in this sample. 
@@ -37,7 +37,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] index_name = os.environ.get("INDEX_NAME", "index-test") index_version = os.environ.get("INDEX_VERSION", "1.0") ai_search_connection_name = os.environ.get("AI_SEARCH_CONNECTION_NAME", "my-ai-search-connection-name") diff --git a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py index d09bb48a72ad..f84e6358d7b0 100644 --- a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py @@ -8,7 +8,7 @@ DESCRIPTION: This sample demonstrates how to directly interact with MCP (Model Context Protocol) tools using the low-level MCP client library to connect to the Foundry Project's MCP tools API: - {AZURE_AI_PROJECT_ENDPOINT}/mcp_tools?api-version=2025-05-15-preview + {FOUNDRY_PROJECT_ENDPOINT}/mcp_tools?api-version=2025-05-15-preview For agent-based MCP tool usage, see samples in samples/agents/tools/sample_agent_mcp.py and related files in that directory. @@ -29,7 +29,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv mcp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) IMAGE_GEN_DEPLOYMENT_NAME - The deployment name of the image generation model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -64,7 +64,7 @@ # Enable httpx logging to see HTTP requests at the same level logging.getLogger("httpx").setLevel(getattr(logging, log_level, logging.CRITICAL)) -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py index 6af7c9c90f3c..dc0e03d66528 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py @@ -24,7 +24,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -46,7 +46,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py index f8b9643f7547..4192fe11ee57 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py @@ -24,7 +24,7 @@ Once you have deployed models, set the deployment name in the variables below. 
Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -50,7 +50,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py index 0d3cf82f4fbe..bf87aee7334b 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic.py @@ -22,7 +22,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
@@ -43,7 +43,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py index 6999a6c154ca..ccd5e77a9b0e 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_basic_async.py @@ -23,7 +23,7 @@ Once you have deployed models, set the deployment name in the variables below. Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -48,7 +48,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py index 94d1abdab835..38341435e49f 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud.py @@ -20,7 +20,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 
2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -37,7 +37,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential, diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py index 730dd3a53534..9dad4f62f2eb 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_crud_async.py @@ -20,7 +20,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. 2) MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. @@ -41,7 +41,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py index 918ae00569e5..eebf4356fee0 100644 --- a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py +++ b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. 
The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. + 2) FOUNDRY_MODEL_NAME - Required. Your model deployment name. 3) MODEL_ENDPOINT - Required. The Azure AI Model endpoint, as found in the overview page of your Microsoft Foundry project. Example: https://.services.ai.azure.com 4) MODEL_API_KEY - Required. The API key for your Azure AI Model. @@ -38,11 +38,11 @@ load_dotenv() endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com model_api_key = os.environ["MODEL_API_KEY"] -model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini +model_deployment_name = os.environ["FOUNDRY_MODEL_NAME"] # Sample : gpt-4o-mini with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py index 261b0f9aaedc..016cc1d532f2 100644 --- a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py +++ b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py @@ -16,9 +16,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - Required. Your model deployment name. + 2) FOUNDRY_MODEL_NAME - Required. Your model deployment name. 3) MODEL_ENDPOINT - Required. 
The Azure AI Model endpoint, as found in the overview page of your Microsoft Foundry project. Example: https://.services.ai.azure.com 4) MODEL_API_KEY - Required. The API key for your Azure AI Model. @@ -43,11 +43,11 @@ async def sample_red_team_async() -> None: """Demonstrates how to perform Red Team operations using the AIProjectClient.""" endpoint = os.environ[ - "AZURE_AI_PROJECT_ENDPOINT" + "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ model_endpoint = os.environ["MODEL_ENDPOINT"] # Sample : https://.services.ai.azure.com model_api_key = os.environ["MODEL_API_KEY"] - model_deployment_name = os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"] # Sample : gpt-4o-mini + model_deployment_name = os.environ["FOUNDRY_MODEL_NAME"] # Sample : gpt-4o-mini async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py index 436237b03c46..5840f19b37d2 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -31,7 +31,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -40,13 +40,13 @@ # [START responses] with project_client.get_openai_client() as openai_client: response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py index b68ab13eddd7..24fddd499dbd 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_async.py @@ -18,9 +18,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -32,7 +32,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async def main() -> None: @@ -43,13 +43,13 @@ async def main() -> None: project_client.get_openai_client() as openai_client, ): response = await openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") response = await openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input="And what is the capital city?", previous_response_id=response.id, ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py index 9c5d8b656bd7..edc4a40f4d81 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient.py @@ -19,9 +19,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,11 +34,11 @@ openai = OpenAI( api_key=get_bearer_token_provider(DefaultAzureCredential(), "https://ai.azure.com/.default"), - base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["FOUNDRY_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) response = openai.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input="How many feet are in a mile?", ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py index 8a2934ff7418..e437bcb6e1db 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_basic_without_aiprojectclient_async.py @@ -19,9 +19,9 @@ pip install openai azure-identity python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,13 +42,13 @@ async def main() -> None: openai = AsyncOpenAI( api_key=get_bearer_token_provider(credential, "https://ai.azure.com/.default"), - base_url=os.environ["AZURE_AI_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", + base_url=os.environ["FOUNDRY_PROJECT_ENDPOINT"].rstrip("/") + "/openai/v1", ) async with openai: response = await openai.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input="How many feet are in a mile?", ) diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py index 542d956b1cf0..b833b23fe052 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_image_input.py @@ -19,9 +19,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] def image_to_base64(image_path: str) -> str: @@ -71,6 +71,6 @@ def image_to_base64(image_path: str) -> str: ], } ], - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py index 86f3b2d1fa47..065046543a61 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -34,7 +34,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -43,7 +43,7 @@ ): with openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input=[ {"role": "user", "content": "Tell me about the capital city of France"}, ], diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py index 110bb0b1c4be..87db2ada4d88 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -33,7 +33,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -42,7 +42,7 @@ ): with openai_client.responses.stream( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], input=[ {"role": "user", "content": "Tell me about the capital city of France"}, ], diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py index d1c75654083a..1119d969df9c 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py @@ -20,9 +20,9 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + 2) FOUNDRY_MODEL_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Microsoft Foundry project. 
""" @@ -42,7 +42,7 @@ class CalendarEvent(BaseModel): participants: list[str] -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, @@ -50,7 +50,7 @@ class CalendarEvent(BaseModel): project_client.get_openai_client() as openai_client, ): response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["FOUNDRY_MODEL_NAME"], instructions=""" Extracts calendar event information from the input messages, and return it in the desired structured output format. diff --git a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py index 61f11436620b..9820a418f773 100644 --- a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py +++ b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. 
""" @@ -28,7 +28,7 @@ load_dotenv() -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py index 7554370401f4..92a76de01f8f 100644 --- a/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py +++ b/sdk/ai/azure-ai-projects/samples/telemetry/sample_telemetry_async.py @@ -17,7 +17,7 @@ pip install "azure-ai-projects>=2.0.0" python-dotenv aiohttp Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your + 1) FOUNDRY_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your Microsoft Foundry project. """ @@ -32,7 +32,7 @@ async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] async with ( DefaultAzureCredential() as credential, diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py index d7aede04fb65..bdd35586e4e0 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py @@ -273,7 +273,7 @@ def _test_agent_creation_with_tracing_content_recording_enabled_impl(self, use_e with self.create_client(operation_group="tracing", **kwargs) as project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") print(f"Using model deployment: {model}") agent_definition = PromptAgentDefinition( @@ -387,7 +387,7 @@ def _test_agent_creation_with_tracing_content_recording_disabled_impl(self, use_ with self.create_client(operation_group="agents", 
**kwargs) as project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") agent_definition = PromptAgentDefinition( model=model, instructions="You are a helpful AI assistant. Always be polite and provide accurate information.", @@ -609,7 +609,7 @@ def _test_agent_with_structured_output_with_instructions_impl( operation_group = "tracing" if content_recording_enabled else "agents" with self.create_client(operation_group=operation_group, **kwargs) as project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") test_schema = { "type": "object", @@ -797,7 +797,7 @@ def _test_agent_with_structured_output_without_instructions_impl( operation_group = "tracing" if content_recording_enabled else "agents" with self.create_client(operation_group=operation_group, **kwargs) as project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") test_schema = { "type": "object", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py index 96184005a350..e1bbef6d0511 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py @@ -75,7 +75,7 @@ async def _test_create_agent_with_tracing_content_recording_enabled_impl(self, u assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") async with project_client: agent_definition = PromptAgentDefinition( @@ -186,7 +186,7 @@ async def _test_agent_creation_with_tracing_content_recording_disabled_impl(self assert True == AIProjectInstrumentor().is_instrumented() 
project_client = self.create_async_client(operation_group="agents", **kwargs) - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") async with project_client: agent_definition = PromptAgentDefinition( @@ -406,7 +406,7 @@ async def _test_agent_with_structured_output_with_instructions_impl( project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") test_schema = { "type": "object", @@ -591,7 +591,7 @@ async def _test_agent_with_structured_output_without_instructions_impl( project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") test_schema = { "type": "object", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py index 7b8471717f2b..7556c4933eaa 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py @@ -78,7 +78,7 @@ def _get_openai_client_and_deployment(self, **kwargs) -> Tuple[OpenAI, str]: openai_client = project_client.get_openai_client() # Get the model deployment name from test parameters - model_deployment_name = kwargs.get("azure_ai_model_deployment_name") + model_deployment_name = kwargs.get("foundry_model_name") return openai_client, model_deployment_name @@ -234,7 +234,7 @@ def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwa with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = 
kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -356,7 +356,7 @@ def _test_sync_non_streaming_without_content_recording_impl(self, use_events, ** with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -479,7 +479,7 @@ def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -656,7 +656,7 @@ def test_sync_conversations_create(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -701,7 +701,7 @@ def test_sync_list_conversation_items_with_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -778,7 +778,7 @@ def 
test_sync_list_conversation_items_without_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -887,7 +887,7 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") with project_client: # Get the OpenAI client from the project client @@ -994,7 +994,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool func_tool = FunctionTool( @@ -1253,7 +1253,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool func_tool = FunctionTool( @@ -1577,7 +1577,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - 
deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool func_tool = FunctionTool( @@ -1813,7 +1813,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl( with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool func_tool = FunctionTool( @@ -2131,7 +2131,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool func_tool = FunctionTool( @@ -2281,7 +2281,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool func_tool = FunctionTool( @@ -2429,7 +2429,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -2526,7 +2526,7 @@ def 
test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -2631,7 +2631,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -2723,7 +2723,7 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -2824,7 +2824,7 @@ def _test_image_only_content_off_binary_off_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -2929,7 +2929,7 @@ def _test_image_only_content_off_binary_on_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = 
kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3033,7 +3033,7 @@ def _test_image_only_content_on_binary_off_non_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3137,7 +3137,7 @@ def _test_image_only_content_on_binary_on_non_streaming_impl(self, use_events, * with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3245,7 +3245,7 @@ def _test_text_and_image_content_off_binary_off_non_streaming_impl(self, use_eve with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3354,7 +3354,7 @@ def _test_text_and_image_content_off_binary_on_non_streaming_impl(self, use_even with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3463,7 +3463,7 @@ def _test_text_and_image_content_on_binary_off_non_streaming_impl(self, use_even with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + 
deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3571,7 +3571,7 @@ def _test_text_and_image_content_on_binary_on_non_streaming_impl(self, use_event with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3683,7 +3683,7 @@ def _test_image_only_content_off_binary_off_streaming_impl(self, use_events, **k with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3796,7 +3796,7 @@ def _test_image_only_content_off_binary_on_streaming_impl(self, use_events, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -3908,7 +3908,7 @@ def _test_image_only_content_on_binary_off_streaming_impl(self, use_events, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -4020,7 +4020,7 @@ def _test_image_only_content_on_binary_on_streaming_impl(self, use_events, **kwa with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = 
kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -4136,7 +4136,7 @@ def _test_text_and_image_content_off_binary_off_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -4253,7 +4253,7 @@ def _test_text_and_image_content_off_binary_on_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -4370,7 +4370,7 @@ def _test_text_and_image_content_on_binary_off_streaming_impl(self, use_events, with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -4486,7 +4486,7 @@ def _test_text_and_image_content_on_binary_on_streaming_impl(self, use_events, * with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -4601,7 +4601,7 @@ def test_responses_stream_method_with_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = 
client.conversations.create() @@ -4654,7 +4654,7 @@ def test_responses_stream_method_without_content_recording(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = client.conversations.create() @@ -4709,7 +4709,7 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool function_tool = FunctionTool( @@ -4823,7 +4823,7 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw with self.create_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool function_tool = FunctionTool( @@ -4960,7 +4960,7 @@ def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): assert True == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") openai_client = project_client.get_openai_client() # Create Teacher Agent @@ -5159,7 +5159,7 @@ def test_workflow_agent_non_streaming_without_content_recording(self, **kwargs): assert False == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = 
kwargs.get("foundry_model_name") openai_client = project_client.get_openai_client() workflow_yaml = """ @@ -5276,7 +5276,7 @@ def test_workflow_agent_streaming_with_content_recording(self, **kwargs): assert True == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") openai_client = project_client.get_openai_client() # Create Teacher Agent @@ -5478,7 +5478,7 @@ def test_workflow_agent_streaming_without_content_recording(self, **kwargs): assert False == AIProjectInstrumentor().is_content_recording_enabled() with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") openai_client = project_client.get_openai_client() workflow_yaml = """ diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py index 8f2b18126533..227b353fa800 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py @@ -59,7 +59,7 @@ async def _test_async_non_streaming_with_content_recording_impl(self, use_events assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: # Get the OpenAI client from the project client @@ -165,7 +165,7 @@ async def _test_async_streaming_with_content_recording_impl(self, use_events, ** assert True == AIProjectInstrumentor().is_instrumented() project_client = 
self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: # Get the OpenAI client from the project client @@ -277,7 +277,7 @@ async def test_async_conversations_create(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: # Get the OpenAI client from the project client @@ -325,7 +325,7 @@ async def test_async_list_conversation_items_with_content_recording(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: # Get the OpenAI client from the project client @@ -417,7 +417,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl( async with project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool func_tool = FunctionTool( @@ -678,7 +678,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl( async with project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool func_tool = FunctionTool( @@ -924,7 +924,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s async with 
self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = await client.conversations.create() @@ -1021,7 +1021,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = await client.conversations.create() @@ -1126,7 +1126,7 @@ async def test_async_multiple_text_inputs_without_content_recording_non_streamin async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = await client.conversations.create() @@ -1225,7 +1225,7 @@ async def test_async_image_only_content_off_binary_off_non_streaming(self, **kwa assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1297,7 +1297,7 @@ async def test_async_image_only_content_off_binary_on_non_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - 
deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1368,7 +1368,7 @@ async def test_async_image_only_content_on_binary_off_non_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1439,7 +1439,7 @@ async def test_async_image_only_content_on_binary_on_non_streaming(self, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1514,7 +1514,7 @@ async def test_async_text_and_image_content_off_binary_off_non_streaming(self, * assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1590,7 +1590,7 @@ async def test_async_text_and_image_content_off_binary_on_non_streaming(self, ** assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1665,7 +1665,7 @@ async def test_async_text_and_image_content_on_binary_off_non_streaming(self, 
** assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1740,7 +1740,7 @@ async def test_async_text_and_image_content_on_binary_on_non_streaming(self, **k assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1819,7 +1819,7 @@ async def test_async_image_only_content_off_binary_off_streaming(self, **kwargs) assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1899,7 +1899,7 @@ async def test_async_image_only_content_off_binary_on_streaming(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -1978,7 +1978,7 @@ async def test_async_image_only_content_on_binary_off_streaming(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: 
client = project_client.get_openai_client() @@ -2057,7 +2057,7 @@ async def test_async_image_only_content_on_binary_on_streaming(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -2140,7 +2140,7 @@ async def test_async_text_and_image_content_off_binary_off_streaming(self, **kwa assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -2223,7 +2223,7 @@ async def test_async_text_and_image_content_off_binary_on_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -2306,7 +2306,7 @@ async def test_async_text_and_image_content_on_binary_off_streaming(self, **kwar assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -2389,7 +2389,7 @@ async def test_async_text_and_image_content_on_binary_on_streaming(self, **kwarg assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - 
deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -2474,7 +2474,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = await client.conversations.create() @@ -2582,7 +2582,7 @@ async def test_async_responses_stream_method_with_content_recording(self, **kwar async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = await client.conversations.create() @@ -2662,7 +2662,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") conversation = await client.conversations.create() @@ -2750,7 +2750,7 @@ async def _test_async_responses_stream_method_with_tools_with_content_recording_ async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool function_tool = FunctionTool( @@ -2983,7 +2983,7 @@ async def _test_async_responses_stream_method_with_tools_without_content_recordi 
async with self.create_async_client(operation_group="tracing", **kwargs) as project_client: client = project_client.get_openai_client() - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") # Define a function tool function_tool = FunctionTool( @@ -3214,7 +3214,7 @@ async def test_async_workflow_agent_non_streaming_with_content_recording(self, * assert True == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: # Create a simple workflow agent @@ -3331,7 +3331,7 @@ async def test_async_workflow_agent_non_streaming_without_content_recording(self assert False == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: workflow_yaml = """ @@ -3454,7 +3454,7 @@ async def test_async_workflow_agent_streaming_with_content_recording(self, **kwa assert True == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: workflow_yaml = """ @@ -3575,7 +3575,7 @@ async def test_async_workflow_agent_streaming_without_content_recording(self, ** assert False == AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: workflow_yaml = """ @@ 
-3708,7 +3708,7 @@ async def _test_async_prompt_agent_with_responses_non_streaming_impl( assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() @@ -3846,7 +3846,7 @@ async def _test_async_prompt_agent_with_responses_streaming_impl( assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") async with project_client: client = project_client.get_openai_client() diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py index f68d4fdae952..592cf8b36b54 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py @@ -59,7 +59,7 @@ def test_sync_browser_automation_non_streaming_with_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -194,7 +194,7 @@ def test_sync_browser_automation_non_streaming_without_content_recording(self, * assert not AIProjectInstrumentor().is_content_recording_enabled() project_client = 
self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -320,7 +320,7 @@ def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs self.setup_telemetry() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -448,7 +448,7 @@ def test_sync_browser_automation_streaming_without_content_recording(self, **kwa self.setup_telemetry() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py index 46918894c2fb..55e3c949804f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py @@ -62,7 +62,7 @@ async def test_async_browser_automation_non_streaming_with_content_recording(sel assert AIProjectInstrumentor().is_instrumented() project_client = 
self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -193,7 +193,7 @@ async def test_async_browser_automation_non_streaming_without_content_recording( assert not AIProjectInstrumentor().is_content_recording_enabled() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -315,7 +315,7 @@ async def test_async_browser_automation_streaming_with_content_recording(self, * self.setup_telemetry() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: @@ -440,7 +440,7 @@ async def test_async_browser_automation_streaming_without_content_recording(self self.setup_telemetry() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") browser_automation_connection_id = kwargs.get("browser_automation_project_connection_id") assert deployment_name is not None if browser_automation_connection_id is None: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py 
b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py index 331d64b9aaa8..c282164e3fcf 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py @@ -66,7 +66,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -254,7 +254,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -445,7 +445,7 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -636,7 +636,7 @@ def test_sync_code_interpreter_streaming_without_content_recording(self, **kwarg assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: diff --git 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py index 7e5512b6fbbe..ac91f4b83565 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py @@ -67,7 +67,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -254,7 +254,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -445,7 +445,7 @@ async def test_async_code_interpreter_streaming_with_content_recording(self, **k assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -636,7 +636,7 @@ async def test_async_code_interpreter_streaming_without_content_recording(self, assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name 
= kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py index ca4f301212f3..17b4ba9285f3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py @@ -53,7 +53,7 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -262,7 +262,7 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -469,7 +469,7 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -674,7 +674,7 @@ def test_sync_file_search_streaming_without_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + 
deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py index fd6c36261449..704d6bbb7117 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py @@ -54,7 +54,7 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -263,7 +263,7 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -470,7 +470,7 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -675,7 +675,7 @@ async def test_async_file_search_streaming_without_content_recording(self, **kwa assert AIProjectInstrumentor().is_instrumented() project_client = 
self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index 4c5c453a3e23..0ac08ad35e2a 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -59,7 +59,7 @@ def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, * assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -389,7 +389,7 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -706,7 +706,7 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -982,7 +982,7 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** assert AIProjectInstrumentor().is_instrumented() project_client = 
self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py index d9e82e2951e8..e6401924773a 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -60,7 +60,7 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -389,7 +389,7 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -708,7 +708,7 @@ async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -987,7 +987,7 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve assert 
AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py index a198327679c3..57f17169897b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py @@ -41,7 +41,7 @@ def _get_openai_client_and_deployment(self, **kwargs) -> Tuple[OpenAI, str]: openai_client = project_client.get_openai_client() # Get the model deployment name from test parameters - model_deployment_name = kwargs.get("azure_ai_model_deployment_name") + model_deployment_name = kwargs.get("foundry_model_name") return openai_client, model_deployment_name diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py index bec6cfa9f2be..7e390d3e3a3a 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py @@ -206,7 +206,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -371,7 +371,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, 
**kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -538,7 +538,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: @@ -706,7 +706,7 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index e366e1ec3ef5..75808d2f8a2d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -205,7 +205,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -366,7 +366,7 @@ async def 
test_async_workflow_non_streaming_without_content_recording(self, **kw assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -531,7 +531,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: @@ -697,7 +697,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs assert AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("azure_ai_model_deployment_name") + deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None async with project_client: diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py index 1cc36a6b0455..44c8c315389e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py @@ -48,7 +48,7 @@ def test_agent_responses_crud(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -158,7 +158,7 @@ def test_agent_responses_crud(self, **kwargs): @servicePreparer() 
@recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) def test_agent_responses_with_structured_output(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py index b710851c366f..27151090e10a 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py @@ -23,7 +23,7 @@ class TestAgentResponsesCrudAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_responses_crud_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) @@ -129,7 +129,7 @@ async def test_agent_responses_crud_async(self, **kwargs): @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_responses_with_structured_output_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py index 10414b7a59d1..2e6451effb59 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py @@ -39,7 +39,7 @@ def test_agents_crud(self, **kwargs): GET /agents/{agent_name}/versions/{agent_version} project_client.agents.get_version() """ print("\n") - model = 
kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") project_client = self.create_client(operation_group="agents", **kwargs) first_agent_name = "MyAgent1" second_agent_name = "MyAgent2" diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py index e9776b7e6257..6eb878e729b8 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py @@ -24,7 +24,7 @@ async def test_agents_crud_async(self, **kwargs): It then gets, lists, and deletes them, validating at each step. It uses different ways of creating agents: strongly typed, dictionary, and IO[bytes]. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") project_client = self.create_async_client(operation_group="agents", **kwargs) first_agent_name = "MyAgent1" second_agent_name = "MyAgent2" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py index 3953bf1c76d2..4ef0e6b845f6 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py @@ -40,7 +40,7 @@ def test_calculate_and_save(self, **kwargs): 2. Function Tool: Saves the computed result """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -100,7 +100,7 @@ def test_generate_data_and_report(self, **kwargs): 2. 
Function Tool: Creates a report with the computed statistics """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py index 16f2c2c1ba41..f4996551357b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py @@ -39,7 +39,7 @@ def test_find_and_analyze_data(self, **kwargs): 2. Code Interpreter: Agent calculates the average of those numbers """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -121,7 +121,7 @@ def test_analyze_code_file(self, **kwargs): 2. Code Interpreter: Agent executes the code and returns the computed result """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py index f67e95c020a8..9adb1329774a 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py @@ -32,7 +32,7 @@ def test_data_analysis_workflow(self, **kwargs): Test data analysis workflow: upload data, search, save results. 
""" - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -163,7 +163,7 @@ def test_empty_vector_store_handling(self, **kwargs): Test how agent handles empty vector store (no files uploaded). """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -242,7 +242,7 @@ def test_python_code_file_search(self, **kwargs): 2. Function Tool: Agent saves the code review findings """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) @@ -372,7 +372,7 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs): - Context retention across searches and function calls """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py index 61d572fa0a37..648a1ed65519 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py @@ -43,7 +43,7 @@ def test_complete_analysis_workflow(self, **kwargs): 3. 
Function Tool: Agent saves the computed results """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py index 1ae26a32a1a4..f4a20b8f21ad 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py @@ -39,7 +39,7 @@ def test_file_search_and_function_with_conversation(self, **kwargs): - Verifying conversation state preserves all tool interactions """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py index 02d341051ec8..e16a8e0e5722 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py @@ -81,7 +81,7 @@ def test_agent_ai_search_question_answering(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Get AI Search connection and index from environment ai_search_connection_id = kwargs.get("ai_search_project_connection_id") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py index 5bc67d9a2833..0ed41ca18661 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py @@ -148,7 +148,7 @@ async def test_agent_ai_search_question_answering_async_parallel(self, **kwargs) DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py index 78ec18081aa0..190183663823 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py @@ -45,7 +45,7 @@ def test_agent_bing_grounding(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Note: This test requires bing_project_connection_id environment variable # to be set with a valid Bing connection ID from the project @@ -145,7 +145,7 @@ def test_agent_bing_grounding_multiple_queries(self, **kwargs): Bing grounding and provide accurate responses with citations. 
""" - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") bing_connection_id = kwargs.get("bing_project_connection_id") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py index 7b5d1ea27680..1d49bb0bbb6b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py @@ -41,7 +41,7 @@ def test_agent_code_interpreter_simple_math(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") agent_name = "code-interpreter-simple-agent" with ( @@ -125,7 +125,7 @@ def test_agent_code_interpreter_file_generation(self, **kwargs): DELETE /files/{file_id} openai_client.files.delete() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py index d38d15b2bd0e..50a4b778cb07 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py @@ -28,7 +28,7 @@ async def test_agent_code_interpreter_simple_math_async(self, **kwargs): without any file uploads or downloads - just pure code execution. 
""" - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") agent_name = "code-interpreter-simple-agent-async" async with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py index e7408afe97fa..9c733529ac42 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py @@ -45,7 +45,7 @@ def test_agent_file_search(self, **kwargs): DELETE /vector_stores/{id} openai_client.vector_stores.delete() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -203,7 +203,7 @@ def test_agent_file_search_multi_turn_conversation(self, **kwargs): while using File Search to answer follow-up questions. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py index e3d96f5a4733..604fbd5323fe 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py @@ -20,7 +20,7 @@ class TestAgentFileSearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_file_search_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, @@ -106,7 +106,7 @@ async def test_agent_file_search_multi_turn_conversation_async(self, 
**kwargs): while using File Search to answer follow-up questions. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py index e97814456771..72079587549e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py @@ -42,7 +42,7 @@ def test_agent_file_search_stream(self, **kwargs): DELETE /vector_stores/{id} openai_client.vector_stores.delete() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py index fb4e627df2de..42e1348a3521 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py @@ -19,7 +19,7 @@ class TestAgentFileSearchStreamAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_file_search_stream_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py index 264bf97ebf73..05ebd8ddcad0 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py @@ -41,7 +41,7 @@ def test_agent_function_tool(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") agent_name = "function-tool-agent" with ( @@ -172,7 +172,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs): - Ability to use previous function results in subsequent queries """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -381,7 +381,7 @@ def test_agent_function_tool_context_dependent_followup(self, **kwargs): remembering parameters from the first query. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py index f4388b1ccfe9..2344c5a9d498 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py @@ -28,7 +28,7 @@ async def test_agent_function_tool_async(self, **kwargs): 3. 
Receive function results and incorporate them into responses """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") agent_name = "function-tool-agent-async" # Setup @@ -160,7 +160,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, ** - Ability to use previous function results in subsequent queries """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup project_client = self.create_async_client(operation_group="agents", **kwargs) @@ -370,7 +370,7 @@ async def test_agent_function_tool_context_dependent_followup_async(self, **kwar remembering parameters from the first query. """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") # Setup async with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py index c0c515839aaf..7dea648d735d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py @@ -41,7 +41,7 @@ def test_agent_image_generation(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") image_model = kwargs.get("image_generation_model_deployment_name") agent_name = "image-gen-agent" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py index a4775afb16b9..68b91adb44d4 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py @@ -21,7 +21,7 @@ class 
TestAgentImageGenerationAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_image_generation_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") image_model = kwargs.get("image_generation_model_deployment_name") agent_name = "image-gen-agent" diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py index 5723478f7569..109e1b93f775 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py @@ -48,7 +48,7 @@ def test_agent_mcp_basic(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -179,7 +179,7 @@ def test_agent_mcp_with_project_connection(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py index 36a951e79183..b9e1dd43c7e1 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py @@ -21,7 +21,7 @@ class TestAgentMCPAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_mcp_basic_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = 
kwargs.get("foundry_model_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py index 3a1bc4e44d0d..56edc1879033 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py @@ -54,7 +54,7 @@ def test_agent_memory_search(self, **kwargs): DELETE /memory_stores/{memory_store_name} project_client.beta.memory_stores.delete() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") chat_model = kwargs.get("memory_store_chat_model_deployment_name") embedding_model = kwargs.get("memory_store_embedding_model_deployment_name") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py index dc6b69d22354..f830aac14fca 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py @@ -29,7 +29,7 @@ class TestAgentMemorySearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_memory_search_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") chat_model = kwargs.get("memory_store_chat_model_deployment_name") embedding_model = kwargs.get("memory_store_embedding_model_deployment_name") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py index de8b85f19723..0bb1aa3cdf33 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py +++ 
b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py @@ -51,7 +51,7 @@ def test_agent_openapi(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py index 1b3e87ef063a..dc363934a816 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py @@ -31,7 +31,7 @@ class TestAgentOpenApiAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_openapi_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py index 00fdf016e79d..1f4e5e78ef65 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py @@ -40,7 +40,7 @@ def test_function_tool_with_conversation(self, **kwargs): - Using conversation_id parameter """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -201,7 +201,7 @@ def test_file_search_with_conversation(self, **kwargs): - Conversation context retention """ - model = kwargs.get("azure_ai_model_deployment_name") + 
model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -318,7 +318,7 @@ def test_code_interpreter_with_conversation(self, **kwargs): - Variables/state persistence across turns """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, @@ -403,7 +403,7 @@ def test_code_interpreter_with_file_in_conversation(self, **kwargs): - Server-side code execution with file access and chart generation """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") import os with ( diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py index 9a8f616e9d7f..084785bc53f3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py @@ -38,7 +38,7 @@ def test_agent_web_search(self, **kwargs): DELETE /agents/{agent_name}/versions/{agent_version} project_client.agents.delete_version() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") with ( self.create_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py index e11732ca4cac..7aac8aef6977 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py @@ -18,7 +18,7 @@ class TestAgentWebSearchAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) async def test_agent_web_search_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") 
+ model = kwargs.get("foundry_model_name") async with ( self.create_async_client(operation_group="agents", **kwargs) as project_client, diff --git a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py index 9e790b1f37c7..634076e78aa6 100644 --- a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py +++ b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py @@ -137,7 +137,7 @@ def test_datasets_upload_file(self, **kwargs): @recorded_by_proxy def test_datasets_upload_folder(self, **kwargs): - endpoint = kwargs.pop("azure_ai_project_endpoint") + endpoint = kwargs.pop("foundry_project_endpoint") print("\n=====> Endpoint:", endpoint) dataset_name = self.test_datasets_params["dataset_name_2"] diff --git a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py index 724b6318b938..3e53e4b3d9ed 100644 --- a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py +++ b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py @@ -138,7 +138,7 @@ async def test_datasets_upload_file(self, **kwargs): @recorded_by_proxy_async async def test_datasets_upload_folder_async(self, **kwargs): - endpoint = kwargs.pop("azure_ai_project_endpoint") + endpoint = kwargs.pop("foundry_project_endpoint") print("\n=====> Endpoint:", endpoint) dataset_name = self.test_datasets_params["dataset_name_4"] diff --git a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py index 53132a89a396..c2345115eb92 100644 --- a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py +++ b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py @@ -18,8 +18,8 @@ class TestDeployments(TestBase): def test_deployments(self, **kwargs): model_publisher = "OpenAI" - model_name = kwargs.get("azure_ai_model_deployment_name") - model_deployment_name = 
kwargs.get("azure_ai_model_deployment_name") + model_name = kwargs.get("foundry_model_name") + model_deployment_name = kwargs.get("foundry_model_name") with self.create_client(**kwargs) as project_client: diff --git a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py index 06f229c1e15b..b278909621e4 100644 --- a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py +++ b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py @@ -18,8 +18,8 @@ class TestDeploymentsAsync(TestBase): async def test_deployments_async(self, **kwargs): model_publisher = "OpenAI" - model_name = kwargs.get("azure_ai_model_deployment_name") - model_deployment_name = kwargs.get("azure_ai_model_deployment_name") + model_name = kwargs.get("foundry_model_name") + model_deployment_name = kwargs.get("foundry_model_name") async with self.create_async_client(**kwargs) as project_client: diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py index aeb407b0e5f0..cdf1b9f4ceda 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py @@ -334,11 +334,11 @@ def _test_deploy_and_infer_helper( subscription_id = kwargs.get("azure_subscription_id") resource_group = kwargs.get("azure_resource_group") - project_endpoint = kwargs.get("azure_ai_project_endpoint") + project_endpoint = kwargs.get("foundry_project_endpoint") if not all([subscription_id, resource_group, project_endpoint]): pytest.skip( - f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, azure_ai_project_endpoint) - skipping {test_prefix} deploy and infer test" + f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, foundry_project_endpoint) - skipping {test_prefix} deploy 
and infer test" ) account_name = self._extract_account_name_from_endpoint(project_endpoint, test_prefix) diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py index b9789e077e92..bd4e29481dbe 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py @@ -345,11 +345,11 @@ async def _test_deploy_and_infer_helper_async( subscription_id = kwargs.get("azure_subscription_id") resource_group = kwargs.get("azure_resource_group") - project_endpoint = kwargs.get("azure_ai_project_endpoint") + project_endpoint = kwargs.get("foundry_project_endpoint") if not all([subscription_id, resource_group, project_endpoint]): pytest.skip( - f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, azure_ai_project_endpoint) - skipping {test_prefix} deploy and infer test" + f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, foundry_project_endpoint) - skipping {test_prefix} deploy and infer test" ) account_name = self._extract_account_name_from_endpoint(project_endpoint, test_prefix) diff --git a/sdk/ai/azure-ai-projects/tests/responses/test_responses.py b/sdk/ai/azure-ai-projects/tests/responses/test_responses.py index 5165f37ddb03..9ef5758e06f3 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_responses.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_responses.py @@ -57,7 +57,7 @@ def test_responses(self, **kwargs): ------+---------------------------------------------+----------------------------------- POST /openai/responses client.responses.create() """ - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") client = self.create_client(operation_group="agents", **kwargs).get_openai_client() diff --git 
a/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py b/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py index bf7252962dad..67eb01609039 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py @@ -45,7 +45,7 @@ class TestResponsesAsync(TestBase): @recorded_by_proxy_async(RecordedTransport.HTTPX) async def test_responses_async(self, **kwargs): - model = kwargs.get("azure_ai_model_deployment_name") + model = kwargs.get("foundry_model_name") client = self.create_async_client(operation_group="agents", **kwargs).get_openai_client() diff --git a/sdk/ai/azure-ai-projects/tests/samples/README.md b/sdk/ai/azure-ai-projects/tests/samples/README.md index 3296cb7c58ea..981ec1155225 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/README.md +++ b/sdk/ai/azure-ai-projects/tests/samples/README.md @@ -67,7 +67,7 @@ class TestSamples(AzureRecordedTestCase): executor.execute() executor.validate_print_calls_by_llm( instructions=agent_tools_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], + project_endpoint=kwargs["foundry_project_endpoint"], ) ``` @@ -106,7 +106,7 @@ class TestSamplesAsync(AzureRecordedTestCase): await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agent_tools_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], + project_endpoint=kwargs["foundry_project_endpoint"], ) ``` @@ -122,8 +122,8 @@ from devtools_testutils import EnvironmentVariableLoader servicePreparer = functools.partial( EnvironmentVariableLoader, "", - azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - azure_ai_model_deployment_name="gpt-4o", + foundry_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + foundry_model_name="gpt-4o", # add other sanitized vars here ) ``` @@ 
-154,8 +154,8 @@ If you need to remap the values provided by your fixtures to the environment-var ```python env_vars = { - "AZURE_AI_PROJECT_ENDPOINT": kwargs["TEST_AZURE_AI_PROJECT_ENDPOINT"], - "AZURE_AI_MODEL_DEPLOYMENT_NAME": kwargs["TEST_AZURE_AI_MODEL_DEPLOYMENT_NAME"], + "FOUNDRY_PROJECT_ENDPOINT": kwargs["TEST_FOUNDRY_PROJECT_ENDPOINT"], + "FOUNDRY_MODEL_NAME": kwargs["TEST_FOUNDRY_MODEL_NAME"], } executor = SyncSampleExecutor(self, sample_path, env_vars=env_vars, **kwargs) ``` diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py index 2bd4594c843d..494a47c1a3a2 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py @@ -62,8 +62,8 @@ def test_agent_tools_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -86,8 +86,8 @@ def test_memory_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=memories_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -106,8 +106,8 @@ def test_agents_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=agents_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) 
@pytest.mark.parametrize( @@ -128,8 +128,8 @@ def test_connections_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -148,8 +148,8 @@ def test_files_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -168,8 +168,8 @@ def test_deployments_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -188,8 +188,8 @@ def test_datasets_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -211,6 +211,6 @@ def test_finetuning_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=fine_tuning_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - 
model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py index 8eccef50195f..ab25f7e1b48a 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_async.py @@ -50,8 +50,8 @@ async def test_agent_tools_samples_async(self, sample_path: str, **kwargs) -> No await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agent_tools_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -75,8 +75,8 @@ async def test_memory_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=memories_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -95,8 +95,8 @@ async def test_agents_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=agents_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -117,8 +117,8 @@ async def test_connections_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - 
project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -139,8 +139,8 @@ async def test_files_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -159,8 +159,8 @@ async def test_deployments_samples(self, sample_path: str, **kwargs) -> None: await executor.execute_async() await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) @pytest.mark.parametrize( @@ -184,6 +184,6 @@ async def test_datasets_samples(self, sample_path: str, **kwargs) -> None: # Proxy server probably not able to parse the captured print content await executor.validate_print_calls_by_llm_async( instructions=resource_management_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py index bcea91df0eb5..9f166609aa0d 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py @@ -19,9 +19,9 @@ evaluationsPreparer = functools.partial( 
EnvironmentVariableLoader, "", - azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - azure_ai_model_deployment_name="sanitized-model-deployment-name", - azure_ai_agent_name="sanitized-agent-name", + foundry_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + foundry_model_name="sanitized-model-deployment-name", + foundry_agent_name="sanitized-agent-name", ) evaluations_instructions = """ @@ -184,8 +184,8 @@ def test_evaluation_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) # To run this test with a specific sample, use: @@ -216,8 +216,8 @@ def test_agentic_evaluator_samples(self, sample_path: str, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) # To run this test, use: @@ -247,6 +247,6 @@ def test_generic_agentic_evaluator_sample(self, **kwargs) -> None: executor.execute() executor.validate_print_calls_by_llm( instructions=evaluations_instructions, - project_endpoint=kwargs["azure_ai_project_endpoint"], - model=kwargs["azure_ai_model_deployment_name"], + project_endpoint=kwargs["foundry_project_endpoint"], + model=kwargs["foundry_model_name"], ) diff --git a/sdk/ai/azure-ai-projects/tests/test_base.py b/sdk/ai/azure-ai-projects/tests/test_base.py index ac072e7f5844..74fa089c10c2 100644 --- a/sdk/ai/azure-ai-projects/tests/test_base.py +++ 
b/sdk/ai/azure-ai-projects/tests/test_base.py @@ -41,8 +41,8 @@ servicePreparer = functools.partial( EnvironmentVariableLoader, "", - azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - azure_ai_model_deployment_name="sanitized-model-deployment-name", + foundry_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + foundry_model_name="sanitized-model-deployment-name", image_generation_model_deployment_name="sanitized-gpt-image", bing_project_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sanitized-resource-group/providers/Microsoft.CognitiveServices/accounts/sanitized-account/projects/sanitized-project/connections/sanitized-bing-connection", ai_search_project_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sanitized-resource-group/providers/Microsoft.CognitiveServices/accounts/sanitized-account/projects/sanitized-project/connections/sanitized-ai-search-connection", @@ -75,8 +75,8 @@ fineTuningServicePreparer = functools.partial( EnvironmentVariableLoader, "", - azure_ai_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", - azure_ai_model_deployment_name="sanitized-model-deployment-name", + foundry_project_endpoint="https://sanitized-account-name.services.ai.azure.com/api/projects/sanitized-project-name", + foundry_model_name="sanitized-model-deployment-name", azure_ai_projects_azure_subscription_id="00000000-0000-0000-0000-000000000000", azure_ai_projects_azure_resource_group="sanitized-resource-group", azure_ai_projects_azure_aoai_account="sanitized-aoai-account", @@ -297,7 +297,7 @@ def open_with_lf( # helper function: create projects client using environment variables def create_client(self, *, allow_preview: bool = False, **kwargs) -> AIProjectClient: # fetch environment variables - endpoint = 
kwargs.pop("azure_ai_project_endpoint") + endpoint = kwargs.pop("foundry_project_endpoint") credential = self.get_credential(AIProjectClient, is_async=False) print(f"Creating AIProjectClient with endpoint: {endpoint}") @@ -314,7 +314,7 @@ def create_client(self, *, allow_preview: bool = False, **kwargs) -> AIProjectCl # helper function: create async projects client using environment variables def create_async_client(self, *, allow_preview: bool = False, **kwargs) -> AsyncAIProjectClient: # fetch environment variables - endpoint = kwargs.pop("azure_ai_project_endpoint") + endpoint = kwargs.pop("foundry_project_endpoint") credential = self.get_credential(AsyncAIProjectClient, is_async=True) print(f"Creating AsyncAIProjectClient with endpoint: {endpoint}") From c497c29c52f99b58939b56c987e419a98622c3c7 Mon Sep 17 00:00:00 2001 From: aprilk-ms <55356546+aprilk-ms@users.noreply.github.com> Date: Wed, 11 Mar 2026 01:04:18 -0700 Subject: [PATCH 11/16] Add CSV and synthetic data generation evaluation samples (#45603) * Add CSV and synthetic data generation evaluation samples Add two new evaluation samples under sdk/ai/azure-ai-projects/samples/evaluations/: - sample_evaluations_builtin_with_csv.py: Demonstrates evaluating pre-computed responses from a CSV file using the csv data source type. Uploads a CSV file via the datasets API, runs coherence/violence/f1 evaluators, and polls results. - sample_synthetic_data_evaluation.py: Demonstrates synthetic data evaluation (preview) that generates test queries from a prompt, sends them to a model target, and evaluates responses with coherence/violence evaluators. 
Also adds: - data_folder/sample_data_evaluation.csv: Sample CSV data file with 3 rows - README.md: Updated sample index with both new samples Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * Update synthetic eval sample: agent target + cleaner dataset ID retrieval - Switch from model target to agent target (azure_ai_agent) - Create agent version via agents.create_version() before evaluation - Simplify output_dataset_id retrieval using getattr instead of nested hasattr/isinstance checks - Add AZURE_AI_AGENT_NAME env var requirement - Remove input_messages (not needed for agent target) Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * Add model target synthetic eval sample, cross-reference both - Add sample_synthetic_data_model_evaluation.py for model target with input_messages system prompt - Update sample_synthetic_data_evaluation.py docstring with cross-reference - Update README.md with both synthetic samples (agent and model) Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * Rename synthetic agent sample, clarify README, add prompt/files comments - Rename sample_synthetic_data_evaluation.py to sample_synthetic_data_agent_evaluation.py - Clarify README: JSONL dataset vs CSV dataset descriptions - Remove (preview) from synthetic sample descriptions in README - Add comments about prompt and reference_files options in both synthetic samples Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * Skip new eval samples in recording tests Add sample_evaluations_builtin_with_csv.py, sample_synthetic_data_agent_evaluation.py, and sample_synthetic_data_model_evaluation.py to samples_to_skip list since they require file upload prerequisites or are long-running preview features. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * Rename env vars per PR review: FOUNDRY_PROJECT_ENDPOINT, FOUNDRY_MODEL_NAME Address review comments from howieleung: - AZURE_AI_PROJECT_ENDPOINT -> FOUNDRY_PROJECT_ENDPOINT - AZURE_AI_MODEL_DEPLOYMENT_NAME -> FOUNDRY_MODEL_NAME Updated in all 3 new samples and README. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * Rename AZURE_AI_AGENT_NAME to FOUNDRY_AGENT_NAME per review Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * Update changelog with new sample entries Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/ai/azure-ai-projects/CHANGELOG.md | 2 + .../samples/evaluations/README.md | 5 +- .../data_folder/sample_data_evaluation.csv | 4 + .../sample_evaluations_builtin_with_csv.py | 145 +++++++++++++++ .../sample_synthetic_data_agent_evaluation.py | 168 +++++++++++++++++ .../sample_synthetic_data_model_evaluation.py | 171 ++++++++++++++++++ .../tests/samples/test_samples_evaluations.py | 3 + 7 files changed, 497 insertions(+), 1 deletion(-) create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/data_folder/sample_data_evaluation.csv create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 44dfff20303b..6f22f74efb00 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -19,6 +19,8 @@ * Renamed environment variable `AZURE_AI_PROJECT_ENDPOINT` to `FOUNDRY_PROJECT_ENDPOINT` in all samples. 
* Renamed environment variable `AZURE_AI_MODEL_DEPLOYMENT_NAME` to `FOUNDRY_MODEL_NAME` in all samples. * Renamed environment variable `AZURE_AI_MODEL_AGENT_NAME` to `FOUNDRY_AGENT_NAME` in all samples. +* Added CSV evaluation sample (`sample_evaluations_builtin_with_csv.py`) demonstrating evaluation with an uploaded CSV dataset. +* Added synthetic data evaluation samples (`sample_synthetic_data_agent_evaluation.py`) and (`sample_synthetic_data_model_evaluation.py`). ### Other Changes diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/README.md b/sdk/ai/azure-ai-projects/samples/evaluations/README.md index b87c5f25d0b5..950aac77e922 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/README.md +++ b/sdk/ai/azure-ai-projects/samples/evaluations/README.md @@ -21,7 +21,8 @@ Set these environment variables: | Sample | Description | |--------|-------------| | [sample_evaluations_builtin_with_inline_data.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py) | Basic evaluation with built-in evaluators using inline data | -| [sample_evaluations_builtin_with_dataset_id.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py) | Evaluate using an uploaded dataset | +| [sample_evaluations_builtin_with_dataset_id.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py) | Evaluate using an uploaded JSONL dataset | +| [sample_evaluations_builtin_with_csv.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py) | Evaluate using an uploaded CSV dataset | | 
[sample_eval_catalog.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py) | Browse and use evaluators from the evaluation catalog | ### Agent / Model Evaluation @@ -32,6 +33,8 @@ Set these environment variables: | [sample_agent_response_evaluation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py) | Evaluate given agent responses | | [sample_agent_response_evaluation_with_function_tool.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py) | Evaluate agent responses with function tools | | [sample_model_evaluation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py) | Create response from model and evaluate | +| [sample_synthetic_data_agent_evaluation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py) | Generate synthetic test data, evaluate a Foundry agent | +| [sample_synthetic_data_model_evaluation.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py) | Generate synthetic test data, evaluate a model | ### Red Team Evaluations diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/data_folder/sample_data_evaluation.csv b/sdk/ai/azure-ai-projects/samples/evaluations/data_folder/sample_data_evaluation.csv new file mode 100644 index 000000000000..6dd3f1d1c556 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/evaluations/data_folder/sample_data_evaluation.csv @@ -0,0 +1,4 @@ +query,response,context,ground_truth +What is the capital of France?,Paris is the capital of France.,France is a country in Western Europe.,Paris is the 
capital of France. +What is machine learning?,Machine learning is a subset of AI that learns from data.,Machine learning is a branch of artificial intelligence.,Machine learning is a type of AI that enables computers to learn from data without being explicitly programmed. +Explain neural networks.,Neural networks are computing systems inspired by biological neural networks.,Neural networks are used in deep learning.,Neural networks are a set of algorithms modeled after the human brain designed to recognize patterns. diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py new file mode 100644 index 000000000000..6259ad3aac5c --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py @@ -0,0 +1,145 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + Given an AIProjectClient, this sample demonstrates how to use the synchronous + `openai.evals.*` methods to create, get and list evaluation and eval runs + using a CSV file uploaded as a dataset. + + Unlike JSONL-based evaluations, this sample uses the `csv` data source type + to run evaluations directly against a CSV file. + +USAGE: + python sample_evaluations_builtin_with_csv.py + + Before running the sample: + + pip install "azure-ai-projects>=2.0.0" python-dotenv + + Set these environment variables with your own values: + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. + 3) DATASET_NAME - Optional. 
The name of the Dataset to create and use in this sample. + 4) DATASET_VERSION - Optional. The version of the Dataset to create and use in this sample. + 5) DATA_FOLDER - Optional. The folder path where the data files for upload are located. +""" + +import os + +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient + +import time +from pprint import pprint +from openai.types.eval_create_params import DataSourceConfigCustom +from azure.ai.projects.models import ( + DatasetVersion, +) +from dotenv import load_dotenv +from datetime import datetime, timezone + +load_dotenv() + + +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") +dataset_name = os.environ.get("DATASET_NAME", "") +dataset_version = os.environ.get("DATASET_VERSION", "1") + +# Construct the paths to the data folder and CSV data file used in this sample +script_dir = os.path.dirname(os.path.abspath(__file__)) +data_folder = os.environ.get("DATA_FOLDER", os.path.join(script_dir, "data_folder")) +data_file = os.path.join(data_folder, "sample_data_evaluation.csv") + +with ( + DefaultAzureCredential() as credential, + AIProjectClient(endpoint=endpoint, credential=credential) as project_client, + project_client.get_openai_client() as client, +): + + print("Upload a CSV file and create a new Dataset to reference the file.") + dataset: DatasetVersion = project_client.datasets.upload_file( + name=dataset_name or f"eval-csv-data-{datetime.now(timezone.utc).strftime('%Y-%m-%d_%H%M%S_UTC')}", + version=dataset_version, + file_path=data_file, + ) + pprint(dataset) + + data_source_config = DataSourceConfigCustom( + { + "type": "custom", + "item_schema": { + "type": "object", + "properties": { + "query": {"type": "string"}, + "response": {"type": "string"}, + "context": {"type": "string"}, + "ground_truth": {"type": "string"}, + }, + "required": [], + }, + "include_sample_schema": True, + } + ) + + 
testing_criteria = [ + { + "type": "azure_ai_evaluator", + "name": "violence", + "evaluator_name": "builtin.violence", + "data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"}, + "initialization_parameters": {"deployment_name": f"{model_deployment_name}"}, + }, + {"type": "azure_ai_evaluator", "name": "f1", "evaluator_name": "builtin.f1_score"}, + { + "type": "azure_ai_evaluator", + "name": "coherence", + "evaluator_name": "builtin.coherence", + "data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"}, + "initialization_parameters": {"deployment_name": f"{model_deployment_name}"}, + }, + ] + + print("Creating evaluation") + eval_object = client.evals.create( + name="CSV evaluation with built-in evaluators", + data_source_config=data_source_config, # type: ignore + testing_criteria=testing_criteria, # type: ignore + ) + print(f"Evaluation created (id: {eval_object.id}, name: {eval_object.name})") + + print("Creating evaluation run with CSV data source") + eval_run_object = client.evals.runs.create( + eval_id=eval_object.id, + name="csv_evaluation_run", + metadata={"team": "eval-exp", "scenario": "csv-eval-v1"}, + data_source={ # type: ignore + "type": "csv", + "source": { + "type": "file_id", + "id": dataset.id if dataset.id else "", + }, + }, + ) + + print(f"Evaluation run created (id: {eval_run_object.id})") + pprint(eval_run_object) + + while True: + run = client.evals.runs.retrieve(run_id=eval_run_object.id, eval_id=eval_object.id) + if run.status in ["completed", "failed"]: + output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) + pprint(output_items) + print(f"Eval Run Report URL: {run.report_url}") + + break + time.sleep(5) + print("Waiting for evaluation run to complete...") + + client.evals.delete(eval_id=eval_object.id) + print("Evaluation deleted") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py 
b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py new file mode 100644 index 000000000000..c67840c6325d --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py @@ -0,0 +1,168 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to create and run a synthetic data evaluation + against a Foundry agent using the synchronous AIProjectClient. + + Synthetic data evaluation generates test queries based on a prompt you provide, + sends them to a Foundry agent, and evaluates the responses — no pre-existing + test dataset required. The generated queries are stored as a dataset in your + project for reuse. + + For evaluating a deployed model instead of an agent, see + sample_synthetic_data_model_evaluation.py. + + This feature is currently in preview. + +USAGE: + python sample_synthetic_data_agent_evaluation.py + + Before running the sample: + + pip install "azure-ai-projects>=2.0.0" python-dotenv + + Set these environment variables with your own values: + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. + 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for generating + synthetic data and for AI-assisted evaluators. + 3) FOUNDRY_AGENT_NAME - Required. The name of the Foundry agent to evaluate. 
+""" + +import os +import time +from pprint import pprint +from typing import Union +from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import PromptAgentDefinition +from openai.types.evals.run_create_response import RunCreateResponse +from openai.types.evals.run_retrieve_response import RunRetrieveResponse + +load_dotenv() + +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ["FOUNDRY_MODEL_NAME"] +agent_name = os.environ["FOUNDRY_AGENT_NAME"] + +with ( + DefaultAzureCredential() as credential, + AIProjectClient(endpoint=endpoint, credential=credential) as project_client, + project_client.get_openai_client() as client, +): + # Create (or update) an agent version to evaluate + agent = project_client.agents.create_version( + agent_name=agent_name, + definition=PromptAgentDefinition( + model=model_deployment_name, + instructions="You are a helpful customer service agent. Be empathetic and solution-oriented.", + ), + ) + print(f"Agent created (name: {agent.name}, version: {agent.version})") + + # Use the azure_ai_source data source config with the synthetic_data_gen_preview scenario. + # The schema is inferred from the service — no custom item_schema is needed. + data_source_config = {"type": "azure_ai_source", "scenario": "synthetic_data_gen_preview"} + + # Define testing criteria using builtin evaluators. + # {{item.query}} references the synthetically generated query. + # {{sample.output_text}} references the agent's plain text response. 
+ testing_criteria = [ + { + "type": "azure_ai_evaluator", + "name": "coherence", + "evaluator_name": "builtin.coherence", + "initialization_parameters": { + "deployment_name": model_deployment_name, + }, + "data_mapping": { + "query": "{{item.query}}", + "response": "{{sample.output_text}}", + }, + }, + { + "type": "azure_ai_evaluator", + "name": "violence", + "evaluator_name": "builtin.violence", + "data_mapping": { + "query": "{{item.query}}", + "response": "{{sample.output_text}}", + }, + }, + ] + + print("Creating evaluation for synthetic data generation") + eval_object = client.evals.create( + name="Synthetic Data Evaluation", + data_source_config=data_source_config, # type: ignore + testing_criteria=testing_criteria, # type: ignore + ) + print(f"Evaluation created (id: {eval_object.id}, name: {eval_object.name})") + + # Configure the synthetic data generation data source with an agent target. + # The service generates queries based on the prompt, sends them to the agent, + # and evaluates the responses. + # + # You can guide query generation in two ways: + # - "prompt": A text description of the queries to generate (used below). + # - "reference_files": A list of dataset asset IDs (uploaded via the datasets API) + # in the format of 'azureai://accounts//projects//data//versions/' + # whose content the service uses as context for generating queries. + # You can use either or both together. 
+ data_source = { + "type": "azure_ai_synthetic_data_gen_preview", + "item_generation_params": { + "type": "synthetic_data_gen_preview", + "samples_count": 5, + "prompt": "Generate customer service questions about returning defective products", + # "reference_files": ["", ""], + "model_deployment_name": model_deployment_name, + "output_dataset_name": "synthetic-eval-dataset", + }, + "target": { + "type": "azure_ai_agent", + "name": agent.name, + "version": agent.version, + }, + } + + eval_run: Union[RunCreateResponse, RunRetrieveResponse] = client.evals.runs.create( + eval_id=eval_object.id, + name="synthetic-data-evaluation-run", + data_source=data_source, # type: ignore + ) + print(f"Evaluation run created (id: {eval_run.id})") + + while eval_run.status not in ["completed", "failed"]: + eval_run = client.evals.runs.retrieve(run_id=eval_run.id, eval_id=eval_object.id) + print(f"Waiting for eval run to complete... current status: {eval_run.status}") + time.sleep(5) + + if eval_run.status == "completed": + print("\n✓ Evaluation run completed successfully!") + print(f"Result Counts: {eval_run.result_counts}") + + output_items = list(client.evals.runs.output_items.list(run_id=eval_run.id, eval_id=eval_object.id)) + print(f"\nOUTPUT ITEMS (Total: {len(output_items)})") + print(f"{'-'*60}") + pprint(output_items) + print(f"{'-'*60}") + + print(f"\nEval Run Report URL: {eval_run.report_url}") + + # The synthetic data generation run stores the generated queries as a dataset. + # Retrieve the output dataset ID from the run's data_source for reuse. 
+ output_dataset_id = getattr(eval_run.data_source, "item_generation_params", {}).get("output_dataset_id") + if output_dataset_id: + print(f"Output Dataset ID (for reuse): {output_dataset_id}") + else: + print("\n✗ Evaluation run failed.") + + client.evals.delete(eval_id=eval_object.id) + print("Evaluation deleted") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py new file mode 100644 index 000000000000..7bf1cc1a2e77 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py @@ -0,0 +1,171 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to create and run a synthetic data evaluation + against a deployed model using the synchronous AIProjectClient. + + Synthetic data evaluation generates test queries based on a prompt you provide, + sends them to a deployed model, and evaluates the responses — no pre-existing + test dataset required. The generated queries are stored as a dataset in your + project for reuse. + + For evaluating a Foundry agent instead of a model, see + sample_synthetic_data_agent_evaluation.py. + + This feature is currently in preview. + +USAGE: + python sample_synthetic_data_model_evaluation.py + + Before running the sample: + + pip install "azure-ai-projects>=2.0.0" python-dotenv + + Set these environment variables with your own values: + 1) FOUNDRY_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your + Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. + 2) FOUNDRY_MODEL_NAME - Required. 
The name of the model deployment to use for generating + synthetic data and as the evaluation target. +""" + +import os +import time +from pprint import pprint +from typing import Union +from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from openai.types.evals.run_create_response import RunCreateResponse +from openai.types.evals.run_retrieve_response import RunRetrieveResponse + +load_dotenv() + +endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] +model_deployment_name = os.environ["FOUNDRY_MODEL_NAME"] + +with ( + DefaultAzureCredential() as credential, + AIProjectClient(endpoint=endpoint, credential=credential) as project_client, + project_client.get_openai_client() as client, +): + # Use the azure_ai_source data source config with the synthetic_data_gen_preview scenario. + # The schema is inferred from the service — no custom item_schema is needed. + data_source_config = {"type": "azure_ai_source", "scenario": "synthetic_data_gen_preview"} + + # Define testing criteria using builtin evaluators. + # {{item.query}} references the synthetically generated query. + # {{sample.output_text}} references the model's response.
+ testing_criteria = [ + { + "type": "azure_ai_evaluator", + "name": "coherence", + "evaluator_name": "builtin.coherence", + "initialization_parameters": { + "deployment_name": model_deployment_name, + }, + "data_mapping": { + "query": "{{item.query}}", + "response": "{{sample.output_text}}", + }, + }, + { + "type": "azure_ai_evaluator", + "name": "violence", + "evaluator_name": "builtin.violence", + "data_mapping": { + "query": "{{item.query}}", + "response": "{{sample.output_text}}", + }, + }, + ] + + print("Creating evaluation for synthetic data generation") + eval_object = client.evals.create( + name="Synthetic Data Model Evaluation", + data_source_config=data_source_config, # type: ignore + testing_criteria=testing_criteria, # type: ignore + ) + print(f"Evaluation created (id: {eval_object.id}, name: {eval_object.name})") + + # Configure the synthetic data generation data source with a model target. + # The service generates queries based on the prompt, sends them to the model, + # and evaluates the responses. + # + # You can guide query generation in two ways: + # - "prompt": A text description of the queries to generate (used below). + # - "reference_files": A list of dataset asset IDs (uploaded via the datasets API) + # in the format of 'azureai://accounts//projects//data//versions/' + # whose content the service uses as context for generating queries. + # You can use either or both together. + data_source = { + "type": "azure_ai_synthetic_data_gen_preview", + "item_generation_params": { + "type": "synthetic_data_gen_preview", + "samples_count": 5, + "prompt": "Generate customer service questions about returning defective products", + # "reference_files": ["", ""], + "model_deployment_name": model_deployment_name, + "output_dataset_name": "synthetic-model-eval-dataset", + }, + "target": { + "type": "azure_ai_model", + "model": model_deployment_name, + }, + # Optional: add a system prompt to shape the target model's behavior. 
+ # When using input_messages with synthetic data generation, include only + # system/developer role messages — the service provides the generated queries + # as user messages automatically. + "input_messages": { + "type": "template", + "template": [ + { + "type": "message", + "role": "developer", + "content": { + "type": "input_text", + "text": "You are a helpful customer service agent. Be empathetic and solution-oriented.", + }, + } + ], + }, + } + + eval_run: Union[RunCreateResponse, RunRetrieveResponse] = client.evals.runs.create( + eval_id=eval_object.id, + name="synthetic-data-model-evaluation-run", + data_source=data_source, # type: ignore + ) + print(f"Evaluation run created (id: {eval_run.id})") + + while eval_run.status not in ["completed", "failed"]: + eval_run = client.evals.runs.retrieve(run_id=eval_run.id, eval_id=eval_object.id) + print(f"Waiting for eval run to complete... current status: {eval_run.status}") + time.sleep(5) + + if eval_run.status == "completed": + print("\n✓ Evaluation run completed successfully!") + print(f"Result Counts: {eval_run.result_counts}") + + output_items = list(client.evals.runs.output_items.list(run_id=eval_run.id, eval_id=eval_object.id)) + print(f"\nOUTPUT ITEMS (Total: {len(output_items)})") + print(f"{'-'*60}") + pprint(output_items) + print(f"{'-'*60}") + + print(f"\nEval Run Report URL: {eval_run.report_url}") + + # The synthetic data generation run stores the generated queries as a dataset. + # Retrieve the output dataset ID from the run's data_source for reuse. 
+ output_dataset_id = getattr(eval_run.data_source, "item_generation_params", {}).get("output_dataset_id") + if output_dataset_id: + print(f"Output Dataset ID (for reuse): {output_dataset_id}") + else: + print("\n✗ Evaluation run failed.") + + client.evals.delete(eval_id=eval_object.id) + print("Evaluation deleted") diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py index 9f166609aa0d..a1d3a68a0b9b 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py @@ -167,6 +167,9 @@ class TestSamplesEvaluations(AzureRecordedTestCase): "sample_scheduled_evaluations.py", # Missing dependency azure.mgmt.resource (ModuleNotFoundError) "sample_evaluations_builtin_with_dataset_id.py", # Requires dataset upload / Blob Storage prerequisite "sample_continuous_evaluation_rule.py", # Requires manual RBAC assignment in Azure Portal + "sample_evaluations_builtin_with_csv.py", # Requires CSV file upload prerequisite + "sample_synthetic_data_agent_evaluation.py", # Synthetic data gen is long-running preview feature + "sample_synthetic_data_model_evaluation.py", # Synthetic data gen is long-running preview feature ], ), ) From db3ea9d9c686648fc0176f83779535209eb9814c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 11 Mar 2026 16:02:18 -0700 Subject: [PATCH 12/16] Fix azure-ai-projects linting errors with pylint version 4.0.5 (#45628) --- sdk/ai/azure-ai-projects/README.md | 17 +- sdk/ai/azure-ai-projects/assets.json | 2 +- .../azure/ai/projects/_patch.py | 2 +- .../azure/ai/projects/aio/_patch.py | 2 +- sdk/ai/azure-ai-projects/dev_requirements.txt | 13 +- .../samples/agents/agent_retrieve_helper.py | 4 +- .../samples/agents/sample_agent_basic.py | 2 +- .../agents/sample_agent_basic_async.py | 2 +- .../agents/sample_agent_retrieve_basic.py | 4 +- 
.../sample_agent_retrieve_basic_async.py | 4 +- .../agents/sample_agent_stream_events.py | 4 +- .../agents/sample_agent_structured_output.py | 2 +- .../sample_agent_structured_output_async.py | 2 +- .../agents/sample_workflow_multi_agent.py | 16 +- .../sample_workflow_multi_agent_async.py | 18 +-- ...sample_agent_basic_with_console_tracing.py | 5 +- ..._with_console_tracing_custom_attributes.py | 1 + .../samples/agents/tools/computer_use_util.py | 1 + .../agents/tools/sample_agent_ai_search.py | 4 +- .../tools/sample_agent_bing_custom_search.py | 6 +- .../tools/sample_agent_bing_grounding.py | 4 +- .../tools/sample_agent_browser_automation.py | 8 +- .../tools/sample_agent_code_interpreter.py | 2 +- ...ample_agent_code_interpreter_with_files.py | 4 +- .../agents/tools/sample_agent_computer_use.py | 14 +- .../tools/sample_agent_computer_use_async.py | 13 +- .../agents/tools/sample_agent_fabric.py | 4 +- .../agents/tools/sample_agent_file_search.py | 7 +- .../sample_agent_file_search_in_stream.py | 18 +-- ...ample_agent_file_search_in_stream_async.py | 17 +- .../tools/sample_agent_function_tool.py | 7 +- .../tools/sample_agent_function_tool_async.py | 7 +- .../samples/agents/tools/sample_agent_mcp.py | 4 +- .../agents/tools/sample_agent_mcp_async.py | 2 +- ...ample_agent_mcp_with_project_connection.py | 4 +- ...agent_mcp_with_project_connection_async.py | 2 +- .../agents/tools/sample_agent_openapi.py | 4 +- ...e_agent_openapi_with_project_connection.py | 2 +- .../agents/tools/sample_agent_sharepoint.py | 6 +- .../agents/tools/sample_agent_to_agent.py | 4 +- .../agents/tools/sample_agent_web_search.py | 4 +- .../tools/sample_agent_web_search_preview.py | 4 +- ...ple_agent_web_search_with_custom_search.py | 4 +- .../agentic_evaluators/sample_coherence.py | 14 +- .../agentic_evaluators/sample_fluency.py | 14 +- .../agent_utils.py | 13 +- .../sample_generic_agentic_evaluator.py | 12 +- .../agentic_evaluators/sample_groundedness.py | 16 +- .../sample_intent_resolution.py | 
14 +- .../agentic_evaluators/sample_relevance.py | 14 +- .../sample_response_completeness.py | 14 +- .../sample_task_adherence.py | 14 +- .../sample_task_completion.py | 14 +- .../sample_task_navigation_efficiency.py | 14 +- .../sample_tool_call_accuracy.py | 14 +- .../sample_tool_call_success.py | 15 +- .../sample_tool_input_accuracy.py | 14 +- .../sample_tool_output_utilization.py | 14 +- .../sample_tool_selection.py | 14 +- .../evaluations/sample_agent_evaluation.py | 6 +- .../sample_agent_response_evaluation.py | 4 +- ..._response_evaluation_with_function_tool.py | 7 +- .../sample_continuous_evaluation_rule.py | 4 +- .../evaluations/sample_eval_catalog.py | 13 +- ...mple_eval_catalog_code_based_evaluators.py | 16 +- ...le_eval_catalog_prompt_based_evaluators.py | 28 ++-- .../sample_evaluation_cluster_insight.py | 18 +-- .../sample_evaluation_compare_insight.py | 12 +- .../sample_evaluations_ai_assisted.py | 11 +- .../sample_evaluations_builtin_with_csv.py | 12 +- ...ple_evaluations_builtin_with_dataset_id.py | 12 +- ...le_evaluations_builtin_with_inline_data.py | 13 +- ...valuations_builtin_with_inline_data_oai.py | 12 +- .../sample_evaluations_builtin_with_traces.py | 8 +- .../evaluations/sample_evaluations_graders.py | 9 +- ...aluations_score_model_grader_with_image.py | 16 +- .../evaluations/sample_model_evaluation.py | 4 +- .../evaluations/sample_redteam_evaluations.py | 15 +- .../sample_scheduled_evaluations.py | 29 ++-- .../sample_synthetic_data_agent_evaluation.py | 6 +- .../sample_synthetic_data_model_evaluation.py | 6 +- .../samples/files/sample_files.py | 5 +- .../samples/files/sample_files_async.py | 3 +- .../finetuning/sample_finetuning_dpo_job.py | 2 +- .../sample_finetuning_dpo_job_async.py | 2 +- ...le_finetuning_oss_models_supervised_job.py | 2 +- ...etuning_oss_models_supervised_job_async.py | 2 +- .../sample_finetuning_reinforcement_job.py | 2 +- ...mple_finetuning_reinforcement_job_async.py | 2 +- .../sample_finetuning_supervised_job.py | 5 
+- .../sample_finetuning_supervised_job_async.py | 6 +- .../mcp_client/sample_mcp_tool_async.py | 6 +- .../memories/sample_memory_advanced.py | 2 +- .../memories/sample_memory_advanced_async.py | 2 +- .../samples/red_team/sample_red_team_async.py | 1 - .../sample_responses_stream_events.py | 4 +- .../sample_responses_stream_manager.py | 4 +- .../sample_responses_structured_output.py | 2 +- .../agents/telemetry/gen_ai_trace_verifier.py | 17 +- .../agents/telemetry/memory_trace_exporter.py | 4 +- .../telemetry/test_ai_agents_instrumentor.py | 54 ++----- .../test_ai_agents_instrumentor_async.py | 40 ++--- .../telemetry/test_ai_instrumentor_base.py | 10 +- .../telemetry/test_responses_instrumentor.py | 147 +++++++++--------- .../test_responses_instrumentor_async.py | 75 ++++----- ...sponses_instrumentor_browser_automation.py | 37 +++-- ...s_instrumentor_browser_automation_async.py | 42 +++-- ...responses_instrumentor_code_interpreter.py | 51 +++--- ...ses_instrumentor_code_interpreter_async.py | 53 ++++--- ...test_responses_instrumentor_file_search.py | 47 +++--- ...esponses_instrumentor_file_search_async.py | 49 +++--- .../test_responses_instrumentor_mcp.py | 47 +++--- .../test_responses_instrumentor_mcp_async.py | 49 +++--- .../test_responses_instrumentor_metrics.py | 12 +- .../test_responses_instrumentor_workflow.py | 75 +++++---- ...t_responses_instrumentor_workflow_async.py | 80 +++++----- .../test_trace_function_decorator.py | 8 +- .../test_trace_function_decorator_async.py | 16 +- .../test_agent_create_version_exception.py | 1 - .../tests/agents/test_agent_responses_crud.py | 3 +- .../agents/test_agent_responses_crud_async.py | 3 +- .../tests/agents/test_agents_crud.py | 1 - .../tests/agents/test_agents_crud_async.py | 1 - .../tests/agents/test_conversation_crud.py | 3 +- .../agents/test_conversation_crud_async.py | 3 +- .../agents/test_conversation_items_crud.py | 11 +- .../test_conversation_items_crud_async.py | 11 +- 
...est_agent_code_interpreter_and_function.py | 4 - ..._agent_file_search_and_code_interpreter.py | 2 - .../test_agent_file_search_and_function.py | 6 +- ...t_file_search_code_interpreter_function.py | 5 - .../test_multitool_with_conversations.py | 7 +- .../agents/tools/test_agent_ai_search.py | 2 +- .../agents/tools/test_agent_bing_grounding.py | 6 +- .../tools/test_agent_code_interpreter.py | 2 +- .../test_agent_code_interpreter_async.py | 1 - .../agents/tools/test_agent_file_search.py | 6 +- .../tools/test_agent_file_search_async.py | 1 - .../tools/test_agent_file_search_stream.py | 1 - .../test_agent_file_search_stream_async.py | 1 - .../agents/tools/test_agent_function_tool.py | 5 +- .../tools/test_agent_function_tool_async.py | 7 +- .../tools/test_agent_image_generation.py | 2 +- .../test_agent_image_generation_async.py | 2 +- .../tests/agents/tools/test_agent_mcp.py | 2 +- .../agents/tools/test_agent_mcp_async.py | 3 +- .../agents/tools/test_agent_memory_search.py | 6 +- .../tools/test_agent_memory_search_async.py | 6 +- .../tests/agents/tools/test_agent_openapi.py | 2 +- .../agents/tools/test_agent_openapi_async.py | 2 +- .../test_agent_tools_with_conversations.py | 5 +- .../agents/tools/test_agent_web_search.py | 1 - .../tools/test_agent_web_search_async.py | 1 - .../tests/connections/test_connections.py | 5 +- .../connections/test_connections_async.py | 4 +- .../tests/datasets/test_datasets.py | 10 +- .../tests/datasets/test_datasets_async.py | 12 +- .../tests/deployments/test_deployments.py | 2 - .../deployments/test_deployments_async.py | 2 - .../tests/files/test_files.py | 2 - .../tests/files/test_files_async.py | 2 - .../tests/finetuning/test_finetuning.py | 14 +- .../tests/finetuning/test_finetuning_async.py | 14 +- .../tests/indexes/test_indexes.py | 3 +- .../tests/indexes/test_indexes_async.py | 3 +- .../tests/redteams/test_redteams.py | 5 +- .../tests/redteams/test_redteams_async.py | 5 +- .../responses/test_openai_client_overrides.py | 2 +- 
.../test_openai_client_overrides_async.py | 2 +- .../tests/responses/test_responses.py | 6 +- .../tests/responses/test_responses_async.py | 10 +- .../tests/samples/test_samples.py | 3 +- .../tests/samples/test_samples_evaluations.py | 1 + .../tests/telemetry/test_telemetry.py | 2 - .../tests/telemetry/test_telemetry_async.py | 2 - sdk/ai/azure-ai-projects/tests/test_base.py | 29 ++-- 176 files changed, 997 insertions(+), 974 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 15e6b44c62ef..2b30726f33aa 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -177,7 +177,7 @@ with project_client.get_openai_client() as openai_client: conversation_id=conversation.id, items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}], ) - print(f"Added a second user message to the conversation") + print("Added a second user message to the conversation") response = openai_client.responses.create( conversation=conversation.id, @@ -229,7 +229,7 @@ the `code_interpreter_call` output item: ```python code = next((output.code for output in response.output if output.type == "code_interpreter_call"), "") -print(f"Code Interpreter code:") +print("Code Interpreter code:") print(code) ``` @@ -246,7 +246,9 @@ asset_file_path = os.path.abspath( ) # Upload the CSV file for the code interpreter -file = openai_client.files.create(purpose="assistants", file=open(asset_file_path, "rb")) +with open(asset_file_path, "rb") as f: + file = openai_client.files.create(purpose="assistants", file=f) + tool = CodeInterpreterTool(container=AutoCodeInterpreterToolParam(file_ids=[file.id])) ``` @@ -273,9 +275,10 @@ print(f"Vector store created (id: {vector_store.id})") asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info.md")) # Upload file to vector store -file = openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, 
file=open(asset_file_path, "rb") -) +with open(asset_file_path, "rb") as f: + file = openai_client.vector_stores.files.upload_and_poll( + vector_store_id=vector_store.id, file=f + ) print(f"File uploaded to vector store (id: {file.id})") tool = FileSearchTool(vector_store_ids=[vector_store.id]) @@ -415,7 +418,7 @@ Call external APIs defined by OpenAPI specifications without additional client-s ```python -with open(weather_asset_file_path, "r") as f: +with open(weather_asset_file_path, "r", encoding="utf-8") as f: openapi_weather = cast(dict[str, Any], jsonref.loads(f.read())) tool = OpenApiTool( diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json index 3d798e3359e9..4d184f12bdb4 100644 --- a/sdk/ai/azure-ai-projects/assets.json +++ b/sdk/ai/azure-ai-projects/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-projects", - "Tag": "python/ai/azure-ai-projects_f6ff7973d2" + "Tag": "python/ai/azure-ai-projects_5b25ba9450" } diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index ec64b8f51fc8..98c3e388bb92 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -12,7 +12,7 @@ import re import logging from typing import List, Any -import httpx +import httpx # pylint: disable=networking-import-outside-azure-core-transport from openai import OpenAI from azure.core.tracing.decorator import distributed_trace from azure.core.credentials import TokenCredential diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 4d23bf74a223..837ca0b1942f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -11,7 +11,7 @@ import os import logging from typing import List, 
Any -import httpx +import httpx # pylint: disable=networking-import-outside-azure-core-transport from openai import AsyncOpenAI from azure.core.tracing.decorator import distributed_trace from azure.core.credentials_async import AsyncTokenCredential diff --git a/sdk/ai/azure-ai-projects/dev_requirements.txt b/sdk/ai/azure-ai-projects/dev_requirements.txt index c606cfa5f8d7..3a0781d99156 100644 --- a/sdk/ai/azure-ai-projects/dev_requirements.txt +++ b/sdk/ai/azure-ai-projects/dev_requirements.txt @@ -4,9 +4,16 @@ # pinning remote version due to limitations of azdo feeds with this package https://azuresdkartifacts.z5.web.core.windows.net/python/distributions/distros/opentelemetry_resource_detector_azure-0.1.5-py3-none-any.whl aiohttp -python-dotenv -opentelemetry-sdk azure-core-tracing-opentelemetry -azure-monitor-opentelemetry +azure-mgmt-authorization azure-mgmt-cognitiveservices +azure-mgmt-resource +azure-monitor-opentelemetry +azure-monitor-query jsonref +opentelemetry-sdk +python-dotenv +# Can't include those, because they are not supported in Python 3.9. Samples that use these package +# cannot be run as pytest, because the pipeline will fail on Python 3.9 jobs. +# pillow +# mcp diff --git a/sdk/ai/azure-ai-projects/samples/agents/agent_retrieve_helper.py b/sdk/ai/azure-ai-projects/samples/agents/agent_retrieve_helper.py index 74bfc41cc399..a1efd3b86a52 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/agent_retrieve_helper.py +++ b/sdk/ai/azure-ai-projects/samples/agents/agent_retrieve_helper.py @@ -1,3 +1,4 @@ +# pylint: disable=name-too-long # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -14,10 +15,7 @@ from typing import Generator, AsyncGenerator from azure.ai.projects.models import PromptAgentDefinition - -from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from azure.identity.aio import DefaultAzureCredential as AsyncDefaultAzureCredential from azure.ai.projects.aio import AIProjectClient as AsyncAIProjectClient diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py index e14dbbcb6148..2f097665de87 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py @@ -67,7 +67,7 @@ conversation_id=conversation.id, items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}], ) - print(f"Added a second user message to the conversation") + print("Added a second user message to the conversation") response = openai_client.responses.create( conversation=conversation.id, diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py index 009171acdee9..0f9d39bc6685 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py @@ -69,7 +69,7 @@ async def main() -> None: conversation_id=conversation.id, items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}], ) - print(f"Added a second user message to the conversation") + print("Added a second user message to the conversation") response = await openai_client.responses.create( conversation=conversation.id, diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py index 07d039779a94..c1f456866c9e 100644 --- 
a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py @@ -30,9 +30,9 @@ import os from dotenv import load_dotenv +from agent_retrieve_helper import create_and_retrieve_agent_and_conversation # pylint: disable=import-error from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from agent_retrieve_helper import create_and_retrieve_agent_and_conversation load_dotenv() @@ -64,7 +64,7 @@ conversation_id=conversation.id, items=[{"type": "message", "role": "user", "content": "How many feet are in a mile?"}], ) - print(f"Added a user message to the conversation") + print("Added a user message to the conversation") response = openai_client.responses.create( conversation=conversation.id, diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py index 3f2c79647935..8c6491746f70 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py @@ -31,9 +31,9 @@ import os import asyncio from dotenv import load_dotenv +from agent_retrieve_helper import create_and_retrieve_agent_and_conversation_async # pylint: disable=import-error from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient -from agent_retrieve_helper import create_and_retrieve_agent_and_conversation_async load_dotenv() @@ -67,7 +67,7 @@ async def main(): conversation_id=conversation.id, items=[{"type": "message", "role": "user", "content": "How many feet are in a mile?"}], ) - print(f"Added a user message to the conversation") + print("Added a user message to the conversation") response = await openai_client.responses.create( conversation=conversation.id, diff --git 
a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py index 990fb833b7e4..417baa7d3ee5 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py @@ -69,9 +69,9 @@ elif event.type == "response.output_text.delta": print(event.delta, end="", flush=True) elif event.type == "response.text.done": - print(f"\n\nResponse text done. Access final text in 'event.text'") + print("\n\nResponse text done. Access final text in 'event.text'") elif event.type == "response.completed": - print(f"\n\nResponse completed. Access final text in 'event.response.output_text'") + print("\n\nResponse completed. Access final text in 'event.response.output_text'") openai_client.conversations.delete(conversation_id=conversation.id) print("Conversation deleted") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py index b7bd9155561b..5579038bac2c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py @@ -32,6 +32,7 @@ import os from dotenv import load_dotenv +from pydantic import BaseModel, Field from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ( @@ -39,7 +40,6 @@ PromptAgentDefinitionTextOptions, TextResponseFormatJsonSchema, ) -from pydantic import BaseModel, Field load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py index 48b58d57633c..c4a652cf846b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py +++ 
b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py @@ -33,6 +33,7 @@ import asyncio import os from dotenv import load_dotenv +from pydantic import BaseModel, Field from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import ( @@ -40,7 +41,6 @@ PromptAgentDefinitionTextOptions, TextResponseFormatJsonSchema, ) -from pydantic import BaseModel, Field load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py index 6f3d21766939..f2ce47c55b7c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py @@ -47,8 +47,8 @@ agent_name="teacher-agent", definition=PromptAgentDefinition( model=os.environ["FOUNDRY_MODEL_NAME"], - instructions="""You are a teacher that create pre-school math question for student and check answer. - If the answer is correct, you stop the conversation by saying [COMPLETE]. + instructions="""You are a teacher that create pre-school math question for student and check answer. + If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -59,14 +59,14 @@ agent_name="student-agent", definition=PromptAgentDefinition( model=os.environ["FOUNDRY_MODEL_NAME"], - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), ) print(f"Agent created (id: {student_agent.id}, name: {student_agent.name}, version: {student_agent.version})") # Create Multi-Agent Workflow - workflow_yaml = f""" + workflow_yaml = """ kind: workflow trigger: kind: OnConversationStart @@ -109,7 +109,7 @@ - kind: SendActivity id: send_teacher_reply - activity: "{{Last(Local.LatestMessage).Text}}" + activity: "{{Last(Local.LatestMessage).Text}}" - kind: SetVariable id: set_variable_turncount @@ -158,10 +158,10 @@ for event in stream: print(f"Event {event.sequence_number} type '{event.type}'", end="") if ( - event.type == "response.output_item.added" or event.type == "response.output_item.done" - ) and event.item.type == "workflow_action": + event.type in ("response.output_item.added", "response.output_item.done") + ) and event.item.type == "workflow_action": # pyright: ignore [reportAttributeAccessIssue] print( - f": item action ID '{event.item.action_id}' is '{event.item.status}' (previous action ID: '{event.item.previous_action_id}')", + f": item action ID '{event.item.action_id}' is '{event.item.status}' (previous action ID: '{event.item.previous_action_id}')", # pyright: ignore [reportAttributeAccessIssue] end="", ) elif event.type == "response.completed": diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py index 1d971d0ce3b3..8673b7ac284d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py @@ -51,8 +51,8 @@ async def main(): agent_name="teacher-agent-async", definition=PromptAgentDefinition( model=os.environ["FOUNDRY_MODEL_NAME"], - instructions="""You are a teacher that create pre-school math question for student and check answer. - If the answer is correct, you stop the conversation by saying [COMPLETE]. 
+ instructions="""You are a teacher that create pre-school math question for student and check answer. + If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -62,13 +62,13 @@ async def main(): agent_name="student-agent-async", definition=PromptAgentDefinition( model=os.environ["FOUNDRY_MODEL_NAME"], - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), ) print(f"Agent created (id: {student_agent.id}, name: {student_agent.name}, version: {student_agent.version})") - workflow_yaml = f""" + workflow_yaml = """ kind: workflow trigger: kind: OnConversationStart @@ -111,8 +111,8 @@ async def main(): - kind: SendActivity id: send_teacher_reply - activity: "{{Last(Local.LatestMessage).Text}}" - + activity: "{{Last(Local.LatestMessage).Text}}" + - kind: SetVariable id: set_variable_turncount variable: Local.TurnCount @@ -160,10 +160,10 @@ async def main(): async for event in stream: print(f"Event {event.sequence_number} type '{event.type}'", end="") if ( - event.type == "response.output_item.added" or event.type == "response.output_item.done" - ) and event.item.type == "workflow_action": + event.type in ("response.output_item.added", "response.output_item.done") + ) and event.item.type == "workflow_action": # pyright: ignore [reportAttributeAccessIssue] print( - f": item action ID '{event.item.action_id}' is '{event.item.status}' (previous action ID: '{event.item.previous_action_id}')", + f": item action ID '{event.item.action_id}' is '{event.item.status}' (previous action ID: '{event.item.previous_action_id}')", # pyright: ignore [reportAttributeAccessIssue] end="", ) elif event.type == "response.completed": diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py 
b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py index 82d04285785b..85e1504783d2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py @@ -1,3 +1,4 @@ +# pylint: disable=wrong-import-position,wrong-import-order,docstring-missing-param,ungrouped-imports # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -50,7 +51,7 @@ load_dotenv() -def display_conversation_item(item: Any) -> None: +def display_conversation_item(item: Any) -> None: # pylint: disable=redefined-outer-name """Safely display conversation item information""" print(f"Item ID: {getattr(item, 'id', 'N/A')}") print(f"Type: {getattr(item, 'type', 'N/A')}") @@ -118,7 +119,7 @@ def display_conversation_item(item: Any) -> None: ) print(f"Answer: {response.output}") - print(f"\n📋 Listing conversation items...") + print("\n📋 Listing conversation items...") items = openai_client.conversations.items.list(conversation_id=conversation.id) # Print all the items diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py index 7fe16305c996..44251d2ae830 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing_custom_attributes.py @@ -1,3 +1,4 @@ +# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/computer_use_util.py b/sdk/ai/azure-ai-projects/samples/agents/tools/computer_use_util.py index c1793779b0fe..cdd9c2e21496 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/computer_use_util.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/computer_use_util.py @@ -1,3 +1,4 @@ +# pylint: disable=docstring-missing-param,docstring-missing-return,docstring-missing-rtype,name-too-long # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py index 75ba32b79d8b..f2b9f01ee657 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py @@ -93,7 +93,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -107,7 +107,7 @@ f"End index: {annotation.end_index}" ) elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Agent response: {event.response.output_text}") print("\nCleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py index b61867cbe8f2..fd3a6a1910b3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py @@ -75,7 +75,7 @@ agent_name="MyAgent", definition=PromptAgentDefinition( model=os.environ["FOUNDRY_MODEL_NAME"], - 
instructions="""You are a helpful agent that can use Bing Custom Search tools to assist users. + instructions="""You are a helpful agent that can use Bing Custom Search tools to assist users. Use the available Bing Custom Search tools to answer questions and perform tasks.""", tools=[tool], ), @@ -97,7 +97,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -111,7 +111,7 @@ f"End index: {annotation.end_index}" ) elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Full response: {event.response.output_text}") print("Cleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py index 4adb31cec416..386d22dc5e45 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py @@ -97,7 +97,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -107,7 +107,7 @@ if annotation.type == "url_citation": print(f"URL Citation: {annotation.url}") elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Full response: {event.response.output_text}") print("\nCleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py 
b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py index 2c0b98a96a5c..457e79f40388 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py @@ -61,8 +61,8 @@ agent_name="MyAgent", definition=PromptAgentDefinition( model=os.environ["FOUNDRY_MODEL_NAME"], - instructions="""You are an Agent helping with browser automation tasks. - You can answer questions, provide information, and assist with various tasks + instructions="""You are an Agent helping with browser automation tasks. + You can answer questions, provide information, and assist with various tasks related to web browsing using the Browser Automation tool available to you.""", tools=[tool], ), @@ -88,7 +88,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": item = event.item if item.type == "browser_automation_preview_call": # TODO: support browser_automation_preview_call schema @@ -101,7 +101,7 @@ print(f"Call ID: {getattr(item, 'call_id')}") print(f"Query arguments: {query}") elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Full response: {event.response.output_text}") print("\nCleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py index e2c64efb44be..0f053b52a860 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py @@ -71,7 +71,7 @@ # Print code executed by the code interpreter tool. 
# [START code_output_extraction] code = next((output.code for output in response.output if output.type == "code_interpreter_call"), "") - print(f"Code Interpreter code:") + print("Code Interpreter code:") print(code) # [END code_output_extraction] diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py index 336fc0c17944..68942c42cda8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_with_files.py @@ -47,7 +47,9 @@ ) # Upload the CSV file for the code interpreter - file = openai_client.files.create(purpose="assistants", file=open(asset_file_path, "rb")) + with open(asset_file_path, "rb") as f: + file = openai_client.files.create(purpose="assistants", file=f) + tool = CodeInterpreterTool(container=AutoCodeInterpreterToolParam(file_ids=[file.id])) # [END tool_declaration] diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py index bf448cee686a..d77cc7dae39e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py @@ -31,17 +31,17 @@ import os from dotenv import load_dotenv -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition, ComputerUsePreviewTool # Import shared helper functions -from computer_use_util import ( +from computer_use_util import ( # pylint: disable=import-error SearchState, load_screenshot_assets, handle_computer_action_and_take_screenshot, print_final_output, ) +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient 
+from azure.ai.projects.models import PromptAgentDefinition, ComputerUsePreviewTool load_dotenv() @@ -61,7 +61,7 @@ print("Successfully loaded screenshot assets") except FileNotFoundError: print("Failed to load required screenshot assets. Please ensure the asset files exist in ../assets/") - exit(1) + exit(1) # pylint: disable=consider-using-sys-exit # [START tool_declaration] tool = ComputerUsePreviewTool(display_width=1026, display_height=769, environment="windows") @@ -72,8 +72,8 @@ definition=PromptAgentDefinition( model=os.environ.get("COMPUTER_USE_MODEL_DEPLOYMENT_NAME", "computer-use-preview"), instructions=""" - You are a computer automation assistant. - + You are a computer automation assistant. + Be direct and efficient. When you reach the search results page, read and describe the actual search result titles and descriptions you can see. """, tools=[tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py index 2720916e1759..5fd68ef7eda6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py @@ -29,18 +29,19 @@ the "Models + endpoints" tab in your Microsoft Foundry project. 
""" +# pylint: disable=pointless-string-statement import asyncio import os from dotenv import load_dotenv -from azure.identity.aio import DefaultAzureCredential -from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition, ComputerUsePreviewTool -from computer_use_util import ( +from computer_use_util import ( # pylint: disable=import-error SearchState, load_screenshot_assets, handle_computer_action_and_take_screenshot, print_final_output, ) +from azure.identity.aio import DefaultAzureCredential +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import PromptAgentDefinition, ComputerUsePreviewTool load_dotenv() @@ -74,8 +75,8 @@ async def main(): definition=PromptAgentDefinition( model=os.environ.get("COMPUTER_USE_MODEL_DEPLOYMENT_NAME", "computer-use-preview"), instructions=""" - You are a computer automation assistant. - + You are a computer automation assistant. + Be direct and efficient. When you reach the search results page, read and describe the actual search result titles and descriptions you can see. 
""", tools=[computer_use_tool], diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py index 45a2e29ee4ed..3ceb1ee1d28b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py @@ -81,7 +81,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -95,7 +95,7 @@ f"End index: {annotation.end_index}" ) elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Full response: {event.response.output_text}") print("\nCleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py index c785e1877ade..c604386b5cf7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py @@ -48,9 +48,10 @@ asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info.md")) # Upload file to vector store - file = openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, file=open(asset_file_path, "rb") - ) + with open(asset_file_path, "rb") as f: + file = openai_client.vector_stores.files.upload_and_poll( + vector_store_id=vector_store.id, file=f + ) print(f"File uploaded to vector store (id: {file.id})") tool = FileSearchTool(vector_store_ids=[vector_store.id]) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py 
b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py index 682176ad690c..eb17555821be 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py @@ -26,10 +26,9 @@ import os from dotenv import load_dotenv -from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient from azure.ai.projects.models import PromptAgentDefinition, FileSearchTool -from openai import OpenAI load_dotenv() @@ -51,9 +50,10 @@ # Upload file to vector store try: - file = openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, file=open(asset_file_path, "rb") - ) + with open(asset_file_path, "rb") as f: + file = openai_client.vector_stores.files.upload_and_poll( + vector_store_id=vector_store.id, file=f + ) print(f"File uploaded to vector store (id: {file.id})") except FileNotFoundError: print(f"Warning: Asset file not found at {asset_file_path}") @@ -103,7 +103,7 @@ elif event.type == "response.text.done": print(f"\nResponse done with full message: {event.text}") elif event.type == "response.completed": - print(f"\nResponse completed!") + print("\nResponse completed!") print(f"Full response: {event.response.output_text}") print("\n" + "=" * 60) @@ -129,7 +129,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -139,7 +139,7 @@ if annotation.type == "file_citation": print(f"File Citation - Filename: {annotation.filename}, File ID: {annotation.file_id}") elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up 
completed!") print(f"Agent response: {event.response.output_text}") # Clean up resources @@ -155,7 +155,7 @@ try: openai_client.vector_stores.delete(vector_store.id) print("Vector store deleted") - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"Warning: Could not delete vector store: {e}") print("\nFile search streaming sample completed!") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py index 8a2fdd22bfc6..0be162b59062 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py @@ -36,7 +36,7 @@ endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] -async def main() -> None: +async def main() -> None: # pylint: disable=too-many-statements async with ( DefaultAzureCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client, @@ -51,9 +51,10 @@ async def main() -> None: # Upload file to vector store try: - file = await openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, file=open(asset_file_path, "rb") - ) + with open(asset_file_path, "rb") as f: + file = await openai_client.vector_stores.files.upload_and_poll( + vector_store_id=vector_store.id, file=f + ) print(f"File uploaded to vector store (id: {file.id})") except FileNotFoundError: print(f"Warning: Asset file not found at {asset_file_path}") @@ -104,7 +105,7 @@ async def main() -> None: elif event.type == "response.text.done": print(f"\nResponse done with full message: {event.text}") elif event.type == "response.completed": - print(f"\nResponse completed!") + print("\nResponse completed!") print(f"Full response: {event.response.output_text}") print("\n" + "=" * 60) @@ -134,7 +135,7 @@ async def main() -> None: 
elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -144,7 +145,7 @@ async def main() -> None: if annotation.type == "file_citation": print(f"File Citation - Filename: {annotation.filename}, File ID: {annotation.file_id}") elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Agent response: {event.response.output_text}") # Clean up resources @@ -160,7 +161,7 @@ async def main() -> None: try: await openai_client.vector_stores.delete(vector_store.id) print("Vector store deleted") - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"Warning: Could not delete vector store: {e}") print("\nFile search streaming sample completed!") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py index 33166f2ad06f..7e0ac8e0e56e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py @@ -23,13 +23,14 @@ the "Models + endpoints" tab in your Microsoft Foundry project. 
""" +# pylint: disable=docstring-missing-param,docstring-missing-return,docstring-missing-rtype import os import json from dotenv import load_dotenv -from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition, Tool, FunctionTool -from azure.identity import DefaultAzureCredential from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import PromptAgentDefinition, FunctionTool load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py index 1ecd17dee095..249f9a936419 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py @@ -24,14 +24,15 @@ the "Models + endpoints" tab in your Microsoft Foundry project. 
""" +# pylint: disable=docstring-missing-param,docstring-missing-return,docstring-missing-rtype import os import json import asyncio from dotenv import load_dotenv -from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition, Tool, FunctionTool -from azure.identity.aio import DefaultAzureCredential from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam +from azure.identity.aio import DefaultAzureCredential +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import PromptAgentDefinition, FunctionTool load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py index 9b86f113f234..fb0a553328e9 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py @@ -25,10 +25,10 @@ import os from dotenv import load_dotenv +from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition, MCPTool, Tool -from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam +from azure.ai.projects.models import PromptAgentDefinition, MCPTool load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py index 917caae9da33..55c32cd0602c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py @@ -26,10 +26,10 @@ import os import asyncio from dotenv import load_dotenv +from openai.types.responses.response_input_param import McpApprovalResponse, 
ResponseInputParam from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import PromptAgentDefinition, MCPTool, Tool -from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py index d34313d8651d..091db4707b59 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py @@ -27,10 +27,10 @@ import os from dotenv import load_dotenv +from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition, MCPTool, Tool -from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam +from azure.ai.projects.models import PromptAgentDefinition, MCPTool load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py index 974699e42901..8de0155f94ec 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py @@ -28,10 +28,10 @@ import os import asyncio from dotenv import load_dotenv +from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient from 
azure.ai.projects.models import PromptAgentDefinition, MCPTool, Tool -from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py index 5e45e9ec41bf..804c330cd0ce 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py @@ -24,9 +24,9 @@ """ import os +from typing import Any, cast import jsonref from dotenv import load_dotenv -from typing import Any, cast from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ( @@ -49,7 +49,7 @@ weather_asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/weather_openapi.json")) # [START tool_declaration] - with open(weather_asset_file_path, "r") as f: + with open(weather_asset_file_path, "r", encoding="utf-8") as f: openapi_weather = cast(dict[str, Any], jsonref.loads(f.read())) tool = OpenApiTool( diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py index b580ee51eb2a..6c7d88ba4ba4 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py @@ -27,9 +27,9 @@ """ import os +from typing import Any, cast import jsonref from dotenv import load_dotenv -from typing import Any, cast from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ( diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py 
b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py index 6af21a566a6c..394e486cc36a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py @@ -60,7 +60,7 @@ agent_name="MyAgent", definition=PromptAgentDefinition( model=os.environ["FOUNDRY_MODEL_NAME"], - instructions="""You are a helpful agent that can use SharePoint tools to assist users. + instructions="""You are a helpful agent that can use SharePoint tools to assist users. Use the available SharePoint tools to answer questions and perform tasks.""", tools=[tool], ), @@ -85,7 +85,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -99,7 +99,7 @@ f"End index: {annotation.end_index}" ) elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Agent response: {event.response.output_text}") print("Cleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py index 97ae4365f075..3fe3835b093a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py @@ -85,7 +85,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": item = event.item if item.type == "a2a_preview_call": @@ -97,7 +97,7 @@ elif item.type == "a2a_preview_call_output": print(f"Response ID: {getattr(item, 'id')}") 
elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Full response: {event.response.output_text}") print("\nCleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py index 5eea41125bf8..3d21a92ae1b6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py @@ -87,7 +87,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -101,7 +101,7 @@ f"End index: {annotation.end_index}" ) elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Full response: {event.response.output_text}") print("\nCleaning up...") project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py index 4148bb7fd29e..cf68ceaf09cf 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py @@ -83,7 +83,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -97,7 +97,7 @@ f"End index: {annotation.end_index}" ) elif event.type == 
"response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Full response: {event.response.output_text}") print("\nCleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py index b9da3e2e856b..e4ad578919b5 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py @@ -99,7 +99,7 @@ elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"\nFollow-up response done!") + print("\nFollow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -113,7 +113,7 @@ f"End index: {annotation.end_index}" ) elif event.type == "response.completed": - print(f"\nFollow-up completed!") + print("\nFollow-up completed!") print(f"Full response: {event.response.output_text}") print("\nCleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py index e3c2f9291a09..693d948c8e6d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_coherence.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" -from dotenv import load_dotenv -import json import os import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -80,7 +80,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -114,7 +114,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -126,7 +126,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py index 5ab4e46690ac..0dc7f400fb6c 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_fluency.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -80,7 +80,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -105,7 +105,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -117,7 +117,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py index 68ef5ef7bbc3..91a0b4251b98 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/agent_utils.py @@ -3,19 +3,20 @@ # Licensed under the MIT License. 
# ------------------------------------ -from dotenv import load_dotenv import os import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -54,7 +55,7 @@ def run_evaluator( data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -71,7 +72,7 @@ def run_evaluator( ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -83,7 +84,7 @@ def run_evaluator( while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py index b5be2881ae38..1b2afc8d6705 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_generic_agentic_evaluator/sample_generic_agentic_evaluator.py @@ -23,10 +23,11 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -from agent_utils import run_evaluator -from schema_mappings import evaluator_to_data_source_config, evaluator_to_data_mapping + +from dotenv import load_dotenv +from agent_utils import run_evaluator # pylint: disable=import-error +from schema_mappings import evaluator_to_data_source_config, evaluator_to_data_mapping # pylint: disable=import-error from openai.types.evals.create_eval_jsonl_run_data_source_param import SourceFileContentContent load_dotenv() @@ -35,9 +36,8 @@ def _get_evaluator_initialization_parameters(evaluator_name: str) -> dict[str, str]: if evaluator_name == "task_navigation_efficiency": return {"matching_mode": "exact_match"} # Can be "exact_match", "in_order_match", or "any_order_match" - else: - model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini - return {"deployment_name": model_deployment_name} + model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") # Sample : gpt-4o-mini + return {"deployment_name": model_deployment_name} def _get_evaluation_contents() -> list[SourceFileContentContent]: diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py index 8aa48ca726a6..958316d45676 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_groundedness.py @@ -23,25 +23,25 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. 
""" -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() -def main() -> None: +def main() -> None: # pylint: disable=too-many-locals endpoint = os.environ[ "FOUNDRY_PROJECT_ENDPOINT" ] # Sample : https://.services.ai.azure.com/api/projects/ @@ -96,7 +96,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -252,7 +252,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -264,7 +264,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py index 4601d4587925..cda8cdb30c74 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_intent_resolution.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -91,7 +91,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -238,7 +238,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -250,7 +250,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py index 553770301b91..883e5e56f71b 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_relevance.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -84,7 +84,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -145,7 +145,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -157,7 +157,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py index c37eafae3d41..a14dc3a67e44 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_response_completeness.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -82,7 +82,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -124,7 +124,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -136,7 +136,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py index 30066c7ff66d..d46398d61677 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_adherence.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -92,7 +92,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -210,7 +210,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -222,7 +222,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py index 73dce3ae4070..68ca90bd6bbf 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_completion.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -92,7 +92,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -248,7 +248,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -260,7 +260,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py index 47dd01c5eeb0..d3d7862bc18e 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_task_navigation_efficiency.py @@ -22,20 +22,20 @@ Microsoft Foundry project. It has the form: https://.services.ai.azure.com/api/projects/. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -83,7 +83,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -171,7 +171,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -183,7 +183,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py index d3aea2348416..f3620e56f6cc 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_accuracy.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -94,7 +94,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -293,7 +293,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -305,7 +305,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py index 12e1746db771..7320c28e7e6f 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_call_success.py @@ -23,21 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.eval_create_params import DataSourceConfigCustom from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) -from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -88,7 +87,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -229,7 +228,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -241,7 +240,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py index b08119e4f88c..0fc2cecb338b 100644 --- 
a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_input_accuracy.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -92,7 +92,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -306,7 +306,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -318,7 +318,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py index 72ea0e1f260c..818567f7ad16 100644 --- 
a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_output_utilization.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -90,7 +90,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -247,7 +247,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -259,7 +259,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py index 38f49bc9c582..97452fe11881 100644 --- 
a/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/agentic_evaluators/sample_tool_selection.py @@ -23,20 +23,20 @@ 2) FOUNDRY_MODEL_NAME - Required. The name of the model deployment to use for evaluation. """ -from dotenv import load_dotenv import os -import json import time from pprint import pprint -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient +from dotenv import load_dotenv + from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -94,7 +94,7 @@ def main() -> None: data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -212,7 +212,7 @@ def main() -> None: ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -224,7 +224,7 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py index bc54ebd5c70d..7c4296fe0cc8 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py @@ -31,12 +31,12 @@ from typing import Union from pprint import pprint from dotenv import load_dotenv -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition from openai.types.eval_create_params import DataSourceConfigCustom from openai.types.evals.run_create_response import RunCreateResponse from openai.types.evals.run_retrieve_response import RunRetrieveResponse +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import PromptAgentDefinition load_dotenv() endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py index 73b32e3c8f26..e3e5fd3feb22 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py @@ -31,11 +31,11 @@ from typing import Union from pprint import pprint from dotenv import load_dotenv +from openai.types.evals.run_create_response import RunCreateResponse +from openai.types.evals.run_retrieve_response import RunRetrieveResponse from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient from azure.ai.projects.models import PromptAgentDefinition -from openai.types.evals.run_create_response import RunCreateResponse -from openai.types.evals.run_retrieve_response import RunRetrieveResponse load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py index 45de7b5d2e6f..6aedee95b8ab 100644 --- 
a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py @@ -25,18 +25,19 @@ the "Models + endpoints" tab in your Microsoft Foundry project. """ +# pylint: disable=docstring-missing-param,docstring-missing-return,docstring-missing-rtype import json import os import time from typing import Union from pprint import pprint from dotenv import load_dotenv -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import PromptAgentDefinition, Tool, FunctionTool from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam from openai.types.evals.run_create_response import RunCreateResponse from openai.types.evals.run_retrieve_response import RunRetrieveResponse +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import PromptAgentDefinition, Tool, FunctionTool load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py index 8c8ac624e790..484bf3260445 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py @@ -118,7 +118,7 @@ conversation_id=conversation.id, items=[{"type": "message", "role": "user", "content": f"Question {i}: What is the capital city?"}], ) - print(f"Added a user message to the conversation") + print("Added a user message to the conversation") response = openai_client.responses.create( conversation=conversation.id, @@ -145,7 +145,7 @@ MAX_LOOP = 20 for _ in range(0, MAX_LOOP): - print(f"Waiting for eval run to complete...") + print("Waiting for eval run to complete...") 
eval_run_list = openai_client.evals.runs.list( eval_id=eval_object.id, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py index c1d788d294cb..6eed731eb0e6 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog.py @@ -23,6 +23,8 @@ """ import os +from pprint import pprint +from dotenv import load_dotenv from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ( @@ -36,9 +38,6 @@ EvaluatorMetricType, ) -from pprint import pprint -from dotenv import load_dotenv - load_dotenv() endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] @@ -57,19 +56,19 @@ definition=PromptBasedEvaluatorDefinition( prompt_text="""You are an evaluator. Rate the GROUNDEDNESS (factual correctness without unsupported claims) of the system response to the customer query. - + Scoring (1–5): 1 = Mostly fabricated/incorrect 2 = Many unsupported claims 3 = Mixed: some facts but notable errors/guesses 4 = Mostly factual; minor issues 5 = Fully factual; no unsupported claims - + Return ONLY a single integer 1–5 as score in valid json response e.g {\"score\": int}. 
- + Query: {query} - + Response: {response} """, diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py index e00037924f8c..dffe0f18ed76 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_code_based_evaluators.py @@ -24,21 +24,19 @@ """ import os -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import EvaluatorCategory, EvaluatorDefinitionType +import time +from pprint import pprint +from dotenv import load_dotenv from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom - -import time -from pprint import pprint - -from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import EvaluatorCategory, EvaluatorDefinitionType load_dotenv() @@ -189,7 +187,7 @@ while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Evaluation run Report URL: {run.report_url}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py index e17293f21492..f89cd061d701 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/sample_eval_catalog_prompt_based_evaluators.py @@ -57,21 +57,19 @@ """ import os -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import EvaluatorCategory, EvaluatorDefinitionType +import time +from pprint import pprint +from dotenv import load_dotenv from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom - -from pprint import pprint -import time - -from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import EvaluatorCategory, EvaluatorDefinitionType load_dotenv() @@ -97,8 +95,8 @@ "prompt_text": """ You are a Groundedness Evaluator. - Your task is to evaluate how well the given response is grounded in the provided ground truth. - Groundedness means the response’s statements are factually supported by the ground truth. + Your task is to evaluate how well the given response is grounded in the provided ground truth. + Groundedness means the response’s statements are factually supported by the ground truth. Evaluate factual alignment only — ignore grammar, fluency, or completeness. --- @@ -116,10 +114,10 @@ --- ### Scoring Scale (1–5): - 5 → Fully grounded. All claims supported by ground truth. - 4 → Mostly grounded. Minor unsupported details. - 3 → Partially grounded. About half the claims supported. - 2 → Mostly ungrounded. Only a few details supported. + 5 → Fully grounded. All claims supported by ground truth. + 4 → Mostly grounded. Minor unsupported details. + 3 → Partially grounded. About half the claims supported. + 2 → Mostly ungrounded. Only a few details supported. 1 → Not grounded. Almost all information unsupported. 
--- @@ -255,7 +253,7 @@ while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Report URL: {run.report_url}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py index 256bacef18b4..a125dd62ed04 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_cluster_insight.py @@ -32,14 +32,6 @@ from typing import Union from pprint import pprint from dotenv import load_dotenv -from azure.ai.projects.models import ( - OperationState, - EvaluationRunClusterInsightRequest, - Insight, - InsightModelConfiguration, -) -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient from openai.types.eval_create_params import DataSourceConfigCustom, TestingCriterionLabelModel from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, @@ -48,6 +40,14 @@ ) from openai.types.evals.run_create_response import RunCreateResponse from openai.types.evals.run_retrieve_response import RunRetrieveResponse +from azure.ai.projects.models import ( + OperationState, + EvaluationRunClusterInsightRequest, + Insight, + InsightModelConfiguration, +) +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -135,7 +135,7 @@ print(f"Started insight generation (id: {clusterInsight.insight_id})") while clusterInsight.state not in [OperationState.SUCCEEDED, OperationState.FAILED]: - print(f"Waiting for insight to be generated...") + print("Waiting for insight to 
be generated...") clusterInsight = project_client.beta.insights.get(insight_id=clusterInsight.insight_id) print(f"Insight status: {clusterInsight.state}") time.sleep(5) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py index 1cf98af0efce..0b48752f4a90 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluation_compare_insight.py @@ -31,6 +31,12 @@ import time from pprint import pprint from dotenv import load_dotenv +from openai.types.eval_create_params import DataSourceConfigCustom, TestingCriterionLabelModel +from openai.types.evals.create_eval_jsonl_run_data_source_param import ( + CreateEvalJSONLRunDataSourceParam, + SourceFileContent, +) +from openai.types.evals.run_retrieve_response import RunRetrieveResponse from azure.ai.projects.models import ( OperationState, EvaluationComparisonInsightRequest, @@ -38,12 +44,6 @@ ) from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from openai.types.eval_create_params import DataSourceConfigCustom, TestingCriterionLabelModel -from openai.types.evals.create_eval_jsonl_run_data_source_param import ( - CreateEvalJSONLRunDataSourceParam, - SourceFileContent, -) -from openai.types.evals.run_retrieve_response import RunRetrieveResponse load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py index c76c08bf6191..2fc9c4b7ac73 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_ai_assisted.py @@ -24,18 +24,17 @@ """ import os - -from azure.identity import DefaultAzureCredential -from azure.ai.projects import 
AIProjectClient import time from pprint import pprint +from dotenv import load_dotenv from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom -from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -164,7 +163,7 @@ ), ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Evaluation Run by Id") @@ -174,7 +173,7 @@ while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Report URL: {run.report_url}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py index 6259ad3aac5c..8bb044514edd 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_csv.py @@ -30,18 +30,18 @@ """ import os - -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient - import time +from datetime import datetime, timezone from pprint import pprint + +from dotenv import load_dotenv from openai.types.eval_create_params import DataSourceConfigCustom + +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ( DatasetVersion, ) -from dotenv import load_dotenv -from datetime import datetime, timezone load_dotenv() diff --git 
a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py index 29559c76572a..1880f48fa5b3 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_dataset_id.py @@ -27,19 +27,17 @@ """ import os - -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient - import time +from datetime import datetime from pprint import pprint +from dotenv import load_dotenv from openai.types.evals.create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam, SourceFileID from openai.types.eval_create_params import DataSourceConfigCustom +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ( DatasetVersion, ) -from dotenv import load_dotenv -from datetime import datetime load_dotenv() @@ -135,7 +133,7 @@ while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Report URL: {run.report_url}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py index f18080047b25..0c0adf21de89 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data.py @@ -24,18 +24,17 @@ """ import os - -from azure.identity import DefaultAzureCredential -from 
azure.ai.projects import AIProjectClient import time from pprint import pprint +from dotenv import load_dotenv from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom -from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -96,7 +95,7 @@ data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -150,7 +149,7 @@ ), ) - print(f"Eval Run created") + print("Eval Run created") pprint(eval_run_object) print("Get Eval Run by Id") @@ -160,7 +159,7 @@ while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Report URL: {run.report_url}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py index 8c8100efbca3..ce3168f25c58 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_inline_data_oai.py @@ -24,10 +24,9 @@ """ import os - -from azure.identity import DefaultAzureCredential import time from pprint import pprint +from dotenv import load_dotenv from openai import OpenAI from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, 
@@ -35,8 +34,7 @@ SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom -from dotenv import load_dotenv -from azure.identity import get_bearer_token_provider +from azure.identity import DefaultAzureCredential, get_bearer_token_provider load_dotenv() @@ -87,7 +85,7 @@ data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) -print(f"Evaluation created") +print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -141,12 +139,12 @@ ), ) -print(f"Eval Run created") +print("Eval Run created") pprint(eval_run_object) while True: run = client.evals.runs.retrieve(run_id=eval_run_object.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): print("Get Eval Run by Id") output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py index 57f839a022ee..03d7ee54c949 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_builtin_with_traces.py @@ -1,4 +1,4 @@ -# pylint: disable=line-too-long,useless-suppression +# pylint: disable=line-too-long,useless-suppression,docstring-missing-param,docstring-missing-return,docstring-missing-rtype,unused-argument # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -32,15 +32,13 @@ import os import time from datetime import datetime, timedelta, timezone +from pprint import pprint from typing import Any, Dict, List - from dotenv import load_dotenv from azure.identity import DefaultAzureCredential from azure.monitor.query import LogsQueryClient, LogsQueryStatus from azure.ai.projects import AIProjectClient -from pprint import pprint - load_dotenv() @@ -85,7 +83,7 @@ def get_trace_ids( Returns: List of distinct operation IDs (trace IDs). """ - query = f""" + query = f""" dependencies | where timestamp between (datetime({start_time.isoformat()}) .. datetime({end_time.isoformat()})) | extend agent_id = tostring(customDimensions["gen_ai.agent.id"]) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py index 531924ed51bc..f04644142485 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_graders.py @@ -25,17 +25,18 @@ import os -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient import time from pprint import pprint + +from dotenv import load_dotenv from openai.types.evals.create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam, SourceFileContent, SourceFileContentContent, ) from openai.types.eval_create_params import DataSourceConfigCustom -from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() @@ -180,7 +181,7 @@ while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Report URL: 
{run.report_url}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py index 8273afaa747e..290e2b019a44 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_score_model_grader_with_image.py @@ -24,13 +24,12 @@ import os import base64 -from PIL import Image -from io import BytesIO - -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient import time +from io import BytesIO from pprint import pprint +from PIL import Image + +from dotenv import load_dotenv from openai.types.evals.create_eval_completions_run_data_source_param import ( CreateEvalCompletionsRunDataSourceParam, SourceFileContent, @@ -41,7 +40,8 @@ ) from openai.types.responses import EasyInputMessageParam from openai.types.eval_create_params import DataSourceConfigCustom -from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() file_path = os.path.abspath(__file__) @@ -51,7 +51,7 @@ model_deployment_name = os.environ.get("FOUNDRY_MODEL_NAME", "") -def image_to_data_uri(image_path: str) -> str: +def image_to_data_uri(image_path: str) -> str: # pylint: disable=redefined-outer-name with Image.open(image_path) as img: buffered = BytesIO() img.save(buffered, format=img.format or "PNG") @@ -182,7 +182,7 @@ def image_to_data_uri(image_path: str) -> str: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) pprint(output_items) print(f"Eval Run Report URL: 
{run.report_url}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py index 53066a0fce99..bd169235a91f 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py @@ -31,11 +31,11 @@ from pprint import pprint from typing import Union from dotenv import load_dotenv -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient from openai.types.eval_create_params import DataSourceConfigCustom from openai.types.evals.run_create_response import RunCreateResponse from openai.types.evals.run_retrieve_response import RunRetrieveResponse +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py index b9ff12eba463..ea8628cbd8d1 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_redteam_evaluations.py @@ -1,4 +1,4 @@ -# pylint: disable=line-too-long,useless-suppression +# pylint: disable=line-too-long,useless-suppression,wrong-import-order # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -38,11 +38,10 @@ ) import json import time -from azure.ai.projects.models import EvaluationTaxonomy from typing import Union -def main() -> None: +def main() -> None: # pylint: disable=too-many-statements load_dotenv() # endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT", "") @@ -67,7 +66,7 @@ def main() -> None: data_source_config = {"type": "azure_ai_source", "scenario": "red_team"} testing_criteria = _get_agent_safety_evaluation_criteria() - print(f"Defining testing criteria for red teaming for agent target") + print("Defining testing criteria for red teaming for agent target") pprint(testing_criteria) print("Creating red teaming evaluation") @@ -95,7 +94,7 @@ def main() -> None: taxonomy = project_client.beta.evaluation_taxonomies.create(name=agent_name, body=eval_taxonomy_input) taxonomy_path = os.path.join(tempfile.gettempdir(), f"taxonomy_{agent_name}.json") - with open(taxonomy_path, "w") as f: + with open(taxonomy_path, "w", encoding="utf-8") as f: f.write(json.dumps(_to_json_primitive(taxonomy), indent=2)) print(f"Red teaming Taxonomy created for agent: {agent_name}. Taxonomy written to {taxonomy_path}") @@ -125,10 +124,10 @@ def main() -> None: while True: run = client.evals.runs.retrieve(run_id=eval_run_response.id, eval_id=eval_object.id) - if run.status == "completed" or run.status == "failed": + if run.status in ("completed", "failed"): output_items = list(client.evals.runs.output_items.list(run_id=run.id, eval_id=eval_object.id)) output_items_path = os.path.join(tempfile.gettempdir(), f"redteam_eval_output_items_{agent_name}.json") - with open(output_items_path, "w") as f: + with open(output_items_path, "w", encoding="utf-8") as f: f.write(json.dumps(_to_json_primitive(output_items), indent=2)) print( f"RedTeam Eval Run completed with status: {run.status}. 
Output items written to {output_items_path}" @@ -223,7 +222,7 @@ def _to_json_primitive(obj): if hasattr(obj, method): try: return _to_json_primitive(getattr(obj, method)()) - except Exception: + except Exception: # pylint: disable=broad-exception-caught pass if hasattr(obj, "__dict__"): return _to_json_primitive({k: v for k, v in vars(obj).items() if not k.startswith("_")}) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py index 742c6e100070..c24651ac8b0b 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_scheduled_evaluations.py @@ -1,4 +1,4 @@ -# pylint: disable=line-too-long,useless-suppression +# pylint: disable=line-too-long,useless-suppression,wrong-import-order,ungrouped-imports,no-else-raise,raise-missing-from # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -57,7 +57,6 @@ ) import json import time -from azure.ai.projects.models import EvaluationTaxonomy def main() -> None: @@ -69,7 +68,7 @@ def main() -> None: schedule_redteam_evaluation() -def assign_rbac(): +def assign_rbac(): # pylint: disable=too-many-statements """ Assign the "Azure AI User" role to the Microsoft Foundry project's Managed Identity. 
""" @@ -97,7 +96,7 @@ def assign_rbac(): return account_name = match.group(1) project_name = match.group(2) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"Error parsing endpoint: {e}") return @@ -135,7 +134,7 @@ def assign_rbac(): print("Error: Project does not have a managed identity enabled") return - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"Error retrieving project resource: {e}") return @@ -149,7 +148,7 @@ def assign_rbac(): # Create role assignment role_assignment_name = str(uuid.uuid4()) - print(f"Assigning 'Azure AI User' role to managed identity...") + print("Assigning 'Azure AI User' role to managed identity...") role_assignment = auth_client.role_assignments.create( scope=scope, @@ -161,10 +160,10 @@ def assign_rbac(): }, ) - print(f"Successfully assigned 'Azure AI User' role to project managed identity") + print("Successfully assigned 'Azure AI User' role to project managed identity") print(f"Role assignment ID: {role_assignment.name}") - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"Error during role assignment: {e}") # Check for specific error types and provide helpful guidance @@ -208,7 +207,7 @@ def assign_rbac(): print("This usually indicates a service availability issue.") else: - print(f"\n❌ UNEXPECTED ERROR:") + print("\n❌ UNEXPECTED ERROR:") print("An unexpected error occurred. 
Please check the error details above.") raise @@ -275,7 +274,7 @@ def schedule_dataset_evaluation() -> None: data_source_config=data_source_config, # type: ignore testing_criteria=testing_criteria, # type: ignore ) - print(f"Evaluation created") + print("Evaluation created") print("Get Evaluation by Id") eval_object_response = client.evals.retrieve(eval_object.id) @@ -292,7 +291,7 @@ def schedule_dataset_evaluation() -> None: ), } - print(f"Eval Run:") + print("Eval Run:") pprint(eval_run_object) print("Creating Schedule for dataset evaluation") schedule = Schedule( @@ -324,7 +323,7 @@ def schedule_dataset_evaluation() -> None: print("Dataset deleted") -def schedule_redteam_evaluation() -> None: +def schedule_redteam_evaluation() -> None: # pylint: disable=too-many-locals load_dotenv() # endpoint = os.environ.get("FOUNDRY_PROJECT_ENDPOINT", "") @@ -354,7 +353,7 @@ def schedule_redteam_evaluation() -> None: data_source_config = {"type": "azure_ai_source", "scenario": "red_team"} testing_criteria = _get_agent_safety_evaluation_criteria() - print(f"Defining testing criteria for red teaming for agent target") + print("Defining testing criteria for red teaming for agent target") pprint(testing_criteria) print("Creating Evaluation") @@ -384,7 +383,7 @@ def schedule_redteam_evaluation() -> None: taxonomy_path = os.path.join(data_folder, f"taxonomy_{agent_name}.json") # Create the data folder if it doesn't exist os.makedirs(data_folder, exist_ok=True) - with open(taxonomy_path, "w") as f: + with open(taxonomy_path, "w", encoding="utf-8") as f: f.write(json.dumps(_to_json_primitive(taxonomy), indent=2)) print(f"RedTeaming Taxonomy created for agent: {agent_name}. 
Taxonomy written to {taxonomy_path}") eval_run_object = { @@ -510,7 +509,7 @@ def _to_json_primitive(obj): if hasattr(obj, method): try: return _to_json_primitive(getattr(obj, method)()) - except Exception: + except Exception: # pylint: disable=broad-exception-caught pass if hasattr(obj, "__dict__"): return _to_json_primitive({k: v for k, v in vars(obj).items() if not k.startswith("_")}) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py index c67840c6325d..0e01e95fd8d8 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_agent_evaluation.py @@ -38,12 +38,14 @@ import time from pprint import pprint from typing import Union + from dotenv import load_dotenv +from openai.types.evals.run_create_response import RunCreateResponse +from openai.types.evals.run_retrieve_response import RunRetrieveResponse + from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient from azure.ai.projects.models import PromptAgentDefinition -from openai.types.evals.run_create_response import RunCreateResponse -from openai.types.evals.run_retrieve_response import RunRetrieveResponse load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py index 7bf1cc1a2e77..bf0a0120299b 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py @@ -37,12 +37,14 @@ import time from pprint import pprint from typing import Union + from dotenv import load_dotenv -from azure.identity import DefaultAzureCredential -from azure.ai.projects import 
AIProjectClient from openai.types.evals.run_create_response import RunCreateResponse from openai.types.evals.run_retrieve_response import RunRetrieveResponse +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient + load_dotenv() endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] diff --git a/sdk/ai/azure-ai-projects/samples/files/sample_files.py b/sdk/ai/azure-ai-projects/samples/files/sample_files.py index 51dac448a43e..8c96a934f300 100644 --- a/sdk/ai/azure-ai-projects/samples/files/sample_files.py +++ b/sdk/ai/azure-ai-projects/samples/files/sample_files.py @@ -22,10 +22,11 @@ """ import os -from azure.identity import DefaultAzureCredential +from pathlib import Path + from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from pathlib import Path load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py b/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py index 953fa2021c1b..d3bbe42b4896 100644 --- a/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py +++ b/sdk/ai/azure-ai-projects/samples/files/sample_files_async.py @@ -23,10 +23,11 @@ import asyncio import os +from pathlib import Path + from dotenv import load_dotenv from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient -from pathlib import Path load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py index 2a31c816741e..7366b24b49b1 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job.py @@ -26,9 +26,9 @@ import os from dotenv import load_dotenv +from fine_tuning_sample_helper import resolve_data_file_path # pylint: disable=import-error from azure.identity import 
DefaultAzureCredential from azure.ai.projects import AIProjectClient -from fine_tuning_sample_helper import resolve_data_file_path load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py index a21e9ca10387..8b04fc71b0ae 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_dpo_job_async.py @@ -27,9 +27,9 @@ import asyncio import os from dotenv import load_dotenv +from fine_tuning_sample_helper import resolve_data_file_path # pylint: disable=import-error from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient -from fine_tuning_sample_helper import resolve_data_file_path load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py index f3eebc756dfc..d944f157314c 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job.py @@ -27,9 +27,9 @@ import os from dotenv import load_dotenv +from fine_tuning_sample_helper import resolve_data_file_path # pylint: disable=import-error from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from fine_tuning_sample_helper import resolve_data_file_path load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py index 676a4a030d11..ca4e3e3789ac 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py +++ 
b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_oss_models_supervised_job_async.py @@ -28,9 +28,9 @@ import os import asyncio from dotenv import load_dotenv +from fine_tuning_sample_helper import resolve_data_file_path # pylint: disable=import-error from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient -from fine_tuning_sample_helper import resolve_data_file_path load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py index 9a222cffa076..f49c2dc474fd 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job.py @@ -27,9 +27,9 @@ import os from typing import Any, Dict from dotenv import load_dotenv +from fine_tuning_sample_helper import resolve_data_file_path # pylint: disable=import-error from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from fine_tuning_sample_helper import resolve_data_file_path load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py index 3d759737fbab..62ab5fed22c2 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_reinforcement_job_async.py @@ -27,9 +27,9 @@ import asyncio import os from dotenv import load_dotenv +from fine_tuning_sample_helper import resolve_data_file_path # pylint: disable=import-error from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient -from fine_tuning_sample_helper import resolve_data_file_path load_dotenv() diff --git 
a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py index a871254e90dd..2da39d05354e 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py @@ -1,4 +1,4 @@ -# pylint: disable=line-too-long,useless-suppression +# pylint: disable=line-too-long,useless-suppression,docstring-missing-param,docstring-missing-return,docstring-missing-rtype # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -32,13 +32,12 @@ """ import os -import time from dotenv import load_dotenv +from fine_tuning_sample_helper import resolve_data_file_path # pylint: disable=import-error from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient from azure.mgmt.cognitiveservices import CognitiveServicesManagementClient from azure.mgmt.cognitiveservices.models import Deployment, DeploymentProperties, DeploymentModel, Sku -from fine_tuning_sample_helper import resolve_data_file_path load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py index 94e5d2c94603..42c93f8b0ebf 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job_async.py @@ -1,4 +1,4 @@ -# pylint: disable=line-too-long,useless-suppression +# pylint: disable=line-too-long,useless-suppression,docstring-missing-param,docstring-missing-return,docstring-missing-rtype # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -34,11 +34,11 @@ import asyncio import os from dotenv import load_dotenv +from fine_tuning_sample_helper import resolve_data_file_path # pylint: disable=import-error from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient from azure.mgmt.cognitiveservices.aio import CognitiveServicesManagementClient as CognitiveServicesManagementClientAsync from azure.mgmt.cognitiveservices.models import Deployment, DeploymentProperties, DeploymentModel, Sku -from fine_tuning_sample_helper import resolve_data_file_path load_dotenv() @@ -104,7 +104,7 @@ async def deploy_model(openai_client, credential, job_id): deployment=deployment_config, ) - print(f"Waiting for deployment to complete...") + print("Waiting for deployment to complete...") await deployment.result() print(f"Model deployment completed: {deployment_name}") diff --git a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py index f84e6358d7b0..278f0f2c867e 100644 --- a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py @@ -1,4 +1,4 @@ -# pylint: disable=line-too-long,useless-suppression +# pylint: disable=line-too-long,useless-suppression,used-before-assignment,consider-using-with # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -48,11 +48,11 @@ import os import logging from dotenv import load_dotenv -from azure.ai.projects.aio import AIProjectClient -from azure.identity.aio import DefaultAzureCredential from mcp import ClientSession from mcp.types import ImageContent from mcp.client.streamable_http import streamablehttp_client +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import DefaultAzureCredential load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py index dc0e03d66528..e6c1b34b64b5 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced.py @@ -34,6 +34,7 @@ import os from dotenv import load_dotenv +from openai.types.responses import EasyInputMessageParam from azure.core.exceptions import ResourceNotFoundError from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient @@ -42,7 +43,6 @@ MemoryStoreDefaultOptions, MemorySearchOptions, ) -from openai.types.responses import EasyInputMessageParam load_dotenv() diff --git a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py index 4192fe11ee57..dc6950617e6a 100644 --- a/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py +++ b/sdk/ai/azure-ai-projects/samples/memories/sample_memory_advanced_async.py @@ -35,6 +35,7 @@ import asyncio import os from dotenv import load_dotenv +from openai.types.responses import EasyInputMessageParam from azure.core.exceptions import ResourceNotFoundError from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.aio import AIProjectClient @@ -43,7 +44,6 @@ MemoryStoreDefaultOptions, MemorySearchOptions, ) -from openai.types.responses import EasyInputMessageParam load_dotenv() diff --git 
a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py index 016cc1d532f2..cd7969dc6b9c 100644 --- a/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py +++ b/sdk/ai/azure-ai-projects/samples/red_team/sample_red_team_async.py @@ -52,7 +52,6 @@ async def sample_red_team_async() -> None: async with ( DefaultAzureCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client, - project_client.get_openai_client() as openai_client, ): # [START red_team_sample] print("Creating a Red Team scan for direct model testing") diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py index 065046543a61..f07f5fb6533e 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_events.py @@ -56,6 +56,6 @@ elif event.type == "response.output_text.delta": print(event.delta, end="", flush=True) elif event.type == "response.text.done": - print(f"\n\nResponse text done. Access final text in 'event.text'") + print("\n\nResponse text done. Access final text in 'event.text'") elif event.type == "response.completed": - print(f"\n\nResponse completed. Access final text in 'event.response.output_text'") + print("\n\nResponse completed. 
Access final text in 'event.response.output_text'") diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py index 87db2ada4d88..87a79415ca13 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_stream_manager.py @@ -54,6 +54,6 @@ elif event.type == "response.output_text.delta": print(event.delta, end="", flush=True) elif event.type == "response.text.done": - print(f"\n\nResponse text done. Access final text in 'event.text'") + print("\n\nResponse text done. Access final text in 'event.text'") elif event.type == "response.completed": - print(f"\n\nResponse completed. Access final text in 'event.response.output_text'") + print("\n\nResponse completed. Access final text in 'event.response.output_text'") diff --git a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py index 1119d969df9c..3f08898a32f6 100644 --- a/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/responses/sample_responses_structured_output.py @@ -28,9 +28,9 @@ import os from dotenv import load_dotenv +from pydantic import BaseModel, Field from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient -from pydantic import BaseModel, Field load_dotenv() diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/gen_ai_trace_verifier.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/gen_ai_trace_verifier.py index 5ad356d2470e..1fa804b06600 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/gen_ai_trace_verifier.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/gen_ai_trace_verifier.py @@ -60,7 +60,7 @@ def check_span_attributes(self, span, attributes): ) if 
span.attributes[attribute_name] < 0: raise AssertionError("Attribute value " + str(span.attributes[attribute_name]) + " is negative") - elif attribute_value != "" and span.attributes[attribute_name] != attribute_value: + elif attribute_value not in ("", span.attributes[attribute_name]): raise AssertionError( "Attribute value " + str(span.attributes[attribute_name]) @@ -109,7 +109,7 @@ def check_decorator_span_attributes(self, span: Span, attributes: List[tuple]) - raise AssertionError("Attribute value " + str(span_value) + " is not a number") if span_value < 0: raise AssertionError("Attribute value " + str(span_value) + " is negative") - elif attribute_value != "" and span_value != attribute_value: + elif attribute_value not in ("", span_value): raise AssertionError( "Attribute value " + str(span_value) + " does not match with " + str(attribute_value) ) @@ -122,9 +122,9 @@ def check_decorator_span_attributes(self, span: Span, attributes: List[tuple]) - def is_valid_json(self, my_string): try: json.loads(my_string) - except ValueError as e1: + except ValueError: return False - except TypeError as e2: + except TypeError: return False return True @@ -136,12 +136,11 @@ def check_json_string(self, expected_json, actual_json): # Handle both dict and list (array) formats if isinstance(expected_obj, list) and isinstance(actual_obj, list): return self.check_event_lists(expected_obj, actual_obj) - elif isinstance(expected_obj, dict) and isinstance(actual_obj, dict): + if isinstance(expected_obj, dict) and isinstance(actual_obj, dict): return self.check_event_attributes(expected_obj, actual_obj) - else: - raise AssertionError( - f"check_json_string: type mismatch - expected {type(expected_obj).__name__}, got {type(actual_obj).__name__}" - ) + raise AssertionError( + f"check_json_string: type mismatch - expected {type(expected_obj).__name__}, got {type(actual_obj).__name__}" + ) def check_event_lists(self, expected_list, actual_list): """Check if two lists match, handling 
nested dicts/lists.""" diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/memory_trace_exporter.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/memory_trace_exporter.py index e3d1345c6e73..19b650c9fb28 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/memory_trace_exporter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/memory_trace_exporter.py @@ -2,9 +2,9 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ +from typing import List, Sequence from opentelemetry.sdk.trace import Span from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult -from typing import List, Sequence class MemoryTraceExporter(SpanExporter): @@ -46,4 +46,4 @@ def get_spans_by_name(self, name: str) -> List[Span]: return [span for span in self._trace_list if span.name == name] def get_spans(self) -> List[Span]: - return [span for span in self._trace_list] + return list(self._trace_list) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py index bdd35586e4e0..ac2a033ec164 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py @@ -3,54 +3,34 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
# ------------------------------------ -# cSpell:disable# cSpell:disable -import pytest +# cSpell:disable import os from typing import Optional -from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from azure.ai.projects.models import PromptAgentDefinition, PromptAgentDefinitionTextOptions - -from azure.ai.projects.models import ( - Reasoning, - FunctionTool, - # ResponseTextFormatConfigurationText, -) +import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error from devtools_testutils import ( recorded_by_proxy, ) - from test_base import servicePreparer -from test_ai_instrumentor_base import ( +from test_ai_instrumentor_base import ( # pylint: disable=import-error TestAiAgentsInstrumentorBase, - MessageCreationMode, CONTENT_TRACING_ENV_VARIABLE, ) - +from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils +from azure.core.settings import settings +from azure.ai.projects.models import PromptAgentDefinition, PromptAgentDefinitionTextOptions from azure.ai.projects.telemetry._utils import ( - AZ_NAMESPACE, - AZ_NAMESPACE_VALUE, GEN_AI_AGENT_ID, GEN_AI_AGENT_NAME, GEN_AI_AGENT_VERSION, - GEN_AI_CONVERSATION_ID, GEN_AI_EVENT_CONTENT, GEN_AI_OPERATION_NAME, GEN_AI_PROVIDER_NAME, GEN_AI_REQUEST_MODEL, - GEN_AI_RESPONSE_FINISH_REASONS, - GEN_AI_RESPONSE_ID, - GEN_AI_RESPONSE_MODEL, - GEN_AI_SYSTEM, - GEN_AI_USAGE_INPUT_TOKENS, - GEN_AI_USAGE_OUTPUT_TOKENS, SERVER_ADDRESS, GEN_AI_AGENT_TYPE, GEN_AI_SYSTEM_INSTRUCTION_EVENT, GEN_AI_AGENT_WORKFLOW_EVENT, - GEN_AI_CONVERSATION_ITEM_TYPE, - AZURE_AI_AGENTS_SYSTEM, AGENTS_PROVIDER, AGENT_TYPE_PROMPT, AGENT_TYPE_WORKFLOW, @@ -58,10 +38,10 @@ ) settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable -class 
TestAiAgentsInstrumentor(TestAiAgentsInstrumentorBase): +class TestAiAgentsInstrumentor(TestAiAgentsInstrumentorBase): # pylint: disable=too-many-public-methods """Tests for AI agents instrumentor.""" @pytest.fixture(scope="function") @@ -78,7 +58,7 @@ def instrument_without_content(self): yield self.cleanup() - def test_instrumentation(self, **kwargs): + def test_instrumentation(self): # Make sure code is not instrumented due to a previous test exception AIProjectInstrumentor().uninstrument() os.environ["AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING"] = "true" @@ -96,7 +76,7 @@ def test_instrumentation(self, **kwargs): os.environ.pop("AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING", None) assert exception_caught == False - def test_instrumenting_twice_does_not_cause_exception(self, **kwargs): + def test_instrumenting_twice_does_not_cause_exception(self): # Make sure code is not instrumented due to a previous test exception AIProjectInstrumentor().uninstrument() os.environ["AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING"] = "true" @@ -112,7 +92,7 @@ def test_instrumenting_twice_does_not_cause_exception(self, **kwargs): os.environ.pop("AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING", None) assert exception_caught == False - def test_uninstrumenting_uninstrumented_does_not_cause_exception(self, **kwargs): + def test_uninstrumenting_uninstrumented_does_not_cause_exception(self): # Make sure code is not instrumented due to a previous test exception AIProjectInstrumentor().uninstrument() exception_caught = False @@ -123,7 +103,7 @@ def test_uninstrumenting_uninstrumented_does_not_cause_exception(self, **kwargs) print(e) assert exception_caught == False - def test_uninstrumenting_twice_does_not_cause_exception(self, **kwargs): + def test_uninstrumenting_twice_does_not_cause_exception(self): # Make sure code is not instrumented due to a previous test exception AIProjectInstrumentor().uninstrument() os.environ["AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING"] = "true" @@ -178,7 +158,7 @@ def 
test_experimental_genai_tracing_gate(self, env_value: Optional[str], should_ from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor - from memory_trace_exporter import MemoryTraceExporter + from memory_trace_exporter import MemoryTraceExporter # pylint: disable=import-error tracer_provider = TracerProvider() trace._TRACER_PROVIDER = tracer_provider @@ -490,7 +470,7 @@ def _test_workflow_agent_creation_impl(self, use_events: bool, content_recording from azure.ai.projects.models import WorkflowAgentDefinition operation_group = "tracing" if content_recording_enabled else "agents" - with self.create_client(operation_group=operation_group, **kwargs) as project_client: + with self.create_client(operation_group=operation_group, allow_preview=True, **kwargs) as project_client: workflow_yaml = """ kind: workflow @@ -588,7 +568,7 @@ def test_workflow_agent_creation_with_tracing_content_recording_disabled_with_at def _test_agent_with_structured_output_with_instructions_impl( self, use_events: bool, content_recording_enabled: bool, **kwargs - ): + ): # pylint: disable=too-many-locals,too-many-statements """Implementation for agent with structured output and instructions test. :param use_events: If True, use events for messages. If False, use attributes. @@ -776,7 +756,7 @@ def test_agent_with_structured_output_with_instructions_content_recording_disabl def _test_agent_with_structured_output_without_instructions_impl( self, use_events: bool, content_recording_enabled: bool, **kwargs - ): + ): # pylint: disable=too-many-locals,too-many-statements """Implementation for agent with structured output but NO instructions test. :param use_events: If True, use events for messages. If False, use attributes. 
diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py index e1bbef6d0511..cdb7f5d46ed7 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py @@ -6,48 +6,28 @@ # cSpell:disable import os import pytest -from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from azure.ai.projects.models import PromptAgentDefinition, PromptAgentDefinitionTextOptions -from azure.ai.projects.models import ( - Reasoning, - FunctionTool, - # ResponseTextFormatConfigurationText, -) - +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error from devtools_testutils.aio import recorded_by_proxy_async - from test_base import servicePreparer -from test_ai_instrumentor_base import ( +from test_ai_instrumentor_base import ( # pylint: disable=import-error TestAiAgentsInstrumentorBase, - MessageCreationMode, CONTENT_TRACING_ENV_VARIABLE, ) - +from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils +from azure.core.settings import settings +from azure.ai.projects.models import PromptAgentDefinition, PromptAgentDefinitionTextOptions from azure.ai.projects.telemetry._utils import ( - AZ_NAMESPACE, - AZ_NAMESPACE_VALUE, GEN_AI_AGENT_ID, GEN_AI_AGENT_NAME, GEN_AI_AGENT_VERSION, - GEN_AI_CONVERSATION_ID, GEN_AI_EVENT_CONTENT, GEN_AI_OPERATION_NAME, GEN_AI_PROVIDER_NAME, GEN_AI_REQUEST_MODEL, - GEN_AI_RESPONSE_FINISH_REASONS, - GEN_AI_RESPONSE_ID, - GEN_AI_RESPONSE_MODEL, - GEN_AI_SYSTEM, - GEN_AI_USAGE_INPUT_TOKENS, - GEN_AI_USAGE_OUTPUT_TOKENS, SERVER_ADDRESS, GEN_AI_AGENT_TYPE, GEN_AI_SYSTEM_INSTRUCTION_EVENT, GEN_AI_AGENT_WORKFLOW_EVENT, - GEN_AI_CONVERSATION_ITEM_TYPE, - 
AZURE_AI_AGENTS_SYSTEM, AGENTS_PROVIDER, AGENT_TYPE_PROMPT, AGENT_TYPE_WORKFLOW, @@ -55,7 +35,7 @@ ) settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable class TestAiAgentsInstrumentor(TestAiAgentsInstrumentorBase): @@ -241,7 +221,7 @@ async def _test_agent_creation_with_tracing_content_recording_disabled_impl(self else: # When using attributes and content recording disabled, verify empty structure from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE - import json + import json # pylint: disable=reimported assert span.attributes is not None assert GEN_AI_SYSTEM_MESSAGE in span.attributes @@ -286,7 +266,7 @@ async def _test_workflow_agent_creation_impl(self, use_events: bool, content_rec from azure.ai.projects.models import WorkflowAgentDefinition operation_group = "tracing" if content_recording_enabled else "agents" - project_client = self.create_async_client(operation_group=operation_group, **kwargs) + project_client = self.create_async_client(operation_group=operation_group, allow_preview=True, **kwargs) async with project_client: workflow_yaml = """ @@ -383,7 +363,7 @@ async def test_workflow_agent_creation_with_tracing_content_recording_disabled_w async def _test_agent_with_structured_output_with_instructions_impl( self, use_events: bool, content_recording_enabled: bool, **kwargs - ): + ): # pylint: disable=too-many-locals,too-many-statements """Implementation for structured output with instructions test (async). :param use_events: If True, use events for messages. If False, use attributes. 
@@ -568,7 +548,7 @@ async def test_agent_with_structured_output_with_instructions_content_recording_ async def _test_agent_with_structured_output_without_instructions_impl( self, use_events: bool, content_recording_enabled: bool, **kwargs - ): + ): # pylint: disable=too-many-locals,too-many-statements """Implementation for structured output without instructions test (async). :param use_events: If True, use events for messages. If False, use attributes. diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_instrumentor_base.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_instrumentor_base.py index 44d97d825b62..945fa1da977e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_instrumentor_base.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_instrumentor_base.py @@ -11,10 +11,10 @@ from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor -from azure.ai.projects.telemetry import AIProjectInstrumentor -from gen_ai_trace_verifier import GenAiTraceVerifier -from memory_trace_exporter import MemoryTraceExporter +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from memory_trace_exporter import MemoryTraceExporter # pylint: disable=import-error from test_base import TestBase +from azure.ai.projects.telemetry import AIProjectInstrumentor CONTENT_TRACING_ENV_VARIABLE = "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT" EXPERIMENTAL_ENABLE_GENAI_TRACING_ENV_VARIABLE = "AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING" @@ -53,7 +53,7 @@ def setup_telemetry(self): os.environ[EXPERIMENTAL_ENABLE_GENAI_TRACING_ENV_VARIABLE] = "true" tracer_provider = TracerProvider() trace._TRACER_PROVIDER = tracer_provider - self.exporter = MemoryTraceExporter() + self.exporter = MemoryTraceExporter() # pylint: disable=attribute-defined-outside-init span_processor = SimpleSpanProcessor(self.exporter) 
tracer_provider.add_span_processor(span_processor) AIProjectInstrumentor().instrument() @@ -77,7 +77,7 @@ def _check_spans( event_contents: List[str], run_step_events: Optional[List[List[Dict[str, Any]]]] = None, has_annotations: bool = False, - ): + ): # pylint: disable=too-many-statements """Check the spans for correctness.""" spans = self.exporter.get_spans_by_name("create_agent my-agent") assert len(spans) == 1 diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py index 7556c4933eaa..6f15e3e0ad74 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py @@ -6,8 +6,16 @@ import os import json -import pytest from typing import Optional, Tuple +import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from openai import OpenAI +from devtools_testutils import recorded_by_proxy, RecordedTransport, set_custom_default_matcher, add_body_key_sanitizer +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_CHAT, @@ -18,20 +26,11 @@ _set_use_simple_tool_format, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from openai import OpenAI -from devtools_testutils import recorded_by_proxy, RecordedTransport, set_custom_default_matcher, add_body_key_sanitizer from azure.ai.projects.models import PromptAgentDefinition, FunctionTool - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) +from 
azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable # Environment variable for binary data tracing BINARY_DATA_TRACING_ENV_VARIABLE = "AZURE_TRACING_GEN_AI_INCLUDE_BINARY_DATA" @@ -53,11 +52,11 @@ ) -class TestResponsesInstrumentor(TestAiAgentsInstrumentorBase): +class TestResponsesInstrumentor(TestAiAgentsInstrumentorBase): # pylint: disable=too-many-public-methods """Tests for ResponsesInstrumentor with real endpoints.""" @pytest.fixture(scope="session", autouse=True) - def configure_playback_matcher(self, test_proxy, add_sanitizers): + def configure_playback_matcher(self, test_proxy, add_sanitizers): # pylint: disable=unused-argument """Add body sanitizer and custom matchers for image_url in requests.""" # Sanitize image_url in request body to a consistent placeholder add_body_key_sanitizer(json_path="$..image_url", value="SANITIZED_IMAGE_DATA") @@ -82,7 +81,7 @@ def _get_openai_client_and_deployment(self, **kwargs) -> Tuple[OpenAI, str]: return openai_client, model_deployment_name - def test_instrumentation(self, **kwargs): + def test_instrumentation(self): # Make sure code is not instrumented due to a previous test exception AIProjectInstrumentor().uninstrument() os.environ["AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING"] = "true" @@ -100,7 +99,7 @@ def test_instrumentation(self, **kwargs): os.environ.pop("AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING", None) assert exception_caught == False - def test_instrumenting_twice_does_not_cause_exception(self, **kwargs): + def test_instrumenting_twice_does_not_cause_exception(self): # Make sure code is not instrumented due to a previous test exception AIProjectInstrumentor().uninstrument() os.environ["AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING"] = "true" @@ -116,7 +115,7 @@ def test_instrumenting_twice_does_not_cause_exception(self, **kwargs): 
os.environ.pop("AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING", None) assert exception_caught == False - def test_uninstrumenting_uninstrumented_does_not_cause_exception(self, **kwargs): + def test_uninstrumenting_uninstrumented_does_not_cause_exception(self): # Make sure code is not instrumented due to a previous test exception AIProjectInstrumentor().uninstrument() exception_caught = False @@ -127,7 +126,7 @@ def test_uninstrumenting_uninstrumented_does_not_cause_exception(self, **kwargs) print(e) assert exception_caught == False - def test_uninstrumenting_twice_does_not_cause_exception(self, **kwargs): + def test_uninstrumenting_twice_does_not_cause_exception(self): # Make sure code is not instrumented due to a previous test exception AIProjectInstrumentor().uninstrument() os.environ["AZURE_EXPERIMENTAL_ENABLE_GENAI_TRACING"] = "true" @@ -168,24 +167,23 @@ def set_env_var(var_name, value): self.cleanup() @pytest.mark.parametrize( - "env_value, expected_enabled, expected_instrumented", + "env_value, expected_enabled", [ - (None, True, True), # Default: enabled and instrumented - ("true", True, True), # Explicitly enabled - ("True", True, True), # Case insensitive - ("TRUE", True, True), # Case insensitive - ("false", False, False), # Explicitly disabled - ("False", False, False), # Case insensitive - ("random", False, False), # Invalid value treated as false - ("0", False, False), # Numeric false - ("1", False, False), # Numeric true but not "true" + (None, True), # Default: enabled and instrumented + ("true", True), # Explicitly enabled + ("True", True), # Case insensitive + ("TRUE", True), # Case insensitive + ("false", False), # Explicitly disabled + ("False", False), # Case insensitive + ("random", False), # Invalid value treated as false + ("0", False), # Numeric false + ("1", False), # Numeric true but not "true" ], ) def test_instrumentation_environment_variable( self, env_value: Optional[str], expected_enabled: bool, - expected_instrumented: bool, ): def 
set_env_var(var_name, value): if value is None: @@ -460,10 +458,10 @@ def test_sync_non_streaming_without_content_recording_attributes(self, **kwargs) """Test synchronous non-streaming responses with content recording disabled (attribute mode).""" self._test_sync_non_streaming_without_content_recording_impl(False, **kwargs) - def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs): + def _test_sync_streaming_with_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-statements """Implementation for testing synchronous streaming responses with content recording enabled.""" - from openai.types.responses.response_input_param import FunctionCallOutput - self.cleanup() _set_use_message_events(use_events) os.environ.update( @@ -656,7 +654,6 @@ def test_sync_conversations_create(self, **kwargs): with self.create_client(operation_group="tracing", **kwargs) as project_client: # Get the OpenAI client from the project client client = project_client.get_openai_client() - deployment_name = kwargs.get("foundry_model_name") # Create a conversation conversation = client.conversations.create() @@ -844,7 +841,7 @@ def test_no_instrumentation_no_spans(self): from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor - from memory_trace_exporter import MemoryTraceExporter + from memory_trace_exporter import MemoryTraceExporter # pylint: disable=import-error tracer_provider = TracerProvider() trace._TRACER_PROVIDER = tracer_provider @@ -972,7 +969,7 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg assert len(output_messages[0]["parts"][0]["content"]) > 0 assert "finish_reason" in output_messages[0] - def _test_sync_function_tool_with_content_recording_non_streaming_impl( + def _test_sync_function_tool_with_content_recording_non_streaming_impl( # pylint: disable=too-many-locals,too-many-statements self, 
use_events, use_simple_tool_call_format=False, **kwargs ): """Implementation for testing synchronous function tool usage with content recording (non-streaming).""" @@ -1051,14 +1048,14 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl( ) # Second request - provide function results - response2 = client.responses.create( + _response2 = client.responses.create( conversation=conversation.id, input=input_list, extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) - assert hasattr(response2, "output") - assert response2.output is not None + assert hasattr(_response2, "output") + assert _response2.output is not None # Cleanup project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) @@ -1231,7 +1228,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl( assert len(output_messages[0]["parts"][0]["content"]) > 0 assert "finish_reason" in output_messages[0] - def _test_sync_function_tool_with_content_recording_streaming_impl( + def _test_sync_function_tool_with_content_recording_streaming_impl( # pylint: disable=too-many-locals,too-many-branches,too-many-statements self, use_events, use_simple_tool_call_format=False, **kwargs ): """Implementation for testing synchronous function tool usage with content recording (streaming).""" @@ -1555,7 +1552,7 @@ def test_sync_function_tool_with_content_recording_streaming_simple_format_attri False, use_simple_tool_call_format=True, **kwargs ) - def _test_sync_function_tool_without_content_recording_non_streaming_impl( + def _test_sync_function_tool_without_content_recording_non_streaming_impl( # pylint: disable=too-many-locals,too-many-statements self, use_events, use_simple_tool_call_format=False, **kwargs ): """Implementation for testing synchronous function tool usage without content recording (non-streaming).""" @@ -1634,13 +1631,13 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl( ) # 
Second request - provide function results - response2 = client.responses.create( + _response2 = client.responses.create( conversation=conversation.id, input=input_list, extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) - assert hasattr(response2, "output") + assert hasattr(_response2, "output") # Cleanup project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) @@ -1791,7 +1788,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl( assert input_messages[0]["parts"][0]["content"]["type"] == "function_call_output" assert "id" in input_messages[0]["parts"][0]["content"] - def _test_sync_function_tool_without_content_recording_streaming_impl( + def _test_sync_function_tool_without_content_recording_streaming_impl( # pylint: disable=too-many-locals,too-many-branches,too-many-statements self, use_events, use_simple_tool_call_format=False, **kwargs ): """Implementation for testing synchronous function tool usage without content recording (streaming).""" @@ -2186,7 +2183,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, ) # Second request - provide function results - response2 = client.responses.create( + _response2 = client.responses.create( conversation=conversation.id, input=input_list, extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, @@ -2336,7 +2333,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se ) # Second request - provide function results - response2 = client.responses.create( + _response2 = client.responses.create( conversation=conversation.id, input=input_list, extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, @@ -4612,7 +4609,7 @@ def test_responses_stream_method_with_content_recording(self, **kwargs): input="Write a short haiku about testing", ) as stream: # Iterate through events - for event in stream: + for _ in stream: 
pass # Process events # Get final response @@ -4665,7 +4662,7 @@ def test_responses_stream_method_without_content_recording(self, **kwargs): input="Write a short haiku about testing", ) as stream: # Iterate through events - for event in stream: + for _ in stream: pass # Process events # Get final response @@ -4739,7 +4736,7 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg input="What's the weather in Boston?", tools=[function_tool], ) as stream: - for event in stream: + for _ in stream: pass # Process events final_response = stream.get_final_response() @@ -4770,7 +4767,7 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg input=input_list, tools=[function_tool], ) as stream: - for event in stream: + for _ in stream: pass # Process events final_response = stream.get_final_response() @@ -4801,7 +4798,7 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg assert attributes_match == True # Validate second span (tool output + final response) - span2 = spans[1] + _span2 = spans[1] # pylint: disable=unused-variable @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @@ -4853,7 +4850,7 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw input="What's the weather in Boston?", tools=[function_tool], ) as stream: - for event in stream: + for _ in stream: pass # Process events final_response = stream.get_final_response() @@ -4884,7 +4881,7 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw input=input_list, tools=[function_tool], ) as stream: - for event in stream: + for _ in stream: pass # Process events final_response = stream.get_final_response() @@ -4942,9 +4939,11 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - 
def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): + def test_workflow_agent_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test workflow agent with non-streaming and content recording enabled.""" - from azure.ai.projects.models import ( + from azure.ai.projects.models import ( # pylint: disable=reimported,redefined-outer-name WorkflowAgentDefinition, PromptAgentDefinition, ) @@ -4959,7 +4958,7 @@ def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): self.setup_telemetry() assert True == AIProjectInstrumentor().is_content_recording_enabled() - with self.create_client(operation_group="tracing", **kwargs) as project_client: + with self.create_client(operation_group="tracing", allow_preview=True, **kwargs) as project_client: deployment_name = kwargs.get("foundry_model_name") openai_client = project_client.get_openai_client() @@ -4968,7 +4967,7 @@ def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that create pre-school math question for student and check answer. + instructions="""You are a teacher that create pre-school math question for student and check answer. If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), @@ -4979,7 +4978,7 @@ def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), ) @@ -5144,7 +5143,9 @@ def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_workflow_agent_non_streaming_without_content_recording(self, **kwargs): + def test_workflow_agent_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-statements """Test workflow agent with non-streaming and content recording disabled.""" from azure.ai.projects.models import WorkflowAgentDefinition @@ -5158,9 +5159,10 @@ def test_workflow_agent_non_streaming_without_content_recording(self, **kwargs): self.setup_telemetry() assert False == AIProjectInstrumentor().is_content_recording_enabled() - with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("foundry_model_name") - openai_client = project_client.get_openai_client() + with ( + self.create_client(operation_group="tracing", allow_preview=True, **kwargs) as project_client, + project_client.get_openai_client() as openai_client, + ): workflow_yaml = """ kind: workflow @@ -5180,7 +5182,7 @@ def test_workflow_agent_non_streaming_without_content_recording(self, **kwargs): conversation = openai_client.conversations.create() - response = openai_client.responses.create( + _response = openai_client.responses.create( conversation=conversation.id, extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", @@ -5258,9 +5260,11 @@ def test_workflow_agent_non_streaming_without_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_workflow_agent_streaming_with_content_recording(self, **kwargs): + def 
test_workflow_agent_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test workflow agent with streaming and content recording enabled.""" - from azure.ai.projects.models import ( + from azure.ai.projects.models import ( # pylint: disable=reimported,redefined-outer-name WorkflowAgentDefinition, PromptAgentDefinition, ) @@ -5275,7 +5279,7 @@ def test_workflow_agent_streaming_with_content_recording(self, **kwargs): self.setup_telemetry() assert True == AIProjectInstrumentor().is_content_recording_enabled() - with self.create_client(operation_group="tracing", **kwargs) as project_client: + with self.create_client(operation_group="tracing", allow_preview=True, **kwargs) as project_client: deployment_name = kwargs.get("foundry_model_name") openai_client = project_client.get_openai_client() @@ -5284,7 +5288,7 @@ def test_workflow_agent_streaming_with_content_recording(self, **kwargs): agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that create pre-school math question for student and check answer. + instructions="""You are a teacher that create pre-school math question for student and check answer. If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), @@ -5295,7 +5299,7 @@ def test_workflow_agent_streaming_with_content_recording(self, **kwargs): agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), ) @@ -5463,7 +5467,7 @@ def test_workflow_agent_streaming_with_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_workflow_agent_streaming_without_content_recording(self, **kwargs): + def test_workflow_agent_streaming_without_content_recording(self, **kwargs): # pylint: disable=too-many-statements """Test workflow agent with streaming and content recording disabled.""" from azure.ai.projects.models import WorkflowAgentDefinition @@ -5477,9 +5481,10 @@ def test_workflow_agent_streaming_without_content_recording(self, **kwargs): self.setup_telemetry() assert False == AIProjectInstrumentor().is_content_recording_enabled() - with self.create_client(operation_group="tracing", **kwargs) as project_client: - deployment_name = kwargs.get("foundry_model_name") - openai_client = project_client.get_openai_client() + with ( + self.create_client(operation_group="tracing", allow_preview=True, **kwargs) as project_client, + project_client.get_openai_client() as openai_client, + ): workflow_yaml = """ kind: workflow diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py index 227b353fa800..a0c7c0f5ad6c 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression +# pylint: disable=too-many-lines,line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -7,6 +7,14 @@ import os import json import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_CHAT, @@ -19,14 +27,6 @@ ) from azure.ai.projects.models import FunctionTool, PromptAgentDefinition from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils.aio import recorded_by_proxy_async -from devtools_testutils import RecordedTransport -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) BINARY_DATA_TRACING_ENV_VARIABLE = "AZURE_TRACING_GEN_AI_INCLUDE_BINARY_DATA" @@ -34,10 +34,10 @@ TEST_IMAGE_BASE64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==" settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable -class TestResponsesInstrumentor(TestAiAgentsInstrumentorBase): +class TestResponsesInstrumentor(TestAiAgentsInstrumentorBase): # pylint: disable=too-many-public-methods """Tests for ResponsesInstrumentor with real endpoints (async).""" async def _test_async_non_streaming_with_content_recording_impl(self, use_events, **kwargs): @@ -277,7 +277,6 @@ async def test_async_conversations_create(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = 
kwargs.get("foundry_model_name") async with project_client: # Get the OpenAI client from the project client @@ -388,7 +387,7 @@ async def test_async_list_conversation_items_with_content_recording(self, **kwar events_match = GenAiTraceVerifier().check_span_events(span, expected_events) assert events_match == True - async def _test_async_function_tool_with_content_recording_streaming_impl( + async def _test_async_function_tool_with_content_recording_streaming_impl( # pylint: disable=too-many-locals,too-many-statements self, use_events, use_simple_tool_call_format=False, **kwargs ): """Implementation for testing asynchronous function tool usage with content recording (streaming). @@ -649,7 +648,7 @@ async def test_async_function_tool_with_content_recording_streaming_simple_forma False, use_simple_tool_call_format=True, **kwargs ) - async def _test_async_function_tool_without_content_recording_streaming_impl( + async def _test_async_function_tool_without_content_recording_streaming_impl( # pylint: disable=too-many-locals,too-many-statements self, use_events, use_simple_tool_call_format=False, **kwargs ): """Implementation for testing asynchronous function tool usage without content recording (streaming). 
@@ -2593,7 +2592,7 @@ async def test_async_responses_stream_method_with_content_recording(self, **kwar input="Write a short haiku about testing", ) as stream: # Iterate through events - async for event in stream: + async for _ in stream: pass # Process events # Get final response @@ -2673,7 +2672,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k input="Write a short haiku about testing", ) as stream: # Iterate through events - async for event in stream: + async for _ in stream: pass # Process events # Get final response @@ -2724,7 +2723,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k events_match = GenAiTraceVerifier().check_span_events(span, expected_events) assert events_match == True - async def _test_async_responses_stream_method_with_tools_with_content_recording_impl( + async def _test_async_responses_stream_method_with_tools_with_content_recording_impl( # pylint: disable=too-many-locals,too-many-statements self, use_events, use_simple_tool_call_format=False, **kwargs ): """Implementation for testing async responses.stream() method with function tools and content recording. 
@@ -2780,7 +2779,7 @@ async def _test_async_responses_stream_method_with_tools_with_content_recording_ input="What's the weather in Boston?", tools=[function_tool], ) as stream: - async for event in stream: + async for _ in stream: pass # Process events final_response = await stream.get_final_response() @@ -2811,7 +2810,7 @@ async def _test_async_responses_stream_method_with_tools_with_content_recording_ input=input_list, tools=[function_tool], ) as stream: - async for event in stream: + async for _ in stream: pass # Process events final_response = await stream.get_final_response() @@ -2957,7 +2956,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording_s False, use_simple_tool_call_format=True, **kwargs ) - async def _test_async_responses_stream_method_with_tools_without_content_recording_impl( + async def _test_async_responses_stream_method_with_tools_without_content_recording_impl( # pylint: disable=too-many-locals,too-many-statements self, use_events, use_simple_tool_call_format=False, **kwargs ): """Implementation for testing async responses.stream() method with function tools without content recording. 
@@ -3013,7 +3012,7 @@ async def _test_async_responses_stream_method_with_tools_without_content_recordi input="What\\'s the weather in Boston?", tools=[function_tool], ) as stream: - async for event in stream: + async for _ in stream: pass # Process events final_response = await stream.get_final_response() @@ -3044,7 +3043,7 @@ async def _test_async_responses_stream_method_with_tools_without_content_recordi input=input_list, tools=[function_tool], ) as stream: - async for event in stream: + async for _ in stream: pass # Process events final_response = await stream.get_final_response() @@ -3199,7 +3198,9 @@ async def test_async_responses_stream_method_with_tools_without_content_recordin @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_workflow_agent_non_streaming_with_content_recording(self, **kwargs): + async def test_async_workflow_agent_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-statements """Test async workflow agent with non-streaming and content recording enabled.""" from azure.ai.projects.models import WorkflowAgentDefinition @@ -3213,8 +3214,7 @@ async def test_async_workflow_agent_non_streaming_with_content_recording(self, * self.setup_telemetry() assert True == AIProjectInstrumentor().is_content_recording_enabled() - project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("foundry_model_name") + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) async with project_client: # Create a simple workflow agent @@ -3237,7 +3237,7 @@ async def test_async_workflow_agent_non_streaming_with_content_recording(self, * openai_client = project_client.get_openai_client() conversation = await openai_client.conversations.create() - response = await openai_client.responses.create( + _ = await 
openai_client.responses.create( conversation=conversation.id, extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", @@ -3316,7 +3316,9 @@ async def test_async_workflow_agent_non_streaming_with_content_recording(self, * @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_workflow_agent_non_streaming_without_content_recording(self, **kwargs): + async def test_async_workflow_agent_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-statements """Test async workflow agent with non-streaming and content recording disabled.""" from azure.ai.projects.models import WorkflowAgentDefinition @@ -3330,8 +3332,7 @@ async def test_async_workflow_agent_non_streaming_without_content_recording(self self.setup_telemetry() assert False == AIProjectInstrumentor().is_content_recording_enabled() - project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("foundry_model_name") + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) async with project_client: workflow_yaml = """ @@ -3353,7 +3354,7 @@ async def test_async_workflow_agent_non_streaming_without_content_recording(self openai_client = project_client.get_openai_client() conversation = await openai_client.conversations.create() - response = await openai_client.responses.create( + _ = await openai_client.responses.create( conversation=conversation.id, extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", @@ -3439,7 +3440,9 @@ async def test_async_workflow_agent_non_streaming_without_content_recording(self @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - 
async def test_async_workflow_agent_streaming_with_content_recording(self, **kwargs): + async def test_async_workflow_agent_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-statements """Test async workflow agent with streaming and content recording enabled.""" from azure.ai.projects.models import WorkflowAgentDefinition @@ -3453,8 +3456,7 @@ async def test_async_workflow_agent_streaming_with_content_recording(self, **kwa self.setup_telemetry() assert True == AIProjectInstrumentor().is_content_recording_enabled() - project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("foundry_model_name") + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) async with project_client: workflow_yaml = """ @@ -3560,7 +3562,9 @@ async def test_async_workflow_agent_streaming_with_content_recording(self, **kwa @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_workflow_agent_streaming_without_content_recording(self, **kwargs): + async def test_async_workflow_agent_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-statements """Test async workflow agent with streaming and content recording disabled.""" from azure.ai.projects.models import WorkflowAgentDefinition @@ -3574,8 +3578,7 @@ async def test_async_workflow_agent_streaming_without_content_recording(self, ** self.setup_telemetry() assert False == AIProjectInstrumentor().is_content_recording_enabled() - project_client = self.create_async_client(operation_group="tracing", **kwargs) - deployment_name = kwargs.get("foundry_model_name") + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) async with project_client: workflow_yaml = """ diff --git 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py index 592cf8b36b54..f9df1a9fb9be 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py @@ -9,6 +9,13 @@ import os import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils import recorded_by_proxy, RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -16,19 +23,11 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import PromptAgentDefinition - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable @pytest.mark.skip( @@ -37,6 +36,8 @@ class TestResponsesInstrumentorBrowserAutomation(TestAiAgentsInstrumentorBase): """Tests for ResponsesInstrumentor with browser automation agents.""" + # pylint: disable=too-many-nested-blocks + # ======================================== # Sync Browser Automation Tests - Non-Streaming # 
======================================== @@ -44,7 +45,9 @@ class TestResponsesInstrumentorBrowserAutomation(TestAiAgentsInstrumentorBase): @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_browser_automation_non_streaming_with_content_recording(self, **kwargs): + def test_sync_browser_automation_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements,too-many-nested-blocks """Test synchronous browser automation agent with non-streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -180,7 +183,9 @@ def test_sync_browser_automation_non_streaming_with_content_recording(self, **kw @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_browser_automation_non_streaming_without_content_recording(self, **kwargs): + def test_sync_browser_automation_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements,too-many-nested-blocks """Test synchronous browser automation agent with non-streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -310,7 +315,9 @@ def test_sync_browser_automation_non_streaming_without_content_recording(self, * @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs): + def test_sync_browser_automation_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements,too-many-nested-blocks """Test synchronous browser automation agent with streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -438,7 
+445,9 @@ def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_browser_automation_streaming_without_content_recording(self, **kwargs): + def test_sync_browser_automation_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements,too-many-nested-blocks """Test synchronous browser automation agent with streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py index 55e3c949804f..f26d14d276bf 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py @@ -8,7 +8,16 @@ """ import os +import json import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -16,22 +25,11 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils.aio import recorded_by_proxy_async -from devtools_testutils import RecordedTransport from 
azure.ai.projects.models import PromptAgentDefinition - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) - -import json +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable @pytest.mark.skip( @@ -40,6 +38,8 @@ class TestResponsesInstrumentorBrowserAutomationAsync(TestAiAgentsInstrumentorBase): """Async tests for ResponsesInstrumentor with browser automation agents.""" + # pylint: disable=too-many-nested-blocks + # ======================================== # Async Browser Automation Tests - Non-Streaming # ======================================== @@ -47,7 +47,9 @@ class TestResponsesInstrumentorBrowserAutomationAsync(TestAiAgentsInstrumentorBa @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_browser_automation_non_streaming_with_content_recording(self, **kwargs): + async def test_async_browser_automation_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements,too-many-nested-blocks """Test asynchronous browser automation agent with non-streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -179,7 +181,9 @@ async def test_async_browser_automation_non_streaming_with_content_recording(sel @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_browser_automation_non_streaming_without_content_recording(self, **kwargs): + async def test_async_browser_automation_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: 
disable=too-many-locals,too-many-statements,too-many-nested-blocks """Test asynchronous browser automation agent with non-streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -305,7 +309,9 @@ async def test_async_browser_automation_non_streaming_without_content_recording( @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_browser_automation_streaming_with_content_recording(self, **kwargs): + async def test_async_browser_automation_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements,too-many-nested-blocks """Test asynchronous browser automation agent with streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -430,7 +436,9 @@ async def test_async_browser_automation_streaming_with_content_recording(self, * @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_browser_automation_streaming_without_content_recording(self, **kwargs): + async def test_async_browser_automation_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements,too-many-nested-blocks """Test asynchronous browser automation agent with streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py index c282164e3fcf..219eba910511 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py @@ -8,8 
+8,15 @@ """ import os -import pytest from io import BytesIO +import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils import recorded_by_proxy, RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -17,23 +24,15 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import ( PromptAgentDefinition, CodeInterpreterTool, AutoCodeInterpreterToolParam, ) - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable class TestResponsesInstrumentorCodeInterpreter(TestAiAgentsInstrumentorBase): @@ -44,6 +43,8 @@ class TestResponsesInstrumentorCodeInterpreter(TestAiAgentsInstrumentorBase): with both content recording enabled and disabled, in both streaming and non-streaming modes. 
""" + # pylint: disable=too-many-nested-blocks + # ======================================== # Sync Code Interpreter Agent Tests - Non-Streaming # ======================================== @@ -51,7 +52,9 @@ class TestResponsesInstrumentorCodeInterpreter(TestAiAgentsInstrumentorBase): @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwargs): + def test_sync_code_interpreter_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks """Test synchronous Code Interpreter agent with content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -101,7 +104,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar conversation = openai_client.conversations.create() # Ask question that triggers code interpreter - response = openai_client.responses.create( + _ = openai_client.responses.create( conversation=conversation.id, input="Calculate the average operating profit from the transportation data", extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, @@ -109,7 +112,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Check spans @@ -239,7 +242,9 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_code_interpreter_non_streaming_without_content_recording(self, **kwargs): + def 
test_sync_code_interpreter_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks """Test synchronous Code Interpreter agent with content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -288,7 +293,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k conversation = openai_client.conversations.create() # Ask question that triggers code interpreter - response = openai_client.responses.create( + _ = openai_client.responses.create( conversation=conversation.id, input="Calculate the average operating profit from the transportation data", extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, @@ -296,7 +301,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Check spans @@ -430,7 +435,9 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): + def test_sync_code_interpreter_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks """Test synchronous Code Interpreter agent with streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -492,7 +499,7 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in 
items: pass # Check spans @@ -621,7 +628,9 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_code_interpreter_streaming_without_content_recording(self, **kwargs): + def test_sync_code_interpreter_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks """Test synchronous Code Interpreter agent with streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -683,7 +692,7 @@ def test_sync_code_interpreter_streaming_without_content_recording(self, **kwarg # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Check spans diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py index ac91f4b83565..a6d49b60eb0c 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py @@ -8,8 +8,16 @@ """ import os -import pytest from io import BytesIO +import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from 
azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -17,24 +25,15 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils.aio import recorded_by_proxy_async -from devtools_testutils import RecordedTransport from azure.ai.projects.models import ( PromptAgentDefinition, CodeInterpreterTool, AutoCodeInterpreterToolParam, ) - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable class TestResponsesInstrumentorCodeInterpreterAsync(TestAiAgentsInstrumentorBase): @@ -45,6 +44,8 @@ class TestResponsesInstrumentorCodeInterpreterAsync(TestAiAgentsInstrumentorBase with both content recording enabled and disabled, in both streaming and non-streaming modes. 
""" + # pylint: disable=too-many-nested-blocks + # ======================================== # Async Code Interpreter Agent Tests - Non-Streaming # ======================================== @@ -52,7 +53,9 @@ class TestResponsesInstrumentorCodeInterpreterAsync(TestAiAgentsInstrumentorBase @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_code_interpreter_non_streaming_with_content_recording(self, **kwargs): + async def test_async_code_interpreter_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks """Test asynchronous Code Interpreter agent with content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -101,7 +104,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, conversation = await openai_client.conversations.create() # Ask question that triggers code interpreter - response = await openai_client.responses.create( + _ = await openai_client.responses.create( conversation=conversation.id, input="Calculate the average operating profit from the transportation data", extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, @@ -109,7 +112,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Check spans @@ -239,7 +242,9 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_code_interpreter_non_streaming_without_content_recording(self, 
**kwargs): + async def test_async_code_interpreter_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks """Test asynchronous Code Interpreter agent with content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -288,7 +293,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se conversation = await openai_client.conversations.create() # Ask question that triggers code interpreter - response = await openai_client.responses.create( + _ = await openai_client.responses.create( conversation=conversation.id, input="Calculate the average operating profit from the transportation data", extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, @@ -296,7 +301,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Check spans @@ -430,7 +435,9 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_code_interpreter_streaming_with_content_recording(self, **kwargs): + async def test_async_code_interpreter_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks """Test asynchronous Code Interpreter agent with streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -492,7 +499,7 @@ async def test_async_code_interpreter_streaming_with_content_recording(self, **k # Explicitly call and iterate through conversation items items = await 
openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Check spans @@ -621,7 +628,9 @@ async def test_async_code_interpreter_streaming_with_content_recording(self, **k @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_code_interpreter_streaming_without_content_recording(self, **kwargs): + async def test_async_code_interpreter_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-nested-blocks """Test asynchronous Code Interpreter agent with streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -683,7 +692,7 @@ async def test_async_code_interpreter_streaming_without_content_recording(self, # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Check spans diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py index 17b4ba9285f3..6af63135e120 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py @@ -8,8 +8,15 @@ """ import os -import pytest from io import BytesIO +import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils import recorded_by_proxy, RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from 
azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -17,28 +24,24 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import PromptAgentDefinition, FileSearchTool - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable class TestResponsesInstrumentorFileSearch(TestAiAgentsInstrumentorBase): """Tests for ResponsesInstrumentor with File Search tool.""" + # pylint: disable=too-many-nested-blocks + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): + def test_sync_file_search_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Test synchronous File Search agent with non-streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -113,7 +116,7 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Just iterate to consume items # Check spans @@ -247,7 +250,9 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): 
@pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs): + def test_sync_file_search_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Test synchronous File Search agent with non-streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -322,7 +327,7 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Just iterate to consume items # Check spans @@ -454,7 +459,9 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_file_search_streaming_with_content_recording(self, **kwargs): + def test_sync_file_search_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Test synchronous File Search agent with streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -531,7 +538,7 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Check spans @@ -659,7 +666,9 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, 
RecordedTransport.HTTPX) - def test_sync_file_search_streaming_without_content_recording(self, **kwargs): + def test_sync_file_search_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Test synchronous File Search agent with streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -736,7 +745,7 @@ def test_sync_file_search_streaming_without_content_recording(self, **kwargs): # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Check spans diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py index 704d6bbb7117..33f0d6e4edd7 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py @@ -8,8 +8,16 @@ """ import os -import pytest from io import BytesIO +import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -17,29 +25,24 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils.aio import recorded_by_proxy_async -from devtools_testutils 
import RecordedTransport from azure.ai.projects.models import PromptAgentDefinition, FileSearchTool - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable class TestResponsesInstrumentorFileSearchAsync(TestAiAgentsInstrumentorBase): """Async tests for ResponsesInstrumentor with File Search tool.""" + # pylint: disable=too-many-nested-blocks + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_file_search_non_streaming_with_content_recording(self, **kwargs): + async def test_async_file_search_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Test asynchronous File Search agent with non-streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -114,7 +117,7 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Just iterate to consume items # Check spans @@ -248,7 +251,9 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_file_search_non_streaming_without_content_recording(self, **kwargs): + async def test_async_file_search_non_streaming_without_content_recording( + 
self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Test asynchronous File Search agent with non-streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -323,7 +328,7 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Just iterate to consume items # Check spans @@ -455,7 +460,9 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_file_search_streaming_with_content_recording(self, **kwargs): + async def test_async_file_search_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Test asynchronous File Search agent with streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -532,7 +539,7 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Check spans @@ -660,7 +667,9 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_file_search_streaming_without_content_recording(self, **kwargs): + async def test_async_file_search_streaming_without_content_recording( + self, **kwargs + ): # pylint: 
disable=too-many-locals,too-many-branches,too-many-statements """Test asynchronous File Search agent with streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -737,7 +746,7 @@ async def test_async_file_search_streaming_without_content_recording(self, **kwa # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Check spans diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index 0ac08ad35e2a..7a47c7f08936 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -9,6 +9,14 @@ import os import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils import recorded_by_proxy, RecordedTransport +from openai.types.responses.response_input_param import McpApprovalResponse +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -16,30 +24,25 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import PromptAgentDefinition, MCPTool -from openai.types.responses.response_input_param import McpApprovalResponse - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - 
CONTENT_TRACING_ENV_VARIABLE, -) +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable class TestResponsesInstrumentorMCP(TestAiAgentsInstrumentorBase): """Tests for ResponsesInstrumentor with MCP agents.""" + # pylint: disable=too-many-nested-blocks + # ======================================== # Sync MCP Agent Tests - Non-Streaming # ======================================== - def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, **kwargs): + def _test_sync_mcp_non_streaming_with_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Implementation for testing synchronous MCP agent with non-streaming and content recording enabled. Args: @@ -117,7 +120,7 @@ def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, * # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Iterate to consume items # Check spans @@ -369,7 +372,9 @@ def test_sync_mcp_non_streaming_with_content_recording_attributes(self, **kwargs """Test synchronous MCP agent with non-streaming and content recording enabled (attribute-based messages).""" self._test_sync_mcp_non_streaming_with_content_recording_impl(False, **kwargs) - def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events, **kwargs): + def _test_sync_mcp_non_streaming_without_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Implementation for testing synchronous MCP agent with non-streaming and content recording disabled. 
Args: @@ -447,7 +452,7 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Just iterate to consume items # Check spans @@ -686,7 +691,9 @@ def test_sync_mcp_non_streaming_without_content_recording_attributes(self, **kwa # Sync MCP Agent Tests - Streaming # ======================================== - def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwargs): + def _test_sync_mcp_streaming_with_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Implementation for testing synchronous MCP agent with streaming and content recording enabled. Args: @@ -769,7 +776,7 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Check spans @@ -962,7 +969,9 @@ def test_sync_mcp_streaming_with_content_recording_attributes(self, **kwargs): """Test synchronous MCP agent with streaming and content recording enabled (attribute-based messages).""" self._test_sync_mcp_streaming_with_content_recording_impl(False, **kwargs) - def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, **kwargs): + def _test_sync_mcp_streaming_without_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Implementation for testing synchronous MCP agent with streaming and content recording disabled. 
Args: @@ -1045,7 +1054,7 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** # Explicitly call and iterate through conversation items items = openai_client.conversations.items.list(conversation_id=conversation.id) - for item in items: + for _ in items: pass # Check spans diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py index e6401924773a..71ad6dfad50b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -9,6 +9,15 @@ import os import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import RecordedTransport +from openai.types.responses.response_input_param import McpApprovalResponse +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -16,31 +25,25 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils.aio import recorded_by_proxy_async -from devtools_testutils import RecordedTransport from azure.ai.projects.models import PromptAgentDefinition, MCPTool -from openai.types.responses.response_input_param import McpApprovalResponse - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) +from azure.core.settings import settings 
settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable class TestResponsesInstrumentorMCPAsync(TestAiAgentsInstrumentorBase): """Async tests for ResponsesInstrumentor with MCP agents.""" + # pylint: disable=too-many-nested-blocks + # ======================================== # Async MCP Agent Tests - Non-Streaming # ======================================== - async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_events, **kwargs): + async def _test_async_mcp_non_streaming_with_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Implementation for testing asynchronous MCP agent with non-streaming and content recording enabled. Args: @@ -118,7 +121,7 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Just iterate to consume items # Check spans @@ -369,7 +372,9 @@ async def test_async_mcp_non_streaming_with_content_recording_attributes(self, * """Test asynchronous MCP agent with non-streaming and content recording enabled (attribute-based messages).""" await self._test_async_mcp_non_streaming_with_content_recording_impl(False, **kwargs) - async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use_events, **kwargs): + async def _test_async_mcp_non_streaming_without_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Implementation for testing asynchronous MCP agent with non-streaming and content recording disabled. 
Args: @@ -447,7 +452,7 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Just iterate to consume items # Check spans @@ -688,7 +693,9 @@ async def test_async_mcp_non_streaming_without_content_recording_attributes(self # Async MCP Agent Tests - Streaming # ======================================== - async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events, **kwargs): + async def _test_async_mcp_streaming_with_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Implementation for testing asynchronous MCP agent with streaming and content recording enabled. Args: @@ -771,7 +778,7 @@ async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Check spans @@ -967,7 +974,9 @@ async def test_async_mcp_streaming_with_content_recording_attributes(self, **kwa """Test asynchronous MCP agent with streaming and content recording enabled (attribute-based messages).""" await self._test_async_mcp_streaming_with_content_recording_impl(False, **kwargs) - async def _test_async_mcp_streaming_without_content_recording_impl(self, use_events, **kwargs): + async def _test_async_mcp_streaming_without_content_recording_impl( + self, use_events, **kwargs + ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Implementation for testing asynchronous MCP agent with streaming and content recording disabled. 
Args: @@ -1050,7 +1059,7 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve # Explicitly call and iterate through conversation items items = await openai_client.conversations.items.list(conversation_id=conversation.id) - async for item in items: + async for _ in items: pass # Check spans diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py index 57f17169897b..b4f5b187c865 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_metrics.py @@ -5,23 +5,23 @@ # ------------------------------------ import os -import pytest from typing import Tuple -from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.core.settings import settings +import pytest from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import InMemoryMetricReader from opentelemetry import metrics from openai import OpenAI from test_base import servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport -from test_ai_instrumentor_base import ( +from test_ai_instrumentor_base import ( # pylint: disable=import-error TestAiAgentsInstrumentorBase, CONTENT_TRACING_ENV_VARIABLE, ) +from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable # Set up global metrics collection like in the sample global_metric_reader = InMemoryMetricReader() @@ -182,7 +182,7 @@ def test_metrics_collection_conversation_create(self, **kwargs): assert True == AIProjectInstrumentor().is_instrumented() # Get OpenAI 
client and deployment - client, deployment_name = self._get_openai_client_and_deployment(**kwargs) + client, _ = self._get_openai_client_and_deployment(**kwargs) # Create a conversation conversation = client.conversations.create() diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py index 7e390d3e3a3a..b2c117a75ba5 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py @@ -9,6 +9,13 @@ import os import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils import recorded_by_proxy, RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -16,22 +23,14 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import ( PromptAgentDefinition, WorkflowAgentDefinition, ) - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - CONTENT_TRACING_ENV_VARIABLE, -) +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable def checkWorkflowEventContents(content, content_recording_enabled): @@ -191,7 +190,9 @@ def 
_create_student_teacher_workflow(self, project_client, student_agent, teache @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): + def test_sync_workflow_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test synchronous workflow agent with non-streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -205,7 +206,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_client(operation_group="tracing", **kwargs) + project_client = self.create_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None @@ -217,8 +218,8 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that creates pre-school math questions for students and checks answers. - If the answer is correct, you stop the conversation by saying [COMPLETE]. + instructions="""You are a teacher that creates pre-school math questions for students and checks answers. + If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -228,7 +229,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), ) @@ -314,7 +315,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): try: data = json.loads(event_content) - except Exception: + except Exception: # pylint: disable=broad-exception-caught continue if isinstance(data, list) and any(entry.get("role") == "workflow" for entry in data): checkWorkflowEventContents(event_content, True) @@ -356,7 +357,9 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): + def test_sync_workflow_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test synchronous workflow agent with non-streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -370,7 +373,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): assert not AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_client(operation_group="tracing", **kwargs) + project_client = self.create_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None @@ -382,8 +385,8 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that creates pre-school math questions for students and checks answers. - If the answer is correct, you stop the conversation by saying [COMPLETE]. + instructions="""You are a teacher that creates pre-school math questions for students and checks answers. 
+ If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -393,7 +396,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), ) @@ -476,7 +479,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): try: data = json.loads(event_content) - except Exception: + except Exception: # pylint: disable=broad-exception-caught continue if isinstance(data, list) and any(entry.get("role") == "workflow" for entry in data): checkWorkflowEventContents(event_content, False) @@ -523,7 +526,9 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_workflow_streaming_with_content_recording(self, **kwargs): + def test_sync_workflow_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test synchronous workflow agent with streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -537,7 +542,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_client(operation_group="tracing", **kwargs) + project_client = self.create_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None @@ -549,8 +554,8 @@ def 
test_sync_workflow_streaming_with_content_recording(self, **kwargs): agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that creates pre-school math questions for students and checks answers. - If the answer is correct, you stop the conversation by saying [COMPLETE]. + instructions="""You are a teacher that creates pre-school math questions for students and checks answers. + If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -560,7 +565,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), ) @@ -648,7 +653,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): try: data = json.loads(event_content) - except Exception: + except Exception: # pylint: disable=broad-exception-caught continue if isinstance(data, list) and any(entry.get("role") == "workflow" for entry in data): checkWorkflowEventContents(event_content, True) @@ -691,7 +696,9 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_workflow_streaming_without_content_recording(self, **kwargs): + def test_sync_workflow_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test synchronous workflow agent with streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -705,7 +712,7 @@ def 
test_sync_workflow_streaming_without_content_recording(self, **kwargs): assert not AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_client(operation_group="tracing", **kwargs) + project_client = self.create_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None @@ -717,8 +724,8 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that creates pre-school math questions for students and checks answers. - If the answer is correct, you stop the conversation by saying [COMPLETE]. + instructions="""You are a teacher that creates pre-school math questions for students and checks answers. + If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -728,7 +735,7 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), ) @@ -816,7 +823,7 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): try: data = json.loads(event_content) - except Exception: + except Exception: # pylint: disable=broad-exception-caught continue if isinstance(data, list) and any(entry.get("role") == "workflow" for entry in data): checkWorkflowEventContents(event_content, False) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index 75808d2f8a2d..4125772e8a42 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -7,8 +7,17 @@ Async tests for ResponsesInstrumentor with workflow agents. """ +import json import os import pytest +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import RecordedTransport +from test_base import servicePreparer +from test_ai_instrumentor_base import ( # pylint: disable=import-error + TestAiAgentsInstrumentorBase, + CONTENT_TRACING_ENV_VARIABLE, +) from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils from azure.ai.projects.telemetry._utils import ( OPERATION_NAME_INVOKE_AGENT, @@ -16,25 +25,14 @@ _set_use_message_events, RESPONSES_PROVIDER, ) -from azure.core.settings import settings -from gen_ai_trace_verifier import GenAiTraceVerifier -from devtools_testutils.aio import recorded_by_proxy_async -from devtools_testutils import RecordedTransport from azure.ai.projects.models import ( PromptAgentDefinition, WorkflowAgentDefinition, ) - -from test_base import servicePreparer -from test_ai_instrumentor_base import ( - TestAiAgentsInstrumentorBase, - 
CONTENT_TRACING_ENV_VARIABLE, -) - -import json +from azure.core.settings import settings settings.tracing_implementation = "OpenTelemetry" -_utils._span_impl_type = settings.tracing_implementation() +_utils._span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable def checkWorkflowEventContents(content, content_recording_enabled): @@ -190,7 +188,9 @@ async def _create_student_teacher_workflow(self, project_client, student_agent, @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_workflow_non_streaming_with_content_recording(self, **kwargs): + async def test_async_workflow_non_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test asynchronous workflow agent with non-streaming and content recording enabled.""" self.cleanup() _set_use_message_events(True) @@ -204,7 +204,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg assert AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_async_client(operation_group="tracing", **kwargs) + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None @@ -216,8 +216,8 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that creates pre-school math questions for students and checks answers. - If the answer is correct, you stop the conversation by saying [COMPLETE]. + instructions="""You are a teacher that creates pre-school math questions for students and checks answers. 
+ If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -227,7 +227,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), ) @@ -306,7 +306,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg continue try: data = json.loads(event_content) - except Exception: + except Exception: # pylint: disable=broad-exception-caught continue if isinstance(data, list) and any(entry.get("role") == "workflow" for entry in data): checkWorkflowEventContents(event_content, True) @@ -351,7 +351,9 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_workflow_non_streaming_without_content_recording(self, **kwargs): + async def test_async_workflow_non_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test asynchronous workflow agent with non-streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -365,7 +367,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw assert not AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_async_client(operation_group="tracing", **kwargs) + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert 
deployment_name is not None @@ -377,8 +379,8 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that creates pre-school math questions for students and checks answers. - If the answer is correct, you stop the conversation by saying [COMPLETE]. + instructions="""You are a teacher that creates pre-school math questions for students and checks answers. + If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -388,7 +390,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), ) @@ -467,7 +469,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw continue try: data = json.loads(event_content) - except Exception: + except Exception: # pylint: disable=broad-exception-caught continue if isinstance(data, list) and any(entry.get("role") == "workflow" for entry in data): checkWorkflowEventContents(event_content, False) @@ -516,7 +518,9 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_workflow_streaming_with_content_recording(self, **kwargs): + async def test_async_workflow_streaming_with_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test asynchronous workflow agent with streaming and content recording enabled.""" self.cleanup() 
_set_use_message_events(True) @@ -530,7 +534,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): assert AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_async_client(operation_group="tracing", **kwargs) + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None @@ -542,8 +546,8 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that creates pre-school math questions for students and checks answers. - If the answer is correct, you stop the conversation by saying [COMPLETE]. + instructions="""You are a teacher that creates pre-school math questions for students and checks answers. + If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -553,7 +557,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. 
When the teacher gives you a question, you answer it.""", ), ) @@ -637,7 +641,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): continue try: data = json.loads(event_content) - except Exception: + except Exception: # pylint: disable=broad-exception-caught continue if isinstance(data, list) and any(entry.get("role") == "workflow" for entry in data): checkWorkflowEventContents(event_content, True) @@ -682,7 +686,9 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_workflow_streaming_without_content_recording(self, **kwargs): + async def test_async_workflow_streaming_without_content_recording( + self, **kwargs + ): # pylint: disable=too-many-locals,too-many-statements """Test asynchronous workflow agent with streaming and content recording disabled.""" self.cleanup() _set_use_message_events(True) @@ -696,7 +702,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs assert not AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_async_client(operation_group="tracing", **kwargs) + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None @@ -708,8 +714,8 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs agent_name="teacher-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a teacher that creates pre-school math questions for students and checks answers. - If the answer is correct, you stop the conversation by saying [COMPLETE]. 
+ instructions="""You are a teacher that creates pre-school math questions for students and checks answers. + If the answer is correct, you stop the conversation by saying [COMPLETE]. If the answer is wrong, you ask student to fix it.""", ), ) @@ -719,7 +725,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs agent_name="student-agent", definition=PromptAgentDefinition( model=deployment_name, - instructions="""You are a student who answers questions from the teacher. + instructions="""You are a student who answers questions from the teacher. When the teacher gives you a question, you answer it.""", ), ) @@ -803,7 +809,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs continue try: data = json.loads(event_content) - except Exception: + except Exception: # pylint: disable=broad-exception-caught continue if isinstance(data, list) and any(entry.get("role") == "workflow" for entry in data): checkWorkflowEventContents(event_content, False) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_trace_function_decorator.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_trace_function_decorator.py index 7aa983af2106..568821c8e09f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_trace_function_decorator.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_trace_function_decorator.py @@ -4,13 +4,15 @@ # ------------------------------------ """Tests for the trace_function decorator with synchronous functions.""" +# pylint: disable=unused-argument + import pytest from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from memory_trace_exporter import MemoryTraceExporter # pylint: disable=import-error from azure.ai.projects.telemetry._trace_function import trace_function -from 
gen_ai_trace_verifier import GenAiTraceVerifier -from memory_trace_exporter import MemoryTraceExporter class TestTraceFunctionDecorator: @@ -21,7 +23,7 @@ def setup_telemetry(self): """Setup telemetry for tests.""" tracer_provider = TracerProvider() trace._TRACER_PROVIDER = tracer_provider - self.exporter = MemoryTraceExporter() + self.exporter = MemoryTraceExporter() # pylint: disable=attribute-defined-outside-init span_processor = SimpleSpanProcessor(self.exporter) tracer_provider.add_span_processor(span_processor) yield diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_trace_function_decorator_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_trace_function_decorator_async.py index 9e4824859198..250bbfc58eff 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_trace_function_decorator_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_trace_function_decorator_async.py @@ -4,24 +4,26 @@ # ------------------------------------ """Tests for the trace_function decorator with asynchronous functions.""" +# pylint: disable=unused-argument + import pytest from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from gen_ai_trace_verifier import GenAiTraceVerifier # pylint: disable=import-error +from memory_trace_exporter import MemoryTraceExporter # pylint: disable=import-error from azure.ai.projects.telemetry._trace_function import trace_function -from gen_ai_trace_verifier import GenAiTraceVerifier -from memory_trace_exporter import MemoryTraceExporter class TestTraceFunctionDecoratorAsync: """Tests for trace_function decorator with asynchronous functions.""" @pytest.fixture(scope="function") - def setup_telemetry(self): + def setup_telemetry(self): # pylint: disable=attribute-defined-outside-init """Setup telemetry for tests.""" tracer_provider = TracerProvider() trace._TRACER_PROVIDER = tracer_provider - self.exporter = 
MemoryTraceExporter() + self.exporter = MemoryTraceExporter() # pylint: disable=attribute-defined-outside-init span_processor = SimpleSpanProcessor(self.exporter) tracer_provider.add_span_processor(span_processor) yield @@ -199,7 +201,7 @@ async def test_async_function_with_boolean_parameters(self, setup_telemetry): async def check_status_async(is_active: bool, is_verified: bool) -> str: if is_active and is_verified: return "approved" - elif is_active: + if is_active: return "pending" return "inactive" @@ -359,7 +361,7 @@ async def process_data_async(name: str, count: int, active: bool, scores: list) assert attributes_match is True @pytest.mark.asyncio - async def test_async_function_with_default_parameters(self, setup_telemetry): + async def test_async_function_with_default_parameters(self, setup_telemetry): # pylint: disable=unused-argument """Test decorator with async function using default parameters.""" @trace_function() @@ -386,7 +388,7 @@ async def create_user_async(name: str, role: str = "user", active: bool = True) assert attributes_match is True @pytest.mark.asyncio - async def test_async_function_list_return_value(self, setup_telemetry): + async def test_async_function_list_return_value(self, setup_telemetry): # pylint: disable=unused-argument """Test decorator with async function returning a list.""" @trace_function() diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception.py index 6cc81e4f5553..d013c33e9847 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_create_version_exception.py @@ -5,7 +5,6 @@ # ------------------------------------ # cSpell:disable -import functools import pytest from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport diff --git 
a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py index 44c8c315389e..4a8727c0e5e8 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py @@ -13,7 +13,6 @@ TextResponseFormatJsonSchema, PromptAgentDefinitionTextOptions, ) -import pytest class TestAgentResponsesCrud(TestBase): @@ -75,7 +74,7 @@ def test_agent_responses_crud(self, **kwargs): print(f"Response id: {response.id}, output text: {response.output_text}") assert "5280" in response.output_text or "5,280" in response.output_text - items = openai_client.conversations.items.create( + _ = openai_client.conversations.items.create( conversation.id, items=[{"type": "message", "role": "user", "content": "And how many meters?"}], ) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py index 27151090e10a..22aeff937bc5 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py @@ -14,7 +14,6 @@ TextResponseFormatJsonSchema, PromptAgentDefinitionTextOptions, ) -import pytest class TestAgentResponsesCrudAsync(TestBase): @@ -76,7 +75,7 @@ async def test_agent_responses_crud_async(self, **kwargs): conversation_id=conversation.id, items=[{"type": "message", "role": "user", "content": "And how many meters?"}], ) - print(f"Added a second user message to the conversation") + print("Added a second user message to the conversation") response = await openai_client.responses.create( conversation=conversation.id, diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py index 2e6451effb59..702d7786fbdb 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud.py @@ -9,7 +9,6 @@ from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy from azure.ai.projects.models import PromptAgentDefinition, AgentDetails, AgentVersionDetails -import pytest class TestAgentCrud(TestBase): diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py index 6eb878e729b8..7dae5621f724 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_crud_async.py @@ -9,7 +9,6 @@ from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from azure.ai.projects.models import PromptAgentDefinition, AgentDetails, AgentVersionDetails -import pytest class TestAgentCrudAsync(TestBase): diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_conversation_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_conversation_crud.py index 448cf90f4b49..a597d78a64d0 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_conversation_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_conversation_crud.py @@ -7,7 +7,6 @@ from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport -import pytest # from azure.ai.projects.models import ResponsesUserMessageItemParam, ItemContentInputText @@ -88,7 +87,7 @@ def test_conversation_crud(self, **kwargs): metadata = {"key1": "value1", "key2": "value2"} conversation = client.conversations.update(conversation_id=conversation1.id, metadata=metadata) TestBase._validate_conversation(conversation, expected_id=conversation1.id, expected_metadata=metadata) - print(f"Conversation updated") + print("Conversation updated") conversation = client.conversations.retrieve(conversation_id=conversation1.id) 
TestBase._validate_conversation(conversation) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_conversation_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_conversation_crud_async.py index b8e26610aec5..b8638956cccd 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_conversation_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_conversation_crud_async.py @@ -8,7 +8,6 @@ from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import RecordedTransport -import pytest # from azure.ai.projects.models import ResponsesUserMessageItemParam, ItemContentInputText @@ -66,7 +65,7 @@ async def test_conversation_crud_async(self, **kwargs): metadata = {"key1": "value1", "key2": "value2"} conversation = await client.conversations.update(conversation_id=conversation1.id, metadata=metadata) TestBase._validate_conversation(conversation, expected_id=conversation1.id, expected_metadata=metadata) - print(f"Conversation updated") + print("Conversation updated") conversation = await client.conversations.retrieve(conversation_id=conversation1.id) TestBase._validate_conversation(conversation) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_conversation_items_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_conversation_items_crud.py index d1b43a519465..e72730729d6d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_conversation_items_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_conversation_items_crud.py @@ -7,7 +7,6 @@ from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport -import pytest class TestConversationItemsCrud(TestBase): @@ -45,7 +44,7 @@ def test_conversation_items_crud(self, **kwargs): print(f"Created conversation (id: {conversation.id})") try: - print(f"Test create_items") + print("Test create_items") # Create items with short-form and long-form text message as Dict # See 
https://platform.openai.com/docs/api-reference/conversations/create-items items = [ @@ -58,7 +57,7 @@ def test_conversation_items_crud(self, **kwargs): ) assert items.has_more is False item_list = items.data - print(f"Created item with short-form and long form text messages as Dict") + print("Created item with short-form and long form text messages as Dict") assert len(item_list) == 2 self._validate_conversation_item( item_list[0], @@ -106,7 +105,7 @@ def test_conversation_items_crud(self, **kwargs): # item3_id = item_list[0].id # item4_id = item_list[1].id - print(f"Test retrieve item") + print("Test retrieve item") item = client.conversations.items.retrieve(conversation_id=conversation.id, item_id=item1_id) self._validate_conversation_item( item, @@ -117,14 +116,14 @@ def test_conversation_items_crud(self, **kwargs): expected_content_text="first message", ) - print(f"Test list items") + print("Test list items") item_count = 0 for item in client.conversations.items.list(conversation.id): item_count += 1 self._validate_conversation_item(item) assert item_count == 2 - print(f"Test delete item") + print("Test delete item") # result = client.conversations.items.delete(conversation_id=conversation.id, item_id=item4_id) # assert result.id == conversation.id result = client.conversations.items.delete(conversation_id=conversation.id, item_id=item2_id) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_conversation_items_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_conversation_items_crud_async.py index e3da364d1c4c..830ff107d458 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_conversation_items_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_conversation_items_crud_async.py @@ -8,7 +8,6 @@ from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import RecordedTransport -import pytest class TestConversationItemsCrudAsync(TestBase): @@ -24,7 +23,7 @@ async 
def test_conversation_items_crud_async(self, **kwargs): print(f"Created conversation (id: {conversation.id})") try: - print(f"Test create_items") + print("Test create_items") # Create items with short-form and long-form text message as Dict # See https://platform.openai.com/docs/api-reference/conversations/create-items items = [ @@ -37,7 +36,7 @@ async def test_conversation_items_crud_async(self, **kwargs): ) assert items.has_more is False item_list = items.data - print(f"Created item with short-form and long form text messages as Dict") + print("Created item with short-form and long form text messages as Dict") assert len(item_list) == 2 self._validate_conversation_item( item_list[0], @@ -85,7 +84,7 @@ async def test_conversation_items_crud_async(self, **kwargs): # item3_id = item_list[0].id # item4_id = item_list[1].id - print(f"Test retrieve item") + print("Test retrieve item") item = await client.conversations.items.retrieve(conversation_id=conversation.id, item_id=item1_id) self._validate_conversation_item( item, @@ -96,14 +95,14 @@ async def test_conversation_items_crud_async(self, **kwargs): expected_content_text="first message", ) - print(f"Test list items") + print("Test list items") item_count = 0 async for item in client.conversations.items.list(conversation.id): item_count += 1 self._validate_conversation_item(item) assert item_count == 2 - print(f"Test delete item") + print("Test delete item") # result = await client.conversations.items.delete(conversation_id=conversation.id, item_id=item4_id) # assert result.id == conversation.id result = await client.conversations.items.delete(conversation_id=conversation.id, item_id=item2_id) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py index 4ef0e6b845f6..6d49b63dc4d8 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py @@ -5,8 +5,6 @@ # ------------------------------------ # cSpell:disable -import pytest - """ Multi-Tool Tests: Code Interpreter + Function Tool @@ -14,7 +12,6 @@ All tests use the same tool combination but different inputs and workflows. """ -import json from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import ( @@ -23,7 +20,6 @@ AutoCodeInterpreterToolParam, FunctionTool, ) -from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam class TestAgentCodeInterpreterAndFunction(TestBase): diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py index f4996551357b..7bcdc9c57b42 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py @@ -5,8 +5,6 @@ # ------------------------------------ # cSpell:disable -import pytest - """ Multi-Tool Tests: File Search + Code Interpreter diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py index 9adb1329774a..d55fdca08792 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py @@ -5,8 +5,6 @@ # ------------------------------------ # cSpell:disable -import pytest - """ Multi-Tool Tests: File 
Search + Function Tool @@ -18,8 +16,8 @@ from io import BytesIO from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport -from azure.ai.projects.models import PromptAgentDefinition, FileSearchTool, FunctionTool from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam +from azure.ai.projects.models import PromptAgentDefinition, FileSearchTool, FunctionTool class TestAgentFileSearchAndFunction(TestBase): @@ -362,7 +360,7 @@ def calculate_sum(numbers): @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_multi_turn_search_and_save_workflow(self, **kwargs): + def test_multi_turn_search_and_save_workflow(self, **kwargs): # pylint: disable=too-many-statements,too-many-locals """ Test multi-turn workflow: search documents, ask follow-ups, save findings. diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py index 648a1ed65519..d362946f24b0 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py @@ -4,9 +4,6 @@ # Licensed under the MIT License. # ------------------------------------ # cSpell:disable - -import pytest - """ Multi-Tool Tests: File Search + Code Interpreter + Function Tool @@ -14,7 +11,6 @@ All tests use the same 3-tool combination but different inputs and workflows. 
""" -import json from io import BytesIO from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -25,7 +21,6 @@ AutoCodeInterpreterToolParam, FunctionTool, ) -from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam class TestAgentFileSearchCodeInterpreterFunction(TestBase): diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py index f4a20b8f21ad..2baea19a4160 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py @@ -2,9 +2,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ - -import pytest - """ Test agents using multiple tools within conversations. @@ -14,6 +11,7 @@ import json from io import BytesIO +from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import ( @@ -21,14 +19,13 @@ FileSearchTool, PromptAgentDefinition, ) -from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam class TestMultiToolWithConversations(TestBase): @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_file_search_and_function_with_conversation(self, **kwargs): + def test_file_search_and_function_with_conversation(self, **kwargs): # pylint: disable=too-many-statements """ Test using multiple tools (FileSearch + Function) within one conversation. 
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py index e16a8e0e5722..70ff70113560 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py @@ -52,7 +52,7 @@ class TestAgentAISearch(TestBase): condition=(not is_live_and_not_recording()), reason="Skipped because we cannot record network calls with OpenAI client", ) - def test_agent_ai_search_question_answering(self, **kwargs): + def test_agent_ai_search_question_answering(self, **kwargs): # pylint: disable=too-many-statements """ Test agent with Azure AI Search capabilities for question answering. diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py index 190183663823..1b860ff45792 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression +# pylint: disable=too-many-lines,line-too-long,useless-suppression,too-many-nested-blocks # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -100,7 +100,7 @@ def test_agent_bing_grounding(self, **kwargs): elif event.type == "response.output_text.delta": print(f"Delta: {event.delta}") elif event.type == "response.text.done": - print(f"Follow-up response done!") + print("Follow-up response done!") elif event.type == "response.output_item.done": if event.item.type == "message": item = event.item @@ -112,7 +112,7 @@ def test_agent_bing_grounding(self, **kwargs): print(f"URL Citation: {annotation.url}") url_citations.append(annotation.url) elif event.type == "response.completed": - print(f"Follow-up completed!") + print("Follow-up completed!") print(f"Full response: {event.response.output_text}") output_text = event.response.output_text diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py index 1d49bb0bbb6b..3cffa5ef23a1 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression +# pylint: disable=too-many-lines,line-too-long,useless-suppression,too-many-nested-blocks # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py index 50a4b778cb07..9147c068adc1 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py @@ -5,7 +5,6 @@ # ------------------------------------ # cSpell:disable -import pytest from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import RecordedTransport diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py index 9c733529ac42..011c580ae17f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py @@ -6,8 +6,8 @@ # cSpell:disable import os -import pytest from io import BytesIO +import pytest from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import PromptAgentDefinition, FileSearchTool @@ -156,7 +156,7 @@ def test_agent_file_search_unsupported_file_type(self, **kwargs): # Attempt to upload unsupported file type print("\nAttempting to upload CSV file (unsupported format)...") try: - file = openai_client.vector_stores.files.upload_and_poll( + _ = openai_client.vector_stores.files.upload_and_poll( vector_store_id=vector_store.id, file=csv_file, ) @@ -164,7 +164,7 @@ def test_agent_file_search_unsupported_file_type(self, **kwargs): openai_client.vector_stores.delete(vector_store.id) pytest.fail("Expected BadRequestError for CSV file upload, but upload succeeded") - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught error_message = str(e) print(f"\n✓ Upload correctly 
rejected with error: {error_message[:200]}...") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py index 604fbd5323fe..1bda2739a3e4 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py @@ -6,7 +6,6 @@ # cSpell:disable import os -import pytest from io import BytesIO from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py index 72079587549e..1ff21ef6c57d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py @@ -6,7 +6,6 @@ # cSpell:disable import os -import pytest from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import PromptAgentDefinition, FileSearchTool diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py index 42e1348a3521..c0357be6376d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py @@ -6,7 +6,6 @@ # cSpell:disable import os -import pytest from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import RecordedTransport diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py 
b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py index 05ebd8ddcad0..fe8349055685 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py @@ -6,11 +6,10 @@ # cSpell:disable import json -import pytest from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport -from azure.ai.projects.models import PromptAgentDefinition, FunctionTool from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam +from azure.ai.projects.models import PromptAgentDefinition, FunctionTool class TestAgentFunctionTool(TestBase): @@ -162,7 +161,7 @@ def test_agent_function_tool(self, **kwargs): @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs): + def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs): # pylint: disable=too-many-statements """ Test multi-turn conversation where agent calls functions multiple times. 
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py index 2344c5a9d498..660ac4333b38 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py @@ -6,12 +6,11 @@ # cSpell:disable import json -import pytest from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import RecordedTransport -from azure.ai.projects.models import PromptAgentDefinition, FunctionTool from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam +from azure.ai.projects.models import PromptAgentDefinition, FunctionTool class TestAgentFunctionToolAsync(TestBase): @@ -150,7 +149,9 @@ async def test_agent_function_tool_async(self, **kwargs): @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, **kwargs): + async def test_agent_function_tool_multi_turn_with_multiple_calls_async( + self, **kwargs + ): # pylint: disable=too-many-statements """ Test multi-turn conversation where agent calls functions multiple times (async version). diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py index 7dea648d735d..dfec50f05ae5 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression +# pylint: disable=too-many-lines,line-too-long,useless-suppression,broad-exception-caught # ------------------------------------ # Copyright (c) Microsoft Corporation. 
# Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py index 68b91adb44d4..00f8edc2d866 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression +# pylint: disable=too-many-lines,line-too-long,useless-suppression,broad-exception-caught # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py index 109e1b93f775..2067a94e11b0 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py @@ -9,8 +9,8 @@ from test_base import TestBase, servicePreparer from devtools_testutils import is_live_and_not_recording from devtools_testutils import recorded_by_proxy, RecordedTransport -from azure.ai.projects.models import PromptAgentDefinition, MCPTool, Tool from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam +from azure.ai.projects.models import PromptAgentDefinition, MCPTool, Tool class TestAgentMCP(TestBase): diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py index b9e1dd43c7e1..471f1b4809f1 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py @@ -5,12 +5,11 @@ # ------------------------------------ # cSpell:disable -import pytest from test_base import TestBase, servicePreparer from devtools_testutils.aio 
import recorded_by_proxy_async from devtools_testutils import RecordedTransport -from azure.ai.projects.models import PromptAgentDefinition, MCPTool, Tool from openai.types.responses.response_input_param import McpApprovalResponse, ResponseInputParam +from azure.ai.projects.models import PromptAgentDefinition, MCPTool, Tool class TestAgentMCPAsync(TestBase): diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py index 56edc1879033..9f2fc80b6301 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py @@ -1,13 +1,13 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression +# pylint: disable=too-many-lines,line-too-long,useless-suppression,broad-exception-caught # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ # cSpell:disable -import pytest import time from typing import Final +import pytest from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport, is_live, is_live_and_not_recording from azure.core.exceptions import ResourceNotFoundError @@ -26,7 +26,7 @@ class TestAgentMemorySearch(TestBase): ) @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_agent_memory_search(self, **kwargs): + def test_agent_memory_search(self, **kwargs): # pylint: disable=too-many-statements """ Test agent with Memory Search tool for contextual memory retrieval. 
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py index f830aac14fca..bea2d6053eb3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,line-too-long,useless-suppression +# pylint: disable=too-many-lines,line-too-long,useless-suppression,broad-exception-caught # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -6,8 +6,8 @@ # cSpell:disable import asyncio -import pytest from typing import Final +import pytest from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import RecordedTransport, is_live, is_live_and_not_recording @@ -27,7 +27,7 @@ class TestAgentMemorySearchAsync(TestBase): ) @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_agent_memory_search_async(self, **kwargs): + async def test_agent_memory_search_async(self, **kwargs): # pylint: disable=too-many-statements model = kwargs.get("foundry_model_name") chat_model = kwargs.get("memory_store_chat_model_deployment_name") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py index 0bb1aa3cdf33..874c4414c427 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py @@ -65,7 +65,7 @@ def test_agent_openapi(self, **kwargs): assert os.path.exists(weather_asset_file_path), f"OpenAPI spec file not found at: {weather_asset_file_path}" print(f"Using OpenAPI spec file: {weather_asset_file_path}") - with open(weather_asset_file_path, "r") as f: + 
with open(weather_asset_file_path, "r", encoding="utf-8") as f: openapi_weather = jsonref.loads(f.read()) # Create OpenAPI tool diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py index dc363934a816..6ef72d8b3338 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py @@ -45,7 +45,7 @@ async def test_agent_openapi_async(self, **kwargs): assert os.path.exists(weather_asset_file_path), f"OpenAPI spec file not found at: {weather_asset_file_path}" print(f"Using OpenAPI spec file: {weather_asset_file_path}") - with open(weather_asset_file_path, "r") as f: + with open(weather_asset_file_path, "r", encoding="utf-8") as f: openapi_weather = jsonref.loads(f.read()) # Create OpenAPI tool diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py index 1f4e5e78ef65..6e3cafa96e6e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py @@ -12,9 +12,9 @@ """ import json -import pytest from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport +from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam from azure.ai.projects.models import ( FunctionTool, FileSearchTool, @@ -22,14 +22,13 @@ AutoCodeInterpreterToolParam, PromptAgentDefinition, ) -from openai.types.responses.response_input_param import FunctionCallOutput, ResponseInputParam class TestAgentToolsWithConversations(TestBase): @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_function_tool_with_conversation(self, 
**kwargs): + def test_function_tool_with_conversation(self, **kwargs): # pylint: disable=too-many-statements """ Test using FunctionTool within a conversation. diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py index 084785bc53f3..1f6634a8640f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py @@ -5,7 +5,6 @@ # ------------------------------------ # cSpell:disable -import pytest from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport from azure.ai.projects.models import PromptAgentDefinition, WebSearchPreviewTool, ApproximateLocation diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py index 7aac8aef6977..b73bc967d9db 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py @@ -5,7 +5,6 @@ # ------------------------------------ # cSpell:disable -import pytest from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import RecordedTransport diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index f2b6423e1cd0..4b44ff719a3b 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -4,12 +4,11 @@ # Licensed under the MIT License. 
# ------------------------------------ -import pytest +from test_base import TestBase, servicePreparer +from devtools_testutils import recorded_by_proxy from azure.ai.projects.models import ConnectionType, CredentialType, CustomCredential import azure.ai.projects.models as _models from azure.ai.projects._utils.model_base import _deserialize -from test_base import TestBase, servicePreparer -from devtools_testutils import recorded_by_proxy class TestConnections(TestBase): diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index 137381284c3b..6394099ad7fc 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -3,11 +3,9 @@ # Licensed under the MIT License. # ------------------------------------ -import pytest -from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import ConnectionType from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async +from azure.ai.projects.models import ConnectionType class TestConnectionsAsync(TestBase): diff --git a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py index 634076e78aa6..2293587f2bdf 100644 --- a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py +++ b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets.py @@ -5,12 +5,11 @@ # ------------------------------------ import os import re -import pytest +from test_base import TestBase, servicePreparer +from devtools_testutils import recorded_by_proxy, is_live, add_general_regex_sanitizer from azure.ai.projects import AIProjectClient from azure.ai.projects.models import DatasetVersion, DatasetType from azure.ai.projects.models._enums import ConnectionType -from test_base import TestBase, servicePreparer -from devtools_testutils import 
recorded_by_proxy, is_live, add_general_regex_sanitizer from azure.core.exceptions import HttpResponseError # Construct the paths to the data folder and data file used in this test @@ -37,7 +36,7 @@ def test_datasets_upload_file(self, **kwargs): with self.create_client(**kwargs) as project_client: - print(f"Get the default Azure Storage connection to use for uploading files.") + print("Get the default Azure Storage connection to use for uploading files.") connection_name = project_client.connections.get_default(ConnectionType.AZURE_STORAGE_ACCOUNT).name print( f"[test_datasets_upload_file] Upload a single file and create a new Dataset `{dataset_name}`, version `{dataset_version}`, to reference the file." @@ -90,6 +89,7 @@ def test_datasets_upload_file(self, **kwargs): print(dataset_credential) TestBase.validate_dataset_credential(dataset_credential) + # pylint: disable=pointless-string-statement """ print("[test_datasets_upload_file] List latest versions of all Datasets:") empty = True @@ -152,7 +152,7 @@ def test_datasets_upload_folder(self, **kwargs): credential=self.get_credential(AIProjectClient, is_async=False), ) as project_client: - print(f"Get the default Azure Storage connection to use for uploading files.") + print("Get the default Azure Storage connection to use for uploading files.") connection_name = project_client.connections.get_default(ConnectionType.AZURE_STORAGE_ACCOUNT).name print( f"[test_datasets_upload_folder] Upload files in a folder (including sub-folders) and create a new version `{dataset_version}` in the same Dataset, to reference the files." 
diff --git a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py index 3e53e4b3d9ed..eedaa1da5424 100644 --- a/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py +++ b/sdk/ai/azure-ai-projects/tests/datasets/test_datasets_async.py @@ -5,13 +5,12 @@ # ------------------------------------ import os import re -import pytest -from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import DatasetVersion, DatasetType -from azure.ai.projects.models._enums import ConnectionType from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import is_live, add_general_regex_sanitizer +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import DatasetVersion, DatasetType +from azure.ai.projects.models._enums import ConnectionType from azure.core.exceptions import HttpResponseError # Construct the paths to the data folder and data file used in this test @@ -38,7 +37,7 @@ async def test_datasets_upload_file(self, **kwargs): async with self.create_async_client(**kwargs) as project_client: - print(f"Get the default Azure Storage connection to use for uploading files.") + print("Get the default Azure Storage connection to use for uploading files.") connection_name = (await project_client.connections.get_default(ConnectionType.AZURE_STORAGE_ACCOUNT)).name print( f"[test_datasets_upload_file] Upload a single file and create a new Dataset `{dataset_name}`, version `{dataset_version}`, to reference the file." 
@@ -91,6 +90,7 @@ async def test_datasets_upload_file(self, **kwargs): print(dataset_credential) TestBase.validate_dataset_credential(dataset_credential) + # pylint: disable=pointless-string-statement """ print("[test_datasets_upload_file] List latest versions of all Datasets:") empty = True @@ -153,7 +153,7 @@ async def test_datasets_upload_folder_async(self, **kwargs): credential=self.get_credential(AIProjectClient, is_async=True), ) as project_client: - print(f"Get the default Azure Storage connection to use for uploading files.") + print("Get the default Azure Storage connection to use for uploading files.") connection_name = (await project_client.connections.get_default(ConnectionType.AZURE_STORAGE_ACCOUNT)).name print( f"[test_datasets_upload_folder] Upload files in a folder (including sub-folders) and create a new version `{dataset_version}` in the same Dataset, to reference the files." diff --git a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py index c2345115eb92..ee91043c1f75 100644 --- a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py +++ b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments.py @@ -3,8 +3,6 @@ # Licensed under the MIT License. # ------------------------------------ -import pytest -from azure.ai.projects import AIProjectClient from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy diff --git a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py index b278909621e4..24e6630f3e24 100644 --- a/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py +++ b/sdk/ai/azure-ai-projects/tests/deployments/test_deployments_async.py @@ -3,8 +3,6 @@ # Licensed under the MIT License. 
# ------------------------------------ -import pytest -from azure.ai.projects.aio import AIProjectClient from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async diff --git a/sdk/ai/azure-ai-projects/tests/files/test_files.py b/sdk/ai/azure-ai-projects/tests/files/test_files.py index f934ce955547..56e1125e8b07 100644 --- a/sdk/ai/azure-ai-projects/tests/files/test_files.py +++ b/sdk/ai/azure-ai-projects/tests/files/test_files.py @@ -3,8 +3,6 @@ # Licensed under the MIT License. # ------------------------------------ -import re -import pytest from pathlib import Path from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, RecordedTransport diff --git a/sdk/ai/azure-ai-projects/tests/files/test_files_async.py b/sdk/ai/azure-ai-projects/tests/files/test_files_async.py index cc85b778e1a5..968dc7b04d50 100644 --- a/sdk/ai/azure-ai-projects/tests/files/test_files_async.py +++ b/sdk/ai/azure-ai-projects/tests/files/test_files_async.py @@ -3,8 +3,6 @@ # Licensed under the MIT License. 
# ------------------------------------ -import re -import pytest from pathlib import Path from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py index cdf1b9f4ceda..1d5b61e920be 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py @@ -5,9 +5,9 @@ # ------------------------------------ import os -import pytest import time from pathlib import Path +import pytest from test_base import ( TestBase, servicePreparer, @@ -318,7 +318,7 @@ def _test_rft_create_job_helper(self, model_type, training_type, **kwargs): self._cleanup_test_file(openai_client, train_file.id) self._cleanup_test_file(openai_client, validation_file.id) - def _extract_account_name_from_endpoint(self, project_endpoint, test_prefix): + def _extract_account_name_from_endpoint(self, project_endpoint: str) -> str: endpoint_clean = project_endpoint.replace("https://", "").replace("http://", "") if ".services.ai.azure.com" not in endpoint_clean: raise ValueError( @@ -327,7 +327,13 @@ def _extract_account_name_from_endpoint(self, project_endpoint, test_prefix): return endpoint_clean.split(".services.ai.azure.com")[0] def _test_deploy_and_infer_helper( - self, completed_job_id, deployment_format, deployment_capacity, test_prefix, inference_content, **kwargs + self, + completed_job_id: str, + deployment_format: str, + deployment_capacity: int, + test_prefix: str, + inference_content: str, + **kwargs, ): if not completed_job_id: pytest.skip(f"completed_job_id parameter not set - skipping {test_prefix} deploy and infer test") @@ -341,7 +347,7 @@ def _test_deploy_and_infer_helper( f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, foundry_project_endpoint) - skipping {test_prefix} deploy and 
infer test" ) - account_name = self._extract_account_name_from_endpoint(project_endpoint, test_prefix) + account_name = self._extract_account_name_from_endpoint(project_endpoint) print(f"[{test_prefix}] Account name: {account_name}") with self.create_client(**kwargs) as project_client: diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py index bd4e29481dbe..1effc1f06bdb 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py @@ -5,9 +5,9 @@ # ------------------------------------ import os -import pytest import asyncio from pathlib import Path +import pytest from test_base import ( TestBase, servicePreparer, @@ -329,7 +329,7 @@ async def _test_rft_create_job_helper_async(self, model_type, training_type, **k await self._cleanup_test_file_async(openai_client, train_file.id) await self._cleanup_test_file_async(openai_client, validation_file.id) - def _extract_account_name_from_endpoint(self, project_endpoint, test_prefix): + def _extract_account_name_from_endpoint(self, project_endpoint: str) -> str: endpoint_clean = project_endpoint.replace("https://", "").replace("http://", "") if ".services.ai.azure.com" not in endpoint_clean: raise ValueError( @@ -338,7 +338,13 @@ def _extract_account_name_from_endpoint(self, project_endpoint, test_prefix): return endpoint_clean.split(".services.ai.azure.com")[0] async def _test_deploy_and_infer_helper_async( - self, completed_job_id, deployment_format, deployment_capacity, test_prefix, inference_content, **kwargs + self, + completed_job_id: str, + deployment_format: str, + deployment_capacity: int, + test_prefix: str, + inference_content: str, + **kwargs, ): if not completed_job_id: pytest.skip(f"completed_job_id parameter not set - skipping {test_prefix} deploy and infer test") @@ -352,7 +358,7 @@ async def 
_test_deploy_and_infer_helper_async( f"Missing required environment variables for deployment (azure_subscription_id, azure_resource_group, foundry_project_endpoint) - skipping {test_prefix} deploy and infer test" ) - account_name = self._extract_account_name_from_endpoint(project_endpoint, test_prefix) + account_name = self._extract_account_name_from_endpoint(project_endpoint) print(f"[{test_prefix}] Account name: {account_name}") project_client = self.create_async_client(**kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/indexes/test_indexes.py b/sdk/ai/azure-ai-projects/tests/indexes/test_indexes.py index b09f404b0a08..db48230296c8 100644 --- a/sdk/ai/azure-ai-projects/tests/indexes/test_indexes.py +++ b/sdk/ai/azure-ai-projects/tests/indexes/test_indexes.py @@ -5,10 +5,9 @@ # ------------------------------------ import pytest -from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import AzureAISearchIndex, IndexType from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy +from azure.ai.projects.models import AzureAISearchIndex, IndexType @pytest.mark.skip(reason="Backend throw 400 on index list api") diff --git a/sdk/ai/azure-ai-projects/tests/indexes/test_indexes_async.py b/sdk/ai/azure-ai-projects/tests/indexes/test_indexes_async.py index a42af11fb517..69bd4c268057 100644 --- a/sdk/ai/azure-ai-projects/tests/indexes/test_indexes_async.py +++ b/sdk/ai/azure-ai-projects/tests/indexes/test_indexes_async.py @@ -5,10 +5,9 @@ # ------------------------------------ import pytest -from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import AzureAISearchIndex, IndexType from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async +from azure.ai.projects.models import AzureAISearchIndex, IndexType @pytest.mark.skip(reason="Backend throw 400 on index list api") diff --git a/sdk/ai/azure-ai-projects/tests/redteams/test_redteams.py 
b/sdk/ai/azure-ai-projects/tests/redteams/test_redteams.py index 425bc89e98ee..3285c1afb474 100644 --- a/sdk/ai/azure-ai-projects/tests/redteams/test_redteams.py +++ b/sdk/ai/azure-ai-projects/tests/redteams/test_redteams.py @@ -4,15 +4,14 @@ # ------------------------------------ import pytest -from azure.ai.projects import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils import recorded_by_proxy from azure.ai.projects.models import ( RedTeam, AzureOpenAIModelConfiguration, AttackStrategy, RiskCategory, ) -from test_base import TestBase, servicePreparer -from devtools_testutils import recorded_by_proxy @pytest.mark.skip( diff --git a/sdk/ai/azure-ai-projects/tests/redteams/test_redteams_async.py b/sdk/ai/azure-ai-projects/tests/redteams/test_redteams_async.py index 65dc52989320..3fda37b15180 100644 --- a/sdk/ai/azure-ai-projects/tests/redteams/test_redteams_async.py +++ b/sdk/ai/azure-ai-projects/tests/redteams/test_redteams_async.py @@ -4,15 +4,14 @@ # ------------------------------------ import pytest -from azure.ai.projects.aio import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils.aio import recorded_by_proxy_async from azure.ai.projects.models import ( RedTeam, AzureOpenAIModelConfiguration, AttackStrategy, RiskCategory, ) -from test_base import TestBase, servicePreparer -from devtools_testutils.aio import recorded_by_proxy_async @pytest.mark.skip( diff --git a/sdk/ai/azure-ai-projects/tests/responses/test_openai_client_overrides.py b/sdk/ai/azure-ai-projects/tests/responses/test_openai_client_overrides.py index 2aacdfe74707..782c5024f43a 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_openai_client_overrides.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_openai_client_overrides.py @@ -9,9 +9,9 @@ """ import os +from typing import Any import pytest import httpx -from typing import Any from azure.core.credentials import TokenCredential from azure.ai.projects 
import AIProjectClient diff --git a/sdk/ai/azure-ai-projects/tests/responses/test_openai_client_overrides_async.py b/sdk/ai/azure-ai-projects/tests/responses/test_openai_client_overrides_async.py index 0abf63f41963..de8c484fd9f6 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_openai_client_overrides_async.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_openai_client_overrides_async.py @@ -9,9 +9,9 @@ """ import os +from typing import Any import pytest import httpx -from typing import Any from azure.core.credentials_async import AsyncTokenCredential from azure.ai.projects.aio import AIProjectClient diff --git a/sdk/ai/azure-ai-projects/tests/responses/test_responses.py b/sdk/ai/azure-ai-projects/tests/responses/test_responses.py index 9ef5758e06f3..9ea48ffe05e7 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_responses.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_responses.py @@ -5,11 +5,11 @@ # ------------------------------------ # cSpell:disable +from typing import Any, Dict, Optional import pytest import httpx from devtools_testutils import recorded_by_proxy, RecordedTransport from test_base import TestBase, servicePreparer -from typing import Any, Dict, Optional from openai import OpenAI from azure.core.credentials import TokenCredential from azure.ai.projects import AIProjectClient @@ -105,12 +105,12 @@ def test_responses(self, **kwargs): ) def test_user_agent_patching_via_response_create( self, project_ua, openai_default_header, expected_ua, patch_openai - ): + ): # pylint: disable=redefined-outer-name,unused-argument client = _build_client(project_ua, openai_default_header) calls = [] - def fake_send(request: httpx.Request, *args: Any, **kwargs: Any): + def fake_send(request: httpx.Request, *_args: Any, **kwargs: Any): # Capture headers that would be sent over the wire. 
calls.append(dict(request.headers)) return httpx.Response( diff --git a/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py b/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py index 67eb01609039..e816e6741737 100644 --- a/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py +++ b/sdk/ai/azure-ai-projects/tests/responses/test_responses_async.py @@ -5,16 +5,16 @@ # ------------------------------------ # cSpell:disable +from typing import Any, Dict, Optional import pytest import httpx -from typing import Any, Dict, Optional from openai import AsyncOpenAI -from azure.core.credentials import AccessToken -from azure.core.credentials_async import AsyncTokenCredential -from azure.ai.projects.aio import AIProjectClient from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import RecordedTransport +from azure.core.credentials import AccessToken +from azure.core.credentials_async import AsyncTokenCredential +from azure.ai.projects.aio import AIProjectClient BASE_OPENAI_UA = AsyncOpenAI(api_key="dummy").user_agent @@ -99,7 +99,7 @@ async def test_user_agent_patching_via_response_create(self, project_ua, openai_ calls = [] - async def fake_send(request: httpx.Request, *args: Any, **kwargs: Any): + async def fake_send(request: httpx.Request, *_args: Any, **kwargs: Any): # Capture headers that would be sent over the wire. 
calls.append(dict(request.headers)) return httpx.Response( diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py index 494a47c1a3a2..5416b3269e7b 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py @@ -73,6 +73,7 @@ def test_agent_tools_samples(self, sample_path: str, **kwargs) -> None: samples_to_skip=[ "sample_memory_advanced.py", "sample_memory_basic.py", + "sample_memory_crud.py", # Sample works fine. But AI thinks something is wrong. ], ), ) @@ -94,7 +95,7 @@ def test_memory_samples(self, sample_path: str, **kwargs) -> None: "sample_path", get_sample_paths( "agents", - samples_to_skip=[""], + samples_to_skip=["sample_workflow_multi_agent.py"], # I see in sample spew: "Event 10 type 'response.failed'" with error message in payload "The specified agent was not found. Please verify that the agent name and version are correct". ), ) @servicePreparer() diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py index a1d3a68a0b9b..18336f6122a9 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py @@ -170,6 +170,7 @@ class TestSamplesEvaluations(AzureRecordedTestCase): "sample_evaluations_builtin_with_csv.py", # Requires CSV file upload prerequisite "sample_synthetic_data_agent_evaluation.py", # Synthetic data gen is long-running preview feature "sample_synthetic_data_model_evaluation.py", # Synthetic data gen is long-running preview feature + "sample_eval_catalog_prompt_based_evaluators.py", # For some reason fails with 500 (Internal server error) ], ), ) diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py index 9ade1692ae0a..7cb0bd587649 100644 --- 
a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py @@ -3,8 +3,6 @@ # Licensed under the MIT License. # ------------------------------------ -import pytest -from azure.ai.projects import AIProjectClient from test_base import TestBase, servicePreparer from devtools_testutils import recorded_by_proxy, is_live diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py index d0aee2d61e4b..378108b22e84 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py @@ -3,8 +3,6 @@ # Licensed under the MIT License. # ------------------------------------ -import pytest -from azure.ai.projects.aio import AIProjectClient from test_base import TestBase, servicePreparer from devtools_testutils.aio import recorded_by_proxy_async from devtools_testutils import is_live diff --git a/sdk/ai/azure-ai-projects/tests/test_base.py b/sdk/ai/azure-ai-projects/tests/test_base.py index 74fa089c10c2..af3d3e3f30a3 100644 --- a/sdk/ai/azure-ai-projects/tests/test_base.py +++ b/sdk/ai/azure-ai-projects/tests/test_base.py @@ -10,13 +10,14 @@ import os import tempfile from typing import Optional, Any, Dict, Final, IO, Union, overload, Literal, TextIO, BinaryIO +from openai.types.responses import Response +from openai.types.conversations import ConversationItem +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader from azure.ai.projects.models import ( - ApiKeyCredentials, AzureAISearchIndex, Connection, ConnectionType, CredentialType, - CustomCredential, DatasetCredential, DatasetType, DatasetVersion, @@ -26,11 +27,8 @@ IndexType, ModelDeployment, ) -from openai.types.responses import Response -from openai.types.conversations import ConversationItem from azure.ai.projects.models._models import AgentDetails, AgentVersionDetails 
-from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader -from azure.ai.projects import AIProjectClient as AIProjectClient +from azure.ai.projects import AIProjectClient from azure.ai.projects.aio import AIProjectClient as AsyncAIProjectClient # Store reference to built-in open before any mocking occurs @@ -157,11 +155,10 @@ def patched_open_crlf_to_lf(*args, **kwargs): if args: # File path was passed as positional arg return _BUILTIN_OPEN(temp_path, *args[1:], **kwargs) - else: - # File path was passed as keyword arg - kwargs = kwargs.copy() - kwargs["file"] = temp_path - return _BUILTIN_OPEN(**kwargs) + # File path was passed as keyword arg + kwargs = kwargs.copy() + kwargs["file"] = temp_path + return _BUILTIN_OPEN(**kwargs) return _BUILTIN_OPEN(*args, **kwargs) @@ -187,7 +184,7 @@ class TestBase(AzureRecordedTestCase): } test_indexes_params = { - "index_name": f"test-index-name", + "index_name": "test-index-name", "index_version": "1", "ai_search_connection_name": "my-ai-search-connection", "ai_search_index_name": "my-ai-search-index", @@ -438,7 +435,7 @@ def validate_deployment( expected_model_deployment_name: Optional[str] = None, expected_model_publisher: Optional[str] = None, ): - assert type(deployment) == ModelDeployment + assert isinstance(deployment, ModelDeployment) assert deployment.type == DeploymentType.MODEL_DEPLOYMENT assert deployment.model_version is not None # Comment out the below, since I see that `Cohere-embed-v3-english` has an empty capabilities dict. 
@@ -465,7 +462,7 @@ def validate_index( TestBase.assert_equal_or_not_none(index.version, expected_index_version) if expected_index_type == IndexType.AZURE_SEARCH: - assert type(index) == AzureAISearchIndex + assert isinstance(index, AzureAISearchIndex) assert index.type == IndexType.AZURE_SEARCH TestBase.assert_equal_or_not_none(index.connection_name, expected_ai_search_connection_name) TestBase.assert_equal_or_not_none(index.index_name, expected_ai_search_index_name) @@ -485,7 +482,7 @@ def validate_dataset( if expected_dataset_type: assert dataset.type == expected_dataset_type else: - assert dataset.type == DatasetType.URI_FILE or dataset.type == DatasetType.URI_FOLDER + assert dataset.type in (DatasetType.URI_FILE, DatasetType.URI_FOLDER) TestBase.assert_equal_or_not_none(dataset.name, expected_dataset_name) TestBase.assert_equal_or_not_none(dataset.version, expected_dataset_version) @@ -632,7 +629,7 @@ def validate_fine_tuning_job( TestBase.assert_equal_or_not_none(job_obj.status, expected_status) def _request_callback(self, pipeline_request) -> None: - self.pipeline_request = pipeline_request + self.pipeline_request = pipeline_request # pylint: disable=attribute-defined-outside-init @staticmethod def _are_json_equal(json_str1: str, json_str2: str) -> bool: From cb4ef7b39a5fe13b785550e49a9b76a15dabeb48 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 12 Mar 2026 08:33:52 -0700 Subject: [PATCH 13/16] Re-emit from latest TypeSpec in branch `feature/foundry-staging` (#45659) --- sdk/ai/azure-ai-projects/README.md | 4 +- .../azure-ai-projects/apiview-properties.json | 9 +- .../ai/projects/aio/operations/_operations.py | 6902 ++++++++-------- .../azure/ai/projects/models/__init__.py | 10 + .../azure/ai/projects/models/_enums.py | 13 + .../azure/ai/projects/models/_models.py | 448 +- .../ai/projects/operations/_operations.py | 7170 +++++++++-------- .../azure-ai-projects/post-emitter-fixes.cmd | 2 +- 
.../agents/tools/sample_agent_file_search.py | 4 +- .../sample_agent_file_search_in_stream.py | 4 +- ...ample_agent_file_search_in_stream_async.py | 4 +- .../sample_synthetic_data_model_evaluation.py | 2 +- ...t_responses_instrumentor_workflow_async.py | 2 +- .../tests/samples/test_samples.py | 6 +- .../tests/samples/test_samples_evaluations.py | 2 +- 15 files changed, 7590 insertions(+), 6992 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 2b30726f33aa..93d3cd788fb8 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -276,9 +276,7 @@ asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../as # Upload file to vector store with open(asset_file_path, "rb") as f: - file = openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, file=f - ) + file = openai_client.vector_stores.files.upload_and_poll(vector_store_id=vector_store.id, file=f) print(f"File uploaded to vector store (id: {file.id})") tool = FileSearchTool(vector_store_ids=[vector_store.id]) diff --git a/sdk/ai/azure-ai-projects/apiview-properties.json b/sdk/ai/azure-ai-projects/apiview-properties.json index e57c56c7a12e..67119b7f250f 100644 --- a/sdk/ai/azure-ai-projects/apiview-properties.json +++ b/sdk/ai/azure-ai-projects/apiview-properties.json @@ -84,6 +84,7 @@ "azure.ai.projects.models.DeleteAgentResponse": "Azure.AI.Projects.DeleteAgentResponse", "azure.ai.projects.models.DeleteAgentVersionResponse": "Azure.AI.Projects.DeleteAgentVersionResponse", "azure.ai.projects.models.DeleteMemoryStoreResult": "Azure.AI.Projects.DeleteMemoryStoreResponse", + "azure.ai.projects.models.DeleteToolsetResponse": "Azure.AI.Projects.DeleteToolsetResponse", "azure.ai.projects.models.Deployment": "Azure.AI.Projects.Deployment", "azure.ai.projects.models.EmbeddingConfiguration": "Azure.AI.Projects.EmbeddingConfiguration", "azure.ai.projects.models.EntraIDCredentials": 
"Azure.AI.Projects.EntraIDCredentials", @@ -205,6 +206,7 @@ "azure.ai.projects.models.ToolChoiceWebSearchPreview20250311": "OpenAI.ToolChoiceWebSearchPreview20250311", "azure.ai.projects.models.ToolDescription": "Azure.AI.Projects.ToolDescription", "azure.ai.projects.models.ToolProjectConnection": "Azure.AI.Projects.ToolProjectConnection", + "azure.ai.projects.models.ToolsetObject": "Azure.AI.Projects.ToolsetObject", "azure.ai.projects.models.UserProfileMemoryItem": "Azure.AI.Projects.UserProfileMemoryItem", "azure.ai.projects.models.WebSearchApproximateLocation": "OpenAI.WebSearchApproximateLocation", "azure.ai.projects.models.WebSearchConfiguration": "Azure.AI.Projects.WebSearchConfiguration", @@ -213,6 +215,8 @@ "azure.ai.projects.models.WebSearchToolFilters": "OpenAI.WebSearchToolFilters", "azure.ai.projects.models.WeeklyRecurrenceSchedule": "Azure.AI.Projects.WeeklyRecurrenceSchedule", "azure.ai.projects.models.WorkflowAgentDefinition": "Azure.AI.Projects.WorkflowAgentDefinition", + "azure.ai.projects.models.WorkIQPreviewTool": "Azure.AI.Projects.WorkIQPreviewTool", + "azure.ai.projects.models.WorkIQPreviewToolParameters": "Azure.AI.Projects.WorkIQPreviewToolParameters", "azure.ai.projects.models.EvaluationTaxonomyInputType": "Azure.AI.Projects.EvaluationTaxonomyInputType", "azure.ai.projects.models.RiskCategory": "Azure.AI.Projects.RiskCategory", "azure.ai.projects.models.FoundryFeaturesOptInKeys": "Azure.AI.Projects.FoundryFeaturesOptInKeys", @@ -237,8 +241,7 @@ "azure.ai.projects.models.RecurrenceType": "Azure.AI.Projects.RecurrenceType", "azure.ai.projects.models.DayOfWeek": "Azure.AI.Projects.DayOfWeek", "azure.ai.projects.models.ScheduleTaskType": "Azure.AI.Projects.ScheduleTaskType", - "azure.ai.projects.models.AgentObjectType": "Azure.AI.Projects.AgentObjectType", - "azure.ai.projects.models.AgentKind": "Azure.AI.Projects.AgentKind", + "azure.ai.projects.models.ToolsetObjectType": "Azure.AI.Projects.ToolsetObjectType", 
"azure.ai.projects.models.ToolType": "OpenAI.ToolType", "azure.ai.projects.models.AzureAISearchQueryType": "Azure.AI.Projects.AzureAISearchQueryType", "azure.ai.projects.models.ContainerMemoryLimit": "OpenAI.ContainerMemoryLimit", @@ -253,6 +256,8 @@ "azure.ai.projects.models.FunctionShellToolParamEnvironmentType": "OpenAI.FunctionShellToolParamEnvironmentType", "azure.ai.projects.models.ContainerSkillType": "OpenAI.ContainerSkillType", "azure.ai.projects.models.SearchContextSize": "OpenAI.SearchContextSize", + "azure.ai.projects.models.AgentObjectType": "Azure.AI.Projects.AgentObjectType", + "azure.ai.projects.models.AgentKind": "Azure.AI.Projects.AgentKind", "azure.ai.projects.models.AgentProtocol": "Azure.AI.Projects.AgentProtocol", "azure.ai.projects.models.ToolChoiceParamType": "OpenAI.ToolChoiceParamType", "azure.ai.projects.models.TextResponseFormatConfigurationType": "OpenAI.TextResponseFormatConfigurationType", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 3499ba5338e5..5daa5483b071 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -38,8 +38,6 @@ from ...models._enums import _AgentDefinitionOptInKeys, _FoundryFeaturesOptInKeys from ...operations._operations import ( _get_agent_definition_opt_in_keys, - build_agents_create_agent_from_manifest_request, - build_agents_create_agent_request, build_agents_create_version_from_manifest_request, build_agents_create_version_request, build_agents_delete_request, @@ -48,8 +46,6 @@ build_agents_get_version_request, build_agents_list_request, build_agents_list_versions_request, - build_agents_update_agent_from_manifest_request, - build_agents_update_agent_request, build_beta_evaluation_taxonomies_create_request, build_beta_evaluation_taxonomies_delete_request, 
build_beta_evaluation_taxonomies_get_request, @@ -83,6 +79,11 @@ build_beta_schedules_get_run_request, build_beta_schedules_list_request, build_beta_schedules_list_runs_request, + build_beta_toolsets_create_request, + build_beta_toolsets_delete_request, + build_beta_toolsets_get_request, + build_beta_toolsets_list_request, + build_beta_toolsets_update_request, build_connections_get_request, build_connections_get_with_credentials_request, build_connections_list_request, @@ -116,7 +117,7 @@ _SERIALIZER.client_side_validation = False -class BetaOperations: +class BetaOperations: # pylint: disable=too-many-instance-attributes """ .. warning:: **DO NOT** instantiate this class directly. @@ -141,6 +142,7 @@ def __init__(self, *args, **kwargs) -> None: self.memory_stores = BetaMemoryStoresOperations(self._client, self._config, self._serialize, self._deserialize) self.red_teams = BetaRedTeamsOperations(self._client, self._config, self._serialize, self._deserialize) self.schedules = BetaSchedulesOperations(self._client, self._config, self._serialize, self._deserialize) + self.toolsets = BetaToolsetsOperations(self._client, self._config, self._serialize, self._deserialize) class AgentsOperations: @@ -225,65 +227,16 @@ async def get(self, agent_name: str, **kwargs: Any) -> _models.AgentDetails: return deserialized # type: ignore - @overload - async def _create_agent( - self, - *, - name: str, - definition: _models.AgentDefinition, - content_type: str = "application/json", - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - async def _create_agent( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - async def _create_agent( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... 
- @distributed_trace_async - async def _create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - name: str = _Unset, - definition: _models.AgentDefinition = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: - """Creates the agent. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. - - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :paramtype name: str - :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple - agent definition. Required. - :paramtype definition: ~azure.ai.projects.models.AgentDefinition - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. + async def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentResponse: + """Deletes an agent. - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentDetails. The AgentDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDetails + :param agent_name: The name of the agent to delete. Required. + :type agent_name: str + :return: DeleteAgentResponse. 
The DeleteAgentResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DeleteAgentResponse :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Optional[str] = _get_agent_definition_opt_in_keys if self._config.allow_preview else None # type: ignore error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -292,31 +245,14 @@ async def _create_agent( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentDetails] = kwargs.pop("cls", None) - - if body is _Unset: - if name is _Unset: - raise TypeError("missing required argument: name") - if definition is _Unset: - raise TypeError("missing required argument: definition") - body = {"definition": definition, "description": description, "metadata": metadata, "name": name} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.DeleteAgentResponse] = kwargs.pop("cls", None) - _request = build_agents_create_agent_request( - foundry_features=_foundry_features, - content_type=content_type, + _request = build_agents_delete_request( + agent_name=agent_name, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -349,15 +285,110 @@ async def _create_agent( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentDetails, response.json()) + deserialized = _deserialize(_models.DeleteAgentResponse, response.json()) if 
cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + @distributed_trace + def list( + self, + *, + kind: Optional[Union[str, _models.AgentKind]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.PageOrder]] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> AsyncItemPaged["_models.AgentDetails"]: + """Returns the list of all agents. + + :keyword kind: Filter agents by kind. If not provided, all agents are returned. Known values + are: "prompt", "hosted", and "workflow". Default value is None. + :paramtype kind: str or ~azure.ai.projects.models.AgentKind + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the + default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for + ascending order and``desc`` + for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.PageOrder + :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your + place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + Default value is None. 
+ :paramtype before: str + :return: An iterator like instance of AgentDetails + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.AgentDetails] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.AgentDetails]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(_continuation_token=None): + + _request = build_agents_list_request( + kind=kind, + limit=limit, + order=order, + after=_continuation_token, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.AgentDetails], + deserialized.get("data", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("last_id") or None, AsyncList(list_of_elem) + + async def get_next(_continuation_token=None): + _request = prepare_request(_continuation_token) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, 
model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + @overload - async def _update_agent( + async def create_version( self, agent_name: str, *, @@ -366,18 +397,84 @@ async def _update_agent( metadata: Optional[dict[str, str]] = None, description: Optional[str] = None, **kwargs: Any - ) -> _models.AgentDetails: ... + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple + agent definition. Required. + :paramtype definition: ~azure.ai.projects.models.AgentDefinition + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be + useful for storing additional information about the object in a structured + format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings + with a maximum length of 512 characters. Default value is None. + :paramtype metadata: dict[str, str] + :keyword description: A human-readable description of the agent. Default value is None. + :paramtype description: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload - async def _update_agent( + async def create_version( self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... 
+ ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload - async def _update_agent( + async def create_version( self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace_async - async def _update_agent( + async def create_version( self, agent_name: str, body: Union[JSON, IO[bytes]] = _Unset, @@ -386,11 +483,15 @@ async def _update_agent( metadata: Optional[dict[str, str]] = None, description: Optional[str] = None, **kwargs: Any - ) -> _models.AgentDetails: - """Updates the agent by adding a new version if there are any changes to the agent definition. If - no changes, returns the existing agent version. + ) -> _models.AgentVersionDetails: + """Create a new agent version. - :param agent_name: The name of the agent to retrieve. Required. + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. :type agent_name: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -406,8 +507,8 @@ async def _update_agent( :paramtype metadata: dict[str, str] :keyword description: A human-readable description of the agent. Default value is None. :paramtype description: str - :return: AgentDetails. The AgentDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDetails + :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Optional[str] = _get_agent_definition_opt_in_keys if self._config.allow_preview else None # type: ignore @@ -423,7 +524,7 @@ async def _update_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentDetails] = kwargs.pop("cls", None) + cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) if body is _Unset: if definition is _Unset: @@ -437,7 +538,7 @@ async def _update_agent( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_agents_update_agent_request( + _request = build_agents_create_version_request( agent_name=agent_name, foundry_features=_foundry_features, content_type=content_type, @@ -475,7 +576,7 @@ async def _update_agent( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentDetails, response.json()) + deserialized = _deserialize(_models.AgentVersionDetails, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -483,49 +584,117 @@ async def _update_agent( return deserialized # type: ignore @overload - async def _create_agent_from_manifest( + async def create_version_from_manifest( self, + agent_name: str, *, - name: str, manifest_id: str, parameter_values: dict[str, Any], content_type: str = "application/json", metadata: Optional[dict[str, str]] = None, description: Optional[str] = None, **kwargs: Any - ) -> _models.AgentDetails: ... + ) -> _models.AgentVersionDetails: + """Create a new agent version from a manifest. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. 
+ + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :keyword manifest_id: The manifest ID to import the agent version from. Required. + :paramtype manifest_id: str + :keyword parameter_values: The inputs to the manifest that will result in a fully materialized + Agent. Required. + :paramtype parameter_values: dict[str, any] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be + useful for storing additional information about the object in a structured + format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings + with a maximum length of 512 characters. Default value is None. + :paramtype metadata: dict[str, str] + :keyword description: A human-readable description of the agent. Default value is None. + :paramtype description: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload - async def _create_agent_from_manifest( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... + async def create_version_from_manifest( + self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version from a manifest. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. 
+ :type agent_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload - async def _create_agent_from_manifest( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... + async def create_version_from_manifest( + self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version from a manifest. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace_async - async def _create_agent_from_manifest( + async def create_version_from_manifest( self, + agent_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, - name: str = _Unset, manifest_id: str = _Unset, parameter_values: dict[str, Any] = _Unset, metadata: Optional[dict[str, str]] = None, description: Optional[str] = None, **kwargs: Any - ) -> _models.AgentDetails: - """Creates an agent from a manifest. 
+ ) -> _models.AgentVersionDetails: + """Create a new agent version from a manifest. - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The unique name that identifies the agent. Name can be used to + :param agent_name: The unique name that identifies the agent. Name can be used to retrieve/update/delete the agent. * Must start and end with alphanumeric characters, * Can contain hyphens in the middle * Must not exceed 63 characters. Required. - :paramtype name: str + :type agent_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] :keyword manifest_id: The manifest ID to import the agent version from. Required. :paramtype manifest_id: str :keyword parameter_values: The inputs to the manifest that will result in a fully materialized @@ -540,8 +709,8 @@ async def _create_agent_from_manifest( :paramtype metadata: dict[str, str] :keyword description: A human-readable description of the agent. Default value is None. :paramtype description: str - :return: AgentDetails. The AgentDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDetails + :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -556,11 +725,9 @@ async def _create_agent_from_manifest( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentDetails] = kwargs.pop("cls", None) + cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) if body is _Unset: - if name is _Unset: - raise TypeError("missing required argument: name") if manifest_id is _Unset: raise TypeError("missing required argument: manifest_id") if parameter_values is _Unset: @@ -569,7 +736,6 @@ async def _create_agent_from_manifest( "description": description, "manifest_id": manifest_id, "metadata": metadata, - "name": name, "parameter_values": parameter_values, } body = {k: v for k, v in body.items() if v is not None} @@ -580,7 +746,8 @@ async def _create_agent_from_manifest( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_agents_create_agent_from_manifest_request( + _request = build_agents_create_version_from_manifest_request( + agent_name=agent_name, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -616,69 +783,23 @@ async def _create_agent_from_manifest( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentDetails, response.json()) + deserialized = _deserialize(_models.AgentVersionDetails, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - async def _update_agent_from_manifest( - self, - agent_name: str, - *, - manifest_id: str, - parameter_values: dict[str, Any], - content_type: str = "application/json", - metadata: Optional[dict[str, str]] = 
None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - async def _update_agent_from_manifest( - self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - async def _update_agent_from_manifest( - self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... - @distributed_trace_async - async def _update_agent_from_manifest( - self, - agent_name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - manifest_id: str = _Unset, - parameter_values: dict[str, Any] = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: - """Updates the agent from a manifest by adding a new version if there are any changes to the agent - definition. If no changes, returns the existing agent version. + async def get_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> _models.AgentVersionDetails: + """Retrieves a specific version of an agent. - :param agent_name: The name of the agent to update. Required. + :param agent_name: The name of the agent to retrieve. Required. :type agent_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword manifest_id: The manifest ID to import the agent version from. Required. - :paramtype manifest_id: str - :keyword parameter_values: The inputs to the manifest that will result in a fully materialized - Agent. Required. - :paramtype parameter_values: dict[str, any] - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. 
Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentDetails. The AgentDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDetails + :param agent_version: The version of the agent to retrieve. Required. + :type agent_version: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -689,36 +810,15 @@ async def _update_agent_from_manifest( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentDetails] = kwargs.pop("cls", None) - - if body is _Unset: - if manifest_id is _Unset: - raise TypeError("missing required argument: manifest_id") - if parameter_values is _Unset: - raise TypeError("missing required argument: parameter_values") - body = { - "description": description, - "manifest_id": manifest_id, - "metadata": metadata, - "parameter_values": parameter_values, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) - _request = build_agents_update_agent_from_manifest_request( + _request = build_agents_get_version_request( agent_name=agent_name, - content_type=content_type, + 
agent_version=agent_version, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -751,7 +851,7 @@ async def _update_agent_from_manifest( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentDetails, response.json()) + deserialized = _deserialize(_models.AgentVersionDetails, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -759,13 +859,18 @@ async def _update_agent_from_manifest( return deserialized # type: ignore @distributed_trace_async - async def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentResponse: - """Deletes an agent. + async def delete_version( + self, agent_name: str, agent_version: str, **kwargs: Any + ) -> _models.DeleteAgentVersionResponse: + """Deletes a specific version of an agent. :param agent_name: The name of the agent to delete. Required. :type agent_name: str - :return: DeleteAgentResponse. The DeleteAgentResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DeleteAgentResponse + :param agent_version: The version of the agent to delete. Required. + :type agent_version: str + :return: DeleteAgentVersionResponse. 
The DeleteAgentVersionResponse is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.DeleteAgentVersionResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -779,10 +884,11 @@ async def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentRes _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.DeleteAgentResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.DeleteAgentVersionResponse] = kwargs.pop("cls", None) - _request = build_agents_delete_request( + _request = build_agents_delete_version_request( agent_name=agent_name, + agent_version=agent_version, api_version=self._config.api_version, headers=_headers, params=_params, @@ -816,7 +922,7 @@ async def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentRes if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.DeleteAgentResponse, response.json()) + deserialized = _deserialize(_models.DeleteAgentVersionResponse, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -824,20 +930,19 @@ async def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentRes return deserialized # type: ignore @distributed_trace - def list( + def list_versions( self, + agent_name: str, *, - kind: Optional[Union[str, _models.AgentKind]] = None, limit: Optional[int] = None, order: Optional[Union[str, _models.PageOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> AsyncItemPaged["_models.AgentDetails"]: - """Returns the list of all agents. + ) -> AsyncItemPaged["_models.AgentVersionDetails"]: + """Returns the list of versions of an agent. - :keyword kind: Filter agents by kind. If not provided, all agents are returned. Known values - are: "prompt", "hosted", and "workflow". Default value is None. 
- :paramtype kind: str or ~azure.ai.projects.models.AgentKind + :param agent_name: The name of the agent to retrieve versions for. Required. + :type agent_name: str :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. Default value is None. @@ -852,14 +957,14 @@ def list( subsequent call can include before=obj_foo in order to fetch the previous page of the list. Default value is None. :paramtype before: str - :return: An iterator like instance of AgentDetails - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.AgentDetails] + :return: An iterator like instance of AgentVersionDetails + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.AgentVersionDetails] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.AgentDetails]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.AgentVersionDetails]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -871,8 +976,8 @@ def list( def prepare_request(_continuation_token=None): - _request = build_agents_list_request( - kind=kind, + _request = build_agents_list_versions_request( + agent_name=agent_name, limit=limit, order=order, after=_continuation_token, @@ -890,7 +995,7 @@ def prepare_request(_continuation_token=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.AgentDetails], + List[_models.AgentVersionDetails], deserialized.get("data", []), ) if cls: @@ -918,131 +1023,34 @@ async def get_next(_continuation_token=None): return AsyncItemPaged(get_next, extract_data) - @overload - async def create_version( - self, - agent_name: str, - *, - definition: _models.AgentDefinition, - content_type: str = "application/json", - metadata: Optional[dict[str, str]] = None, - 
description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version. - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. +class EvaluationRulesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple - agent definition. Required. - :paramtype definition: ~azure.ai.projects.models.AgentDefinition - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_version( - self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. 
- - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_version( - self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`evaluation_rules` attribute. + """ - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async - async def create_version( - self, - agent_name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - definition: _models.AgentDefinition = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. - - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple - agent definition. Required. - :paramtype definition: ~azure.ai.projects.models.AgentDefinition - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. + async def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: + """Get an evaluation rule. - Keys are strings with a maximum length of 64 characters. 
Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Optional[str] = _get_agent_definition_opt_in_keys if self._config.allow_preview else None # type: ignore error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -1051,30 +1059,14 @@ async def create_version( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) - - if body is _Unset: - if definition is _Unset: - raise TypeError("missing required argument: definition") - body = {"definition": definition, "description": description, "metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.EvaluationRule] = kwargs.pop("cls", None) - _request = build_agents_create_version_request( - agent_name=agent_name, - foundry_features=_foundry_features, - content_type=content_type, + _request = 
build_evaluation_rules_get_request( + id=id, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -1098,152 +1090,136 @@ async def create_version( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) + raise HttpResponseError(response=response) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentVersionDetails, response.json()) + deserialized = _deserialize(_models.EvaluationRule, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - async def create_version_from_manifest( - self, - agent_name: str, - *, - manifest_id: str, - parameter_values: dict[str, Any], - content_type: str = "application/json", - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version from a manifest. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. - - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :keyword manifest_id: The manifest ID to import the agent version from. Required. - :paramtype manifest_id: str - :keyword parameter_values: The inputs to the manifest that will result in a fully materialized - Agent. Required. - :paramtype parameter_values: dict[str, any] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. + @distributed_trace_async + async def delete(self, id: str, **kwargs: Any) -> None: + """Delete an evaluation rule. - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - async def create_version_from_manifest( - self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version from a manifest. + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. + cls: ClsType[None] = kwargs.pop("cls", None) - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Required. 
- :type body: JSON + _request = build_evaluation_rules_delete_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + async def create_or_update( + self, id: str, evaluation_rule: _models.EvaluationRule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :return: EvaluationRule. 
The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_version_from_manifest( - self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version from a manifest. + async def create_or_update( + self, id: str, evaluation_rule: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Required. - :type body: IO[bytes] + @overload + async def create_or_update( + self, id: str, evaluation_rule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def create_version_from_manifest( - self, - agent_name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - manifest_id: str = _Unset, - parameter_values: dict[str, Any] = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version from a manifest. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. - - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword manifest_id: The manifest ID to import the agent version from. Required. - :paramtype manifest_id: str - :keyword parameter_values: The inputs to the manifest that will result in a fully materialized - Agent. Required. - :paramtype parameter_values: dict[str, any] - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. + async def create_or_update( + self, id: str, evaluation_rule: Union[_models.EvaluationRule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. 
- :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Is one of the following types: + EvaluationRule, JSON, IO[bytes] Required. + :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule or JSON or IO[bytes] + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Optional[Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW]] = _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW if self._config.allow_preview else None # type: ignore error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -1256,29 +1232,18 @@ async def create_version_from_manifest( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) + cls: ClsType[_models.EvaluationRule] = kwargs.pop("cls", None) - if body is _Unset: - if manifest_id is _Unset: - raise TypeError("missing required argument: manifest_id") - if parameter_values is _Unset: - raise TypeError("missing required argument: parameter_values") - body = { - "description": description, - "manifest_id": manifest_id, - "metadata": metadata, - "parameter_values": parameter_values, - } - body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(evaluation_rule, (IOBase, bytes)): + _content = 
evaluation_rule else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(evaluation_rule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_agents_create_version_from_manifest_request( - agent_name=agent_name, + _request = build_evaluation_rules_create_or_update_request( + id=id, + foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1298,41 +1263,52 @@ async def create_version_from_manifest( response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 201]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) + raise HttpResponseError(response=response) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentVersionDetails, response.json()) + deserialized = _deserialize(_models.EvaluationRule, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @distributed_trace_async - async def get_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> _models.AgentVersionDetails: - """Retrieves a specific version of an agent. + @distributed_trace + def list( + self, + *, + action_type: Optional[Union[str, _models.EvaluationRuleActionType]] = None, + agent_name: Optional[str] = None, + enabled: Optional[bool] = None, + **kwargs: Any + ) -> AsyncItemPaged["_models.EvaluationRule"]: + """List all evaluation rules. - :param agent_name: The name of the agent to retrieve. Required. 
- :type agent_name: str - :param agent_version: The version of the agent to retrieve. Required. - :type agent_version: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :keyword action_type: Filter by the type of evaluation rule. Known values are: + "continuousEvaluation" and "humanEvaluationPreview". Default value is None. + :paramtype action_type: str or ~azure.ai.projects.models.EvaluationRuleActionType + :keyword agent_name: Filter by the agent name. Default value is None. + :paramtype agent_name: str + :keyword enabled: Filter by the enabled status. Default value is None. + :paramtype enabled: bool + :return: An iterator like instance of EvaluationRule + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluationRule] :raises ~azure.core.exceptions.HttpResponseError: """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.EvaluationRule]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -1341,67 +1317,99 @@ async def get_version(self, agent_name: str, agent_version: str, **kwargs: Any) } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} + def prepare_request(next_link=None): + if not next_link: - cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) + _request = build_evaluation_rules_list_request( + action_type=action_type, + agent_name=agent_name, + enabled=enabled, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request = 
build_agents_get_version_request( - agent_name=agent_name, - agent_version=agent_version, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + return _request - response = pipeline_response.http_response + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.EvaluationRule], + deserialized.get("value", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs ) - raise HttpResponseError(response=response, model=error) + response = pipeline_response.http_response - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.AgentVersionDetails, response.json()) + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return pipeline_response - return deserialized # type: ignore + return AsyncItemPaged(get_next, extract_data) - @distributed_trace_async - async def delete_version( - self, agent_name: str, agent_version: str, **kwargs: Any - ) -> _models.DeleteAgentVersionResponse: - """Deletes a specific version of an agent. - :param agent_name: The name of the agent to delete. Required. - :type agent_name: str - :param agent_version: The version of the agent to delete. Required. - :type agent_version: str - :return: DeleteAgentVersionResponse. The DeleteAgentVersionResponse is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.DeleteAgentVersionResponse +class ConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`connections` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def _get(self, name: str, **kwargs: Any) -> _models.Connection: + """Get a connection by name, without populating connection credentials. + + :param name: The friendly name of the connection, provided by the user. Required. + :type name: str + :return: Connection. The Connection is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Connection :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1415,11 +1423,10 @@ async def delete_version( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.DeleteAgentVersionResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.Connection] = kwargs.pop("cls", None) - _request = build_agents_delete_version_request( - agent_name=agent_name, - agent_version=agent_version, + _request = build_connections_get_request( + name=name, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1444,58 +1451,114 @@ async def delete_version( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) if _stream: 
deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.DeleteAgentVersionResponse, response.json()) + deserialized = _deserialize(_models.Connection, response.json()) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Connection: + """Get a connection by name, with its connection credentials. + + :param name: The friendly name of the connection, provided by the user. Required. + :type name: str + :return: Connection. The Connection is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Connection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Connection] = kwargs.pop("cls", None) + + _request = build_connections_get_with_credentials_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await 
response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.Connection, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @distributed_trace - def list_versions( + def list( self, - agent_name: str, *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.PageOrder]] = None, - before: Optional[str] = None, + connection_type: Optional[Union[str, _models.ConnectionType]] = None, + default_connection: Optional[bool] = None, **kwargs: Any - ) -> AsyncItemPaged["_models.AgentVersionDetails"]: - """Returns the list of versions of an agent. + ) -> AsyncItemPaged["_models.Connection"]: + """List all connections in the project, without populating connection credentials. - :param agent_name: The name of the agent to retrieve versions for. Required. - :type agent_name: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the - default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for - ascending order and``desc`` - for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.projects.models.PageOrder - :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your - place in the list. 
- For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - Default value is None. - :paramtype before: str - :return: An iterator like instance of AgentVersionDetails - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.AgentVersionDetails] + :keyword connection_type: List connections of this specific type. Known values are: + "AzureOpenAI", "AzureBlob", "AzureStorageAccount", "CognitiveSearch", "CosmosDB", "ApiKey", + "AppConfig", "AppInsights", "CustomKeys", and "RemoteTool_Preview". Default value is None. + :paramtype connection_type: str or ~azure.ai.projects.models.ConnectionType + :keyword default_connection: List connections that are default connections. Default value is + None. + :paramtype default_connection: bool + :return: An iterator like instance of Connection + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Connection] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.AgentVersionDetails]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Connection]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -1505,36 +1568,57 @@ def list_versions( } error_map.update(kwargs.pop("error_map", {}) or {}) - def prepare_request(_continuation_token=None): + def prepare_request(next_link=None): + if not next_link: + + _request = build_connections_list_request( + connection_type=connection_type, + default_connection=default_connection, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request = build_agents_list_versions_request( - agent_name=agent_name, - limit=limit, - order=order, - after=_continuation_token, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) return _request async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.AgentVersionDetails], - deserialized.get("data", []), + List[_models.Connection], + deserialized.get("value", []), ) if cls: list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("last_id") or None, AsyncList(list_of_elem) + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - async def get_next(_continuation_token=None): - _request = prepare_request(_continuation_token) + async def get_next(next_link=None): + _request = prepare_request(next_link) _stream = False pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access @@ -1544,25 +1628,21 @@ async def 
get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) + raise HttpResponseError(response=response) return pipeline_response return AsyncItemPaged(get_next, extract_data) -class EvaluationRulesOperations: +class DatasetsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`evaluation_rules` attribute. + :attr:`datasets` attribute. """ def __init__(self, *args, **kwargs) -> None: @@ -1572,16 +1652,21 @@ def __init__(self, *args, **kwargs) -> None: self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - @distributed_trace_async - async def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: - """Get an evaluation rule. + @distributed_trace + def list_versions(self, name: str, **kwargs: Any) -> AsyncItemPaged["_models.DatasetVersion"]: + """List all versions of the given DatasetVersion. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :param name: The name of the resource. Required. 
+ :type name: str + :return: An iterator like instance of DatasetVersion + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.DatasetVersion] :raises ~azure.core.exceptions.HttpResponseError: """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.DatasetVersion]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -1590,13 +1675,185 @@ async def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.EvaluationRule] = kwargs.pop("cls", None) + def prepare_request(next_link=None): + if not next_link: - _request = build_evaluation_rules_get_request( - id=id, + _request = build_datasets_list_versions_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return 
_request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.DatasetVersion], + deserialized.get("value", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncItemPaged["_models.DatasetVersion"]: + """List the latest version of each DatasetVersion. 
+ + :return: An iterator like instance of DatasetVersion + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.DatasetVersion] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.DatasetVersion]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_datasets_list_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.DatasetVersion], + deserialized.get("value", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + 
return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVersion: + """Get the specific version of the DatasetVersion. The service returns 404 Not Found error if the + DatasetVersion does not exist. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to retrieve. Required. + :type version: str + :return: DatasetVersion. 
The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.DatasetVersion] = kwargs.pop("cls", None) + + _request = build_datasets_get_request( + name=name, + version=version, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1626,7 +1883,7 @@ async def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationRule, response.json()) + deserialized = _deserialize(_models.DatasetVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1634,11 +1891,14 @@ async def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: return deserialized # type: ignore @distributed_trace_async - async def delete(self, id: str, **kwargs: Any) -> None: - """Delete an evaluation rule. + async def delete(self, name: str, version: str, **kwargs: Any) -> None: + """Delete the specific version of the DatasetVersion. The service returns 204 No Content if the + DatasetVersion was deleted successfully or if the DatasetVersion does not exist. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str + :param name: The name of the resource. Required. + :type name: str + :param version: The version of the DatasetVersion to delete. Required. 
+ :type version: str :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -1656,8 +1916,9 @@ async def delete(self, id: str, **kwargs: Any) -> None: cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_evaluation_rules_delete_request( - id=id, + _request = build_datasets_delete_request( + name=name, + version=version, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1683,74 +1944,99 @@ async def delete(self, id: str, **kwargs: Any) -> None: @overload async def create_or_update( - self, id: str, evaluation_rule: _models.EvaluationRule, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationRule: - """Create or update an evaluation rule. + self, + name: str, + version: str, + dataset_version: _models.DatasetVersion, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.DatasetVersion: + """Create a new or update an existing DatasetVersion with the given version id. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :param evaluation_rule: Evaluation rule resource. Required. - :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to create or update. Required. + :type version: str + :param dataset_version: The DatasetVersion to create or update. Required. + :type dataset_version: ~azure.ai.projects.models.DatasetVersion :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :return: DatasetVersion. 
The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create_or_update( - self, id: str, evaluation_rule: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationRule: - """Create or update an evaluation rule. + self, + name: str, + version: str, + dataset_version: JSON, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.DatasetVersion: + """Create a new or update an existing DatasetVersion with the given version id. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :param evaluation_rule: Evaluation rule resource. Required. - :type evaluation_rule: JSON + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to create or update. Required. + :type version: str + :param dataset_version: The DatasetVersion to create or update. Required. + :type dataset_version: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create_or_update( - self, id: str, evaluation_rule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationRule: - """Create or update an evaluation rule. 
+ self, + name: str, + version: str, + dataset_version: IO[bytes], + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.DatasetVersion: + """Create a new or update an existing DatasetVersion with the given version id. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :param evaluation_rule: Evaluation rule resource. Required. - :type evaluation_rule: IO[bytes] + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to create or update. Required. + :type version: str + :param dataset_version: The DatasetVersion to create or update. Required. + :type dataset_version: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def create_or_update( - self, id: str, evaluation_rule: Union[_models.EvaluationRule, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationRule: - """Create or update an evaluation rule. + self, name: str, version: str, dataset_version: Union[_models.DatasetVersion, JSON, IO[bytes]], **kwargs: Any + ) -> _models.DatasetVersion: + """Create a new or update an existing DatasetVersion with the given version id. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :param evaluation_rule: Evaluation rule resource. Is one of the following types: - EvaluationRule, JSON, IO[bytes] Required. 
- :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule or JSON or IO[bytes] - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to create or update. Required. + :type version: str + :param dataset_version: The DatasetVersion to create or update. Is one of the following types: + DatasetVersion, JSON, IO[bytes] Required. + :type dataset_version: ~azure.ai.projects.models.DatasetVersion or JSON or IO[bytes] + :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Optional[Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW]] = _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW if self._config.allow_preview else None # type: ignore error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -1763,18 +2049,18 @@ async def create_or_update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationRule] = kwargs.pop("cls", None) + cls: ClsType[_models.DatasetVersion] = kwargs.pop("cls", None) - content_type = content_type or "application/json" + content_type = content_type or "application/merge-patch+json" _content = None - if isinstance(evaluation_rule, (IOBase, bytes)): - _content = evaluation_rule + if isinstance(dataset_version, (IOBase, bytes)): + _content = dataset_version else: - _content = json.dumps(evaluation_rule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(dataset_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_evaluation_rules_create_or_update_request( - id=id, - 
foundry_features=_foundry_features, + _request = build_datasets_create_or_update_request( + name=name, + version=version, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1806,40 +2092,113 @@ async def create_or_update( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationRule, response.json()) + deserialized = _deserialize(_models.DatasetVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @distributed_trace - def list( + @overload + async def pending_upload( self, + name: str, + version: str, + pending_upload_request: _models.PendingUploadRequest, *, - action_type: Optional[Union[str, _models.EvaluationRuleActionType]] = None, - agent_name: Optional[str] = None, - enabled: Optional[bool] = None, + content_type: str = "application/json", **kwargs: Any - ) -> AsyncItemPaged["_models.EvaluationRule"]: - """List all evaluation rules. + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of a dataset for a specific version. - :keyword action_type: Filter by the type of evaluation rule. Known values are: - "continuousEvaluation" and "humanEvaluationPreview". Default value is None. - :paramtype action_type: str or ~azure.ai.projects.models.EvaluationRuleActionType - :keyword agent_name: Filter by the agent name. Default value is None. - :paramtype agent_name: str - :keyword enabled: Filter by the enabled status. Default value is None. - :paramtype enabled: bool - :return: An iterator like instance of EvaluationRule - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluationRule] + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to operate on. Required. 
+ :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.EvaluationRule]] = kwargs.pop("cls", None) + @overload + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of a dataset for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of a dataset for a specific version. 
+ + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of a dataset for a specific version. + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Is one of the following + types: PendingUploadRequest, JSON, IO[bytes] Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or + IO[bytes] + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -1848,99 +2207,70 @@ def list( } error_map.update(kwargs.pop("error_map", {}) or {}) - def prepare_request(next_link=None): - if not next_link: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - _request = build_evaluation_rules_list_request( - action_type=action_type, - agent_name=agent_name, - enabled=enabled, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + content_type = content_type or "application/json" + _content = None + if isinstance(pending_upload_request, (IOBase, bytes)): + _content = pending_upload_request + 
else: + _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - return _request + _request = build_datasets_pending_upload_request( + name=name, + version=version, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - async def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.EvaluationRule], - deserialized.get("value", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) + response = pipeline_response.http_response + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) 
+ raise HttpResponseError(response=response) -class ConnectionsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.PendingUploadResponse, response.json()) - Instead, you should access the following operations through - :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`connections` attribute. - """ + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + return deserialized # type: ignore @distributed_trace_async - async def _get(self, name: str, **kwargs: Any) -> _models.Connection: - """Get a connection by name, without populating connection credentials. + async def get_credentials(self, name: str, version: str, **kwargs: Any) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with a Dataset version. - :param name: The friendly name of the connection, provided by the user. Required. + :param name: The name of the resource. Required. :type name: str - :return: Connection. The Connection is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Connection + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1954,10 +2284,11 @@ async def _get(self, name: str, **kwargs: Any) -> _models.Connection: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Connection] = kwargs.pop("cls", None) + cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) - _request = build_connections_get_request( + _request = build_datasets_get_credentials_request( name=name, + version=version, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1984,29 +2315,42 @@ async def _get(self, name: str, **kwargs: Any) -> _models.Connection: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Connection, response.json()) + deserialized = _deserialize(_models.DatasetCredential, response.json()) if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + +class DeploymentsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`deployments` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @distributed_trace_async - async def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Connection: - """Get a connection by name, with its connection credentials. + async def get(self, name: str, **kwargs: Any) -> _models.Deployment: + """Get a deployed model. - :param name: The friendly name of the connection, provided by the user. Required. + :param name: Name of the deployment. Required. :type name: str - :return: Connection. The Connection is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Connection + :return: Deployment. 
The Deployment is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Deployment :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2020,9 +2364,9 @@ async def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Conne _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Connection] = kwargs.pop("cls", None) + cls: ClsType[_models.Deployment] = kwargs.pop("cls", None) - _request = build_connections_get_with_credentials_request( + _request = build_deployments_get_request( name=name, api_version=self._config.api_version, headers=_headers, @@ -2058,7 +2402,7 @@ async def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Conne if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Connection, response.json()) + deserialized = _deserialize(_models.Deployment, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -2069,27 +2413,29 @@ async def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Conne def list( self, *, - connection_type: Optional[Union[str, _models.ConnectionType]] = None, - default_connection: Optional[bool] = None, + model_publisher: Optional[str] = None, + model_name: Optional[str] = None, + deployment_type: Optional[Union[str, _models.DeploymentType]] = None, **kwargs: Any - ) -> AsyncItemPaged["_models.Connection"]: - """List all connections in the project, without populating connection credentials. + ) -> AsyncItemPaged["_models.Deployment"]: + """List all deployed models in the project. - :keyword connection_type: List connections of this specific type. Known values are: - "AzureOpenAI", "AzureBlob", "AzureStorageAccount", "CognitiveSearch", "CosmosDB", "ApiKey", - "AppConfig", "AppInsights", "CustomKeys", and "RemoteTool_Preview". Default value is None. 
- :paramtype connection_type: str or ~azure.ai.projects.models.ConnectionType - :keyword default_connection: List connections that are default connections. Default value is - None. - :paramtype default_connection: bool - :return: An iterator like instance of Connection - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Connection] + :keyword model_publisher: Model publisher to filter models by. Default value is None. + :paramtype model_publisher: str + :keyword model_name: Model name (the publisher specific name) to filter models by. Default + value is None. + :paramtype model_name: str + :keyword deployment_type: Type of deployment to filter list by. "ModelDeployment" Default value + is None. + :paramtype deployment_type: str or ~azure.ai.projects.models.DeploymentType + :return: An iterator like instance of Deployment + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Deployment] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Connection]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Deployment]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2102,9 +2448,10 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_connections_list_request( - connection_type=connection_type, - default_connection=default_connection, + _request = build_deployments_list_request( + model_publisher=model_publisher, + model_name=model_name, + deployment_type=deployment_type, api_version=self._config.api_version, headers=_headers, params=_params, @@ -2141,7 +2488,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Connection], + List[_models.Deployment], deserialized.get("value", []), ) if cls: @@ -2166,14 
+2513,14 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) -class DatasetsOperations: +class IndexesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`datasets` attribute. + :attr:`indexes` attribute. """ def __init__(self, *args, **kwargs) -> None: @@ -2184,19 +2531,19 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list_versions(self, name: str, **kwargs: Any) -> AsyncItemPaged["_models.DatasetVersion"]: - """List all versions of the given DatasetVersion. + def list_versions(self, name: str, **kwargs: Any) -> AsyncItemPaged["_models.Index"]: + """List all versions of the given Index. :param name: The name of the resource. Required. :type name: str - :return: An iterator like instance of DatasetVersion - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.DatasetVersion] + :return: An iterator like instance of Index + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Index] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.DatasetVersion]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Index]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2209,7 +2556,7 @@ def list_versions(self, name: str, **kwargs: Any) -> AsyncItemPaged["_models.Dat def prepare_request(next_link=None): if not next_link: - _request = build_datasets_list_versions_request( + _request = build_indexes_list_versions_request( name=name, api_version=self._config.api_version, headers=_headers, @@ -2247,7 +2594,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): 
deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.DatasetVersion], + List[_models.Index], deserialized.get("value", []), ) if cls: @@ -2272,17 +2619,17 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace - def list(self, **kwargs: Any) -> AsyncItemPaged["_models.DatasetVersion"]: - """List the latest version of each DatasetVersion. + def list(self, **kwargs: Any) -> AsyncItemPaged["_models.Index"]: + """List the latest version of each Index. - :return: An iterator like instance of DatasetVersion - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.DatasetVersion] + :return: An iterator like instance of Index + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Index] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.DatasetVersion]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Index]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2295,7 +2642,7 @@ def list(self, **kwargs: Any) -> AsyncItemPaged["_models.DatasetVersion"]: def prepare_request(next_link=None): if not next_link: - _request = build_datasets_list_request( + _request = build_indexes_list_request( api_version=self._config.api_version, headers=_headers, params=_params, @@ -2332,7 +2679,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.DatasetVersion], + List[_models.Index], deserialized.get("value", []), ) if cls: @@ -2357,16 +2704,16 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVersion: - """Get the specific version of the 
DatasetVersion. The service returns 404 Not Found error if the - DatasetVersion does not exist. + async def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: + """Get the specific version of the Index. The service returns 404 Not Found error if the Index + does not exist. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to retrieve. Required. + :param version: The specific version id of the Index to retrieve. Required. :type version: str - :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2380,9 +2727,9 @@ async def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVe _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.DatasetVersion] = kwargs.pop("cls", None) + cls: ClsType[_models.Index] = kwargs.pop("cls", None) - _request = build_datasets_get_request( + _request = build_indexes_get_request( name=name, version=version, api_version=self._config.api_version, @@ -2414,7 +2761,7 @@ async def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVe if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.DatasetVersion, response.json()) + deserialized = _deserialize(_models.Index, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2423,12 +2770,12 @@ async def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVe @distributed_trace_async async def delete(self, name: str, version: str, **kwargs: Any) -> None: - """Delete the specific version of the DatasetVersion. 
The service returns 204 No Content if the - DatasetVersion was deleted successfully or if the DatasetVersion does not exist. + """Delete the specific version of the Index. The service returns 204 No Content if the Index was + deleted successfully or if the Index does not exist. :param name: The name of the resource. Required. :type name: str - :param version: The version of the DatasetVersion to delete. Required. + :param version: The version of the Index to delete. Required. :type version: str :return: None :rtype: None @@ -2447,7 +2794,7 @@ async def delete(self, name: str, version: str, **kwargs: Any) -> None: cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_datasets_delete_request( + _request = build_indexes_delete_request( name=name, version=version, api_version=self._config.api_version, @@ -2478,50 +2825,44 @@ async def create_or_update( self, name: str, version: str, - dataset_version: _models.DatasetVersion, + index: _models.Index, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.DatasetVersion: - """Create a new or update an existing DatasetVersion with the given version id. + ) -> _models.Index: + """Create a new or update an existing Index with the given version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to create or update. Required. + :param version: The specific version id of the Index to create or update. Required. :type version: str - :param dataset_version: The DatasetVersion to create or update. Required. - :type dataset_version: ~azure.ai.projects.models.DatasetVersion + :param index: The Index to create or update. Required. + :type index: ~azure.ai.projects.models.Index :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: DatasetVersion. 
The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create_or_update( - self, - name: str, - version: str, - dataset_version: JSON, - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.DatasetVersion: - """Create a new or update an existing DatasetVersion with the given version id. + self, name: str, version: str, index: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Index: + """Create a new or update an existing Index with the given version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to create or update. Required. + :param version: The specific version id of the Index to create or update. Required. :type version: str - :param dataset_version: The DatasetVersion to create or update. Required. - :type dataset_version: JSON + :param index: The Index to create or update. Required. + :type index: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2530,42 +2871,42 @@ async def create_or_update( self, name: str, version: str, - dataset_version: IO[bytes], + index: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.DatasetVersion: - """Create a new or update an existing DatasetVersion with the given version id. 
+ ) -> _models.Index: + """Create a new or update an existing Index with the given version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to create or update. Required. + :param version: The specific version id of the Index to create or update. Required. :type version: str - :param dataset_version: The DatasetVersion to create or update. Required. - :type dataset_version: IO[bytes] + :param index: The Index to create or update. Required. + :type index: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def create_or_update( - self, name: str, version: str, dataset_version: Union[_models.DatasetVersion, JSON, IO[bytes]], **kwargs: Any - ) -> _models.DatasetVersion: - """Create a new or update an existing DatasetVersion with the given version id. + self, name: str, version: str, index: Union[_models.Index, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Index: + """Create a new or update an existing Index with the given version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to create or update. Required. + :param version: The specific version id of the Index to create or update. Required. :type version: str - :param dataset_version: The DatasetVersion to create or update. Is one of the following types: - DatasetVersion, JSON, IO[bytes] Required. - :type dataset_version: ~azure.ai.projects.models.DatasetVersion or JSON or IO[bytes] - :return: DatasetVersion. 
The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :param index: The Index to create or update. Is one of the following types: Index, JSON, + IO[bytes] Required. + :type index: ~azure.ai.projects.models.Index or JSON or IO[bytes] + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2580,16 +2921,16 @@ async def create_or_update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DatasetVersion] = kwargs.pop("cls", None) + cls: ClsType[_models.Index] = kwargs.pop("cls", None) content_type = content_type or "application/merge-patch+json" _content = None - if isinstance(dataset_version, (IOBase, bytes)): - _content = dataset_version + if isinstance(index, (IOBase, bytes)): + _content = index else: - _content = json.dumps(dataset_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_datasets_create_or_update_request( + _request = build_indexes_create_or_update_request( name=name, version=version, content_type=content_type, @@ -2623,113 +2964,44 @@ async def create_or_update( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.DatasetVersion, response.json()) + deserialized = _deserialize(_models.Index, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - async def pending_upload( - self, - name: str, - version: str, - pending_upload_request: _models.PendingUploadRequest, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get 
an existing pending upload of a dataset for a specific version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def pending_upload( - self, - name: str, - version: str, - pending_upload_request: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of a dataset for a specific version. - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ +class BetaEvaluationTaxonomiesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
- @overload - async def pending_upload( - self, - name: str, - version: str, - pending_upload_request: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of a dataset for a specific version. + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`evaluation_taxonomies` attribute. + """ - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async - async def pending_upload( - self, - name: str, - version: str, - pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of a dataset for a specific version. 
+ async def get(self, name: str, **kwargs: Any) -> _models.EvaluationTaxonomy: + """Get an evaluation run by name. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Is one of the following - types: PendingUploadRequest, JSON, IO[bytes] Required. - :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or - IO[bytes] - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -2738,25 +3010,15 @@ async def pending_upload( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(pending_upload_request, (IOBase, bytes)): - _content = pending_upload_request - else: - _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) - _request = build_datasets_pending_upload_request( + _request = 
build_beta_evaluation_taxonomies_get_request( name=name, - version=version, - content_type=content_type, + foundry_features=_foundry_features, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -2785,25 +3047,35 @@ async def pending_upload( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.PendingUploadResponse, response.json()) + deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @distributed_trace_async - async def get_credentials(self, name: str, version: str, **kwargs: Any) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with a Dataset version. + @distributed_trace + def list( + self, *, input_name: Optional[str] = None, input_type: Optional[str] = None, **kwargs: Any + ) -> AsyncItemPaged["_models.EvaluationTaxonomy"]: + """List evaluation taxonomies. - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential + :keyword input_name: Filter by the evaluation input name. Default value is None. + :paramtype input_name: str + :keyword input_type: Filter by taxonomy input type. Default value is None. 
+ :paramtype input_type: str + :return: An iterator like instance of EvaluationTaxonomy + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluationTaxonomy] :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.EvaluationTaxonomy]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -2812,78 +3084,200 @@ async def get_credentials(self, name: str, version: str, **kwargs: Any) -> _mode } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} + def prepare_request(next_link=None): + if not next_link: - cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) + _request = build_beta_evaluation_taxonomies_list_request( + foundry_features=_foundry_features, + input_name=input_name, + input_type=input_type, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request = build_datasets_get_credentials_request( - name=name, - version=version, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = 
case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", + urllib.parse.urljoin(next_link, _parsed_next_link.path), + params=_next_request_params, + headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + return _request - response = pipeline_response.http_response + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.EvaluationTaxonomy], + deserialized.get("value", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + 
return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def delete(self, name: str, **kwargs: Any) -> None: + """Delete an evaluation taxonomy by name. + + :param name: The name of the resource. Required. + :type name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_beta_evaluation_taxonomies_delete_request( + name=name, + foundry_features=_foundry_features, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.DatasetCredential, response.json()) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, None, {}) # type: ignore - return deserialized # type: ignore + @overload + async 
def create( + self, name: str, body: _models.EvaluationTaxonomy, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Create an evaluation taxonomy. + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: ~azure.ai.projects.models.EvaluationTaxonomy + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ -class DeploymentsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + @overload + async def create( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Create an evaluation taxonomy. - Instead, you should access the following operations through - :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`deployments` attribute. - """ + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. 
The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @overload + async def create( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Create an evaluation taxonomy. + + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace_async - async def get(self, name: str, **kwargs: Any) -> _models.Deployment: - """Get a deployed model. + async def create( + self, name: str, body: Union[_models.EvaluationTaxonomy, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Create an evaluation taxonomy. - :param name: Name of the deployment. Required. + :param name: The name of the evaluation taxonomy. Required. :type name: str - :return: Deployment. The Deployment is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Deployment + :param body: The evaluation taxonomy. 
Is one of the following types: EvaluationTaxonomy, JSON, + IO[bytes] Required. + :type body: ~azure.ai.projects.models.EvaluationTaxonomy or JSON or IO[bytes] + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -2892,14 +3286,25 @@ async def get(self, name: str, **kwargs: Any) -> _models.Deployment: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Deployment] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) - _request = build_deployments_get_request( + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_evaluation_taxonomies_create_request( name=name, + foundry_features=_foundry_features, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -2916,7 +3321,7 @@ async def get(self, name: str, **kwargs: Any) -> _models.Deployment: response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 201]: if _stream: try: await response.read() # Load the body in memory and close the socket @@ -2925,133 +3330,159 @@ async def get(self, name: str, **kwargs: Any) -> 
_models.Deployment: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Deployment, response.json()) + deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @distributed_trace - def list( - self, - *, - model_publisher: Optional[str] = None, - model_name: Optional[str] = None, - deployment_type: Optional[Union[str, _models.DeploymentType]] = None, - **kwargs: Any - ) -> AsyncItemPaged["_models.Deployment"]: - """List all deployed models in the project. + @overload + async def update( + self, name: str, body: _models.EvaluationTaxonomy, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Update an evaluation taxonomy. - :keyword model_publisher: Model publisher to filter models by. Default value is None. - :paramtype model_publisher: str - :keyword model_name: Model name (the publisher specific name) to filter models by. Default - value is None. - :paramtype model_name: str - :keyword deployment_type: Type of deployment to filter list by. "ModelDeployment" Default value - is None. - :paramtype deployment_type: str or ~azure.ai.projects.models.DeploymentType - :return: An iterator like instance of Deployment - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Deployment] + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. 
+ :type body: ~azure.ai.projects.models.EvaluationTaxonomy + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy :raises ~azure.core.exceptions.HttpResponseError: """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Deployment]] = kwargs.pop("cls", None) + @overload + async def update( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Update an evaluation taxonomy. - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ - def prepare_request(next_link=None): - if not next_link: + @overload + async def update( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Update an evaluation taxonomy. 
- _request = build_deployments_list_request( - model_publisher=model_publisher, - model_name=model_name, - deployment_type=deployment_type, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + @distributed_trace_async + async def update( + self, name: str, body: Union[_models.EvaluationTaxonomy, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Update an evaluation taxonomy. - return _request + :param name: The name of the evaluation taxonomy. Required. 
+ :type name: str + :param body: The evaluation taxonomy. Is one of the following types: EvaluationTaxonomy, JSON, + IO[bytes] Required. + :type body: ~azure.ai.projects.models.EvaluationTaxonomy or JSON or IO[bytes] + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - async def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.Deployment], - deserialized.get("value", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - async def get_next(next_link=None): - _request = prepare_request(next_link) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise HttpResponseError(response=response) + _request = build_beta_evaluation_taxonomies_update_request( + name=name, + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - return pipeline_response + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - return AsyncItemPaged(get_next, extract_data) + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) -class IndexesOperations: + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class BetaEvaluatorsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`indexes` attribute. + :attr:`evaluators` attribute. 
""" def __init__(self, *args, **kwargs) -> None: @@ -3062,19 +3493,36 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list_versions(self, name: str, **kwargs: Any) -> AsyncItemPaged["_models.Index"]: - """List all versions of the given Index. + def list_versions( + self, + name: str, + *, + type: Optional[Union[Literal["builtin"], Literal["custom"], Literal["all"], str]] = None, + limit: Optional[int] = None, + **kwargs: Any + ) -> AsyncItemPaged["_models.EvaluatorVersion"]: + """List all versions of the given evaluator. :param name: The name of the resource. Required. :type name: str - :return: An iterator like instance of Index - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Index] + :keyword type: Filter evaluators by type. Possible values: 'all', 'custom', 'builtin'. Is one + of the following types: Literal["builtin"], Literal["custom"], Literal["all"], str Default + value is None. + :paramtype type: str or str or str or str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. 
+ :paramtype limit: int + :return: An iterator like instance of EvaluatorVersion + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluatorVersion] :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Index]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.EvaluatorVersion]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3087,8 +3535,11 @@ def list_versions(self, name: str, **kwargs: Any) -> AsyncItemPaged["_models.Ind def prepare_request(next_link=None): if not next_link: - _request = build_indexes_list_versions_request( + _request = build_beta_evaluators_list_versions_request( name=name, + foundry_features=_foundry_features, + type=type, + limit=limit, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3111,7 +3562,10 @@ def prepare_request(next_link=None): ) _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + "GET", + urllib.parse.urljoin(next_link, _parsed_next_link.path), + params=_next_request_params, + headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, ) path_format_arguments = { "endpoint": self._serialize.url( @@ -3125,7 +3579,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Index], + List[_models.EvaluatorVersion], deserialized.get("value", []), ) if cls: @@ -3150,17 +3604,33 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace - def list(self, **kwargs: 
Any) -> AsyncItemPaged["_models.Index"]: - """List the latest version of each Index. + def list( + self, + *, + type: Optional[Union[Literal["builtin"], Literal["custom"], Literal["all"], str]] = None, + limit: Optional[int] = None, + **kwargs: Any + ) -> AsyncItemPaged["_models.EvaluatorVersion"]: + """List the latest version of each evaluator. - :return: An iterator like instance of Index - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Index] + :keyword type: Filter evaluators by type. Possible values: 'all', 'custom', 'builtin'. Is one + of the following types: Literal["builtin"], Literal["custom"], Literal["all"], str Default + value is None. + :paramtype type: str or str or str or str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :return: An iterator like instance of EvaluatorVersion + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluatorVersion] :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Index]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.EvaluatorVersion]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3173,7 +3643,10 @@ def list(self, **kwargs: Any) -> AsyncItemPaged["_models.Index"]: def prepare_request(next_link=None): if not next_link: - _request = build_indexes_list_request( + _request = build_beta_evaluators_list_request( + foundry_features=_foundry_features, + type=type, + limit=limit, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3196,7 +3669,10 @@ def prepare_request(next_link=None): ) _next_request_params["api-version"] = 
self._config.api_version _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + "GET", + urllib.parse.urljoin(next_link, _parsed_next_link.path), + params=_next_request_params, + headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, ) path_format_arguments = { "endpoint": self._serialize.url( @@ -3210,7 +3686,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Index], + List[_models.EvaluatorVersion], deserialized.get("value", []), ) if cls: @@ -3235,18 +3711,21 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: - """Get the specific version of the Index. The service returns 404 Not Found error if the Index - does not exist. + async def get_version(self, name: str, version: str, **kwargs: Any) -> _models.EvaluatorVersion: + """Get the specific version of the EvaluatorVersion. The service returns 404 Not Found error if + the EvaluatorVersion does not exist. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to retrieve. Required. + :param version: The specific version id of the EvaluatorVersion to retrieve. Required. :type version: str - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -3258,11 +3737,12 @@ async def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Index] = kwargs.pop("cls", None) + cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) - _request = build_indexes_get_request( + _request = build_beta_evaluators_get_version_request( name=name, version=version, + foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3292,7 +3772,7 @@ async def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Index, response.json()) + deserialized = _deserialize(_models.EvaluatorVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3300,18 +3780,21 @@ async def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: return deserialized # type: ignore @distributed_trace_async - async def delete(self, name: str, version: str, **kwargs: Any) -> None: - """Delete the specific version of the Index. The service returns 204 No Content if the Index was - deleted successfully or if the Index does not exist. + async def delete_version(self, name: str, version: str, **kwargs: Any) -> None: + """Delete the specific version of the EvaluatorVersion. The service returns 204 No Content if the + EvaluatorVersion was deleted successfully or if the EvaluatorVersion does not exist. 
:param name: The name of the resource. Required. :type name: str - :param version: The version of the Index to delete. Required. + :param version: The version of the EvaluatorVersion to delete. Required. :type version: str :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -3325,9 +3808,10 @@ async def delete(self, name: str, version: str, **kwargs: Any) -> None: cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexes_delete_request( + _request = build_beta_evaluators_delete_version_request( name=name, version=version, + foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3352,94 +3836,82 @@ async def delete(self, name: str, version: str, **kwargs: Any) -> None: return cls(pipeline_response, None, {}) # type: ignore @overload - async def create_or_update( + async def create_version( self, name: str, - version: str, - index: _models.Index, + evaluator_version: _models.EvaluatorVersion, *, - content_type: str = "application/merge-patch+json", + content_type: str = "application/json", **kwargs: Any - ) -> _models.Index: - """Create a new or update an existing Index with the given version id. + ) -> _models.EvaluatorVersion: + """Create a new EvaluatorVersion with auto incremented version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to create or update. Required. - :type version: str - :param index: The Index to create or update. Required. - :type index: ~azure.ai.projects.models.Index + :param evaluator_version: Required. + :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_or_update( - self, name: str, version: str, index: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Index: - """Create a new or update an existing Index with the given version id. + async def create_version( + self, name: str, evaluator_version: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluatorVersion: + """Create a new EvaluatorVersion with auto incremented version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to create or update. Required. - :type version: str - :param index: The Index to create or update. Required. - :type index: JSON + :param evaluator_version: Required. + :type evaluator_version: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_or_update( - self, - name: str, - version: str, - index: IO[bytes], - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.Index: - """Create a new or update an existing Index with the given version id. + async def create_version( + self, name: str, evaluator_version: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluatorVersion: + """Create a new EvaluatorVersion with auto incremented version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to create or update. Required. - :type version: str - :param index: The Index to create or update. Required. - :type index: IO[bytes] + :param evaluator_version: Required. + :type evaluator_version: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def create_or_update( - self, name: str, version: str, index: Union[_models.Index, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Index: - """Create a new or update an existing Index with the given version id. + async def create_version( + self, name: str, evaluator_version: Union[_models.EvaluatorVersion, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluatorVersion: + """Create a new EvaluatorVersion with auto incremented version id. 
:param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to create or update. Required. - :type version: str - :param index: The Index to create or update. Is one of the following types: Index, JSON, - IO[bytes] Required. - :type index: ~azure.ai.projects.models.Index or JSON or IO[bytes] - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :param evaluator_version: Is one of the following types: EvaluatorVersion, JSON, IO[bytes] + Required. + :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion or JSON or IO[bytes] + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -3452,18 +3924,18 @@ async def create_or_update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Index] = kwargs.pop("cls", None) + cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) - content_type = content_type or "application/merge-patch+json" + content_type = content_type or "application/json" _content = None - if isinstance(index, (IOBase, bytes)): - _content = index + if isinstance(evaluator_version, (IOBase, bytes)): + _content = evaluator_version else: - _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(evaluator_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexes_create_or_update_request( + _request = build_beta_evaluators_create_version_request( name=name, - version=version, + 
foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -3483,7 +3955,7 @@ async def create_or_update( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: await response.read() # Load the body in memory and close the socket @@ -3495,39 +3967,104 @@ async def create_or_update( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Index, response.json()) + deserialized = _deserialize(_models.EvaluatorVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + @overload + async def update_version( + self, + name: str, + version: str, + evaluator_version: _models.EvaluatorVersion, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EvaluatorVersion: + """Update an existing EvaluatorVersion with the given version id. -class BetaEvaluationTaxonomiesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + :param name: The name of the resource. Required. + :type name: str + :param version: The version of the EvaluatorVersion to update. Required. + :type version: str + :param evaluator_version: Evaluator resource. Required. + :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion + :raises ~azure.core.exceptions.HttpResponseError: + """ - Instead, you should access the following operations through - :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`evaluation_taxonomies` attribute. 
- """ + @overload + async def update_version( + self, name: str, version: str, evaluator_version: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluatorVersion: + """Update an existing EvaluatorVersion with the given version id. - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + :param name: The name of the resource. Required. + :type name: str + :param version: The version of the EvaluatorVersion to update. Required. + :type version: str + :param evaluator_version: Evaluator resource. Required. + :type evaluator_version: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_version( + self, + name: str, + version: str, + evaluator_version: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EvaluatorVersion: + """Update an existing EvaluatorVersion with the given version id. + + :param name: The name of the resource. Required. + :type name: str + :param version: The version of the EvaluatorVersion to update. Required. + :type version: str + :param evaluator_version: Evaluator resource. Required. + :type evaluator_version: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace_async - async def get(self, name: str, **kwargs: Any) -> _models.EvaluationTaxonomy: - """Get an evaluation run by name. + async def update_version( + self, + name: str, + version: str, + evaluator_version: Union[_models.EvaluatorVersion, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.EvaluatorVersion: + """Update an existing EvaluatorVersion with the given version id. :param name: The name of the resource. Required. :type name: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The version of the EvaluatorVersion to update. Required. + :type version: str + :param evaluator_version: Evaluator resource. Is one of the following types: EvaluatorVersion, + JSON, IO[bytes] Required. + :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion or JSON or IO[bytes] + :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( @@ -3541,15 +4078,26 @@ async def get(self, name: str, **kwargs: Any) -> _models.EvaluationTaxonomy: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) - _request = build_beta_evaluation_taxonomies_get_request( + content_type = content_type or "application/json" + _content = None + if isinstance(evaluator_version, (IOBase, bytes)): + _content = evaluator_version + else: + _content = json.dumps(evaluator_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_evaluators_update_version_request( name=name, + version=version, foundry_features=_foundry_features, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -3578,232 +4126,111 @@ async def get(self, name: str, **kwargs: Any) -> _models.EvaluationTaxonomy: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) + deserialized = _deserialize(_models.EvaluatorVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @distributed_trace - def list( - self, *, input_name: Optional[str] = None, input_type: Optional[str] = None, **kwargs: Any - ) -> AsyncItemPaged["_models.EvaluationTaxonomy"]: - """List 
evaluation taxonomies. - - :keyword input_name: Filter by the evaluation input name. Default value is None. - :paramtype input_name: str - :keyword input_type: Filter by taxonomy input type. Default value is None. - :paramtype input_type: str - :return: An iterator like instance of EvaluationTaxonomy - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluationTaxonomy] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW - ) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[List[_models.EvaluationTaxonomy]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_beta_evaluation_taxonomies_list_request( - foundry_features=_foundry_features, - input_name=input_name, - input_type=input_type, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", - urllib.parse.urljoin(next_link, _parsed_next_link.path), - params=_next_request_params, - 
headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - async def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.EvaluationTaxonomy], - deserialized.get("value", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - @distributed_trace_async - async def delete(self, name: str, **kwargs: Any) -> None: - """Delete an evaluation taxonomy by name. + @overload + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: _models.PendingUploadRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. :param name: The name of the resource. Required. 
:type name: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[None] = kwargs.pop("cls", None) - - _request = build_beta_evaluation_taxonomies_delete_request( - name=name, - foundry_features=_foundry_features, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @overload - async def create( - self, name: str, body: _models.EvaluationTaxonomy, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Create an evaluation taxonomy. - - :param name: The name of the evaluation taxonomy. Required. - :type name: str - :param body: The evaluation taxonomy. Required. - :type body: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The specific version id of the EvaluatorVersion to operate on. Required. 
+ :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Create an evaluation taxonomy. + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: JSON + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Create an evaluation taxonomy. + async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: IO[bytes] + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def create( - self, name: str, body: Union[_models.EvaluationTaxonomy, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Create an evaluation taxonomy. 
+ async def pending_upload( + self, + name: str, + version: str, + pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Is one of the following types: EvaluationTaxonomy, JSON, - IO[bytes] Required. - :type body: ~azure.ai.projects.models.EvaluationTaxonomy or JSON or IO[bytes] - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Is one of the following + types: PendingUploadRequest, JSON, IO[bytes] Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or + IO[bytes] + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( @@ -3821,17 +4248,18 @@ async def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) + cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(pending_upload_request, (IOBase, bytes)): + _content = pending_upload_request else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluation_taxonomies_create_request( + _request = build_beta_evaluators_pending_upload_request( name=name, + version=version, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -3852,7 +4280,7 @@ async def create( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket @@ -3864,7 +4292,7 @@ async def create( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) + deserialized = _deserialize(_models.PendingUploadResponse, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3872,72 +4300,103 @@ async def create( return deserialized # type: ignore @overload - async def update( - self, name: str, body: 
_models.EvaluationTaxonomy, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Update an evaluation taxonomy. + async def get_credentials( + self, + name: str, + version: str, + credential_request: _models.EvaluatorCredentialRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def update( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Update an evaluation taxonomy. + async def get_credentials( + self, + name: str, + version: str, + credential_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. - :param name: The name of the evaluation taxonomy. 
Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: JSON + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def update( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Update an evaluation taxonomy. + async def get_credentials( + self, + name: str, + version: str, + credential_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: IO[bytes] + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. 
The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def update( - self, name: str, body: Union[_models.EvaluationTaxonomy, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Update an evaluation taxonomy. + async def get_credentials( + self, + name: str, + version: str, + credential_request: Union[_models.EvaluatorCredentialRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Is one of the following types: EvaluationTaxonomy, JSON, - IO[bytes] Required. - :type body: ~azure.ai.projects.models.EvaluationTaxonomy or JSON or IO[bytes] - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Is one of the following types: + EvaluatorCredentialRequest, JSON, IO[bytes] Required. + :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest or JSON or + IO[bytes] + :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( @@ -3955,17 +4414,18 @@ async def update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) + cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(credential_request, (IOBase, bytes)): + _content = credential_request else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(credential_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluation_taxonomies_update_request( + _request = build_beta_evaluators_get_credentials_request( name=name, + version=version, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -3998,7 +4458,7 @@ async def update( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) + deserialized = _deserialize(_models.DatasetCredential, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4006,14 +4466,14 @@ async def update( return deserialized # type: ignore -class BetaEvaluatorsOperations: +class BetaInsightsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`evaluators` attribute. + :attr:`insights` attribute. 
""" def __init__(self, *args, **kwargs) -> None: @@ -4023,38 +4483,71 @@ def __init__(self, *args, **kwargs) -> None: self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - @distributed_trace - def list_versions( - self, - name: str, - *, - type: Optional[Union[Literal["builtin"], Literal["custom"], Literal["all"], str]] = None, - limit: Optional[int] = None, - **kwargs: Any - ) -> AsyncItemPaged["_models.EvaluatorVersion"]: - """List all versions of the given evaluator. + @overload + async def generate( + self, insight: _models.Insight, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Insight: + """Generate Insights. - :param name: The name of the resource. Required. - :type name: str - :keyword type: Filter evaluators by type. Possible values: 'all', 'custom', 'builtin'. Is one - of the following types: Literal["builtin"], Literal["custom"], Literal["all"], str Default - value is None. - :paramtype type: str or str or str or str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :return: An iterator like instance of EvaluatorVersion - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluatorVersion] + :param insight: Complete evaluation configuration including data source, evaluators, and result + settings. Required. + :type insight: ~azure.ai.projects.models.Insight + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Insight. 
The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW - ) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.EvaluatorVersion]] = kwargs.pop("cls", None) + @overload + async def generate( + self, insight: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Insight: + """Generate Insights. + + :param insight: Complete evaluation configuration including data source, evaluators, and result + settings. Required. + :type insight: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Insight. The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def generate( + self, insight: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Insight: + """Generate Insights. + + :param insight: Complete evaluation configuration including data source, evaluators, and result + settings. Required. + :type insight: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Insight. The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def generate(self, insight: Union[_models.Insight, JSON, IO[bytes]], **kwargs: Any) -> _models.Insight: + """Generate Insights. 
+ :param insight: Complete evaluation configuration including data source, evaluators, and result + settings. Is one of the following types: Insight, JSON, IO[bytes] Required. + :type insight: ~azure.ai.projects.models.Insight or JSON or IO[bytes] + :return: Insight. The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -4063,105 +4556,166 @@ def list_versions( } error_map.update(kwargs.pop("error_map", {}) or {}) - def prepare_request(next_link=None): - if not next_link: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - _request = build_beta_evaluators_list_versions_request( - name=name, - foundry_features=_foundry_features, - type=type, - limit=limit, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Insight] = kwargs.pop("cls", None) - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", - urllib.parse.urljoin(next_link, _parsed_next_link.path), - params=_next_request_params, - 
headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + content_type = content_type or "application/json" + _content = None + if isinstance(insight, (IOBase, bytes)): + _content = insight + else: + _content = json.dumps(insight, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - return _request + _request = build_beta_insights_generate_request( + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - async def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.EvaluatorVersion], - deserialized.get("value", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - async def get_next(next_link=None): - _request = prepare_request(next_link) + response = pipeline_response.http_response - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response + if response.status_code not in [201]: + if _stream: + try: + await 
response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.Insight, response.json()) - return pipeline_response + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - return AsyncItemPaged(get_next, extract_data) + return deserialized # type: ignore + + @distributed_trace_async + async def get( + self, insight_id: str, *, include_coordinates: Optional[bool] = None, **kwargs: Any + ) -> _models.Insight: + """Get a specific insight by Id. + + :param insight_id: The unique identifier for the insights report. Required. + :type insight_id: str + :keyword include_coordinates: Whether to include coordinates for visualization in the response. + Defaults to false. Default value is None. + :paramtype include_coordinates: bool + :return: Insight. 
The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Insight] = kwargs.pop("cls", None) + + _request = build_beta_insights_get_request( + insight_id=insight_id, + foundry_features=_foundry_features, + include_coordinates=include_coordinates, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.Insight, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore 
@distributed_trace def list( self, *, - type: Optional[Union[Literal["builtin"], Literal["custom"], Literal["all"], str]] = None, - limit: Optional[int] = None, + type: Optional[Union[str, _models.InsightType]] = None, + eval_id: Optional[str] = None, + run_id: Optional[str] = None, + agent_name: Optional[str] = None, + include_coordinates: Optional[bool] = None, **kwargs: Any - ) -> AsyncItemPaged["_models.EvaluatorVersion"]: - """List the latest version of each evaluator. + ) -> AsyncItemPaged["_models.Insight"]: + """List all insights in reverse chronological order (newest first). - :keyword type: Filter evaluators by type. Possible values: 'all', 'custom', 'builtin'. Is one - of the following types: Literal["builtin"], Literal["custom"], Literal["all"], str Default - value is None. - :paramtype type: str or str or str or str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :return: An iterator like instance of EvaluatorVersion - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluatorVersion] + :keyword type: Filter by the type of analysis. Known values are: "EvaluationRunClusterInsight", + "AgentClusterInsight", and "EvaluationComparison". Default value is None. + :paramtype type: str or ~azure.ai.projects.models.InsightType + :keyword eval_id: Filter by the evaluation ID. Default value is None. + :paramtype eval_id: str + :keyword run_id: Filter by the evaluation run ID. Default value is None. + :paramtype run_id: str + :keyword agent_name: Filter by the agent name. Default value is None. + :paramtype agent_name: str + :keyword include_coordinates: Whether to include coordinates for visualization in the response. + Defaults to false. Default value is None. 
+ :paramtype include_coordinates: bool + :return: An iterator like instance of Insight + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Insight] :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.EvaluatorVersion]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Insight]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4174,10 +4728,13 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_beta_evaluators_list_request( + _request = build_beta_insights_list_request( foundry_features=_foundry_features, type=type, - limit=limit, + eval_id=eval_id, + run_id=run_id, + agent_name=agent_name, + include_coordinates=include_coordinates, api_version=self._config.api_version, headers=_headers, params=_params, @@ -4217,7 +4774,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.EvaluatorVersion], + List[_models.Insight], deserialized.get("value", []), ) if cls: @@ -4241,90 +4798,116 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) - @distributed_trace_async - async def get_version(self, name: str, version: str, **kwargs: Any) -> _models.EvaluatorVersion: - """Get the specific version of the EvaluatorVersion. The service returns 404 Not Found error if - the EvaluatorVersion does not exist. - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to retrieve. 
Required. - :type version: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) +class BetaMemoryStoresOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`memory_stores` attribute. + """ - cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - _request = build_beta_evaluators_get_version_request( - name=name, - version=version, - foundry_features=_foundry_features, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + @overload + async def create( + self, + *, + name: str, + definition: _models.MemoryStoreDefinition, + 
content_type: str = "application/json", + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Create a memory store. - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + :keyword name: The name of the memory store. Required. + :paramtype name: str + :keyword definition: The memory store definition. Required. + :paramtype definition: ~azure.ai.projects.models.MemoryStoreDefinition + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword description: A human-readable description of the memory store. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default + value is None. + :paramtype metadata: dict[str, str] + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ - response = pipeline_response.http_response + @overload + async def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Create a memory store. 
- if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.EvaluatorVersion, response.json()) + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + @overload + async def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Create a memory store. - return deserialized # type: ignore + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace_async - async def delete_version(self, name: str, version: str, **kwargs: Any) -> None: - """Delete the specific version of the EvaluatorVersion. The service returns 204 No Content if the - EvaluatorVersion was deleted successfully or if the EvaluatorVersion does not exist. 
+ async def create( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + definition: _models.MemoryStoreDefinition = _Unset, + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Create a memory store. - :param name: The name of the resource. Required. - :type name: str - :param version: The version of the EvaluatorVersion to delete. Required. - :type version: str - :return: None - :rtype: None + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: The name of the memory store. Required. + :paramtype name: str + :keyword definition: The memory store definition. Required. + :paramtype definition: ~azure.ai.projects.models.MemoryStoreDefinition + :keyword description: A human-readable description of the memory store. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default + value is None. + :paramtype metadata: dict[str, str] + :return: MemoryStoreDetails. 
The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4334,16 +4917,31 @@ async def delete_version(self, name: str, version: str, **kwargs: Any) -> None: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[None] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) - _request = build_beta_evaluators_delete_version_request( - name=name, - version=version, + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + if definition is _Unset: + raise TypeError("missing required argument: definition") + body = {"definition": definition, "description": description, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_memory_stores_create_request( foundry_features=_foundry_features, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -4352,96 +4950,127 @@ async def delete_version(self, name: str, version: str, **kwargs: Any) -> None: 
} _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = False + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) if cls: - return cls(pipeline_response, None, {}) # type: ignore + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore @overload - async def create_version( + async def update( self, name: str, - evaluator_version: _models.EvaluatorVersion, *, content_type: str = "application/json", + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, **kwargs: Any - ) -> _models.EvaluatorVersion: - """Create a new EvaluatorVersion with auto incremented version id. + ) -> _models.MemoryStoreDetails: + """Update a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to update. Required. :type name: str - :param evaluator_version: Required. - :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
Default value is "application/json". :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :keyword description: A human-readable description of the memory store. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default + value is None. + :paramtype metadata: dict[str, str] + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_version( - self, name: str, evaluator_version: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluatorVersion: - """Create a new EvaluatorVersion with auto incremented version id. + async def update( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Update a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to update. Required. :type name: str - :param evaluator_version: Required. - :type evaluator_version: JSON + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :return: MemoryStoreDetails. 
The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_version( - self, name: str, evaluator_version: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluatorVersion: - """Create a new EvaluatorVersion with auto incremented version id. + async def update( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Update a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to update. Required. :type name: str - :param evaluator_version: Required. - :type evaluator_version: IO[bytes] + :param body: Required. + :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def create_version( - self, name: str, evaluator_version: Union[_models.EvaluatorVersion, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluatorVersion: - """Create a new EvaluatorVersion with auto incremented version id. + async def update( + self, + name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Update a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to update. Required. 
:type name: str - :param evaluator_version: Is one of the following types: EvaluatorVersion, JSON, IO[bytes] - Required. - :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion or JSON or IO[bytes] - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword description: A human-readable description of the memory store. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default + value is None. + :paramtype metadata: dict[str, str] + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4455,16 +5084,19 @@ async def create_version( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) + cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) + if body is _Unset: + body = {"description": description, "metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(evaluator_version, (IOBase, bytes)): - _content = evaluator_version + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(evaluator_version, cls=SdkJSONEncoder, 
exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluators_create_version_request( + _request = build_beta_memory_stores_update_request( name=name, foundry_features=_foundry_features, content_type=content_type, @@ -4486,120 +5118,204 @@ async def create_version( response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluatorVersion, response.json()) + deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - async def update_version( - self, - name: str, - version: str, - evaluator_version: _models.EvaluatorVersion, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.EvaluatorVersion: - """Update an existing EvaluatorVersion with the given version id. + @distributed_trace_async + async def get(self, name: str, **kwargs: Any) -> _models.MemoryStoreDetails: + """Retrieve a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to retrieve. Required. :type name: str - :param version: The version of the EvaluatorVersion to update. Required. - :type version: str - :param evaluator_version: Evaluator resource. Required. 
- :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - async def update_version( - self, name: str, version: str, evaluator_version: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluatorVersion: - """Update an existing EvaluatorVersion with the given version id. + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - :param name: The name of the resource. Required. - :type name: str - :param version: The version of the EvaluatorVersion to update. Required. - :type version: str - :param evaluator_version: Evaluator resource. Required. - :type evaluator_version: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion - :raises ~azure.core.exceptions.HttpResponseError: - """ + cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) - @overload - async def update_version( + _request = build_beta_memory_stores_get_request( + name=name, + foundry_features=_foundry_features, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( self, - name: str, - version: str, - evaluator_version: IO[bytes], *, - content_type: str = "application/json", + limit: Optional[int] = None, + order: Optional[Union[str, _models.PageOrder]] = None, + before: Optional[str] = None, **kwargs: Any - ) -> _models.EvaluatorVersion: - """Update an 
existing EvaluatorVersion with the given version id. + ) -> AsyncItemPaged["_models.MemoryStoreDetails"]: + """List all memory stores. - :param name: The name of the resource. Required. - :type name: str - :param version: The version of the EvaluatorVersion to update. Required. - :type version: str - :param evaluator_version: Evaluator resource. Required. - :type evaluator_version: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the + default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for + ascending order and``desc`` + for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.PageOrder + :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your + place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + Default value is None. 
+ :paramtype before: str + :return: An iterator like instance of MemoryStoreDetails + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.MemoryStoreDetails] :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + ) + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.MemoryStoreDetails]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(_continuation_token=None): + + _request = build_beta_memory_stores_list_request( + foundry_features=_foundry_features, + limit=limit, + order=order, + after=_continuation_token, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.MemoryStoreDetails], + deserialized.get("data", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("last_id") or None, AsyncList(list_of_elem) + + async def get_next(_continuation_token=None): + _request = prepare_request(_continuation_token) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def update_version( - self, - name: str, - version: str, - evaluator_version: Union[_models.EvaluatorVersion, JSON, IO[bytes]], - **kwargs: Any - ) -> _models.EvaluatorVersion: - """Update an existing EvaluatorVersion with the given version id. + async def delete(self, name: str, **kwargs: Any) -> _models.DeleteMemoryStoreResult: + """Delete a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to delete. Required. :type name: str - :param version: The version of the EvaluatorVersion to update. Required. - :type version: str - :param evaluator_version: Evaluator resource. Is one of the following types: EvaluatorVersion, - JSON, IO[bytes] Required. - :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion or JSON or IO[bytes] - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :return: DeleteMemoryStoreResult. 
The DeleteMemoryStoreResult is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DeleteMemoryStoreResult :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4609,26 +5325,15 @@ async def update_version( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(evaluator_version, (IOBase, bytes)): - _content = evaluator_version - else: - _content = json.dumps(evaluator_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.DeleteMemoryStoreResult] = kwargs.pop("cls", None) - _request = build_beta_evaluators_update_version_request( + _request = build_beta_memory_stores_delete_request( name=name, - version=version, foundry_features=_foundry_features, - content_type=content_type, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -4652,12 +5357,16 @@ async def update_version( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if 
_decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluatorVersion, response.json()) + deserialized = _deserialize(_models.DeleteMemoryStoreResult, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4665,107 +5374,60 @@ async def update_version( return deserialized # type: ignore @overload - async def pending_upload( + async def _search_memories( self, name: str, - version: str, - pending_upload_request: _models.PendingUploadRequest, *, + scope: str, content_type: str = "application/json", + items: Optional[List[dict[str, Any]]] = None, + previous_search_id: Optional[str] = None, + options: Optional[_models.MemorySearchOptions] = None, **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of an evaluator for a specific version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def pending_upload( - self, - name: str, - version: str, - pending_upload_request: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of an evaluator for a specific version. - - :param name: The name of the resource. Required. 
- :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def pending_upload( - self, - name: str, - version: str, - pending_upload_request: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of an evaluator for a specific version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ + ) -> _models.MemoryStoreSearchResult: ... + @overload + async def _search_memories( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreSearchResult: ... 
+ @overload + async def _search_memories( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreSearchResult: ... @distributed_trace_async - async def pending_upload( + async def _search_memories( self, name: str, - version: str, - pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], + body: Union[JSON, IO[bytes]] = _Unset, + *, + scope: str = _Unset, + items: Optional[List[dict[str, Any]]] = None, + previous_search_id: Optional[str] = None, + options: Optional[_models.MemorySearchOptions] = None, **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of an evaluator for a specific version. + ) -> _models.MemoryStoreSearchResult: + """Search for relevant memories from a memory store based on conversation context. - :param name: The name of the resource. Required. + :param name: The name of the memory store to search. Required. :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Is one of the following - types: PendingUploadRequest, JSON, IO[bytes] Required. - :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or - IO[bytes] - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword scope: The namespace that logically groups and isolates memories, such as a user ID. + Required. + :paramtype scope: str + :keyword items: Items for which to search for relevant memories. Default value is None. 
+ :paramtype items: list[dict[str, any]] + :keyword previous_search_id: The unique ID of the previous search request, enabling incremental + memory search from where the last operation left off. Default value is None. + :paramtype previous_search_id: str + :keyword options: Memory search options. Default value is None. + :paramtype options: ~azure.ai.projects.models.MemorySearchOptions + :return: MemoryStoreSearchResult. The MemoryStoreSearchResult is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreSearchResult :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4779,18 +5441,27 @@ async def pending_upload( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.MemoryStoreSearchResult] = kwargs.pop("cls", None) + if body is _Unset: + if scope is _Unset: + raise TypeError("missing required argument: scope") + body = { + "items": items, + "options": options, + "previous_search_id": previous_search_id, + "scope": scope, + } + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(pending_upload_request, (IOBase, bytes)): - _content = pending_upload_request + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = 
build_beta_evaluators_pending_upload_request( + _request = build_beta_memory_stores_search_memories_request( name=name, - version=version, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -4818,120 +5489,35 @@ async def pending_upload( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.PendingUploadResponse, response.json()) + deserialized = _deserialize(_models.MemoryStoreSearchResult, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - async def get_credentials( - self, - name: str, - version: str, - credential_request: _models.EvaluatorCredentialRequest, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with an Evaluator version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param credential_request: The credential request parameters. Required. - :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def get_credentials( - self, - name: str, - version: str, - credential_request: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with an Evaluator version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param credential_request: The credential request parameters. Required. - :type credential_request: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def get_credentials( + async def _update_memories_initial( self, name: str, - version: str, - credential_request: IO[bytes], + body: Union[JSON, IO[bytes]] = _Unset, *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with an Evaluator version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param credential_request: The credential request parameters. Required. - :type credential_request: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def get_credentials( - self, - name: str, - version: str, - credential_request: Union[_models.EvaluatorCredentialRequest, JSON, IO[bytes]], + scope: str = _Unset, + items: Optional[List[dict[str, Any]]] = None, + previous_update_id: Optional[str] = None, + update_delay: Optional[int] = None, **kwargs: Any - ) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with an Evaluator version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param credential_request: The credential request parameters. Is one of the following types: - EvaluatorCredentialRequest, JSON, IO[bytes] Required. - :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest or JSON or - IO[bytes] - :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) -> AsyncIterator[bytes]: + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4945,18 +5531,27 @@ async def get_credentials( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + if body is _Unset: + if scope is _Unset: + raise TypeError("missing required argument: scope") + body = { + "items": items, + "previous_update_id": previous_update_id, + "scope": scope, + "update_delay": update_delay, + } + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(credential_request, (IOBase, bytes)): - _content = credential_request + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(credential_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluators_get_credentials_request( + _request = build_beta_memory_stores_update_memories_request( name=name, - version=version, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -4970,114 +5565,237 @@ async def get_credentials( _request.url = self._client.format_url(_request.url, **path_format_arguments) _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", 
False) + _stream = True pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.DatasetCredential, response.json()) + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() if _decompress else response.iter_raw() if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore + @overload + async def _begin_update_memories( + self, + name: str, + *, + scope: str, + content_type: str = "application/json", + items: Optional[List[dict[str, Any]]] = None, + previous_update_id: Optional[str] = None, + update_delay: Optional[int] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]: ... + @overload + async def _begin_update_memories( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]: ... 
+ @overload + async def _begin_update_memories( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]: ... -class BetaInsightsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + @distributed_trace_async + async def _begin_update_memories( + self, + name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + scope: str = _Unset, + items: Optional[List[dict[str, Any]]] = None, + previous_update_id: Optional[str] = None, + update_delay: Optional[int] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]: + """Update memory store with conversation memories. - Instead, you should access the following operations through - :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`insights` attribute. - """ + :param name: The name of the memory store to update. Required. + :type name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword scope: The namespace that logically groups and isolates memories, such as a user ID. + Required. + :paramtype scope: str + :keyword items: Conversation items to be stored in memory. Default value is None. + :paramtype items: list[dict[str, any]] + :keyword previous_update_id: The unique ID of the previous update request, enabling incremental + memory updates from where the last operation left off. Default value is None. + :paramtype previous_update_id: str + :keyword update_delay: Timeout period before processing the memory update in seconds. + If a new update request is received during this period, it will cancel the current request and + reset the timeout. + Set to 0 to immediately trigger the update without delay. + Defaults to 300 (5 minutes). Default value is None. + :paramtype update_delay: int + :return: An instance of AsyncLROPoller that returns MemoryStoreUpdateCompletedResult. 
The + MemoryStoreUpdateCompletedResult is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.ai.projects.models.MemoryStoreUpdateCompletedResult] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + ) + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.MemoryStoreUpdateCompletedResult] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_memories_initial( + name=name, + body=body, + foundry_features=_foundry_features, + scope=scope, + items=items, + previous_update_id=previous_update_id, + update_delay=update_delay, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Operation-Location"] = self._deserialize( + "str", 
response.headers.get("Operation-Location") + ) + + deserialized = _deserialize(_models.MemoryStoreUpdateCompletedResult, response.json().get("result", {})) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) @overload - async def generate( - self, insight: _models.Insight, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Insight: - """Generate Insights. + async def delete_scope( + self, name: str, *, scope: str, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDeleteScopeResult: + """Delete all memories associated with a specific scope from a memory store. - :param insight: Complete evaluation configuration including data source, evaluators, and result - settings. Required. - :type insight: ~azure.ai.projects.models.Insight + :param name: The name of the memory store. Required. + :type name: str + :keyword scope: The namespace that logically groups and isolates memories to delete, such as a + user ID. Required. + :paramtype scope: str :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def generate( - self, insight: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Insight: - """Generate Insights. + async def delete_scope( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDeleteScopeResult: + """Delete all memories associated with a specific scope from a memory store. - :param insight: Complete evaluation configuration including data source, evaluators, and result - settings. Required. - :type insight: JSON + :param name: The name of the memory store. Required. + :type name: str + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def generate( - self, insight: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Insight: - """Generate Insights. 
+ async def delete_scope( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDeleteScopeResult: + """Delete all memories associated with a specific scope from a memory store. - :param insight: Complete evaluation configuration including data source, evaluators, and result - settings. Required. - :type insight: IO[bytes] + :param name: The name of the memory store. Required. + :type name: str + :param body: Required. + :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def generate(self, insight: Union[_models.Insight, JSON, IO[bytes]], **kwargs: Any) -> _models.Insight: - """Generate Insights. + async def delete_scope( + self, name: str, body: Union[JSON, IO[bytes]] = _Unset, *, scope: str = _Unset, **kwargs: Any + ) -> _models.MemoryStoreDeleteScopeResult: + """Delete all memories associated with a specific scope from a memory store. - :param insight: Complete evaluation configuration including data source, evaluators, and result - settings. Is one of the following types: Insight, JSON, IO[bytes] Required. - :type insight: ~azure.ai.projects.models.Insight or JSON or IO[bytes] - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :param name: The name of the memory store. Required. + :type name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :keyword scope: The namespace that logically groups and isolates memories to delete, such as a + user ID. Required. + :paramtype scope: str + :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -5091,16 +5809,22 @@ async def generate(self, insight: Union[_models.Insight, JSON, IO[bytes]], **kwa _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Insight] = kwargs.pop("cls", None) + cls: ClsType[_models.MemoryStoreDeleteScopeResult] = kwargs.pop("cls", None) + if body is _Unset: + if scope is _Unset: + raise TypeError("missing required argument: scope") + body = {"scope": scope} + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(insight, (IOBase, bytes)): - _content = insight + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(insight, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_insights_generate_request( + _request = build_beta_memory_stores_delete_scope_request( + name=name, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -5121,42 +5845,59 @@ async def generate(self, insight: Union[_models.Insight, JSON, IO[bytes]], **kwa 
response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Insight, response.json()) + deserialized = _deserialize(_models.MemoryStoreDeleteScopeResult, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + +class BetaRedTeamsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`red_teams` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @distributed_trace_async - async def get( - self, insight_id: str, *, include_coordinates: Optional[bool] = None, **kwargs: Any - ) -> _models.Insight: - """Get a specific insight by Id. + async def get(self, name: str, **kwargs: Any) -> _models.RedTeam: + """Get a redteam by name. - :param insight_id: The unique identifier for the insights report. Required. 
- :type insight_id: str - :keyword include_coordinates: Whether to include coordinates for visualization in the response. - Defaults to false. Default value is None. - :paramtype include_coordinates: bool - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :param name: Identifier of the red team run. Required. + :type name: str + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -5169,12 +5910,11 @@ async def get( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Insight] = kwargs.pop("cls", None) + cls: ClsType[_models.RedTeam] = kwargs.pop("cls", None) - _request = build_beta_insights_get_request( - insight_id=insight_id, + _request = build_beta_red_teams_get_request( + name=name, foundry_features=_foundry_features, - include_coordinates=include_coordinates, api_version=self._config.api_version, headers=_headers, params=_params, @@ -5204,7 +5944,7 @@ async def get( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Insight, response.json()) + deserialized = _deserialize(_models.RedTeam, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -5212,41 +5952,20 @@ async def get( return deserialized # type: ignore @distributed_trace - def list( - self, - *, - type: Optional[Union[str, _models.InsightType]] = None, - eval_id: Optional[str] = None, - run_id: Optional[str] = None, - agent_name: Optional[str] = 
None, - include_coordinates: Optional[bool] = None, - **kwargs: Any - ) -> AsyncItemPaged["_models.Insight"]: - """List all insights in reverse chronological order (newest first). + def list(self, **kwargs: Any) -> AsyncItemPaged["_models.RedTeam"]: + """List a redteam by name. - :keyword type: Filter by the type of analysis. Known values are: "EvaluationRunClusterInsight", - "AgentClusterInsight", and "EvaluationComparison". Default value is None. - :paramtype type: str or ~azure.ai.projects.models.InsightType - :keyword eval_id: Filter by the evaluation ID. Default value is None. - :paramtype eval_id: str - :keyword run_id: Filter by the evaluation run ID. Default value is None. - :paramtype run_id: str - :keyword agent_name: Filter by the agent name. Default value is None. - :paramtype agent_name: str - :keyword include_coordinates: Whether to include coordinates for visualization in the response. - Defaults to false. Default value is None. - :paramtype include_coordinates: bool - :return: An iterator like instance of Insight - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Insight] + :return: An iterator like instance of RedTeam + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.RedTeam] :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Insight]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.RedTeam]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -5259,13 +5978,8 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_beta_insights_list_request( + _request = 
build_beta_red_teams_list_request( foundry_features=_foundry_features, - type=type, - eval_id=eval_id, - run_id=run_id, - agent_name=agent_name, - include_coordinates=include_coordinates, api_version=self._config.api_version, headers=_headers, params=_params, @@ -5305,7 +6019,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Insight], + List[_models.RedTeam], deserialized.get("value", []), ) if cls: @@ -5329,116 +6043,65 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) - -class BetaMemoryStoresOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`memory_stores` attribute. - """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - @overload async def create( - self, - *, - name: str, - definition: _models.MemoryStoreDefinition, - content_type: str = "application/json", - description: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, - **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Create a memory store. + self, red_team: _models.RedTeam, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.RedTeam: + """Creates a redteam run. - :keyword name: The name of the memory store. Required. - :paramtype name: str - :keyword definition: The memory store definition. Required. 
- :paramtype definition: ~azure.ai.projects.models.MemoryStoreDefinition + :param red_team: Redteam to be run. Required. + :type red_team: ~azure.ai.projects.models.RedTeam :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword description: A human-readable description of the memory store. Default value is None. - :paramtype description: str - :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default - value is None. - :paramtype metadata: dict[str, str] - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Create a memory store. + async def create(self, red_team: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.RedTeam: + """Creates a redteam run. - :param body: Required. - :type body: JSON + :param red_team: Redteam to be run. Required. + :type red_team: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Create a memory store. 
+ self, red_team: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.RedTeam: + """Creates a redteam run. - :param body: Required. - :type body: IO[bytes] + :param red_team: Redteam to be run. Required. + :type red_team: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def create( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - name: str = _Unset, - definition: _models.MemoryStoreDefinition = _Unset, - description: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, - **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Create a memory store. + async def create(self, red_team: Union[_models.RedTeam, JSON, IO[bytes]], **kwargs: Any) -> _models.RedTeam: + """Creates a redteam run. - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The name of the memory store. Required. - :paramtype name: str - :keyword definition: The memory store definition. Required. - :paramtype definition: ~azure.ai.projects.models.MemoryStoreDefinition - :keyword description: A human-readable description of the memory store. Default value is None. - :paramtype description: str - :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default - value is None. - :paramtype metadata: dict[str, str] - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :param red_team: Redteam to be run. 
Is one of the following types: RedTeam, JSON, IO[bytes] + Required. + :type red_team: ~azure.ai.projects.models.RedTeam or JSON or IO[bytes] + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -5452,23 +6115,16 @@ async def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) + cls: ClsType[_models.RedTeam] = kwargs.pop("cls", None) - if body is _Unset: - if name is _Unset: - raise TypeError("missing required argument: name") - if definition is _Unset: - raise TypeError("missing required argument: definition") - body = {"definition": definition, "description": description, "metadata": metadata, "name": name} - body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(red_team, (IOBase, bytes)): + _content = red_team else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(red_team, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_memory_stores_create_request( + _request = build_beta_red_teams_create_request( foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -5489,7 +6145,7 @@ async def create( response = pipeline_response.http_response - if response.status_code not in [200]: + if 
response.status_code not in [201]: if _stream: try: await response.read() # Load the body in memory and close the socket @@ -5505,185 +6161,43 @@ async def create( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) + deserialized = _deserialize(_models.RedTeam, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - async def update( - self, - name: str, - *, - content_type: str = "application/json", - description: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, - **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Update a memory store. - - :param name: The name of the memory store to update. Required. - :type name: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword description: A human-readable description of the memory store. Default value is None. - :paramtype description: str - :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default - value is None. - :paramtype metadata: dict[str, str] - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Update a memory store. - - :param name: The name of the memory store to update. Required. - :type name: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDetails. 
The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Update a memory store. - - :param name: The name of the memory store to update. Required. - :type name: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update( - self, - name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - description: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, - **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Update a memory store. - - :param name: The name of the memory store to update. Required. - :type name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword description: A human-readable description of the memory store. Default value is None. - :paramtype description: str - :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default - value is None. - :paramtype metadata: dict[str, str] - :return: MemoryStoreDetails. 
The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"description": description, "metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_beta_memory_stores_update_request( - name=name, - foundry_features=_foundry_features, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() 
# Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) +class BetaSchedulesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`schedules` attribute. + """ - return deserialized # type: ignore + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async - async def get(self, name: str, **kwargs: Any) -> _models.MemoryStoreDetails: - """Retrieve a memory store. + async def delete(self, schedule_id: str, **kwargs: Any) -> None: + """Delete a schedule. - :param name: The name of the memory store to retrieve. Required. - :type name: str - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :param schedule_id: Identifier of the schedule. Required. 
+ :type schedule_id: str + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -5696,670 +6210,12 @@ async def get(self, name: str, **kwargs: Any) -> _models.MemoryStoreDetails: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) - - _request = build_beta_memory_stores_get_request( - name=name, - foundry_features=_foundry_features, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) - - if cls: - return 
cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.PageOrder]] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> AsyncItemPaged["_models.MemoryStoreDetails"]: - """List all memory stores. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the - default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for - ascending order and``desc`` - for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.projects.models.PageOrder - :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your - place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - Default value is None. 
- :paramtype before: str - :return: An iterator like instance of MemoryStoreDetails - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.MemoryStoreDetails] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[List[_models.MemoryStoreDetails]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(_continuation_token=None): - - _request = build_beta_memory_stores_list_request( - foundry_features=_foundry_features, - limit=limit, - order=order, - after=_continuation_token, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - return _request - - async def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.MemoryStoreDetails], - deserialized.get("data", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("last_id") or None, AsyncList(list_of_elem) - - async def get_next(_continuation_token=None): - _request = prepare_request(_continuation_token) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - @distributed_trace_async - async def delete(self, name: str, **kwargs: Any) -> _models.DeleteMemoryStoreResult: - """Delete a memory store. - - :param name: The name of the memory store to delete. Required. - :type name: str - :return: DeleteMemoryStoreResult. The DeleteMemoryStoreResult is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DeleteMemoryStoreResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.DeleteMemoryStoreResult] = kwargs.pop("cls", None) - - _request = build_beta_memory_stores_delete_request( - name=name, - foundry_features=_foundry_features, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in 
[200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.DeleteMemoryStoreResult, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def _search_memories( - self, - name: str, - *, - scope: str, - content_type: str = "application/json", - items: Optional[List[dict[str, Any]]] = None, - previous_search_id: Optional[str] = None, - options: Optional[_models.MemorySearchOptions] = None, - **kwargs: Any - ) -> _models.MemoryStoreSearchResult: ... - @overload - async def _search_memories( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreSearchResult: ... - @overload - async def _search_memories( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreSearchResult: ... - - @distributed_trace_async - async def _search_memories( - self, - name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - scope: str = _Unset, - items: Optional[List[dict[str, Any]]] = None, - previous_search_id: Optional[str] = None, - options: Optional[_models.MemorySearchOptions] = None, - **kwargs: Any - ) -> _models.MemoryStoreSearchResult: - """Search for relevant memories from a memory store based on conversation context. - - :param name: The name of the memory store to search. Required. - :type name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. 
- :type body: JSON or IO[bytes] - :keyword scope: The namespace that logically groups and isolates memories, such as a user ID. - Required. - :paramtype scope: str - :keyword items: Items for which to search for relevant memories. Default value is None. - :paramtype items: list[dict[str, any]] - :keyword previous_search_id: The unique ID of the previous search request, enabling incremental - memory search from where the last operation left off. Default value is None. - :paramtype previous_search_id: str - :keyword options: Memory search options. Default value is None. - :paramtype options: ~azure.ai.projects.models.MemorySearchOptions - :return: MemoryStoreSearchResult. The MemoryStoreSearchResult is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreSearchResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreSearchResult] = kwargs.pop("cls", None) - - if body is _Unset: - if scope is _Unset: - raise TypeError("missing required argument: scope") - body = { - "items": items, - "options": options, - "previous_search_id": previous_search_id, - "scope": scope, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - 
- _request = build_beta_memory_stores_search_memories_request( - name=name, - foundry_features=_foundry_features, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.MemoryStoreSearchResult, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - async def _update_memories_initial( - self, - name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - scope: str = _Unset, - items: Optional[List[dict[str, Any]]] = None, - previous_update_id: Optional[str] = None, - update_delay: Optional[int] = None, - **kwargs: Any - ) -> AsyncIterator[bytes]: - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: 
ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) - - if body is _Unset: - if scope is _Unset: - raise TypeError("missing required argument: scope") - body = { - "items": items, - "previous_update_id": previous_update_id, - "scope": scope, - "update_delay": update_delay, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_beta_memory_stores_update_memories_request( - name=name, - foundry_features=_foundry_features, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = True - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [202]: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - 
raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) - - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - async def _begin_update_memories( - self, - name: str, - *, - scope: str, - content_type: str = "application/json", - items: Optional[List[dict[str, Any]]] = None, - previous_update_id: Optional[str] = None, - update_delay: Optional[int] = None, - **kwargs: Any - ) -> AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]: ... - @overload - async def _begin_update_memories( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]: ... - @overload - async def _begin_update_memories( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]: ... - - @distributed_trace_async - async def _begin_update_memories( - self, - name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - scope: str = _Unset, - items: Optional[List[dict[str, Any]]] = None, - previous_update_id: Optional[str] = None, - update_delay: Optional[int] = None, - **kwargs: Any - ) -> AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]: - """Update memory store with conversation memories. - - :param name: The name of the memory store to update. Required. - :type name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword scope: The namespace that logically groups and isolates memories, such as a user ID. - Required. - :paramtype scope: str - :keyword items: Conversation items to be stored in memory. Default value is None. 
- :paramtype items: list[dict[str, any]] - :keyword previous_update_id: The unique ID of the previous update request, enabling incremental - memory updates from where the last operation left off. Default value is None. - :paramtype previous_update_id: str - :keyword update_delay: Timeout period before processing the memory update in seconds. - If a new update request is received during this period, it will cancel the current request and - reset the timeout. - Set to 0 to immediately trigger the update without delay. - Defaults to 300 (5 minutes). Default value is None. - :paramtype update_delay: int - :return: An instance of AsyncLROPoller that returns MemoryStoreUpdateCompletedResult. The - MemoryStoreUpdateCompletedResult is compatible with MutableMapping - :rtype: - ~azure.core.polling.AsyncLROPoller[~azure.ai.projects.models.MemoryStoreUpdateCompletedResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreUpdateCompletedResult] = kwargs.pop("cls", None) - polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = await self._update_memories_initial( - name=name, - body=body, - foundry_features=_foundry_features, - scope=scope, - items=items, - previous_update_id=previous_update_id, - update_delay=update_delay, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs - ) - await raw_result.http_response.read() # type: ignore - 
kwargs.pop("error_map", None) - - def get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.MemoryStoreUpdateCompletedResult, response.json().get("result", {})) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: AsyncPollingMethod = cast( - AsyncPollingMethod, - AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), - ) - elif polling is False: - polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) - else: - polling_method = polling - if cont_token: - return AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return AsyncLROPoller[_models.MemoryStoreUpdateCompletedResult]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @overload - async def delete_scope( - self, name: str, *, scope: str, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDeleteScopeResult: - """Delete all memories associated with a specific scope from a memory store. - - :param name: The name of the memory store. Required. - :type name: str - :keyword scope: The namespace that logically groups and isolates memories to delete, such as a - user ID. Required. - :paramtype scope: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def delete_scope( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDeleteScopeResult: - """Delete all memories associated with a specific scope from a memory store. - - :param name: The name of the memory store. Required. - :type name: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def delete_scope( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDeleteScopeResult: - """Delete all memories associated with a specific scope from a memory store. - - :param name: The name of the memory store. Required. - :type name: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDeleteScopeResult. 
The MemoryStoreDeleteScopeResult is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def delete_scope( - self, name: str, body: Union[JSON, IO[bytes]] = _Unset, *, scope: str = _Unset, **kwargs: Any - ) -> _models.MemoryStoreDeleteScopeResult: - """Delete all memories associated with a specific scope from a memory store. - - :param name: The name of the memory store. Required. - :type name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword scope: The namespace that logically groups and isolates memories to delete, such as a - user ID. Required. - :paramtype scope: str - :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreDeleteScopeResult] = kwargs.pop("cls", None) - - if body is _Unset: - if scope is _Unset: - raise TypeError("missing required argument: scope") - body = {"scope": scope} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = 
json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_beta_memory_stores_delete_scope_request( - name=name, + _request = build_beta_schedules_delete_request( + schedule_id=schedule_id, foundry_features=_foundry_features, - content_type=content_type, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -6368,67 +6224,32 @@ async def delete_scope( } _request.url = self._client.format_url(_request.url, **path_format_arguments) - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) + _stream = False pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.MemoryStoreDeleteScopeResult, response.json()) + raise HttpResponseError(response=response) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class BetaRedTeamsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`red_teams` attribute. 
- """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + return cls(pipeline_response, None, {}) # type: ignore @distributed_trace_async - async def get(self, name: str, **kwargs: Any) -> _models.RedTeam: - """Get a redteam by name. + async def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: + """Get a schedule by id. - :param name: Identifier of the red team run. Required. - :type name: str - :return: RedTeam. The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam + :param schedule_id: Identifier of the schedule. Required. + :type schedule_id: str + :return: Schedule. 
The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6441,10 +6262,10 @@ async def get(self, name: str, **kwargs: Any) -> _models.RedTeam: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.RedTeam] = kwargs.pop("cls", None) + cls: ClsType[_models.Schedule] = kwargs.pop("cls", None) - _request = build_beta_red_teams_get_request( - name=name, + _request = build_beta_schedules_get_request( + schedule_id=schedule_id, foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, @@ -6475,7 +6296,7 @@ async def get(self, name: str, **kwargs: Any) -> _models.RedTeam: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.RedTeam, response.json()) + deserialized = _deserialize(_models.Schedule, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -6483,20 +6304,31 @@ async def get(self, name: str, **kwargs: Any) -> _models.RedTeam: return deserialized # type: ignore @distributed_trace - def list(self, **kwargs: Any) -> AsyncItemPaged["_models.RedTeam"]: - """List a redteam by name. + def list( + self, + *, + type: Optional[Union[str, _models.ScheduleTaskType]] = None, + enabled: Optional[bool] = None, + **kwargs: Any + ) -> AsyncItemPaged["_models.Schedule"]: + """List all schedules. 
- :return: An iterator like instance of RedTeam - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.RedTeam] + :keyword type: Filter by the type of schedule. Known values are: "Evaluation" and "Insight". + Default value is None. + :paramtype type: str or ~azure.ai.projects.models.ScheduleTaskType + :keyword enabled: Filter by the enabled status. Default value is None. + :paramtype enabled: bool + :return: An iterator like instance of Schedule + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Schedule] :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.RedTeam]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Schedule]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6509,8 +6341,10 @@ def list(self, **kwargs: Any) -> AsyncItemPaged["_models.RedTeam"]: def prepare_request(next_link=None): if not next_link: - _request = build_beta_red_teams_list_request( + _request = build_beta_schedules_list_request( foundry_features=_foundry_features, + type=type, + enabled=enabled, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6550,7 +6384,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.RedTeam], + List[_models.Schedule], deserialized.get("value", []), ) if cls: @@ -6575,156 +6409,72 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @overload - async def create( - self, red_team: _models.RedTeam, *, content_type: str 
= "application/json", **kwargs: Any - ) -> _models.RedTeam: - """Creates a redteam run. + async def create_or_update( + self, schedule_id: str, schedule: _models.Schedule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Schedule: + """Create or update operation template. - :param red_team: Redteam to be run. Required. - :type red_team: ~azure.ai.projects.models.RedTeam + :param schedule_id: Identifier of the schedule. Required. + :type schedule_id: str + :param schedule: The resource instance. Required. + :type schedule: ~azure.ai.projects.models.Schedule :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: RedTeam. The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam + :return: Schedule. The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create(self, red_team: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.RedTeam: - """Creates a redteam run. + async def create_or_update( + self, schedule_id: str, schedule: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Schedule: + """Create or update operation template. - :param red_team: Redteam to be run. Required. - :type red_team: JSON + :param schedule_id: Identifier of the schedule. Required. + :type schedule_id: str + :param schedule: The resource instance. Required. + :type schedule: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: RedTeam. The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam + :return: Schedule. 
The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create( - self, red_team: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.RedTeam: - """Creates a redteam run. - - :param red_team: Redteam to be run. Required. - :type red_team: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: RedTeam. The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create(self, red_team: Union[_models.RedTeam, JSON, IO[bytes]], **kwargs: Any) -> _models.RedTeam: - """Creates a redteam run. - - :param red_team: Redteam to be run. Is one of the following types: RedTeam, JSON, IO[bytes] - Required. - :type red_team: ~azure.ai.projects.models.RedTeam or JSON or IO[bytes] - :return: RedTeam. 
The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.RedTeam] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(red_team, (IOBase, bytes)): - _content = red_team - else: - _content = json.dumps(red_team, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_beta_red_teams_create_request( - foundry_features=_foundry_features, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.RedTeam, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class BetaSchedulesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`schedules` attribute. - """ + async def create_or_update( + self, schedule_id: str, schedule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Schedule: + """Create or update operation template. - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + :param schedule_id: Identifier of the schedule. Required. + :type schedule_id: str + :param schedule: The resource instance. Required. + :type schedule: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Schedule. The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace_async - async def delete(self, schedule_id: str, **kwargs: Any) -> None: - """Delete a schedule. 
+ async def create_or_update( + self, schedule_id: str, schedule: Union[_models.Schedule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Schedule: + """Create or update operation template. :param schedule_id: Identifier of the schedule. Required. :type schedule_id: str - :return: None - :rtype: None + :param schedule: The resource instance. Is one of the following types: Schedule, JSON, + IO[bytes] Required. + :type schedule: ~azure.ai.projects.models.Schedule or JSON or IO[bytes] + :return: Schedule. The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( @@ -6738,15 +6488,25 @@ async def delete(self, schedule_id: str, **kwargs: Any) -> None: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[None] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Schedule] = kwargs.pop("cls", None) - _request = build_beta_schedules_delete_request( + content_type = content_type or "application/json" + _content = None + if isinstance(schedule, (IOBase, bytes)): + _content = schedule + else: + _content = json.dumps(schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_schedules_create_or_update_request( schedule_id=schedule_id, foundry_features=_foundry_features, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -6755,28 +6515,43 @@ async def delete(self, schedule_id: str, **kwargs: Any) -> None: } _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = False + _decompress = kwargs.pop("decompress", True) + 
_stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.Schedule, response.json()) + if cls: - return cls(pipeline_response, None, {}) # type: ignore + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore @distributed_trace_async - async def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: - """Get a schedule by id. + async def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models.ScheduleRun: + """Get a schedule run by id. - :param schedule_id: Identifier of the schedule. Required. + :param schedule_id: The unique identifier of the schedule. Required. :type schedule_id: str - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :param run_id: The unique identifier of the schedule run. Required. + :type run_id: str + :return: ScheduleRun. 
The ScheduleRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ScheduleRun :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( @@ -6793,10 +6568,11 @@ async def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Schedule] = kwargs.pop("cls", None) + cls: ClsType[_models.ScheduleRun] = kwargs.pop("cls", None) - _request = build_beta_schedules_get_request( + _request = build_beta_schedules_get_run_request( schedule_id=schedule_id, + run_id=run_id, foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, @@ -6822,12 +6598,16 @@ async def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Schedule, response.json()) + deserialized = _deserialize(_models.ScheduleRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -6835,22 +6615,25 @@ async def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: return deserialized # type: ignore @distributed_trace - def list( + def list_runs( self, + schedule_id: str, *, type: Optional[Union[str, _models.ScheduleTaskType]] = None, enabled: Optional[bool] = None, **kwargs: Any - ) -> AsyncItemPaged["_models.Schedule"]: - """List all schedules. + ) -> AsyncItemPaged["_models.ScheduleRun"]: + """List all schedule runs. + :param schedule_id: Identifier of the schedule. 
Required. + :type schedule_id: str :keyword type: Filter by the type of schedule. Known values are: "Evaluation" and "Insight". Default value is None. :paramtype type: str or ~azure.ai.projects.models.ScheduleTaskType :keyword enabled: Filter by the enabled status. Default value is None. :paramtype enabled: bool - :return: An iterator like instance of Schedule - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Schedule] + :return: An iterator like instance of ScheduleRun + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.ScheduleRun] :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( @@ -6859,7 +6642,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Schedule]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.ScheduleRun]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6872,7 +6655,8 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_beta_schedules_list_request( + _request = build_beta_schedules_list_runs_request( + schedule_id=schedule_id, foundry_features=_foundry_features, type=type, enabled=enabled, @@ -6915,7 +6699,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Schedule], + List[_models.ScheduleRun], deserialized.get("value", []), ) if cls: @@ -6939,77 +6723,285 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) + +class BetaToolsetsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`toolsets` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @overload - async def create_or_update( - self, schedule_id: str, schedule: _models.Schedule, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Schedule: - """Create or update operation template. + async def create( + self, + *, + name: str, + tools: List[_models.Tool], + content_type: str = "application/json", + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.ToolsetObject: + """Create a toolset. + + :keyword name: The name of the toolset. Required. + :paramtype name: str + :keyword tools: The list of tools to include in the toolset. Required. + :paramtype tools: list[~azure.ai.projects.models.Tool] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword description: A human-readable description of the toolset. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the toolset. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ToolsetObject: + """Create a toolset. + + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ToolsetObject: + """Create a toolset. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + tools: List[_models.Tool] = _Unset, + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.ToolsetObject: + """Create a toolset. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: The name of the toolset. Required. + :paramtype name: str + :keyword tools: The list of tools to include in the toolset. Required. + :paramtype tools: list[~azure.ai.projects.models.Tool] + :keyword description: A human-readable description of the toolset. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the toolset. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ToolsetObject. 
The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ToolsetObject] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + if tools is _Unset: + raise TypeError("missing required argument: tools") + body = {"description": description, "metadata": metadata, "name": name, "tools": tools} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_toolsets_create_request( + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + 
) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.ToolsetObject, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update( + self, + tool_set_name: str, + *, + tools: List[_models.Tool], + content_type: str = "application/json", + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.ToolsetObject: + """Update a toolset. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :param schedule: The resource instance. Required. - :type schedule: ~azure.ai.projects.models.Schedule + :param tool_set_name: The name of the toolset to update. Required. + :type tool_set_name: str + :keyword tools: The list of tools to include in the toolset. Required. + :paramtype tools: list[~azure.ai.projects.models.Tool] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :keyword description: A human-readable description of the toolset. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the toolset. Default value is + None. 
+ :paramtype metadata: dict[str, str] + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_or_update( - self, schedule_id: str, schedule: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Schedule: - """Create or update operation template. + async def update( + self, tool_set_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ToolsetObject: + """Update a toolset. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :param schedule: The resource instance. Required. - :type schedule: JSON + :param tool_set_name: The name of the toolset to update. Required. + :type tool_set_name: str + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_or_update( - self, schedule_id: str, schedule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Schedule: - """Create or update operation template. + async def update( + self, tool_set_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ToolsetObject: + """Update a toolset. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :param schedule: The resource instance. Required. - :type schedule: IO[bytes] + :param tool_set_name: The name of the toolset to update. Required. 
+ :type tool_set_name: str + :param body: Required. + :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def create_or_update( - self, schedule_id: str, schedule: Union[_models.Schedule, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Schedule: - """Create or update operation template. + async def update( + self, + tool_set_name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tools: List[_models.Tool] = _Unset, + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.ToolsetObject: + """Update a toolset. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :param schedule: The resource instance. Is one of the following types: Schedule, JSON, - IO[bytes] Required. - :type schedule: ~azure.ai.projects.models.Schedule or JSON or IO[bytes] - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :param tool_set_name: The name of the toolset to update. Required. + :type tool_set_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tools: The list of tools to include in the toolset. Required. + :paramtype tools: list[~azure.ai.projects.models.Tool] + :keyword description: A human-readable description of the toolset. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the toolset. Default value is + None. 
+ :paramtype metadata: dict[str, str] + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -7023,17 +7015,22 @@ async def create_or_update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Schedule] = kwargs.pop("cls", None) + cls: ClsType[_models.ToolsetObject] = kwargs.pop("cls", None) + if body is _Unset: + if tools is _Unset: + raise TypeError("missing required argument: tools") + body = {"description": description, "metadata": metadata, "tools": tools} + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(schedule, (IOBase, bytes)): - _content = schedule + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_schedules_create_or_update_request( - schedule_id=schedule_id, + _request = build_beta_toolsets_update_request( + tool_set_name=tool_set_name, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -7054,19 +7051,23 @@ async def create_or_update( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except 
(StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Schedule, response.json()) + deserialized = _deserialize(_models.ToolsetObject, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -7074,19 +7075,17 @@ async def create_or_update( return deserialized # type: ignore @distributed_trace_async - async def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models.ScheduleRun: - """Get a schedule run by id. + async def get(self, tool_set_name: str, **kwargs: Any) -> _models.ToolsetObject: + """Retrieve a toolset. - :param schedule_id: The unique identifier of the schedule. Required. - :type schedule_id: str - :param run_id: The unique identifier of the schedule run. Required. - :type run_id: str - :return: ScheduleRun. The ScheduleRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ScheduleRun + :param tool_set_name: The name of the toolset to retrieve. Required. + :type tool_set_name: str + :return: ToolsetObject. 
The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -7099,11 +7098,10 @@ async def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ScheduleRun] = kwargs.pop("cls", None) + cls: ClsType[_models.ToolsetObject] = kwargs.pop("cls", None) - _request = build_beta_schedules_get_run_request( - schedule_id=schedule_id, - run_id=run_id, + _request = build_beta_toolsets_get_request( + tool_set_name=tool_set_name, foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, @@ -7138,7 +7136,7 @@ async def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.ScheduleRun, response.json()) + deserialized = _deserialize(_models.ToolsetObject, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -7146,34 +7144,41 @@ async def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models return deserialized # type: ignore @distributed_trace - def list_runs( + def list( self, - schedule_id: str, *, - type: Optional[Union[str, _models.ScheduleTaskType]] = None, - enabled: Optional[bool] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.PageOrder]] = None, + before: Optional[str] = None, **kwargs: Any - ) -> AsyncItemPaged["_models.ScheduleRun"]: - """List all schedule runs. 
+ ) -> AsyncItemPaged["_models.ToolsetObject"]: + """List all toolsets. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :keyword type: Filter by the type of schedule. Known values are: "Evaluation" and "Insight". + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the + default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for + ascending order and``desc`` + for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.PageOrder + :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your + place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. Default value is None. - :paramtype type: str or ~azure.ai.projects.models.ScheduleTaskType - :keyword enabled: Filter by the enabled status. Default value is None. 
- :paramtype enabled: bool - :return: An iterator like instance of ScheduleRun - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.ScheduleRun] + :paramtype before: str + :return: An iterator like instance of ToolsetObject + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.ToolsetObject] :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.ScheduleRun]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.ToolsetObject]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -7183,62 +7188,36 @@ def list_runs( } error_map.update(kwargs.pop("error_map", {}) or {}) - def prepare_request(next_link=None): - if not next_link: - - _request = build_beta_schedules_list_runs_request( - schedule_id=schedule_id, - foundry_features=_foundry_features, - type=type, - enabled=enabled, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", - urllib.parse.urljoin(next_link, 
_parsed_next_link.path), - params=_next_request_params, - headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + def prepare_request(_continuation_token=None): + _request = build_beta_toolsets_list_request( + foundry_features=_foundry_features, + limit=limit, + order=order, + after=_continuation_token, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) return _request async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.ScheduleRun], - deserialized.get("value", []), + List[_models.ToolsetObject], + deserialized.get("data", []), ) if cls: list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + return deserialized.get("last_id") or None, AsyncList(list_of_elem) - async def get_next(next_link=None): - _request = prepare_request(next_link) + async def get_next(_continuation_token=None): + _request = prepare_request(_continuation_token) _stream = False pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access @@ -7248,8 +7227,81 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise 
HttpResponseError(response=response, model=error) return pipeline_response return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def delete(self, tool_set_name: str, **kwargs: Any) -> _models.DeleteToolsetResponse: + """Delete a toolset. + + :param tool_set_name: The name of the toolset to delete. Required. + :type tool_set_name: str + :return: DeleteToolsetResponse. The DeleteToolsetResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DeleteToolsetResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.DeleteToolsetResponse] = kwargs.pop("cls", None) + + _request = build_beta_toolsets_delete_request( + tool_set_name=tool_set_name, + foundry_features=_foundry_features, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.DeleteToolsetResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py index 110c948952f0..e93d0b108a9d 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py @@ -84,6 +84,7 @@ DeleteAgentResponse, DeleteAgentVersionResponse, DeleteMemoryStoreResult, + DeleteToolsetResponse, Deployment, EmbeddingConfiguration, EntraIDCredentials, @@ -217,6 +218,7 @@ ToolChoiceWebSearchPreview20250311, ToolDescription, ToolProjectConnection, + ToolsetObject, Trigger, UserProfileMemoryItem, WebSearchApproximateLocation, @@ -225,6 +227,8 @@ WebSearchTool, WebSearchToolFilters, WeeklyRecurrenceSchedule, + WorkIQPreviewTool, + WorkIQPreviewToolParameters, WorkflowAgentDefinition, ) @@ -277,6 +281,7 @@ TextResponseFormatConfigurationType, ToolChoiceParamType, ToolType, + ToolsetObjectType, TreatmentEffectType, TriggerType, ) @@ -355,6 +360,7 @@ "DeleteAgentResponse", "DeleteAgentVersionResponse", "DeleteMemoryStoreResult", + "DeleteToolsetResponse", "Deployment", "EmbeddingConfiguration", "EntraIDCredentials", @@ -488,6 +494,7 @@ "ToolChoiceWebSearchPreview20250311", "ToolDescription", "ToolProjectConnection", + "ToolsetObject", "Trigger", "UserProfileMemoryItem", "WebSearchApproximateLocation", @@ -496,6 +503,8 @@ "WebSearchTool", "WebSearchToolFilters", "WeeklyRecurrenceSchedule", + "WorkIQPreviewTool", + 
"WorkIQPreviewToolParameters", "WorkflowAgentDefinition", "AgentKind", "AgentObjectType", @@ -545,6 +554,7 @@ "TextResponseFormatConfigurationType", "ToolChoiceParamType", "ToolType", + "ToolsetObjectType", "TreatmentEffectType", "TriggerType", ] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py index ae6e559371ff..923dd800ef60 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py @@ -371,6 +371,8 @@ class _FoundryFeaturesOptInKeys(str, Enum, metaclass=CaseInsensitiveEnumMeta): """INSIGHTS_V1_PREVIEW.""" MEMORY_STORES_V1_PREVIEW = "MemoryStores=V1Preview" """MEMORY_STORES_V1_PREVIEW.""" + TOOLSET_V1_PREVIEW = "Toolsets=V1Preview" + """TOOLSET_V1_PREVIEW.""" class FunctionShellToolParamEnvironmentType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -670,6 +672,15 @@ class ToolChoiceParamType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """CODE_INTERPRETER.""" +class ToolsetObjectType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of ToolsetObjectType.""" + + TOOLSET = "toolset" + """TOOLSET.""" + TOOLSET_DELETED = "toolset.deleted" + """TOOLSET_DELETED.""" + + class ToolType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Type of ToolType.""" @@ -709,6 +720,8 @@ class ToolType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """SHAREPOINT_GROUNDING_PREVIEW.""" MEMORY_SEARCH_PREVIEW = "memory_search_preview" """MEMORY_SEARCH_PREVIEW.""" + WORK_IQ_PREVIEW = "work_iq_preview" + """WORK_IQ_PREVIEW.""" AZURE_AI_SEARCH = "azure_ai_search" """AZURE_AI_SEARCH.""" AZURE_FUNCTION = "azure_function" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index bae28a926cec..f009264826d7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ 
b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -38,6 +38,7 @@ TextResponseFormatConfigurationType, ToolChoiceParamType, ToolType, + ToolsetObjectType, TriggerType, ) @@ -54,14 +55,14 @@ class Tool(_Model): CaptureStructuredOutputsTool, CodeInterpreterTool, ComputerUsePreviewTool, CustomToolParam, MicrosoftFabricPreviewTool, FileSearchTool, FunctionTool, ImageGenTool, LocalShellToolParam, MCPTool, MemorySearchPreviewTool, OpenApiTool, SharepointPreviewTool, FunctionShellToolParam, - WebSearchTool, WebSearchPreviewTool + WebSearchTool, WebSearchPreviewTool, WorkIQPreviewTool :ivar type: Required. Known values are: "function", "file_search", "computer_use_preview", "web_search", "mcp", "code_interpreter", "image_generation", "local_shell", "shell", "custom", "web_search_preview", "apply_patch", "a2a_preview", "bing_custom_search_preview", "browser_automation_preview", "fabric_dataagent_preview", "sharepoint_grounding_preview", - "memory_search_preview", "azure_ai_search", "azure_function", "bing_grounding", - "capture_structured_outputs", and "openapi". + "memory_search_preview", "work_iq_preview", "azure_ai_search", "azure_function", + "bing_grounding", "capture_structured_outputs", and "openapi". 
:vartype type: str or ~azure.ai.projects.models.ToolType """ @@ -71,8 +72,9 @@ class Tool(_Model): \"web_search\", \"mcp\", \"code_interpreter\", \"image_generation\", \"local_shell\", \"shell\", \"custom\", \"web_search_preview\", \"apply_patch\", \"a2a_preview\", \"bing_custom_search_preview\", \"browser_automation_preview\", \"fabric_dataagent_preview\", - \"sharepoint_grounding_preview\", \"memory_search_preview\", \"azure_ai_search\", - \"azure_function\", \"bing_grounding\", \"capture_structured_outputs\", and \"openapi\".""" + \"sharepoint_grounding_preview\", \"memory_search_preview\", \"work_iq_preview\", + \"azure_ai_search\", \"azure_function\", \"bing_grounding\", \"capture_structured_outputs\", + and \"openapi\".""" @overload def __init__( @@ -97,6 +99,10 @@ class A2APreviewTool(Tool, discriminator="a2a_preview"): :ivar type: The type of the tool. Always ``"a2a_preview``. Required. A2A_PREVIEW. :vartype type: str or ~azure.ai.projects.models.A2A_PREVIEW + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar base_url: Base URL of the agent. :vartype base_url: str :ivar agent_card_path: The path to the agent card relative to the ``base_url``. If not @@ -110,6 +116,10 @@ class A2APreviewTool(Tool, discriminator="a2a_preview"): type: Literal[ToolType.A2A_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of the tool. Always ``\"a2a_preview``. Required. 
A2A_PREVIEW.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" base_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Base URL of the agent.""" agent_card_path: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -123,6 +133,8 @@ class A2APreviewTool(Tool, discriminator="a2a_preview"): def __init__( self, *, + name: Optional[str] = None, + description: Optional[str] = None, base_url: Optional[str] = None, agent_card_path: Optional[str] = None, project_connection_id: Optional[str] = None, @@ -614,6 +626,10 @@ class AISearchIndexResource(_Model): :vartype project_connection_id: str :ivar index_name: The name of an index in an IndexResource attached to this agent. :vartype index_name: str + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar query_type: Type of query in an AIIndexResource attached to this agent. Known values are: "simple", "semantic", "vector", "vector_simple_hybrid", and "vector_semantic_hybrid". 
:vartype query_type: str or ~azure.ai.projects.models.AzureAISearchQueryType @@ -630,6 +646,10 @@ class AISearchIndexResource(_Model): """An index connection ID in an IndexResource attached to this agent.""" index_name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of an index in an IndexResource attached to this agent.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -649,6 +669,8 @@ def __init__( *, project_connection_id: Optional[str] = None, index_name: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = None, top_k: Optional[int] = None, filter: Optional[str] = None, # pylint: disable=redefined-builtin @@ -1138,12 +1160,20 @@ class AzureAISearchTool(Tool, discriminator="azure_ai_search"): :ivar type: The object type, which is always 'azure_ai_search'. Required. AZURE_AI_SEARCH. :vartype type: str or ~azure.ai.projects.models.AZURE_AI_SEARCH + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar azure_ai_search: The azure ai search index resource. Required. 
:vartype azure_ai_search: ~azure.ai.projects.models.AzureAISearchToolResource """ type: Literal[ToolType.AZURE_AI_SEARCH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'azure_ai_search'. Required. AZURE_AI_SEARCH.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" azure_ai_search: "_models.AzureAISearchToolResource" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1154,6 +1184,8 @@ def __init__( self, *, azure_ai_search: "_models.AzureAISearchToolResource", + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -1171,11 +1203,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class AzureAISearchToolResource(_Model): """A set of index resources used by the ``azure_ai_search`` tool. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar indexes: The indices attached to this agent. There can be a maximum of 1 index resource attached to the agent. Required. 
:vartype indexes: list[~azure.ai.projects.models.AISearchIndexResource] """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" indexes: list["_models.AISearchIndexResource"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1187,6 +1227,8 @@ def __init__( self, *, indexes: list["_models.AISearchIndexResource"], + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -1469,6 +1511,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BingCustomSearchConfiguration(_Model): """A bing custom search configuration. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar project_connection_id: Project connection id for grounding with bing search. Required. :vartype project_connection_id: str :ivar instance_name: Name of the custom configuration instance given to config. Required. @@ -1484,6 +1530,10 @@ class BingCustomSearchConfiguration(_Model): :vartype freshness: str """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Project connection id for grounding with bing search. 
Required.""" instance_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -1504,6 +1554,8 @@ def __init__( *, project_connection_id: str, instance_name: str, + name: Optional[str] = None, + description: Optional[str] = None, market: Optional[str] = None, set_lang: Optional[str] = None, count: Optional[int] = None, @@ -1527,6 +1579,10 @@ class BingCustomSearchPreviewTool(Tool, discriminator="bing_custom_search_previe :ivar type: The object type, which is always 'bing_custom_search_preview'. Required. BING_CUSTOM_SEARCH_PREVIEW. :vartype type: str or ~azure.ai.projects.models.BING_CUSTOM_SEARCH_PREVIEW + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar bing_custom_search_preview: The bing custom search tool parameters. Required. :vartype bing_custom_search_preview: ~azure.ai.projects.models.BingCustomSearchToolParameters """ @@ -1534,6 +1590,10 @@ class BingCustomSearchPreviewTool(Tool, discriminator="bing_custom_search_previe type: Literal[ToolType.BING_CUSTOM_SEARCH_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'bing_custom_search_preview'. Required. 
BING_CUSTOM_SEARCH_PREVIEW.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" bing_custom_search_preview: "_models.BingCustomSearchToolParameters" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1544,6 +1604,8 @@ def __init__( self, *, bing_custom_search_preview: "_models.BingCustomSearchToolParameters", + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -1561,11 +1623,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BingCustomSearchToolParameters(_Model): """The bing custom search tool parameters. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar search_configurations: The project connections attached to this tool. There can be a maximum of 1 connection resource attached to the tool. Required. 
:vartype search_configurations: list[~azure.ai.projects.models.BingCustomSearchConfiguration] """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" search_configurations: list["_models.BingCustomSearchConfiguration"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1577,6 +1647,8 @@ def __init__( self, *, search_configurations: list["_models.BingCustomSearchConfiguration"], + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -1593,6 +1665,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BingGroundingSearchConfiguration(_Model): """Search configuration for Bing Grounding. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar project_connection_id: Project connection id for grounding with bing search. Required. :vartype project_connection_id: str :ivar market: The market where the results come from. @@ -1606,6 +1682,10 @@ class BingGroundingSearchConfiguration(_Model): :vartype freshness: str """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Project connection id for grounding with bing search. 
Required.""" market: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -1623,6 +1703,8 @@ def __init__( self, *, project_connection_id: str, + name: Optional[str] = None, + description: Optional[str] = None, market: Optional[str] = None, set_lang: Optional[str] = None, count: Optional[int] = None, @@ -1643,12 +1725,20 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BingGroundingSearchToolParameters(_Model): """The bing grounding search tool parameters. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar search_configurations: The search configurations attached to this tool. There can be a maximum of 1 search configuration resource attached to the tool. Required. :vartype search_configurations: list[~azure.ai.projects.models.BingGroundingSearchConfiguration] """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" search_configurations: list["_models.BingGroundingSearchConfiguration"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1660,6 +1750,8 @@ def __init__( self, *, search_configurations: list["_models.BingGroundingSearchConfiguration"], + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -1679,12 +1771,20 @@ class BingGroundingTool(Tool, discriminator="bing_grounding"): :ivar type: The object type, which is always 'bing_grounding'. Required. BING_GROUNDING. :vartype type: str or ~azure.ai.projects.models.BING_GROUNDING + :ivar name: Optional user-defined name for this tool or configuration. 
+ :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar bing_grounding: The bing grounding search tool parameters. Required. :vartype bing_grounding: ~azure.ai.projects.models.BingGroundingSearchToolParameters """ type: Literal[ToolType.BING_GROUNDING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'bing_grounding'. Required. BING_GROUNDING.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" bing_grounding: "_models.BingGroundingSearchToolParameters" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1695,6 +1795,8 @@ def __init__( self, *, bing_grounding: "_models.BingGroundingSearchToolParameters", + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -1778,6 +1880,10 @@ class BrowserAutomationPreviewTool(Tool, discriminator="browser_automation_previ :ivar type: The object type, which is always 'browser_automation_preview'. Required. BROWSER_AUTOMATION_PREVIEW. :vartype type: str or ~azure.ai.projects.models.BROWSER_AUTOMATION_PREVIEW + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar browser_automation_preview: The Browser Automation Tool parameters. Required. 
:vartype browser_automation_preview: ~azure.ai.projects.models.BrowserAutomationToolParameters """ @@ -1785,6 +1891,10 @@ class BrowserAutomationPreviewTool(Tool, discriminator="browser_automation_previ type: Literal[ToolType.BROWSER_AUTOMATION_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'browser_automation_preview'. Required. BROWSER_AUTOMATION_PREVIEW.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" browser_automation_preview: "_models.BrowserAutomationToolParameters" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1795,6 +1905,8 @@ def __init__( self, *, browser_automation_preview: "_models.BrowserAutomationToolParameters", + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -1812,11 +1924,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BrowserAutomationToolConnectionParameters(_Model): # pylint: disable=name-too-long """Definition of input parameters for the connection used by the Browser Automation Tool. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar project_connection_id: The ID of the project connection to your Azure Playwright resource. Required. 
:vartype project_connection_id: str """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the project connection to your Azure Playwright resource. Required.""" @@ -1825,6 +1945,8 @@ def __init__( self, *, project_connection_id: str, + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -1841,11 +1963,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BrowserAutomationToolParameters(_Model): """Definition of input parameters for the Browser Automation Tool. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar connection: The project connection parameters associated with the Browser Automation Tool. Required. :vartype connection: ~azure.ai.projects.models.BrowserAutomationToolConnectionParameters """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" connection: "_models.BrowserAutomationToolConnectionParameters" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1856,6 +1986,8 @@ def __init__( self, *, connection: "_models.BrowserAutomationToolConnectionParameters", + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... 
@overload @@ -2237,6 +2369,8 @@ class CodeBasedEvaluatorDefinition(EvaluatorDefinition, discriminator="code"): :vartype entry_point: str :ivar image_tag: The container image tag to use for evaluator code execution. :vartype image_tag: str + :ivar blob_uri: The blob URI for the evaluator storage. + :vartype blob_uri: str """ type: Literal[EvaluatorDefinitionType.CODE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore @@ -2248,6 +2382,8 @@ class CodeBasedEvaluatorDefinition(EvaluatorDefinition, discriminator="code"): 'answer_length_evaluator.py').""" image_tag: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The container image tag to use for evaluator code execution.""" + blob_uri: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The blob URI for the evaluator storage.""" @overload def __init__( @@ -2259,6 +2395,7 @@ def __init__( code_text: Optional[str] = None, entry_point: Optional[str] = None, image_tag: Optional[str] = None, + blob_uri: Optional[str] = None, ) -> None: ... @overload @@ -2279,6 +2416,10 @@ class CodeInterpreterTool(Tool, discriminator="code_interpreter"): :ivar type: The type of the code interpreter tool. Always ``code_interpreter``. Required. CODE_INTERPRETER. :vartype type: str or ~azure.ai.projects.models.CODE_INTERPRETER + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar container: The code interpreter container. Can be a container ID or an object that specifies uploaded file IDs to make available to your code, along with an optional ``memory_limit`` setting. If not provided, the service assumes auto. 
Is either a str type or a @@ -2288,6 +2429,10 @@ class CodeInterpreterTool(Tool, discriminator="code_interpreter"): type: Literal[ToolType.CODE_INTERPRETER] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of the code interpreter tool. Always ``code_interpreter``. Required. CODE_INTERPRETER.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" container: Optional[Union[str, "_models.AutoCodeInterpreterToolParam"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -2300,6 +2445,8 @@ class CodeInterpreterTool(Tool, discriminator="code_interpreter"): def __init__( self, *, + name: Optional[str] = None, + description: Optional[str] = None, container: Optional[Union[str, "_models.AutoCodeInterpreterToolParam"]] = None, ) -> None: ... @@ -3462,6 +3609,46 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class DeleteToolsetResponse(_Model): + """Response returned when a toolset is deleted. + + :ivar object: The object type. Always 'toolset.deleted'. Required. TOOLSET_DELETED. + :vartype object: str or ~azure.ai.projects.models.TOOLSET_DELETED + :ivar name: The name of the toolset. Required. + :vartype name: str + :ivar deleted: Whether the toolset was successfully deleted. Required. + :vartype deleted: bool + """ + + object: Literal[ToolsetObjectType.TOOLSET_DELETED] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The object type. Always 'toolset.deleted'. Required. TOOLSET_DELETED.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the toolset. 
Required.""" + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Whether the toolset was successfully deleted. Required.""" + + @overload + def __init__( + self, + *, + object: Literal[ToolsetObjectType.TOOLSET_DELETED], + name: str, + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class Deployment(_Model): """Model Deployment Definition. @@ -4429,11 +4616,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class FabricDataAgentToolParameters(_Model): """The fabric data agent tool parameters. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar project_connections: The project connections attached to this tool. There can be a maximum of 1 connection resource attached to the tool. :vartype project_connections: list[~azure.ai.projects.models.ToolProjectConnection] """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" project_connections: Optional[list["_models.ToolProjectConnection"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -4444,6 +4639,8 @@ class FabricDataAgentToolParameters(_Model): def __init__( self, *, + name: Optional[str] = None, + description: Optional[str] = None, project_connections: Optional[list["_models.ToolProjectConnection"]] = None, ) -> None: ... 
@@ -4578,6 +4775,10 @@ class FileSearchTool(Tool, discriminator="file_search"): :ivar filters: Is either a ComparisonFilter type or a CompoundFilter type. :vartype filters: ~azure.ai.projects.models.ComparisonFilter or ~azure.ai.projects.models.CompoundFilter + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str """ type: Literal[ToolType.FILE_SEARCH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore @@ -4592,6 +4793,10 @@ class FileSearchTool(Tool, discriminator="file_search"): """Ranking options for search.""" filters: Optional["_types.Filters"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Is either a ComparisonFilter type or a CompoundFilter type.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" @overload def __init__( @@ -4601,6 +4806,8 @@ def __init__( max_num_results: Optional[int] = None, ranking_options: Optional["_models.RankingOptions"] = None, filters: Optional["_types.Filters"] = None, + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -4674,6 +4881,10 @@ class FunctionShellToolParam(Tool, discriminator="shell"): :vartype type: str or ~azure.ai.projects.models.SHELL :ivar environment: :vartype environment: ~azure.ai.projects.models.FunctionShellToolParamEnvironment + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. 
+ :vartype description: str """ type: Literal[ToolType.SHELL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore @@ -4681,12 +4892,18 @@ class FunctionShellToolParam(Tool, discriminator="shell"): environment: Optional["_models.FunctionShellToolParamEnvironment"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" @overload def __init__( self, *, environment: Optional["_models.FunctionShellToolParamEnvironment"] = None, + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -5023,6 +5240,10 @@ class ImageGenTool(Tool, discriminator="image_generation"): :ivar action: Whether to generate a new image or edit an existing image. Default: ``auto``. Known values are: "generate", "edit", and "auto". :vartype action: str or ~azure.ai.projects.models.ImageGenAction + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str """ type: Literal[ToolType.IMAGE_GENERATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore @@ -5078,6 +5299,10 @@ class ImageGenTool(Tool, discriminator="image_generation"): ) """Whether to generate a new image or edit an existing image. Default: ``auto``. 
Known values are: \"generate\", \"edit\", and \"auto\".""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" @overload def __init__( @@ -5096,6 +5321,8 @@ def __init__( input_image_mask: Optional["_models.ImageGenToolInputImageMask"] = None, partial_images: Optional[int] = None, action: Optional[Union[str, "_models.ImageGenAction"]] = None, + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -5510,14 +5737,25 @@ class LocalShellToolParam(Tool, discriminator="local_shell"): :ivar type: The type of the local shell tool. Always ``local_shell``. Required. LOCAL_SHELL. :vartype type: str or ~azure.ai.projects.models.LOCAL_SHELL + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str """ type: Literal[ToolType.LOCAL_SHELL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of the local shell tool. Always ``local_shell``. Required. LOCAL_SHELL.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" @overload def __init__( self, + *, + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... 
@overload @@ -5921,6 +6159,10 @@ class MemorySearchPreviewTool(Tool, discriminator="memory_search_preview"): :ivar type: The type of the tool. Always ``memory_search_preview``. Required. MEMORY_SEARCH_PREVIEW. :vartype type: str or ~azure.ai.projects.models.MEMORY_SEARCH_PREVIEW + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar memory_store_name: The name of the memory store to use. Required. :vartype memory_store_name: str :ivar scope: The namespace used to group and isolate memories, such as a user ID. Limits which @@ -5936,6 +6178,10 @@ class MemorySearchPreviewTool(Tool, discriminator="memory_search_preview"): type: Literal[ToolType.MEMORY_SEARCH_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of the tool. Always ``memory_search_preview``. Required. MEMORY_SEARCH_PREVIEW.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" memory_store_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the memory store to use. Required.""" scope: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -5955,6 +6201,8 @@ def __init__( *, memory_store_name: str, scope: str, + name: Optional[str] = None, + description: Optional[str] = None, search_options: Optional["_models.MemorySearchOptions"] = None, update_delay: Optional[int] = None, ) -> None: ... 
@@ -6403,6 +6651,10 @@ class MicrosoftFabricPreviewTool(Tool, discriminator="fabric_dataagent_preview") :ivar type: The object type, which is always 'fabric_dataagent_preview'. Required. FABRIC_DATAAGENT_PREVIEW. :vartype type: str or ~azure.ai.projects.models.FABRIC_DATAAGENT_PREVIEW + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar fabric_dataagent_preview: The fabric data agent tool parameters. Required. :vartype fabric_dataagent_preview: ~azure.ai.projects.models.FabricDataAgentToolParameters """ @@ -6410,6 +6662,10 @@ class MicrosoftFabricPreviewTool(Tool, discriminator="fabric_dataagent_preview") type: Literal[ToolType.FABRIC_DATAAGENT_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'fabric_dataagent_preview'. Required. FABRIC_DATAAGENT_PREVIEW.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" fabric_dataagent_preview: "_models.FabricDataAgentToolParameters" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -6420,6 +6676,8 @@ def __init__( self, *, fabric_dataagent_preview: "_models.FabricDataAgentToolParameters", + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -7802,11 +8060,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class SharepointGroundingToolParameters(_Model): """The sharepoint grounding tool parameters. + :ivar name: Optional user-defined name for this tool or configuration. 
+ :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar project_connections: The project connections attached to this tool. There can be a maximum of 1 connection resource attached to the tool. :vartype project_connections: list[~azure.ai.projects.models.ToolProjectConnection] """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" project_connections: Optional[list["_models.ToolProjectConnection"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -7817,6 +8083,8 @@ class SharepointGroundingToolParameters(_Model): def __init__( self, *, + name: Optional[str] = None, + description: Optional[str] = None, project_connections: Optional[list["_models.ToolProjectConnection"]] = None, ) -> None: ... @@ -7837,6 +8105,10 @@ class SharepointPreviewTool(Tool, discriminator="sharepoint_grounding_preview"): :ivar type: The object type, which is always 'sharepoint_grounding_preview'. Required. SHAREPOINT_GROUNDING_PREVIEW. :vartype type: str or ~azure.ai.projects.models.SHAREPOINT_GROUNDING_PREVIEW + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar sharepoint_grounding_preview: The sharepoint grounding tool parameters. Required. 
:vartype sharepoint_grounding_preview: ~azure.ai.projects.models.SharepointGroundingToolParameters @@ -7845,6 +8117,10 @@ class SharepointPreviewTool(Tool, discriminator="sharepoint_grounding_preview"): type: Literal[ToolType.SHAREPOINT_GROUNDING_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'sharepoint_grounding_preview'. Required. SHAREPOINT_GROUNDING_PREVIEW.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" sharepoint_grounding_preview: "_models.SharepointGroundingToolParameters" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -7855,6 +8131,8 @@ def __init__( self, *, sharepoint_grounding_preview: "_models.SharepointGroundingToolParameters", + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -8711,11 +8989,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ToolProjectConnection(_Model): """A project connection resource. + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar project_connection_id: A project connection in a ToolProjectConnectionList attached to this tool. Required. 
:vartype project_connection_id: str """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A project connection in a ToolProjectConnectionList attached to this tool. Required.""" @@ -8724,6 +9010,75 @@ def __init__( self, *, project_connection_id: str, + name: Optional[str] = None, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolsetObject(_Model): + """A toolset that stores reusable tool definitions for agents. + + :ivar object: The object type, which is always 'toolset'. Required. TOOLSET. + :vartype object: str or ~azure.ai.projects.models.TOOLSET + :ivar id: The unique identifier of the toolset. Required. + :vartype id: str + :ivar created_at: The Unix timestamp (seconds) when the toolset was created. Required. + :vartype created_at: ~datetime.datetime + :ivar updated_at: The Unix timestamp (seconds) when the toolset was last updated. Required. + :vartype updated_at: ~datetime.datetime + :ivar name: The name of the toolset. Required. + :vartype name: str + :ivar description: A human-readable description of the toolset. + :vartype description: str + :ivar metadata: Arbitrary key-value metadata to associate with the toolset. + :vartype metadata: dict[str, str] + :ivar tools: The list of tools contained in this toolset. Required. 
+ :vartype tools: list[~azure.ai.projects.models.Tool] + """ + + object: Literal[ToolsetObjectType.TOOLSET] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'toolset'. Required. TOOLSET.""" + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The unique identifier of the toolset. Required.""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (seconds) when the toolset was created. Required.""" + updated_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (seconds) when the toolset was last updated. Required.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the toolset. Required.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A human-readable description of the toolset.""" + metadata: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Arbitrary key-value metadata to associate with the toolset.""" + tools: list["_models.Tool"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The list of tools contained in this toolset. Required.""" + + @overload + def __init__( + self, + *, + object: Literal[ToolsetObjectType.TOOLSET], + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + updated_at: datetime.datetime, + name: str, + tools: list["_models.Tool"], + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, ) -> None: ... @overload @@ -8829,6 +9184,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class WebSearchConfiguration(_Model): """A web search configuration for bing custom search. 
+ :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar project_connection_id: Project connection id for grounding with bing custom search. Required. :vartype project_connection_id: str @@ -8836,6 +9195,10 @@ class WebSearchConfiguration(_Model): :vartype instance_name: str """ + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Project connection id for grounding with bing custom search. Required.""" instance_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -8847,6 +9210,8 @@ def __init__( *, project_connection_id: str, instance_name: str, + name: Optional[str] = None, + description: Optional[str] = None, ) -> None: ... @overload @@ -8921,6 +9286,10 @@ class WebSearchTool(Tool, discriminator="web_search"): for the search. One of ``low``, ``medium``, or ``high``. ``medium`` is the default. Is one of the following types: Literal["low"], Literal["medium"], Literal["high"] :vartype search_context_size: str or str or str + :ivar name: Optional user-defined name for this tool or configuration. + :vartype name: str + :ivar description: Optional user-defined description for this tool or configuration. + :vartype description: str :ivar custom_search_configuration: The project connections attached to this tool. There can be a maximum of 1 connection resource attached to the tool. 
:vartype custom_search_configuration: ~azure.ai.projects.models.WebSearchConfiguration @@ -8941,6 +9310,10 @@ class WebSearchTool(Tool, discriminator="web_search"): """High level guidance for the amount of context window space to use for the search. One of ``low``, ``medium``, or ``high``. ``medium`` is the default. Is one of the following types: Literal[\"low\"], Literal[\"medium\"], Literal[\"high\"]""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined name for this tool or configuration.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional user-defined description for this tool or configuration.""" custom_search_configuration: Optional["_models.WebSearchConfiguration"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -8954,6 +9327,8 @@ def __init__( filters: Optional["_models.WebSearchToolFilters"] = None, user_location: Optional["_models.WebSearchApproximateLocation"] = None, search_context_size: Optional[Literal["low", "medium", "high"]] = None, + name: Optional[str] = None, + description: Optional[str] = None, custom_search_configuration: Optional["_models.WebSearchConfiguration"] = None, ) -> None: ... @@ -9065,3 +9440,66 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.kind = AgentKind.WORKFLOW # type: ignore + + +class WorkIQPreviewTool(Tool, discriminator="work_iq_preview"): + """A WorkIQ server-side tool. + + :ivar type: The object type, which is always 'work_iq_preview'. Required. WORK_IQ_PREVIEW. + :vartype type: str or ~azure.ai.projects.models.WORK_IQ_PREVIEW + :ivar work_iq_preview: The WorkIQ tool parameters. Required. 
+ :vartype work_iq_preview: ~azure.ai.projects.models.WorkIQPreviewToolParameters + """ + + type: Literal[ToolType.WORK_IQ_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'work_iq_preview'. Required. WORK_IQ_PREVIEW.""" + work_iq_preview: "_models.WorkIQPreviewToolParameters" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The WorkIQ tool parameters. Required.""" + + @overload + def __init__( + self, + *, + work_iq_preview: "_models.WorkIQPreviewToolParameters", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.type = ToolType.WORK_IQ_PREVIEW # type: ignore + + +class WorkIQPreviewToolParameters(_Model): + """The WorkIQ tool parameters. + + :ivar project_connection_id: The ID of the WorkIQ project connection. Required. + :vartype project_connection_id: str + """ + + project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the WorkIQ project connection. Required.""" + + @overload + def __init__( + self, + *, + project_connection_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 21502f0de2bb..5a9b2fc628c9 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -80,114 +80,6 @@ def build_agents_get_request(agent_name: str, **kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_agents_create_agent_request( - *, foundry_features: Optional[Union[str, _AgentDefinitionOptInKeys]] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/agents" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if foundry_features is not None: - _headers["Foundry-Features"] = _SERIALIZER.header("foundry_features", foundry_features, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_update_agent_request( - agent_name: str, *, foundry_features: Optional[Union[str, _AgentDefinitionOptInKeys]] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) 
- _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/agents/{agent_name}" - path_format_arguments = { - "agent_name": _SERIALIZER.url("agent_name", agent_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if foundry_features is not None: - _headers["Foundry-Features"] = _SERIALIZER.header("foundry_features", foundry_features, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_agent_from_manifest_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/agents:import" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - 
-def build_agents_update_agent_from_manifest_request( # pylint: disable=name-too-long - agent_name: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/agents/{agent_name}/import" - path_format_arguments = { - "agent_name": _SERIALIZER.url("agent_name", agent_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - def build_agents_delete_request(agent_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1916,7 +1808,152 @@ def build_beta_schedules_list_runs_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -class BetaOperations: +def build_beta_toolsets_create_request( + *, foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW], **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) + accept 
= _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/toolsets" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Foundry-Features"] = _SERIALIZER.header("foundry_features", foundry_features, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_beta_toolsets_update_request( + tool_set_name: str, *, foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW], **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/toolsets/{tool_set_name}" + path_format_arguments = { + "tool_set_name": _SERIALIZER.url("tool_set_name", tool_set_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Foundry-Features"] = _SERIALIZER.header("foundry_features", foundry_features, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_beta_toolsets_get_request( + tool_set_name: str, *, foundry_features: 
Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW], **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/toolsets/{tool_set_name}" + path_format_arguments = { + "tool_set_name": _SERIALIZER.url("tool_set_name", tool_set_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Foundry-Features"] = _SERIALIZER.header("foundry_features", foundry_features, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_beta_toolsets_list_request( + *, + foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW], + limit: Optional[int] = None, + order: Optional[Union[str, _models.PageOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/toolsets" + + # Construct parameters + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Foundry-Features"] = _SERIALIZER.header("foundry_features", foundry_features, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_beta_toolsets_delete_request( + tool_set_name: str, *, foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW], **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "v1")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/toolsets/{tool_set_name}" + path_format_arguments = { + "tool_set_name": _SERIALIZER.url("tool_set_name", tool_set_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Foundry-Features"] = _SERIALIZER.header("foundry_features", foundry_features, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +class BetaOperations: # pylint: disable=too-many-instance-attributes """ .. warning:: **DO NOT** instantiate this class directly. 
@@ -1941,6 +1978,7 @@ def __init__(self, *args, **kwargs) -> None: self.memory_stores = BetaMemoryStoresOperations(self._client, self._config, self._serialize, self._deserialize) self.red_teams = BetaRedTeamsOperations(self._client, self._config, self._serialize, self._deserialize) self.schedules = BetaSchedulesOperations(self._client, self._config, self._serialize, self._deserialize) + self.toolsets = BetaToolsetsOperations(self._client, self._config, self._serialize, self._deserialize) class AgentsOperations: @@ -2025,65 +2063,16 @@ def get(self, agent_name: str, **kwargs: Any) -> _models.AgentDetails: return deserialized # type: ignore - @overload - def _create_agent( - self, - *, - name: str, - definition: _models.AgentDefinition, - content_type: str = "application/json", - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - def _create_agent( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - def _create_agent( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... - @distributed_trace - def _create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - name: str = _Unset, - definition: _models.AgentDefinition = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: - """Creates the agent. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. + def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentResponse: + """Deletes an agent. - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. 
- :paramtype name: str - :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple - agent definition. Required. - :paramtype definition: ~azure.ai.projects.models.AgentDefinition - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentDetails. The AgentDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDetails + :param agent_name: The name of the agent to delete. Required. + :type agent_name: str + :return: DeleteAgentResponse. The DeleteAgentResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DeleteAgentResponse :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Optional[str] = _get_agent_definition_opt_in_keys if self._config.allow_preview else None # type: ignore error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -2092,31 +2081,14 @@ def _create_agent( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentDetails] = kwargs.pop("cls", None) - - if body is _Unset: - if name is _Unset: - raise TypeError("missing required argument: name") - if definition is _Unset: - raise TypeError("missing required argument: definition") - body = {"definition": 
definition, "description": description, "metadata": metadata, "name": name} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.DeleteAgentResponse] = kwargs.pop("cls", None) - _request = build_agents_create_agent_request( - foundry_features=_foundry_features, - content_type=content_type, + _request = build_agents_delete_request( + agent_name=agent_name, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -2149,15 +2121,110 @@ def _create_agent( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentDetails, response.json()) + deserialized = _deserialize(_models.DeleteAgentResponse, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + @distributed_trace + def list( + self, + *, + kind: Optional[Union[str, _models.AgentKind]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.PageOrder]] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> ItemPaged["_models.AgentDetails"]: + """Returns the list of all agents. + + :keyword kind: Filter agents by kind. If not provided, all agents are returned. Known values + are: "prompt", "hosted", and "workflow". Default value is None. + :paramtype kind: str or ~azure.ai.projects.models.AgentKind + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the + default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for + ascending order and``desc`` + for descending order. Known values are: "asc" and "desc". 
Default value is None. + :paramtype order: str or ~azure.ai.projects.models.PageOrder + :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your + place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + Default value is None. + :paramtype before: str + :return: An iterator like instance of AgentDetails + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.AgentDetails] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.AgentDetails]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(_continuation_token=None): + + _request = build_agents_list_request( + kind=kind, + limit=limit, + order=order, + after=_continuation_token, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.AgentDetails], + deserialized.get("data", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("last_id") or None, iter(list_of_elem) + + def get_next(_continuation_token=None): + _request = prepare_request(_continuation_token) + + _stream = False + pipeline_response: PipelineResponse = 
self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + @overload - def _update_agent( + def create_version( self, agent_name: str, *, @@ -2166,18 +2233,84 @@ def _update_agent( metadata: Optional[dict[str, str]] = None, description: Optional[str] = None, **kwargs: Any - ) -> _models.AgentDetails: ... + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple + agent definition. Required. + :paramtype definition: ~azure.ai.projects.models.AgentDefinition + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be + useful for storing additional information about the object in a structured + format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings + with a maximum length of 512 characters. Default value is None. + :paramtype metadata: dict[str, str] + :keyword description: A human-readable description of the agent. Default value is None. 
+ :paramtype description: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload - def _update_agent( + def create_version( self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload - def _update_agent( + def create_version( self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... + ) -> _models.AgentVersionDetails: + """Create a new agent version. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace - def _update_agent( + def create_version( self, agent_name: str, body: Union[JSON, IO[bytes]] = _Unset, @@ -2186,11 +2319,15 @@ def _update_agent( metadata: Optional[dict[str, str]] = None, description: Optional[str] = None, **kwargs: Any - ) -> _models.AgentDetails: - """Updates the agent by adding a new version if there are any changes to the agent definition. If - no changes, returns the existing agent version. + ) -> _models.AgentVersionDetails: + """Create a new agent version. - :param agent_name: The name of the agent to retrieve. Required. + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. :type agent_name: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -2206,8 +2343,8 @@ def _update_agent( :paramtype metadata: dict[str, str] :keyword description: A human-readable description of the agent. Default value is None. :paramtype description: str - :return: AgentDetails. The AgentDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDetails + :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Optional[str] = _get_agent_definition_opt_in_keys if self._config.allow_preview else None # type: ignore @@ -2223,7 +2360,7 @@ def _update_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentDetails] = kwargs.pop("cls", None) + cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) if body is _Unset: if definition is _Unset: @@ -2237,7 +2374,7 @@ def _update_agent( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_agents_update_agent_request( + _request = build_agents_create_version_request( agent_name=agent_name, foundry_features=_foundry_features, content_type=content_type, @@ -2275,7 +2412,7 @@ def _update_agent( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentDetails, response.json()) + deserialized = _deserialize(_models.AgentVersionDetails, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2283,49 +2420,117 @@ def _update_agent( return deserialized # type: ignore @overload - def _create_agent_from_manifest( + def create_version_from_manifest( self, + agent_name: str, *, - name: str, manifest_id: str, parameter_values: dict[str, Any], content_type: str = "application/json", metadata: Optional[dict[str, str]] = None, description: Optional[str] = None, **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - def _create_agent_from_manifest( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... 
- @overload - def _create_agent_from_manifest( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... + ) -> _models.AgentVersionDetails: + """Create a new agent version from a manifest. - @distributed_trace - def _create_agent_from_manifest( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - name: str = _Unset, - manifest_id: str = _Unset, - parameter_values: dict[str, Any] = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: - """Creates an agent from a manifest. + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The unique name that identifies the agent. Name can be used to + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :keyword manifest_id: The manifest ID to import the agent version from. Required. + :paramtype manifest_id: str + :keyword parameter_values: The inputs to the manifest that will result in a fully materialized + Agent. Required. + :paramtype parameter_values: dict[str, any] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be + useful for storing additional information about the object in a structured + format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings + with a maximum length of 512 characters. Default value is None. + :paramtype metadata: dict[str, str] + :keyword description: A human-readable description of the agent. 
Default value is None. + :paramtype description: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_version_from_manifest( + self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version from a manifest. + + :param agent_name: The unique name that identifies the agent. Name can be used to retrieve/update/delete the agent. * Must start and end with alphanumeric characters, * Can contain hyphens in the middle * Must not exceed 63 characters. Required. - :paramtype name: str + :type agent_name: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_version_from_manifest( + self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version from a manifest. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_version_from_manifest( + self, + agent_name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + manifest_id: str = _Unset, + parameter_values: dict[str, Any] = _Unset, + metadata: Optional[dict[str, str]] = None, + description: Optional[str] = None, + **kwargs: Any + ) -> _models.AgentVersionDetails: + """Create a new agent version from a manifest. + + :param agent_name: The unique name that identifies the agent. Name can be used to + retrieve/update/delete the agent. + + * Must start and end with alphanumeric characters, + * Can contain hyphens in the middle + * Must not exceed 63 characters. Required. + :type agent_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] :keyword manifest_id: The manifest ID to import the agent version from. Required. :paramtype manifest_id: str :keyword parameter_values: The inputs to the manifest that will result in a fully materialized @@ -2340,8 +2545,8 @@ def _create_agent_from_manifest( :paramtype metadata: dict[str, str] :keyword description: A human-readable description of the agent. Default value is None. :paramtype description: str - :return: AgentDetails. The AgentDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDetails + :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2356,11 +2561,9 @@ def _create_agent_from_manifest( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentDetails] = kwargs.pop("cls", None) + cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) if body is _Unset: - if name is _Unset: - raise TypeError("missing required argument: name") if manifest_id is _Unset: raise TypeError("missing required argument: manifest_id") if parameter_values is _Unset: @@ -2369,7 +2572,6 @@ def _create_agent_from_manifest( "description": description, "manifest_id": manifest_id, "metadata": metadata, - "name": name, "parameter_values": parameter_values, } body = {k: v for k, v in body.items() if v is not None} @@ -2380,7 +2582,8 @@ def _create_agent_from_manifest( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_agents_create_agent_from_manifest_request( + _request = build_agents_create_version_from_manifest_request( + agent_name=agent_name, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2416,69 +2619,23 @@ def _create_agent_from_manifest( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentDetails, response.json()) + deserialized = _deserialize(_models.AgentVersionDetails, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - def _update_agent_from_manifest( - self, - agent_name: str, - *, - manifest_id: str, - parameter_values: dict[str, Any], - content_type: str = "application/json", - metadata: Optional[dict[str, str]] = None, - description: 
Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - def _update_agent_from_manifest( - self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... - @overload - def _update_agent_from_manifest( - self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentDetails: ... - @distributed_trace - def _update_agent_from_manifest( - self, - agent_name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - manifest_id: str = _Unset, - parameter_values: dict[str, Any] = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentDetails: - """Updates the agent from a manifest by adding a new version if there are any changes to the agent - definition. If no changes, returns the existing agent version. + def get_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> _models.AgentVersionDetails: + """Retrieves a specific version of an agent. - :param agent_name: The name of the agent to update. Required. + :param agent_name: The name of the agent to retrieve. Required. :type agent_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword manifest_id: The manifest ID to import the agent version from. Required. - :paramtype manifest_id: str - :keyword parameter_values: The inputs to the manifest that will result in a fully materialized - Agent. Required. - :paramtype parameter_values: dict[str, any] - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. 
- :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentDetails. The AgentDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDetails + :param agent_version: The version of the agent to retrieve. Required. + :type agent_version: str + :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentVersionDetails :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2489,36 +2646,15 @@ def _update_agent_from_manifest( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentDetails] = kwargs.pop("cls", None) - - if body is _Unset: - if manifest_id is _Unset: - raise TypeError("missing required argument: manifest_id") - if parameter_values is _Unset: - raise TypeError("missing required argument: parameter_values") - body = { - "description": description, - "manifest_id": manifest_id, - "metadata": metadata, - "parameter_values": parameter_values, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) - _request = build_agents_update_agent_from_manifest_request( + _request = build_agents_get_version_request( agent_name=agent_name, - content_type=content_type, + agent_version=agent_version, api_version=self._config.api_version, - content=_content, 
headers=_headers, params=_params, ) @@ -2551,7 +2687,7 @@ def _update_agent_from_manifest( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentDetails, response.json()) + deserialized = _deserialize(_models.AgentVersionDetails, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2559,13 +2695,16 @@ def _update_agent_from_manifest( return deserialized # type: ignore @distributed_trace - def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentResponse: - """Deletes an agent. + def delete_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> _models.DeleteAgentVersionResponse: + """Deletes a specific version of an agent. :param agent_name: The name of the agent to delete. Required. :type agent_name: str - :return: DeleteAgentResponse. The DeleteAgentResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DeleteAgentResponse + :param agent_version: The version of the agent to delete. Required. + :type agent_version: str + :return: DeleteAgentVersionResponse. 
The DeleteAgentVersionResponse is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.DeleteAgentVersionResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2579,10 +2718,11 @@ def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentResponse: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.DeleteAgentResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.DeleteAgentVersionResponse] = kwargs.pop("cls", None) - _request = build_agents_delete_request( + _request = build_agents_delete_version_request( agent_name=agent_name, + agent_version=agent_version, api_version=self._config.api_version, headers=_headers, params=_params, @@ -2616,7 +2756,7 @@ def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentResponse: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.DeleteAgentResponse, response.json()) + deserialized = _deserialize(_models.DeleteAgentVersionResponse, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2624,20 +2764,19 @@ def delete(self, agent_name: str, **kwargs: Any) -> _models.DeleteAgentResponse: return deserialized # type: ignore @distributed_trace - def list( + def list_versions( self, + agent_name: str, *, - kind: Optional[Union[str, _models.AgentKind]] = None, limit: Optional[int] = None, order: Optional[Union[str, _models.PageOrder]] = None, before: Optional[str] = None, **kwargs: Any - ) -> ItemPaged["_models.AgentDetails"]: - """Returns the list of all agents. + ) -> ItemPaged["_models.AgentVersionDetails"]: + """Returns the list of versions of an agent. - :keyword kind: Filter agents by kind. If not provided, all agents are returned. Known values - are: "prompt", "hosted", and "workflow". Default value is None. 
- :paramtype kind: str or ~azure.ai.projects.models.AgentKind + :param agent_name: The name of the agent to retrieve versions for. Required. + :type agent_name: str :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. Default value is None. @@ -2652,14 +2791,14 @@ def list( subsequent call can include before=obj_foo in order to fetch the previous page of the list. Default value is None. :paramtype before: str - :return: An iterator like instance of AgentDetails - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.AgentDetails] + :return: An iterator like instance of AgentVersionDetails + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.AgentVersionDetails] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.AgentDetails]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.AgentVersionDetails]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2671,8 +2810,8 @@ def list( def prepare_request(_continuation_token=None): - _request = build_agents_list_request( - kind=kind, + _request = build_agents_list_versions_request( + agent_name=agent_name, limit=limit, order=order, after=_continuation_token, @@ -2690,7 +2829,7 @@ def prepare_request(_continuation_token=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.AgentDetails], + List[_models.AgentVersionDetails], deserialized.get("data", []), ) if cls: @@ -2718,131 +2857,34 @@ def get_next(_continuation_token=None): return ItemPaged(get_next, extract_data) - @overload - def create_version( - self, - agent_name: str, - *, - definition: _models.AgentDefinition, - content_type: str = "application/json", - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - 
**kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version. - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. +class EvaluationRulesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple - agent definition. Required. - :paramtype definition: ~azure.ai.projects.models.AgentDefinition - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_version( - self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. 
- - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_version( - self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`evaluation_rules` attribute. + """ - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def create_version( - self, - agent_name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - definition: _models.AgentDefinition = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. - - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword definition: The agent definition. This can be a workflow, hosted agent, or a simple - agent definition. Required. - :paramtype definition: ~azure.ai.projects.models.AgentDefinition - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. + def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: + """Get an evaluation rule. - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. 
Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Optional[str] = _get_agent_definition_opt_in_keys if self._config.allow_preview else None # type: ignore error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -2851,30 +2893,14 @@ def create_version( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) - - if body is _Unset: - if definition is _Unset: - raise TypeError("missing required argument: definition") - body = {"definition": definition, "description": description, "metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.EvaluationRule] = kwargs.pop("cls", None) - _request = build_agents_create_version_request( - agent_name=agent_name, - foundry_features=_foundry_features, - content_type=content_type, + _request = build_evaluation_rules_get_request( + id=id, 
api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -2898,152 +2924,136 @@ def create_version( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) + raise HttpResponseError(response=response) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentVersionDetails, response.json()) + deserialized = _deserialize(_models.EvaluationRule, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - def create_version_from_manifest( - self, - agent_name: str, - *, - manifest_id: str, - parameter_values: dict[str, Any], - content_type: str = "application/json", - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version from a manifest. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. - - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :keyword manifest_id: The manifest ID to import the agent version from. Required. - :paramtype manifest_id: str - :keyword parameter_values: The inputs to the manifest that will result in a fully materialized - Agent. Required. - :paramtype parameter_values: dict[str, any] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. + @distributed_trace + def delete(self, id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Delete an evaluation rule. - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - def create_version_from_manifest( - self, agent_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version from a manifest. + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. + cls: ClsType[None] = kwargs.pop("cls", None) - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Required. 
- :type body: JSON + _request = build_evaluation_rules_delete_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + def create_or_update( + self, id: str, evaluation_rule: _models.EvaluationRule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_version_from_manifest( - self, agent_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version from a manifest. 
+ def create_or_update( + self, id: str, evaluation_rule: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Required. - :type body: IO[bytes] + @overload + def create_or_update( + self, id: str, evaluation_rule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. + + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Required. + :type evaluation_rule: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: AgentVersionDetails. The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :return: EvaluationRule. 
The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def create_version_from_manifest( - self, - agent_name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - manifest_id: str = _Unset, - parameter_values: dict[str, Any] = _Unset, - metadata: Optional[dict[str, str]] = None, - description: Optional[str] = None, - **kwargs: Any - ) -> _models.AgentVersionDetails: - """Create a new agent version from a manifest. - - :param agent_name: The unique name that identifies the agent. Name can be used to - retrieve/update/delete the agent. - - * Must start and end with alphanumeric characters, - * Can contain hyphens in the middle - * Must not exceed 63 characters. Required. - :type agent_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword manifest_id: The manifest ID to import the agent version from. Required. - :paramtype manifest_id: str - :keyword parameter_values: The inputs to the manifest that will result in a fully materialized - Agent. Required. - :paramtype parameter_values: dict[str, any] - :keyword metadata: Set of 16 key-value pairs that can be attached to an object. This can be - useful for storing additional information about the object in a structured - format, and querying for objects via API or the dashboard. + def create_or_update( + self, id: str, evaluation_rule: Union[_models.EvaluationRule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationRule: + """Create or update an evaluation rule. - Keys are strings with a maximum length of 64 characters. Values are strings - with a maximum length of 512 characters. Default value is None. - :paramtype metadata: dict[str, str] - :keyword description: A human-readable description of the agent. Default value is None. - :paramtype description: str - :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :param id: Unique identifier for the evaluation rule. Required. + :type id: str + :param evaluation_rule: Evaluation rule resource. Is one of the following types: + EvaluationRule, JSON, IO[bytes] Required. + :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule or JSON or IO[bytes] + :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationRule :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Optional[Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW]] = _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW if self._config.allow_preview else None # type: ignore error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -3056,29 +3066,18 @@ def create_version_from_manifest( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) + cls: ClsType[_models.EvaluationRule] = kwargs.pop("cls", None) - if body is _Unset: - if manifest_id is _Unset: - raise TypeError("missing required argument: manifest_id") - if parameter_values is _Unset: - raise TypeError("missing required argument: parameter_values") - body = { - "description": description, - "manifest_id": manifest_id, - "metadata": metadata, - "parameter_values": parameter_values, - } - body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(evaluation_rule, (IOBase, bytes)): + _content = evaluation_rule else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(evaluation_rule, cls=SdkJSONEncoder, exclude_readonly=True) # 
type: ignore - _request = build_agents_create_version_from_manifest_request( - agent_name=agent_name, + _request = build_evaluation_rules_create_or_update_request( + id=id, + foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -3098,23 +3097,19 @@ def create_version_from_manifest( response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 201]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) + raise HttpResponseError(response=response) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.AgentVersionDetails, response.json()) + deserialized = _deserialize(_models.EvaluationRule, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3122,17 +3117,32 @@ def create_version_from_manifest( return deserialized # type: ignore @distributed_trace - def get_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> _models.AgentVersionDetails: - """Retrieves a specific version of an agent. + def list( + self, + *, + action_type: Optional[Union[str, _models.EvaluationRuleActionType]] = None, + agent_name: Optional[str] = None, + enabled: Optional[bool] = None, + **kwargs: Any + ) -> ItemPaged["_models.EvaluationRule"]: + """List all evaluation rules. - :param agent_name: The name of the agent to retrieve. Required. - :type agent_name: str - :param agent_version: The version of the agent to retrieve. Required. - :type agent_version: str - :return: AgentVersionDetails. 
The AgentVersionDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentVersionDetails + :keyword action_type: Filter by the type of evaluation rule. Known values are: + "continuousEvaluation" and "humanEvaluationPreview". Default value is None. + :paramtype action_type: str or ~azure.ai.projects.models.EvaluationRuleActionType + :keyword agent_name: Filter by the agent name. Default value is None. + :paramtype agent_name: str + :keyword enabled: Filter by the enabled status. Default value is None. + :paramtype enabled: bool + :return: An iterator like instance of EvaluationRule + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluationRule] :raises ~azure.core.exceptions.HttpResponseError: """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.EvaluationRule]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -3141,68 +3151,102 @@ def get_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> _mo } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} + def prepare_request(next_link=None): + if not next_link: - cls: ClsType[_models.AgentVersionDetails] = kwargs.pop("cls", None) + _request = build_evaluation_rules_list_request( + action_type=action_type, + agent_name=agent_name, + enabled=enabled, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request = build_agents_get_version_request( - agent_name=agent_name, - agent_version=agent_version, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - 
path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + return _request - response = pipeline_response.http_response + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.EvaluationRule], + deserialized.get("value", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + 
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs ) - raise HttpResponseError(response=response, model=error) + response = pipeline_response.http_response - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.AgentVersionDetails, response.json()) + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return pipeline_response - return deserialized # type: ignore + return ItemPaged(get_next, extract_data) - @distributed_trace - def delete_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> _models.DeleteAgentVersionResponse: - """Deletes a specific version of an agent. - :param agent_name: The name of the agent to delete. Required. - :type agent_name: str - :param agent_version: The version of the agent to delete. Required. - :type agent_version: str - :return: DeleteAgentVersionResponse. The DeleteAgentVersionResponse is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.DeleteAgentVersionResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { +class ConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`connections` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def _get(self, name: str, **kwargs: Any) -> _models.Connection: + """Get a connection by name, without populating connection credentials. + + :param name: The friendly name of the connection, provided by the user. Required. + :type name: str + :return: Connection. The Connection is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Connection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3213,11 +3257,10 @@ def delete_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.DeleteAgentVersionResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.Connection] = kwargs.pop("cls", None) - _request = build_agents_delete_version_request( - agent_name=agent_name, - agent_version=agent_version, + _request = build_connections_get_request( + name=name, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3242,58 +3285,114 @@ def delete_version(self, agent_name: str, agent_version: str, **kwargs: Any) -> except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) + raise 
HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.DeleteAgentVersionResponse, response.json()) + deserialized = _deserialize(_models.Connection, response.json()) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @distributed_trace - def list_versions( + def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Connection: + """Get a connection by name, with its connection credentials. + + :param name: The friendly name of the connection, provided by the user. Required. + :type name: str + :return: Connection. The Connection is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Connection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Connection] = kwargs.pop("cls", None) + + _request = build_connections_get_with_credentials_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # 
pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.Connection, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( self, - agent_name: str, *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.PageOrder]] = None, - before: Optional[str] = None, + connection_type: Optional[Union[str, _models.ConnectionType]] = None, + default_connection: Optional[bool] = None, **kwargs: Any - ) -> ItemPaged["_models.AgentVersionDetails"]: - """Returns the list of versions of an agent. + ) -> ItemPaged["_models.Connection"]: + """List all connections in the project, without populating connection credentials. - :param agent_name: The name of the agent to retrieve versions for. Required. - :type agent_name: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the - default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for - ascending order and``desc`` - for descending order. Known values are: "asc" and "desc". Default value is None. 
- :paramtype order: str or ~azure.ai.projects.models.PageOrder - :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your - place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - Default value is None. - :paramtype before: str - :return: An iterator like instance of AgentVersionDetails - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.AgentVersionDetails] + :keyword connection_type: List connections of this specific type. Known values are: + "AzureOpenAI", "AzureBlob", "AzureStorageAccount", "CognitiveSearch", "CosmosDB", "ApiKey", + "AppConfig", "AppInsights", "CustomKeys", and "RemoteTool_Preview". Default value is None. + :paramtype connection_type: str or ~azure.ai.projects.models.ConnectionType + :keyword default_connection: List connections that are default connections. Default value is + None. 
+ :paramtype default_connection: bool + :return: An iterator like instance of Connection + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Connection] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.AgentVersionDetails]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Connection]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3303,36 +3402,57 @@ def list_versions( } error_map.update(kwargs.pop("error_map", {}) or {}) - def prepare_request(_continuation_token=None): + def prepare_request(next_link=None): + if not next_link: + + _request = build_connections_list_request( + connection_type=connection_type, + default_connection=default_connection, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request = build_agents_list_versions_request( - agent_name=agent_name, - limit=limit, - order=order, - after=_continuation_token, - 
before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) return _request def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.AgentVersionDetails], - deserialized.get("data", []), + List[_models.Connection], + deserialized.get("value", []), ) if cls: list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("last_id") or None, iter(list_of_elem) + return deserialized.get("nextLink") or None, iter(list_of_elem) - def get_next(_continuation_token=None): - _request = prepare_request(_continuation_token) + def get_next(next_link=None): + _request = prepare_request(next_link) _stream = False pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access @@ -3342,25 +3462,21 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) + raise HttpResponseError(response=response) return pipeline_response return ItemPaged(get_next, extract_data) -class EvaluationRulesOperations: +class DatasetsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`evaluation_rules` attribute. + :attr:`datasets` attribute. 
""" def __init__(self, *args, **kwargs) -> None: @@ -3371,15 +3487,20 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: - """Get an evaluation rule. + def list_versions(self, name: str, **kwargs: Any) -> ItemPaged["_models.DatasetVersion"]: + """List all versions of the given DatasetVersion. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :param name: The name of the resource. Required. + :type name: str + :return: An iterator like instance of DatasetVersion + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.DatasetVersion] :raises ~azure.core.exceptions.HttpResponseError: """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.DatasetVersion]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -3388,16 +3509,188 @@ def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.EvaluationRule] = kwargs.pop("cls", None) + def prepare_request(next_link=None): + if not next_link: - _request = build_evaluation_rules_get_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, + _request = build_datasets_list_versions_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url 
= self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.DatasetVersion], + deserialized.get("value", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list(self, **kwargs: Any) -> ItemPaged["_models.DatasetVersion"]: + """List the latest version of each DatasetVersion. 
+ + :return: An iterator like instance of DatasetVersion + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.DatasetVersion] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.DatasetVersion]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_datasets_list_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.DatasetVersion], + deserialized.get("value", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return 
deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVersion: + """Get the specific version of the DatasetVersion. The service returns 404 Not Found error if the + DatasetVersion does not exist. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to retrieve. Required. + :type version: str + :return: DatasetVersion. 
The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.DatasetVersion] = kwargs.pop("cls", None) + + _request = build_datasets_get_request( + name=name, + version=version, + api_version=self._config.api_version, + headers=_headers, + params=_params, ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), @@ -3424,7 +3717,7 @@ def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationRule, response.json()) + deserialized = _deserialize(_models.DatasetVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -3432,11 +3725,14 @@ def get(self, id: str, **kwargs: Any) -> _models.EvaluationRule: return deserialized # type: ignore @distributed_trace - def delete(self, id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Delete an evaluation rule. + def delete(self, name: str, version: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Delete the specific version of the DatasetVersion. The service returns 204 No Content if the + DatasetVersion was deleted successfully or if the DatasetVersion does not exist. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str + :param name: The name of the resource. Required. 
+ :type name: str + :param version: The version of the DatasetVersion to delete. Required. + :type version: str :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -3454,8 +3750,9 @@ def delete(self, id: str, **kwargs: Any) -> None: # pylint: disable=inconsisten cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_evaluation_rules_delete_request( - id=id, + _request = build_datasets_delete_request( + name=name, + version=version, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3481,74 +3778,99 @@ def delete(self, id: str, **kwargs: Any) -> None: # pylint: disable=inconsisten @overload def create_or_update( - self, id: str, evaluation_rule: _models.EvaluationRule, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationRule: - """Create or update an evaluation rule. + self, + name: str, + version: str, + dataset_version: _models.DatasetVersion, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.DatasetVersion: + """Create a new or update an existing DatasetVersion with the given version id. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :param evaluation_rule: Evaluation rule resource. Required. - :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to create or update. Required. + :type version: str + :param dataset_version: The DatasetVersion to create or update. Required. + :type dataset_version: ~azure.ai.projects.models.DatasetVersion :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str - :return: EvaluationRule. 
The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion :raises ~azure.core.exceptions.HttpResponseError: """ @overload def create_or_update( - self, id: str, evaluation_rule: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationRule: - """Create or update an evaluation rule. + self, + name: str, + version: str, + dataset_version: JSON, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.DatasetVersion: + """Create a new or update an existing DatasetVersion with the given version id. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :param evaluation_rule: Evaluation rule resource. Required. - :type evaluation_rule: JSON + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to create or update. Required. + :type version: str + :param dataset_version: The DatasetVersion to create or update. Required. + :type dataset_version: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion :raises ~azure.core.exceptions.HttpResponseError: """ @overload def create_or_update( - self, id: str, evaluation_rule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationRule: - """Create or update an evaluation rule. 
+ self, + name: str, + version: str, + dataset_version: IO[bytes], + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.DatasetVersion: + """Create a new or update an existing DatasetVersion with the given version id. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :param evaluation_rule: Evaluation rule resource. Required. - :type evaluation_rule: IO[bytes] + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to create or update. Required. + :type version: str + :param dataset_version: The DatasetVersion to create or update. Required. + :type dataset_version: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace def create_or_update( - self, id: str, evaluation_rule: Union[_models.EvaluationRule, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationRule: - """Create or update an evaluation rule. + self, name: str, version: str, dataset_version: Union[_models.DatasetVersion, JSON, IO[bytes]], **kwargs: Any + ) -> _models.DatasetVersion: + """Create a new or update an existing DatasetVersion with the given version id. - :param id: Unique identifier for the evaluation rule. Required. - :type id: str - :param evaluation_rule: Evaluation rule resource. Is one of the following types: - EvaluationRule, JSON, IO[bytes] Required. 
- :type evaluation_rule: ~azure.ai.projects.models.EvaluationRule or JSON or IO[bytes] - :return: EvaluationRule. The EvaluationRule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationRule + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to create or update. Required. + :type version: str + :param dataset_version: The DatasetVersion to create or update. Is one of the following types: + DatasetVersion, JSON, IO[bytes] Required. + :type dataset_version: ~azure.ai.projects.models.DatasetVersion or JSON or IO[bytes] + :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetVersion :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Optional[Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW]] = _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW if self._config.allow_preview else None # type: ignore error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -3561,18 +3883,18 @@ def create_or_update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationRule] = kwargs.pop("cls", None) + cls: ClsType[_models.DatasetVersion] = kwargs.pop("cls", None) - content_type = content_type or "application/json" + content_type = content_type or "application/merge-patch+json" _content = None - if isinstance(evaluation_rule, (IOBase, bytes)): - _content = evaluation_rule + if isinstance(dataset_version, (IOBase, bytes)): + _content = dataset_version else: - _content = json.dumps(evaluation_rule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(dataset_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_evaluation_rules_create_or_update_request( - id=id, - 
foundry_features=_foundry_features, + _request = build_datasets_create_or_update_request( + name=name, + version=version, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -3604,40 +3926,113 @@ def create_or_update( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationRule, response.json()) + deserialized = _deserialize(_models.DatasetVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @distributed_trace - def list( + @overload + def pending_upload( self, + name: str, + version: str, + pending_upload_request: _models.PendingUploadRequest, *, - action_type: Optional[Union[str, _models.EvaluationRuleActionType]] = None, - agent_name: Optional[str] = None, - enabled: Optional[bool] = None, + content_type: str = "application/json", **kwargs: Any - ) -> ItemPaged["_models.EvaluationRule"]: - """List all evaluation rules. + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of a dataset for a specific version. - :keyword action_type: Filter by the type of evaluation rule. Known values are: - "continuousEvaluation" and "humanEvaluationPreview". Default value is None. - :paramtype action_type: str or ~azure.ai.projects.models.EvaluationRuleActionType - :keyword agent_name: Filter by the agent name. Default value is None. - :paramtype agent_name: str - :keyword enabled: Filter by the enabled status. Default value is None. - :paramtype enabled: bool - :return: An iterator like instance of EvaluationRule - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluationRule] + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. 
Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.EvaluationRule]] = kwargs.pop("cls", None) + @overload + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of a dataset for a specific version. + + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of a dataset for a specific version. + + :param name: The name of the resource. Required. 
+ :type name: str + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of a dataset for a specific version. + :param name: The name of the resource. Required. + :type name: str + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Is one of the following + types: PendingUploadRequest, JSON, IO[bytes] Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or + IO[bytes] + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -3646,99 +4041,70 @@ def list( } error_map.update(kwargs.pop("error_map", {}) or {}) - def prepare_request(next_link=None): - if not next_link: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - _request = build_evaluation_rules_list_request( - action_type=action_type, - agent_name=agent_name, - enabled=enabled, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + content_type = content_type or "application/json" + _content = None + if isinstance(pending_upload_request, (IOBase, bytes)): + _content = pending_upload_request + 
else: + _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - return _request + _request = build_datasets_pending_upload_request( + name=name, + version=version, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.EvaluationRule], - deserialized.get("value", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return ItemPaged(get_next, extract_data) + response = pipeline_response.http_response + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
HttpResponseError(response=response) -class ConnectionsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.PendingUploadResponse, response.json()) - Instead, you should access the following operations through - :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`connections` attribute. - """ + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + return deserialized # type: ignore @distributed_trace - def _get(self, name: str, **kwargs: Any) -> _models.Connection: - """Get a connection by name, without populating connection credentials. + def get_credentials(self, name: str, version: str, **kwargs: Any) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with a Dataset version. - :param name: The friendly name of the connection, provided by the user. Required. + :param name: The name of the resource. Required. :type name: str - :return: Connection. The Connection is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Connection + :param version: The specific version id of the DatasetVersion to operate on. Required. + :type version: str + :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -3752,10 +4118,11 @@ def _get(self, name: str, **kwargs: Any) -> _models.Connection: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Connection] = kwargs.pop("cls", None) + cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) - _request = build_connections_get_request( + _request = build_datasets_get_credentials_request( name=name, + version=version, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3782,29 +4149,42 @@ def _get(self, name: str, **kwargs: Any) -> _models.Connection: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Connection, response.json()) + deserialized = _deserialize(_models.DatasetCredential, response.json()) if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + +class DeploymentsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`deployments` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @distributed_trace - def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Connection: - """Get a connection by name, with its connection credentials. + def get(self, name: str, **kwargs: Any) -> _models.Deployment: + """Get a deployed model. - :param name: The friendly name of the connection, provided by the user. Required. + :param name: Name of the deployment. Required. :type name: str - :return: Connection. The Connection is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Connection + :return: Deployment. 
The Deployment is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Deployment :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -3818,9 +4198,9 @@ def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Connection: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Connection] = kwargs.pop("cls", None) + cls: ClsType[_models.Deployment] = kwargs.pop("cls", None) - _request = build_connections_get_with_credentials_request( + _request = build_deployments_get_request( name=name, api_version=self._config.api_version, headers=_headers, @@ -3856,7 +4236,7 @@ def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Connection: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Connection, response.json()) + deserialized = _deserialize(_models.Deployment, response.json()) if cls: return cls(pipeline_response, deserialized, response_headers) # type: ignore @@ -3867,27 +4247,29 @@ def _get_with_credentials(self, name: str, **kwargs: Any) -> _models.Connection: def list( self, *, - connection_type: Optional[Union[str, _models.ConnectionType]] = None, - default_connection: Optional[bool] = None, + model_publisher: Optional[str] = None, + model_name: Optional[str] = None, + deployment_type: Optional[Union[str, _models.DeploymentType]] = None, **kwargs: Any - ) -> ItemPaged["_models.Connection"]: - """List all connections in the project, without populating connection credentials. + ) -> ItemPaged["_models.Deployment"]: + """List all deployed models in the project. - :keyword connection_type: List connections of this specific type. Known values are: - "AzureOpenAI", "AzureBlob", "AzureStorageAccount", "CognitiveSearch", "CosmosDB", "ApiKey", - "AppConfig", "AppInsights", "CustomKeys", and "RemoteTool_Preview". Default value is None. 
- :paramtype connection_type: str or ~azure.ai.projects.models.ConnectionType - :keyword default_connection: List connections that are default connections. Default value is - None. - :paramtype default_connection: bool - :return: An iterator like instance of Connection - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Connection] + :keyword model_publisher: Model publisher to filter models by. Default value is None. + :paramtype model_publisher: str + :keyword model_name: Model name (the publisher specific name) to filter models by. Default + value is None. + :paramtype model_name: str + :keyword deployment_type: Type of deployment to filter list by. "ModelDeployment" Default value + is None. + :paramtype deployment_type: str or ~azure.ai.projects.models.DeploymentType + :return: An iterator like instance of Deployment + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Deployment] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Connection]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Deployment]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3900,9 +4282,10 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_connections_list_request( - connection_type=connection_type, - default_connection=default_connection, + _request = build_deployments_list_request( + model_publisher=model_publisher, + model_name=model_name, + deployment_type=deployment_type, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3939,7 +4322,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Connection], + List[_models.Deployment], deserialized.get("value", []), ) if cls: @@ -3964,14 +4347,14 @@ def 
get_next(next_link=None): return ItemPaged(get_next, extract_data) -class DatasetsOperations: +class IndexesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`datasets` attribute. + :attr:`indexes` attribute. """ def __init__(self, *args, **kwargs) -> None: @@ -3982,19 +4365,19 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list_versions(self, name: str, **kwargs: Any) -> ItemPaged["_models.DatasetVersion"]: - """List all versions of the given DatasetVersion. + def list_versions(self, name: str, **kwargs: Any) -> ItemPaged["_models.Index"]: + """List all versions of the given Index. :param name: The name of the resource. Required. :type name: str - :return: An iterator like instance of DatasetVersion - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.DatasetVersion] + :return: An iterator like instance of Index + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Index] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.DatasetVersion]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Index]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4007,7 +4390,7 @@ def list_versions(self, name: str, **kwargs: Any) -> ItemPaged["_models.DatasetV def prepare_request(next_link=None): if not next_link: - _request = build_datasets_list_versions_request( + _request = build_indexes_list_versions_request( name=name, api_version=self._config.api_version, headers=_headers, @@ -4045,7 +4428,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = 
_deserialize( - List[_models.DatasetVersion], + List[_models.Index], deserialized.get("value", []), ) if cls: @@ -4070,17 +4453,17 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) @distributed_trace - def list(self, **kwargs: Any) -> ItemPaged["_models.DatasetVersion"]: - """List the latest version of each DatasetVersion. + def list(self, **kwargs: Any) -> ItemPaged["_models.Index"]: + """List the latest version of each Index. - :return: An iterator like instance of DatasetVersion - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.DatasetVersion] + :return: An iterator like instance of Index + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Index] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.DatasetVersion]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Index]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4093,7 +4476,7 @@ def list(self, **kwargs: Any) -> ItemPaged["_models.DatasetVersion"]: def prepare_request(next_link=None): if not next_link: - _request = build_datasets_list_request( + _request = build_indexes_list_request( api_version=self._config.api_version, headers=_headers, params=_params, @@ -4130,7 +4513,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.DatasetVersion], + List[_models.Index], deserialized.get("value", []), ) if cls: @@ -4155,16 +4538,16 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) @distributed_trace - def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVersion: - """Get the specific version of the DatasetVersion. The service returns 404 Not Found error if the - DatasetVersion does not exist. 
+ def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: + """Get the specific version of the Index. The service returns 404 Not Found error if the Index + does not exist. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to retrieve. Required. + :param version: The specific version id of the Index to retrieve. Required. :type version: str - :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -4178,9 +4561,9 @@ def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVersion: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.DatasetVersion] = kwargs.pop("cls", None) + cls: ClsType[_models.Index] = kwargs.pop("cls", None) - _request = build_datasets_get_request( + _request = build_indexes_get_request( name=name, version=version, api_version=self._config.api_version, @@ -4212,7 +4595,7 @@ def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVersion: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.DatasetVersion, response.json()) + deserialized = _deserialize(_models.Index, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4221,12 +4604,12 @@ def get(self, name: str, version: str, **kwargs: Any) -> _models.DatasetVersion: @distributed_trace def delete(self, name: str, version: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Delete the specific version of the DatasetVersion. 
The service returns 204 No Content if the - DatasetVersion was deleted successfully or if the DatasetVersion does not exist. + """Delete the specific version of the Index. The service returns 204 No Content if the Index was + deleted successfully or if the Index does not exist. :param name: The name of the resource. Required. :type name: str - :param version: The version of the DatasetVersion to delete. Required. + :param version: The version of the Index to delete. Required. :type version: str :return: None :rtype: None @@ -4245,7 +4628,7 @@ def delete(self, name: str, version: str, **kwargs: Any) -> None: # pylint: dis cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_datasets_delete_request( + _request = build_indexes_delete_request( name=name, version=version, api_version=self._config.api_version, @@ -4276,50 +4659,44 @@ def create_or_update( self, name: str, version: str, - dataset_version: _models.DatasetVersion, + index: _models.Index, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.DatasetVersion: - """Create a new or update an existing DatasetVersion with the given version id. + ) -> _models.Index: + """Create a new or update an existing Index with the given version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to create or update. Required. + :param version: The specific version id of the Index to create or update. Required. :type version: str - :param dataset_version: The DatasetVersion to create or update. Required. - :type dataset_version: ~azure.ai.projects.models.DatasetVersion + :param index: The Index to create or update. Required. + :type index: ~azure.ai.projects.models.Index :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: DatasetVersion. 
The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ @overload def create_or_update( - self, - name: str, - version: str, - dataset_version: JSON, - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.DatasetVersion: - """Create a new or update an existing DatasetVersion with the given version id. + self, name: str, version: str, index: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Index: + """Create a new or update an existing Index with the given version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to create or update. Required. + :param version: The specific version id of the Index to create or update. Required. :type version: str - :param dataset_version: The DatasetVersion to create or update. Required. - :type dataset_version: JSON + :param index: The Index to create or update. Required. + :type index: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ @@ -4328,42 +4705,42 @@ def create_or_update( self, name: str, version: str, - dataset_version: IO[bytes], + index: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.DatasetVersion: - """Create a new or update an existing DatasetVersion with the given version id. 
+ ) -> _models.Index: + """Create a new or update an existing Index with the given version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to create or update. Required. + :param version: The specific version id of the Index to create or update. Required. :type version: str - :param dataset_version: The DatasetVersion to create or update. Required. - :type dataset_version: IO[bytes] + :param index: The Index to create or update. Required. + :type index: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace def create_or_update( - self, name: str, version: str, dataset_version: Union[_models.DatasetVersion, JSON, IO[bytes]], **kwargs: Any - ) -> _models.DatasetVersion: - """Create a new or update an existing DatasetVersion with the given version id. - - :param name: The name of the resource. Required. + self, name: str, version: str, index: Union[_models.Index, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Index: + """Create a new or update an existing Index with the given version id. + + :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to create or update. Required. + :param version: The specific version id of the Index to create or update. Required. :type version: str - :param dataset_version: The DatasetVersion to create or update. Is one of the following types: - DatasetVersion, JSON, IO[bytes] Required. 
- :type dataset_version: ~azure.ai.projects.models.DatasetVersion or JSON or IO[bytes] - :return: DatasetVersion. The DatasetVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetVersion + :param index: The Index to create or update. Is one of the following types: Index, JSON, + IO[bytes] Required. + :type index: ~azure.ai.projects.models.Index or JSON or IO[bytes] + :return: Index. The Index is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Index :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -4378,16 +4755,16 @@ def create_or_update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DatasetVersion] = kwargs.pop("cls", None) + cls: ClsType[_models.Index] = kwargs.pop("cls", None) content_type = content_type or "application/merge-patch+json" _content = None - if isinstance(dataset_version, (IOBase, bytes)): - _content = dataset_version + if isinstance(index, (IOBase, bytes)): + _content = index else: - _content = json.dumps(dataset_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_datasets_create_or_update_request( + _request = build_indexes_create_or_update_request( name=name, version=version, content_type=content_type, @@ -4421,113 +4798,44 @@ def create_or_update( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.DatasetVersion, response.json()) + deserialized = _deserialize(_models.Index, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - def pending_upload( - self, - name: str, - version: str, - pending_upload_request: _models.PendingUploadRequest, - *, - content_type: str = 
"application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of a dataset for a specific version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def pending_upload( - self, - name: str, - version: str, - pending_upload_request: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of a dataset for a specific version. - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ +class BetaEvaluationTaxonomiesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
- @overload - def pending_upload( - self, - name: str, - version: str, - pending_upload_request: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of a dataset for a specific version. + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`evaluation_taxonomies` attribute. + """ - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def pending_upload( - self, - name: str, - version: str, - pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of a dataset for a specific version. 
+ def get(self, name: str, **kwargs: Any) -> _models.EvaluationTaxonomy: + """Get an evaluation run by name. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Is one of the following - types: PendingUploadRequest, JSON, IO[bytes] Required. - :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or - IO[bytes] - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -4536,25 +4844,15 @@ def pending_upload( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(pending_upload_request, (IOBase, bytes)): - _content = pending_upload_request - else: - _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) - _request = build_datasets_pending_upload_request( + _request = build_beta_evaluation_taxonomies_get_request( 
name=name, - version=version, - content_type=content_type, + foundry_features=_foundry_features, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -4583,7 +4881,7 @@ def pending_upload( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.PendingUploadResponse, response.json()) + deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4591,17 +4889,27 @@ def pending_upload( return deserialized # type: ignore @distributed_trace - def get_credentials(self, name: str, version: str, **kwargs: Any) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with a Dataset version. + def list( + self, *, input_name: Optional[str] = None, input_type: Optional[str] = None, **kwargs: Any + ) -> ItemPaged["_models.EvaluationTaxonomy"]: + """List evaluation taxonomies. - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the DatasetVersion to operate on. Required. - :type version: str - :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential + :keyword input_name: Filter by the evaluation input name. Default value is None. + :paramtype input_name: str + :keyword input_type: Filter by taxonomy input type. Default value is None. 
+ :paramtype input_type: str + :return: An iterator like instance of EvaluationTaxonomy + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluationTaxonomy] :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.EvaluationTaxonomy]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -4610,78 +4918,200 @@ def get_credentials(self, name: str, version: str, **kwargs: Any) -> _models.Dat } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} + def prepare_request(next_link=None): + if not next_link: - cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) + _request = build_beta_evaluation_taxonomies_list_request( + foundry_features=_foundry_features, + input_name=input_name, + input_type=input_type, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request = build_datasets_get_credentials_request( - name=name, - version=version, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = 
case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", + urllib.parse.urljoin(next_link, _parsed_next_link.path), + params=_next_request_params, + headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + return _request - response = pipeline_response.http_response + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.EvaluationTaxonomy], + deserialized.get("value", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, 
extract_data) + + @distributed_trace + def delete(self, name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Delete an evaluation taxonomy by name. + + :param name: The name of the resource. Required. + :type name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_beta_evaluation_taxonomies_delete_request( + name=name, + foundry_features=_foundry_features, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.DatasetCredential, response.json()) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, None, {}) # type: ignore - return deserialized # type: ignore + @overload + def 
create( + self, name: str, body: _models.EvaluationTaxonomy, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Create an evaluation taxonomy. + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: ~azure.ai.projects.models.EvaluationTaxonomy + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ -class DeploymentsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + @overload + def create( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Create an evaluation taxonomy. - Instead, you should access the following operations through - :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`deployments` attribute. - """ + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. 
The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @overload + def create( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Create an evaluation taxonomy. + + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace - def get(self, name: str, **kwargs: Any) -> _models.Deployment: - """Get a deployed model. + def create( + self, name: str, body: Union[_models.EvaluationTaxonomy, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Create an evaluation taxonomy. - :param name: Name of the deployment. Required. + :param name: The name of the evaluation taxonomy. Required. :type name: str - :return: Deployment. The Deployment is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Deployment + :param body: The evaluation taxonomy. Is one of the following types: EvaluationTaxonomy, JSON, + IO[bytes] Required. 
+ :type body: ~azure.ai.projects.models.EvaluationTaxonomy or JSON or IO[bytes] + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -4690,14 +5120,25 @@ def get(self, name: str, **kwargs: Any) -> _models.Deployment: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Deployment] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) - _request = build_deployments_get_request( + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_evaluation_taxonomies_create_request( name=name, + foundry_features=_foundry_features, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -4714,7 +5155,7 @@ def get(self, name: str, **kwargs: Any) -> _models.Deployment: response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 201]: if _stream: try: response.read() # Load the body in memory and close the socket @@ -4723,133 +5164,159 @@ def get(self, name: str, **kwargs: Any) -> _models.Deployment: map_error(status_code=response.status_code, response=response, error_map=error_map) 
raise HttpResponseError(response=response) - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Deployment, response.json()) + deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore + return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @distributed_trace - def list( - self, - *, - model_publisher: Optional[str] = None, - model_name: Optional[str] = None, - deployment_type: Optional[Union[str, _models.DeploymentType]] = None, - **kwargs: Any - ) -> ItemPaged["_models.Deployment"]: - """List all deployed models in the project. + @overload + def update( + self, name: str, body: _models.EvaluationTaxonomy, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Update an evaluation taxonomy. - :keyword model_publisher: Model publisher to filter models by. Default value is None. - :paramtype model_publisher: str - :keyword model_name: Model name (the publisher specific name) to filter models by. Default - value is None. - :paramtype model_name: str - :keyword deployment_type: Type of deployment to filter list by. "ModelDeployment" Default value - is None. - :paramtype deployment_type: str or ~azure.ai.projects.models.DeploymentType - :return: An iterator like instance of Deployment - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Deployment] + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: ~azure.ai.projects.models.EvaluationTaxonomy + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy :raises ~azure.core.exceptions.HttpResponseError: """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Deployment]] = kwargs.pop("cls", None) + @overload + def update( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Update an evaluation taxonomy. - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ - def prepare_request(next_link=None): - if not next_link: + @overload + def update( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Update an evaluation taxonomy. 
- _request = build_deployments_list_request( - model_publisher=model_publisher, - model_name=model_name, - deployment_type=deployment_type, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + :param name: The name of the evaluation taxonomy. Required. + :type name: str + :param body: The evaluation taxonomy. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + @distributed_trace + def update( + self, name: str, body: Union[_models.EvaluationTaxonomy, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationTaxonomy: + """Update an evaluation taxonomy. - return _request + :param name: The name of the evaluation taxonomy. Required. 
+ :type name: str + :param body: The evaluation taxonomy. Is one of the following types: EvaluationTaxonomy, JSON, + IO[bytes] Required. + :type body: ~azure.ai.projects.models.EvaluationTaxonomy or JSON or IO[bytes] + :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.Deployment], - deserialized.get("value", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - def get_next(next_link=None): - _request = prepare_request(next_link) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
HttpResponseError(response=response) + _request = build_beta_evaluation_taxonomies_update_request( + name=name, + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - return pipeline_response + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - return ItemPaged(get_next, extract_data) + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) -class IndexesOperations: + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class BetaEvaluatorsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`indexes` attribute. + :attr:`evaluators` attribute. 
""" def __init__(self, *args, **kwargs) -> None: @@ -4860,19 +5327,36 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list_versions(self, name: str, **kwargs: Any) -> ItemPaged["_models.Index"]: - """List all versions of the given Index. + def list_versions( + self, + name: str, + *, + type: Optional[Union[Literal["builtin"], Literal["custom"], Literal["all"], str]] = None, + limit: Optional[int] = None, + **kwargs: Any + ) -> ItemPaged["_models.EvaluatorVersion"]: + """List all versions of the given evaluator. :param name: The name of the resource. Required. :type name: str - :return: An iterator like instance of Index - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Index] + :keyword type: Filter evaluators by type. Possible values: 'all', 'custom', 'builtin'. Is one + of the following types: Literal["builtin"], Literal["custom"], Literal["all"], str Default + value is None. + :paramtype type: str or str or str or str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. 
+ :paramtype limit: int + :return: An iterator like instance of EvaluatorVersion + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluatorVersion] :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Index]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.EvaluatorVersion]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4885,8 +5369,11 @@ def list_versions(self, name: str, **kwargs: Any) -> ItemPaged["_models.Index"]: def prepare_request(next_link=None): if not next_link: - _request = build_indexes_list_versions_request( + _request = build_beta_evaluators_list_versions_request( name=name, + foundry_features=_foundry_features, + type=type, + limit=limit, api_version=self._config.api_version, headers=_headers, params=_params, @@ -4909,7 +5396,10 @@ def prepare_request(next_link=None): ) _next_request_params["api-version"] = self._config.api_version _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + "GET", + urllib.parse.urljoin(next_link, _parsed_next_link.path), + params=_next_request_params, + headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, ) path_format_arguments = { "endpoint": self._serialize.url( @@ -4923,7 +5413,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Index], + List[_models.EvaluatorVersion], deserialized.get("value", []), ) if cls: @@ -4948,17 +5438,33 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) @distributed_trace - def list(self, **kwargs: Any) -> 
ItemPaged["_models.Index"]: - """List the latest version of each Index. + def list( + self, + *, + type: Optional[Union[Literal["builtin"], Literal["custom"], Literal["all"], str]] = None, + limit: Optional[int] = None, + **kwargs: Any + ) -> ItemPaged["_models.EvaluatorVersion"]: + """List the latest version of each evaluator. - :return: An iterator like instance of Index - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Index] + :keyword type: Filter evaluators by type. Possible values: 'all', 'custom', 'builtin'. Is one + of the following types: Literal["builtin"], Literal["custom"], Literal["all"], str Default + value is None. + :paramtype type: str or str or str or str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :return: An iterator like instance of EvaluatorVersion + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluatorVersion] :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Index]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.EvaluatorVersion]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4971,7 +5477,10 @@ def list(self, **kwargs: Any) -> ItemPaged["_models.Index"]: def prepare_request(next_link=None): if not next_link: - _request = build_indexes_list_request( + _request = build_beta_evaluators_list_request( + foundry_features=_foundry_features, + type=type, + limit=limit, api_version=self._config.api_version, headers=_headers, params=_params, @@ -4994,7 +5503,10 @@ def prepare_request(next_link=None): ) _next_request_params["api-version"] = self._config.api_version _request = 
HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + "GET", + urllib.parse.urljoin(next_link, _parsed_next_link.path), + params=_next_request_params, + headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, ) path_format_arguments = { "endpoint": self._serialize.url( @@ -5008,7 +5520,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Index], + List[_models.EvaluatorVersion], deserialized.get("value", []), ) if cls: @@ -5033,18 +5545,21 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) @distributed_trace - def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: - """Get the specific version of the Index. The service returns 404 Not Found error if the Index - does not exist. + def get_version(self, name: str, version: str, **kwargs: Any) -> _models.EvaluatorVersion: + """Get the specific version of the EvaluatorVersion. The service returns 404 Not Found error if + the EvaluatorVersion does not exist. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to retrieve. Required. + :param version: The specific version id of the EvaluatorVersion to retrieve. Required. :type version: str - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -5056,11 +5571,12 @@ def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Index] = kwargs.pop("cls", None) + cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) - _request = build_indexes_get_request( + _request = build_beta_evaluators_get_version_request( name=name, version=version, + foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, params=_params, @@ -5090,7 +5606,7 @@ def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Index, response.json()) + deserialized = _deserialize(_models.EvaluatorVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -5098,18 +5614,23 @@ def get(self, name: str, version: str, **kwargs: Any) -> _models.Index: return deserialized # type: ignore @distributed_trace - def delete(self, name: str, version: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Delete the specific version of the Index. The service returns 204 No Content if the Index was - deleted successfully or if the Index does not exist. + def delete_version( # pylint: disable=inconsistent-return-statements + self, name: str, version: str, **kwargs: Any + ) -> None: + """Delete the specific version of the EvaluatorVersion. 
The service returns 204 No Content if the + EvaluatorVersion was deleted successfully or if the EvaluatorVersion does not exist. :param name: The name of the resource. Required. :type name: str - :param version: The version of the Index to delete. Required. + :param version: The version of the EvaluatorVersion to delete. Required. :type version: str :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -5123,9 +5644,10 @@ def delete(self, name: str, version: str, **kwargs: Any) -> None: # pylint: dis cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexes_delete_request( + _request = build_beta_evaluators_delete_version_request( name=name, version=version, + foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, params=_params, @@ -5150,94 +5672,82 @@ def delete(self, name: str, version: str, **kwargs: Any) -> None: # pylint: dis return cls(pipeline_response, None, {}) # type: ignore @overload - def create_or_update( + def create_version( self, name: str, - version: str, - index: _models.Index, + evaluator_version: _models.EvaluatorVersion, *, - content_type: str = "application/merge-patch+json", + content_type: str = "application/json", **kwargs: Any - ) -> _models.Index: - """Create a new or update an existing Index with the given version id. + ) -> _models.EvaluatorVersion: + """Create a new EvaluatorVersion with auto incremented version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to create or update. Required. - :type version: str - :param index: The Index to create or update. Required. - :type index: ~azure.ai.projects.models.Index + :param evaluator_version: Required. 
+ :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_or_update( - self, name: str, version: str, index: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Index: - """Create a new or update an existing Index with the given version id. + def create_version( + self, name: str, evaluator_version: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluatorVersion: + """Create a new EvaluatorVersion with auto incremented version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to create or update. Required. - :type version: str - :param index: The Index to create or update. Required. - :type index: JSON + :param evaluator_version: Required. + :type evaluator_version: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_or_update( - self, - name: str, - version: str, - index: IO[bytes], - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.Index: - """Create a new or update an existing Index with the given version id. + def create_version( + self, name: str, evaluator_version: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluatorVersion: + """Create a new EvaluatorVersion with auto incremented version id. :param name: The name of the resource. Required. :type name: str - :param version: The specific version id of the Index to create or update. Required. - :type version: str - :param index: The Index to create or update. Required. - :type index: IO[bytes] + :param evaluator_version: Required. + :type evaluator_version: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def create_or_update( - self, name: str, version: str, index: Union[_models.Index, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Index: - """Create a new or update an existing Index with the given version id. + def create_version( + self, name: str, evaluator_version: Union[_models.EvaluatorVersion, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluatorVersion: + """Create a new EvaluatorVersion with auto incremented version id. :param name: The name of the resource. Required. 
:type name: str - :param version: The specific version id of the Index to create or update. Required. - :type version: str - :param index: The Index to create or update. Is one of the following types: Index, JSON, - IO[bytes] Required. - :type index: ~azure.ai.projects.models.Index or JSON or IO[bytes] - :return: Index. The Index is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Index + :param evaluator_version: Is one of the following types: EvaluatorVersion, JSON, IO[bytes] + Required. + :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion or JSON or IO[bytes] + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -5250,18 +5760,18 @@ def create_or_update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Index] = kwargs.pop("cls", None) + cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) - content_type = content_type or "application/merge-patch+json" + content_type = content_type or "application/json" _content = None - if isinstance(index, (IOBase, bytes)): - _content = index + if isinstance(evaluator_version, (IOBase, bytes)): + _content = evaluator_version else: - _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(evaluator_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexes_create_or_update_request( + _request = build_beta_evaluators_create_version_request( name=name, - version=version, + foundry_features=_foundry_features, 
content_type=content_type, api_version=self._config.api_version, content=_content, @@ -5281,7 +5791,7 @@ def create_or_update( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: response.read() # Load the body in memory and close the socket @@ -5293,39 +5803,104 @@ def create_or_update( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Index, response.json()) + deserialized = _deserialize(_models.EvaluatorVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + @overload + def update_version( + self, + name: str, + version: str, + evaluator_version: _models.EvaluatorVersion, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EvaluatorVersion: + """Update an existing EvaluatorVersion with the given version id. -class BetaEvaluationTaxonomiesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + :param name: The name of the resource. Required. + :type name: str + :param version: The version of the EvaluatorVersion to update. Required. + :type version: str + :param evaluator_version: Evaluator resource. Required. + :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion + :raises ~azure.core.exceptions.HttpResponseError: + """ - Instead, you should access the following operations through - :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`evaluation_taxonomies` attribute. 
- """ + @overload + def update_version( + self, name: str, version: str, evaluator_version: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluatorVersion: + """Update an existing EvaluatorVersion with the given version id. - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + :param name: The name of the resource. Required. + :type name: str + :param version: The version of the EvaluatorVersion to update. Required. + :type version: str + :param evaluator_version: Evaluator resource. Required. + :type evaluator_version: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_version( + self, + name: str, + version: str, + evaluator_version: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.EvaluatorVersion: + """Update an existing EvaluatorVersion with the given version id. + + :param name: The name of the resource. Required. + :type name: str + :param version: The version of the EvaluatorVersion to update. Required. + :type version: str + :param evaluator_version: Evaluator resource. Required. + :type evaluator_version: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace - def get(self, name: str, **kwargs: Any) -> _models.EvaluationTaxonomy: - """Get an evaluation run by name. + def update_version( + self, + name: str, + version: str, + evaluator_version: Union[_models.EvaluatorVersion, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.EvaluatorVersion: + """Update an existing EvaluatorVersion with the given version id. :param name: The name of the resource. Required. :type name: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The version of the EvaluatorVersion to update. Required. + :type version: str + :param evaluator_version: Evaluator resource. Is one of the following types: EvaluatorVersion, + JSON, IO[bytes] Required. + :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion or JSON or IO[bytes] + :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluatorVersion :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( @@ -5339,15 +5914,26 @@ def get(self, name: str, **kwargs: Any) -> _models.EvaluationTaxonomy: } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) - _request = build_beta_evaluation_taxonomies_get_request( + content_type = content_type or "application/json" + _content = None + if isinstance(evaluator_version, (IOBase, bytes)): + _content = evaluator_version + else: + _content = json.dumps(evaluator_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_evaluators_update_version_request( name=name, + version=version, foundry_features=_foundry_features, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -5376,232 +5962,111 @@ def get(self, name: str, **kwargs: Any) -> _models.EvaluationTaxonomy: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) + deserialized = _deserialize(_models.EvaluatorVersion, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @distributed_trace - def list( - self, *, input_name: Optional[str] = None, input_type: Optional[str] = None, **kwargs: Any - ) -> ItemPaged["_models.EvaluationTaxonomy"]: - """List evaluation 
taxonomies. - - :keyword input_name: Filter by the evaluation input name. Default value is None. - :paramtype input_name: str - :keyword input_type: Filter by taxonomy input type. Default value is None. - :paramtype input_type: str - :return: An iterator like instance of EvaluationTaxonomy - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluationTaxonomy] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW - ) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[List[_models.EvaluationTaxonomy]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_beta_evaluation_taxonomies_list_request( - foundry_features=_foundry_features, - input_name=input_name, - input_type=input_type, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", - urllib.parse.urljoin(next_link, _parsed_next_link.path), - params=_next_request_params, - headers={"Foundry-Features": 
_SERIALIZER.header("foundry_features", _foundry_features, "str")}, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.EvaluationTaxonomy], - deserialized.get("value", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - @distributed_trace - def delete(self, name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Delete an evaluation taxonomy by name. + @overload + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: _models.PendingUploadRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. :param name: The name of the resource. Required. 
:type name: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[None] = kwargs.pop("cls", None) - - _request = build_beta_evaluation_taxonomies_delete_request( - name=name, - foundry_features=_foundry_features, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @overload - def create( - self, name: str, body: _models.EvaluationTaxonomy, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Create an evaluation taxonomy. - - :param name: The name of the evaluation taxonomy. Required. - :type name: str - :param body: The evaluation taxonomy. Required. - :type body: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The specific version id of the EvaluatorVersion to operate on. Required. 
+ :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Create an evaluation taxonomy. + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: JSON + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Create an evaluation taxonomy. + def pending_upload( + self, + name: str, + version: str, + pending_upload_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: IO[bytes] + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Required. + :type pending_upload_request: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def create( - self, name: str, body: Union[_models.EvaluationTaxonomy, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Create an evaluation taxonomy. 
+ def pending_upload( + self, + name: str, + version: str, + pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.PendingUploadResponse: + """Start a new or get an existing pending upload of an evaluator for a specific version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Is one of the following types: EvaluationTaxonomy, JSON, - IO[bytes] Required. - :type body: ~azure.ai.projects.models.EvaluationTaxonomy or JSON or IO[bytes] - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param pending_upload_request: The pending upload request parameters. Is one of the following + types: PendingUploadRequest, JSON, IO[bytes] Required. + :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or + IO[bytes] + :return: PendingUploadResponse. 
The PendingUploadResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.PendingUploadResponse :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( @@ -5619,17 +6084,18 @@ def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) + cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(pending_upload_request, (IOBase, bytes)): + _content = pending_upload_request else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluation_taxonomies_create_request( + _request = build_beta_evaluators_pending_upload_request( name=name, + version=version, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -5650,7 +6116,7 @@ def create( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket @@ -5662,7 +6128,7 @@ def create( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) + deserialized = _deserialize(_models.PendingUploadResponse, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -5670,72 +6136,103 @@ def create( return deserialized # type: ignore @overload - def update( - self, name: str, body: _models.EvaluationTaxonomy, *, content_type: str 
= "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Update an evaluation taxonomy. + def get_credentials( + self, + name: str, + version: str, + credential_request: _models.EvaluatorCredentialRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def update( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Update an evaluation taxonomy. + def get_credentials( + self, + name: str, + version: str, + credential_request: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. 
:type name: str - :param body: The evaluation taxonomy. Required. - :type body: JSON + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def update( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Update an evaluation taxonomy. + def get_credentials( + self, + name: str, + version: str, + credential_request: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Required. - :type body: IO[bytes] + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Required. + :type credential_request: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: EvaluationTaxonomy. 
The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def update( - self, name: str, body: Union[_models.EvaluationTaxonomy, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationTaxonomy: - """Update an evaluation taxonomy. + def get_credentials( + self, + name: str, + version: str, + credential_request: Union[_models.EvaluatorCredentialRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.DatasetCredential: + """Get the SAS credential to access the storage account associated with an Evaluator version. - :param name: The name of the evaluation taxonomy. Required. + :param name: The name of the resource. Required. :type name: str - :param body: The evaluation taxonomy. Is one of the following types: EvaluationTaxonomy, JSON, - IO[bytes] Required. - :type body: ~azure.ai.projects.models.EvaluationTaxonomy or JSON or IO[bytes] - :return: EvaluationTaxonomy. The EvaluationTaxonomy is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluationTaxonomy + :param version: The specific version id of the EvaluatorVersion to operate on. Required. + :type version: str + :param credential_request: The credential request parameters. Is one of the following types: + EvaluatorCredentialRequest, JSON, IO[bytes] Required. + :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest or JSON or + IO[bytes] + :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DatasetCredential :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( @@ -5753,17 +6250,18 @@ def update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationTaxonomy] = kwargs.pop("cls", None) + cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(credential_request, (IOBase, bytes)): + _content = credential_request else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(credential_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluation_taxonomies_update_request( + _request = build_beta_evaluators_get_credentials_request( name=name, + version=version, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -5796,7 +6294,7 @@ def update( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluationTaxonomy, response.json()) + deserialized = _deserialize(_models.DatasetCredential, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -5804,14 +6302,14 @@ def update( return deserialized # type: ignore -class BetaEvaluatorsOperations: +class BetaInsightsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`evaluators` attribute. + :attr:`insights` attribute. 
""" def __init__(self, *args, **kwargs) -> None: @@ -5821,38 +6319,67 @@ def __init__(self, *args, **kwargs) -> None: self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - @distributed_trace - def list_versions( - self, - name: str, - *, - type: Optional[Union[Literal["builtin"], Literal["custom"], Literal["all"], str]] = None, - limit: Optional[int] = None, - **kwargs: Any - ) -> ItemPaged["_models.EvaluatorVersion"]: - """List all versions of the given evaluator. + @overload + def generate( + self, insight: _models.Insight, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Insight: + """Generate Insights. - :param name: The name of the resource. Required. - :type name: str - :keyword type: Filter evaluators by type. Possible values: 'all', 'custom', 'builtin'. Is one - of the following types: Literal["builtin"], Literal["custom"], Literal["all"], str Default - value is None. - :paramtype type: str or str or str or str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :return: An iterator like instance of EvaluatorVersion - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluatorVersion] + :param insight: Complete evaluation configuration including data source, evaluators, and result + settings. Required. + :type insight: ~azure.ai.projects.models.Insight + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Insight. 
The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW - ) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.EvaluatorVersion]] = kwargs.pop("cls", None) + @overload + def generate(self, insight: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Insight: + """Generate Insights. + + :param insight: Complete evaluation configuration including data source, evaluators, and result + settings. Required. + :type insight: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Insight. The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def generate(self, insight: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Insight: + """Generate Insights. + + :param insight: Complete evaluation configuration including data source, evaluators, and result + settings. Required. + :type insight: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Insight. The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def generate(self, insight: Union[_models.Insight, JSON, IO[bytes]], **kwargs: Any) -> _models.Insight: + """Generate Insights. + :param insight: Complete evaluation configuration including data source, evaluators, and result + settings. 
Is one of the following types: Insight, JSON, IO[bytes] Required. + :type insight: ~azure.ai.projects.models.Insight or JSON or IO[bytes] + :return: Insight. The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + ) error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -5861,105 +6388,164 @@ def list_versions( } error_map.update(kwargs.pop("error_map", {}) or {}) - def prepare_request(next_link=None): - if not next_link: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - _request = build_beta_evaluators_list_versions_request( - name=name, - foundry_features=_foundry_features, - type=type, - limit=limit, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Insight] = kwargs.pop("cls", None) - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", - urllib.parse.urljoin(next_link, _parsed_next_link.path), - params=_next_request_params, - headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, - ) - 
path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + content_type = content_type or "application/json" + _content = None + if isinstance(insight, (IOBase, bytes)): + _content = insight + else: + _content = json.dumps(insight, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - return _request + _request = build_beta_insights_generate_request( + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.EvaluatorVersion], - deserialized.get("value", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - def get_next(next_link=None): - _request = prepare_request(next_link) + response = pipeline_response.http_response - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response + if response.status_code not in [201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.Insight, response.json()) - return pipeline_response + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - return ItemPaged(get_next, extract_data) + return deserialized # type: ignore + + @distributed_trace + def get(self, insight_id: str, *, include_coordinates: Optional[bool] = None, **kwargs: Any) -> _models.Insight: + """Get a specific insight by Id. + + :param insight_id: The unique identifier for the insights report. Required. + :type insight_id: str + :keyword include_coordinates: Whether to include coordinates for visualization in the response. + Defaults to false. Default value is None. + :paramtype include_coordinates: bool + :return: Insight. 
The Insight is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Insight + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Insight] = kwargs.pop("cls", None) + + _request = build_beta_insights_get_request( + insight_id=insight_id, + foundry_features=_foundry_features, + include_coordinates=include_coordinates, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.Insight, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore @distributed_trace 
def list( self, *, - type: Optional[Union[Literal["builtin"], Literal["custom"], Literal["all"], str]] = None, - limit: Optional[int] = None, + type: Optional[Union[str, _models.InsightType]] = None, + eval_id: Optional[str] = None, + run_id: Optional[str] = None, + agent_name: Optional[str] = None, + include_coordinates: Optional[bool] = None, **kwargs: Any - ) -> ItemPaged["_models.EvaluatorVersion"]: - """List the latest version of each evaluator. + ) -> ItemPaged["_models.Insight"]: + """List all insights in reverse chronological order (newest first). - :keyword type: Filter evaluators by type. Possible values: 'all', 'custom', 'builtin'. Is one - of the following types: Literal["builtin"], Literal["custom"], Literal["all"], str Default - value is None. - :paramtype type: str or str or str or str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :return: An iterator like instance of EvaluatorVersion - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluatorVersion] + :keyword type: Filter by the type of analysis. Known values are: "EvaluationRunClusterInsight", + "AgentClusterInsight", and "EvaluationComparison". Default value is None. + :paramtype type: str or ~azure.ai.projects.models.InsightType + :keyword eval_id: Filter by the evaluation ID. Default value is None. + :paramtype eval_id: str + :keyword run_id: Filter by the evaluation run ID. Default value is None. + :paramtype run_id: str + :keyword agent_name: Filter by the agent name. Default value is None. + :paramtype agent_name: str + :keyword include_coordinates: Whether to include coordinates for visualization in the response. + Defaults to false. Default value is None. 
+ :paramtype include_coordinates: bool + :return: An iterator like instance of Insight + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Insight] :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.EvaluatorVersion]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Insight]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -5972,10 +6558,13 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_beta_evaluators_list_request( + _request = build_beta_insights_list_request( foundry_features=_foundry_features, type=type, - limit=limit, + eval_id=eval_id, + run_id=run_id, + agent_name=agent_name, + include_coordinates=include_coordinates, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6015,7 +6604,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.EvaluatorVersion], + List[_models.Insight], deserialized.get("value", []), ) if cls: @@ -6039,92 +6628,116 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) - @distributed_trace - def get_version(self, name: str, version: str, **kwargs: Any) -> _models.EvaluatorVersion: - """Get the specific version of the EvaluatorVersion. The service returns 404 Not Found error if - the EvaluatorVersion does not exist. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to retrieve. Required. 
- :type version: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} +class BetaMemoryStoresOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. - cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`memory_stores` attribute. + """ - _request = build_beta_evaluators_get_version_request( - name=name, - version=version, - foundry_features=_foundry_features, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = 
self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + @overload + def create( + self, + *, + name: str, + definition: _models.MemoryStoreDefinition, + content_type: str = "application/json", + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Create a memory store. - response = pipeline_response.http_response + :keyword name: The name of the memory store. Required. + :paramtype name: str + :keyword definition: The memory store definition. Required. + :paramtype definition: ~azure.ai.projects.models.MemoryStoreDefinition + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword description: A human-readable description of the memory store. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default + value is None. + :paramtype metadata: dict[str, str] + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + @overload + def create( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Create a memory store. - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.EvaluatorVersion, response.json()) + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + @overload + def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Create a memory store. - return deserialized # type: ignore + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace - def delete_version( # pylint: disable=inconsistent-return-statements - self, name: str, version: str, **kwargs: Any - ) -> None: - """Delete the specific version of the EvaluatorVersion. The service returns 204 No Content if the - EvaluatorVersion was deleted successfully or if the EvaluatorVersion does not exist. + def create( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + definition: _models.MemoryStoreDefinition = _Unset, + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Create a memory store. - :param name: The name of the resource. Required. - :type name: str - :param version: The version of the EvaluatorVersion to delete. Required. - :type version: str - :return: None - :rtype: None + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :keyword name: The name of the memory store. Required. + :paramtype name: str + :keyword definition: The memory store definition. Required. + :paramtype definition: ~azure.ai.projects.models.MemoryStoreDefinition + :keyword description: A human-readable description of the memory store. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default + value is None. + :paramtype metadata: dict[str, str] + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6134,16 +6747,31 @@ def delete_version( # pylint: disable=inconsistent-return-statements } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[None] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) - _request = build_beta_evaluators_delete_version_request( - name=name, - version=version, + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + if definition is _Unset: + raise TypeError("missing required argument: definition") + body = {"definition": definition, "description": description, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + 
content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_memory_stores_create_request( foundry_features=_foundry_features, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -6152,96 +6780,127 @@ def delete_version( # pylint: disable=inconsistent-return-statements } _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = False + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) if cls: - return cls(pipeline_response, None, {}) # type: ignore + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore @overload - def create_version( + def update( self, name: str, - evaluator_version: _models.EvaluatorVersion, *, content_type: str = "application/json", + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, **kwargs: Any - ) -> 
_models.EvaluatorVersion: - """Create a new EvaluatorVersion with auto incremented version id. + ) -> _models.MemoryStoreDetails: + """Update a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to update. Required. :type name: str - :param evaluator_version: Required. - :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :keyword description: A human-readable description of the memory store. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default + value is None. + :paramtype metadata: dict[str, str] + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_version( - self, name: str, evaluator_version: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluatorVersion: - """Create a new EvaluatorVersion with auto incremented version id. + def update( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Update a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to update. Required. :type name: str - :param evaluator_version: Required. - :type evaluator_version: JSON + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_version( - self, name: str, evaluator_version: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluatorVersion: - """Create a new EvaluatorVersion with auto incremented version id. + def update( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Update a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to update. Required. :type name: str - :param evaluator_version: Required. - :type evaluator_version: IO[bytes] + :param body: Required. + :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def create_version( - self, name: str, evaluator_version: Union[_models.EvaluatorVersion, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluatorVersion: - """Create a new EvaluatorVersion with auto incremented version id. + def update( + self, + name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.MemoryStoreDetails: + """Update a memory store. - :param name: The name of the resource. 
Required. + :param name: The name of the memory store to update. Required. :type name: str - :param evaluator_version: Is one of the following types: EvaluatorVersion, JSON, IO[bytes] - Required. - :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion or JSON or IO[bytes] - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword description: A human-readable description of the memory store. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default + value is None. + :paramtype metadata: dict[str, str] + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6255,16 +6914,19 @@ def create_version( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) + cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) + if body is _Unset: + body = {"description": description, "metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(evaluator_version, (IOBase, bytes)): - _content = evaluator_version + if isinstance(body, (IOBase, bytes)): + _content = body else: 
- _content = json.dumps(evaluator_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluators_create_version_request( + _request = build_beta_memory_stores_update_request( name=name, foundry_features=_foundry_features, content_type=content_type, @@ -6286,120 +6948,204 @@ def create_version( response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.EvaluatorVersion, response.json()) + deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - def update_version( - self, - name: str, - version: str, - evaluator_version: _models.EvaluatorVersion, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.EvaluatorVersion: - """Update an existing EvaluatorVersion with the given version id. + @distributed_trace + def get(self, name: str, **kwargs: Any) -> _models.MemoryStoreDetails: + """Retrieve a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to retrieve. Required. :type name: str - :param version: The version of the EvaluatorVersion to update. Required. - :type version: str - :param evaluator_version: Evaluator resource. Required. 
- :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDetails :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - def update_version( - self, name: str, version: str, evaluator_version: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluatorVersion: - """Update an existing EvaluatorVersion with the given version id. + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - :param name: The name of the resource. Required. - :type name: str - :param version: The version of the EvaluatorVersion to update. Required. - :type version: str - :param evaluator_version: Evaluator resource. Required. - :type evaluator_version: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluatorVersion. 
The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion - :raises ~azure.core.exceptions.HttpResponseError: - """ + cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) - @overload - def update_version( + _request = build_beta_memory_stores_get_request( + name=name, + foundry_features=_foundry_features, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( self, - name: str, - version: str, - evaluator_version: IO[bytes], *, - content_type: str = "application/json", + limit: Optional[int] = None, + order: Optional[Union[str, _models.PageOrder]] = None, + before: Optional[str] = None, **kwargs: Any - ) -> _models.EvaluatorVersion: - """Update an existing 
EvaluatorVersion with the given version id. + ) -> ItemPaged["_models.MemoryStoreDetails"]: + """List all memory stores. - :param name: The name of the resource. Required. - :type name: str - :param version: The version of the EvaluatorVersion to update. Required. - :type version: str - :param evaluator_version: Evaluator resource. Required. - :type evaluator_version: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the + default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for + ascending order and``desc`` + for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.PageOrder + :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your + place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + Default value is None. 
+ :paramtype before: str + :return: An iterator like instance of MemoryStoreDetails + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.MemoryStoreDetails] :raises ~azure.core.exceptions.HttpResponseError: """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + ) + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.MemoryStoreDetails]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(_continuation_token=None): + + _request = build_beta_memory_stores_list_request( + foundry_features=_foundry_features, + limit=limit, + order=order, + after=_continuation_token, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.MemoryStoreDetails], + deserialized.get("data", []), + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("last_id") or None, iter(list_of_elem) + + def get_next(_continuation_token=None): + _request = prepare_request(_continuation_token) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) @distributed_trace - def update_version( - self, - name: str, - version: str, - evaluator_version: Union[_models.EvaluatorVersion, JSON, IO[bytes]], - **kwargs: Any - ) -> _models.EvaluatorVersion: - """Update an existing EvaluatorVersion with the given version id. + def delete(self, name: str, **kwargs: Any) -> _models.DeleteMemoryStoreResult: + """Delete a memory store. - :param name: The name of the resource. Required. + :param name: The name of the memory store to delete. Required. :type name: str - :param version: The version of the EvaluatorVersion to update. Required. - :type version: str - :param evaluator_version: Evaluator resource. Is one of the following types: EvaluatorVersion, - JSON, IO[bytes] Required. - :type evaluator_version: ~azure.ai.projects.models.EvaluatorVersion or JSON or IO[bytes] - :return: EvaluatorVersion. The EvaluatorVersion is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.EvaluatorVersion + :return: DeleteMemoryStoreResult. 
The DeleteMemoryStoreResult is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DeleteMemoryStoreResult :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6409,26 +7155,15 @@ def update_version( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluatorVersion] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(evaluator_version, (IOBase, bytes)): - _content = evaluator_version - else: - _content = json.dumps(evaluator_version, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.DeleteMemoryStoreResult] = kwargs.pop("cls", None) - _request = build_beta_evaluators_update_version_request( + _request = build_beta_memory_stores_delete_request( name=name, - version=version, foundry_features=_foundry_features, - content_type=content_type, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -6452,12 +7187,16 @@ def update_version( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else 
response.iter_raw() else: - deserialized = _deserialize(_models.EvaluatorVersion, response.json()) + deserialized = _deserialize(_models.DeleteMemoryStoreResult, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -6465,107 +7204,60 @@ def update_version( return deserialized # type: ignore @overload - def pending_upload( + def _search_memories( self, name: str, - version: str, - pending_upload_request: _models.PendingUploadRequest, *, + scope: str, content_type: str = "application/json", + items: Optional[List[dict[str, Any]]] = None, + previous_search_id: Optional[str] = None, + options: Optional[_models.MemorySearchOptions] = None, **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of an evaluator for a specific version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def pending_upload( - self, - name: str, - version: str, - pending_upload_request: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of an evaluator for a specific version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. 
- :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def pending_upload( - self, - name: str, - version: str, - pending_upload_request: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of an evaluator for a specific version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Required. - :type pending_upload_request: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ + ) -> _models.MemoryStoreSearchResult: ... + @overload + def _search_memories( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreSearchResult: ... + @overload + def _search_memories( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreSearchResult: ... 
@distributed_trace - def pending_upload( + def _search_memories( self, name: str, - version: str, - pending_upload_request: Union[_models.PendingUploadRequest, JSON, IO[bytes]], + body: Union[JSON, IO[bytes]] = _Unset, + *, + scope: str = _Unset, + items: Optional[List[dict[str, Any]]] = None, + previous_search_id: Optional[str] = None, + options: Optional[_models.MemorySearchOptions] = None, **kwargs: Any - ) -> _models.PendingUploadResponse: - """Start a new or get an existing pending upload of an evaluator for a specific version. + ) -> _models.MemoryStoreSearchResult: + """Search for relevant memories from a memory store based on conversation context. - :param name: The name of the resource. Required. + :param name: The name of the memory store to search. Required. :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param pending_upload_request: The pending upload request parameters. Is one of the following - types: PendingUploadRequest, JSON, IO[bytes] Required. - :type pending_upload_request: ~azure.ai.projects.models.PendingUploadRequest or JSON or - IO[bytes] - :return: PendingUploadResponse. The PendingUploadResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.PendingUploadResponse + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword scope: The namespace that logically groups and isolates memories, such as a user ID. + Required. + :paramtype scope: str + :keyword items: Items for which to search for relevant memories. Default value is None. + :paramtype items: list[dict[str, any]] + :keyword previous_search_id: The unique ID of the previous search request, enabling incremental + memory search from where the last operation left off. Default value is None. + :paramtype previous_search_id: str + :keyword options: Memory search options. Default value is None. 
+ :paramtype options: ~azure.ai.projects.models.MemorySearchOptions + :return: MemoryStoreSearchResult. The MemoryStoreSearchResult is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreSearchResult :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6579,18 +7271,27 @@ def pending_upload( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.PendingUploadResponse] = kwargs.pop("cls", None) + cls: ClsType[_models.MemoryStoreSearchResult] = kwargs.pop("cls", None) + if body is _Unset: + if scope is _Unset: + raise TypeError("missing required argument: scope") + body = { + "items": items, + "options": options, + "previous_search_id": previous_search_id, + "scope": scope, + } + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(pending_upload_request, (IOBase, bytes)): - _content = pending_upload_request + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(pending_upload_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluators_pending_upload_request( + _request = build_beta_memory_stores_search_memories_request( name=name, - version=version, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -6618,120 +7319,35 @@ def pending_upload( except (StreamConsumedError, StreamClosedError): pass 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.PendingUploadResponse, response.json()) + deserialized = _deserialize(_models.MemoryStoreSearchResult, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - def get_credentials( - self, - name: str, - version: str, - credential_request: _models.EvaluatorCredentialRequest, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with an Evaluator version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param credential_request: The credential request parameters. Required. - :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def get_credentials( - self, - name: str, - version: str, - credential_request: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with an Evaluator version. - - :param name: The name of the resource. Required. 
- :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param credential_request: The credential request parameters. Required. - :type credential_request: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: DatasetCredential. The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def get_credentials( + def _update_memories_initial( self, name: str, - version: str, - credential_request: IO[bytes], + body: Union[JSON, IO[bytes]] = _Unset, *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with an Evaluator version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param credential_request: The credential request parameters. Required. - :type credential_request: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def get_credentials( - self, - name: str, - version: str, - credential_request: Union[_models.EvaluatorCredentialRequest, JSON, IO[bytes]], + scope: str = _Unset, + items: Optional[List[dict[str, Any]]] = None, + previous_update_id: Optional[str] = None, + update_delay: Optional[int] = None, **kwargs: Any - ) -> _models.DatasetCredential: - """Get the SAS credential to access the storage account associated with an Evaluator version. - - :param name: The name of the resource. Required. - :type name: str - :param version: The specific version id of the EvaluatorVersion to operate on. Required. - :type version: str - :param credential_request: The credential request parameters. Is one of the following types: - EvaluatorCredentialRequest, JSON, IO[bytes] Required. - :type credential_request: ~azure.ai.projects.models.EvaluatorCredentialRequest or JSON or - IO[bytes] - :return: DatasetCredential. 
The DatasetCredential is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DatasetCredential - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.EVALUATIONS_V1_PREVIEW + ) -> Iterator[bytes]: + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6745,18 +7361,27 @@ def get_credentials( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.DatasetCredential] = kwargs.pop("cls", None) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + if body is _Unset: + if scope is _Unset: + raise TypeError("missing required argument: scope") + body = { + "items": items, + "previous_update_id": previous_update_id, + "scope": scope, + "update_delay": update_delay, + } + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(credential_request, (IOBase, bytes)): - _content = credential_request + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(credential_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_evaluators_get_credentials_request( + _request = build_beta_memory_stores_update_memories_request( name=name, - version=version, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -6770,110 +7395,236 @@ def get_credentials( _request.url = self._client.format_url(_request.url, **path_format_arguments) _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) + _stream = 
True pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.DatasetCredential, response.json()) + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() if _decompress else response.iter_raw() if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore + @overload + def _begin_update_memories( + self, + name: str, + *, + scope: str, + content_type: str = "application/json", + items: Optional[List[dict[str, Any]]] = None, + previous_update_id: Optional[str] = None, + update_delay: Optional[int] = None, + **kwargs: Any + ) -> LROPoller[_models.MemoryStoreUpdateCompletedResult]: ... + @overload + def _begin_update_memories( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[_models.MemoryStoreUpdateCompletedResult]: ... 
+ @overload + def _begin_update_memories( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[_models.MemoryStoreUpdateCompletedResult]: ... -class BetaInsightsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. + @distributed_trace + def _begin_update_memories( + self, + name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + scope: str = _Unset, + items: Optional[List[dict[str, Any]]] = None, + previous_update_id: Optional[str] = None, + update_delay: Optional[int] = None, + **kwargs: Any + ) -> LROPoller[_models.MemoryStoreUpdateCompletedResult]: + """Update memory store with conversation memories. - Instead, you should access the following operations through - :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`insights` attribute. - """ + :param name: The name of the memory store to update. Required. + :type name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword scope: The namespace that logically groups and isolates memories, such as a user ID. + Required. + :paramtype scope: str + :keyword items: Conversation items to be stored in memory. Default value is None. + :paramtype items: list[dict[str, any]] + :keyword previous_update_id: The unique ID of the previous update request, enabling incremental + memory updates from where the last operation left off. Default value is None. + :paramtype previous_update_id: str + :keyword update_delay: Timeout period before processing the memory update in seconds. + If a new update request is received during this period, it will cancel the current request and + reset the timeout. + Set to 0 to immediately trigger the update without delay. + Defaults to 300 (5 minutes). Default value is None. + :paramtype update_delay: int + :return: An instance of LROPoller that returns MemoryStoreUpdateCompletedResult. 
The + MemoryStoreUpdateCompletedResult is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.ai.projects.models.MemoryStoreUpdateCompletedResult] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + ) + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.MemoryStoreUpdateCompletedResult] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_memories_initial( + name=name, + body=body, + foundry_features=_foundry_features, + scope=scope, + items=items, + previous_update_id=previous_update_id, + update_delay=update_delay, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Operation-Location"] = self._deserialize( + "str", response.headers.get("Operation-Location") + 
) + + deserialized = _deserialize(_models.MemoryStoreUpdateCompletedResult, response.json().get("result", {})) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.MemoryStoreUpdateCompletedResult].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.MemoryStoreUpdateCompletedResult]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) @overload - def generate( - self, insight: _models.Insight, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Insight: - """Generate Insights. + def delete_scope( + self, name: str, *, scope: str, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDeleteScopeResult: + """Delete all memories associated with a specific scope from a memory store. - :param insight: Complete evaluation configuration including data source, evaluators, and result - settings. Required. - :type insight: ~azure.ai.projects.models.Insight + :param name: The name of the memory store. Required. + :type name: str + :keyword scope: The namespace that logically groups and isolates memories to delete, such as a + user ID. Required. + :paramtype scope: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Insight. 
The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def generate(self, insight: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Insight: - """Generate Insights. + def delete_scope( + self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDeleteScopeResult: + """Delete all memories associated with a specific scope from a memory store. - :param insight: Complete evaluation configuration including data source, evaluators, and result - settings. Required. - :type insight: JSON + :param name: The name of the memory store. Required. + :type name: str + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def generate(self, insight: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Insight: - """Generate Insights. + def delete_scope( + self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.MemoryStoreDeleteScopeResult: + """Delete all memories associated with a specific scope from a memory store. - :param insight: Complete evaluation configuration including data source, evaluators, and result - settings. Required. 
- :type insight: IO[bytes] + :param name: The name of the memory store. Required. + :type name: str + :param body: Required. + :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def generate(self, insight: Union[_models.Insight, JSON, IO[bytes]], **kwargs: Any) -> _models.Insight: - """Generate Insights. + def delete_scope( + self, name: str, body: Union[JSON, IO[bytes]] = _Unset, *, scope: str = _Unset, **kwargs: Any + ) -> _models.MemoryStoreDeleteScopeResult: + """Delete all memories associated with a specific scope from a memory store. - :param insight: Complete evaluation configuration including data source, evaluators, and result - settings. Is one of the following types: Insight, JSON, IO[bytes] Required. - :type insight: ~azure.ai.projects.models.Insight or JSON or IO[bytes] - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :param name: The name of the memory store. Required. + :type name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword scope: The namespace that logically groups and isolates memories to delete, such as a + user ID. Required. + :paramtype scope: str + :return: MemoryStoreDeleteScopeResult. 
The MemoryStoreDeleteScopeResult is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6887,16 +7638,22 @@ def generate(self, insight: Union[_models.Insight, JSON, IO[bytes]], **kwargs: A _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Insight] = kwargs.pop("cls", None) + cls: ClsType[_models.MemoryStoreDeleteScopeResult] = kwargs.pop("cls", None) - content_type = content_type or "application/json" - _content = None - if isinstance(insight, (IOBase, bytes)): - _content = insight + if body is _Unset: + if scope is _Unset: + raise TypeError("missing required argument: scope") + body = {"scope": scope} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(insight, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_insights_generate_request( + _request = build_beta_memory_stores_delete_scope_request( + name=name, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -6917,40 +7674,59 @@ def generate(self, insight: Union[_models.Insight, JSON, IO[bytes]], **kwargs: A response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [200]: if _stream: try: 
response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Insight, response.json()) + deserialized = _deserialize(_models.MemoryStoreDeleteScopeResult, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + +class BetaRedTeamsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`red_teams` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @distributed_trace - def get(self, insight_id: str, *, include_coordinates: Optional[bool] = None, **kwargs: Any) -> _models.Insight: - """Get a specific insight by Id. + def get(self, name: str, **kwargs: Any) -> _models.RedTeam: + """Get a redteam by name. - :param insight_id: The unique identifier for the insights report. Required. - :type insight_id: str - :keyword include_coordinates: Whether to include coordinates for visualization in the response. - Defaults to false. Default value is None. 
- :paramtype include_coordinates: bool - :return: Insight. The Insight is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Insight + :param name: Identifier of the red team run. Required. + :type name: str + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -6963,12 +7739,11 @@ def get(self, insight_id: str, *, include_coordinates: Optional[bool] = None, ** _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Insight] = kwargs.pop("cls", None) + cls: ClsType[_models.RedTeam] = kwargs.pop("cls", None) - _request = build_beta_insights_get_request( - insight_id=insight_id, + _request = build_beta_red_teams_get_request( + name=name, foundry_features=_foundry_features, - include_coordinates=include_coordinates, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6998,7 +7773,7 @@ def get(self, insight_id: str, *, include_coordinates: Optional[bool] = None, ** if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Insight, response.json()) + deserialized = _deserialize(_models.RedTeam, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -7006,41 +7781,20 @@ def get(self, insight_id: str, *, include_coordinates: Optional[bool] = None, ** return deserialized # type: ignore @distributed_trace - def list( - self, - *, - type: Optional[Union[str, _models.InsightType]] = None, - eval_id: Optional[str] = None, - run_id: Optional[str] = 
None, - agent_name: Optional[str] = None, - include_coordinates: Optional[bool] = None, - **kwargs: Any - ) -> ItemPaged["_models.Insight"]: - """List all insights in reverse chronological order (newest first). + def list(self, **kwargs: Any) -> ItemPaged["_models.RedTeam"]: + """List a redteam by name. - :keyword type: Filter by the type of analysis. Known values are: "EvaluationRunClusterInsight", - "AgentClusterInsight", and "EvaluationComparison". Default value is None. - :paramtype type: str or ~azure.ai.projects.models.InsightType - :keyword eval_id: Filter by the evaluation ID. Default value is None. - :paramtype eval_id: str - :keyword run_id: Filter by the evaluation run ID. Default value is None. - :paramtype run_id: str - :keyword agent_name: Filter by the agent name. Default value is None. - :paramtype agent_name: str - :keyword include_coordinates: Whether to include coordinates for visualization in the response. - Defaults to false. Default value is None. - :paramtype include_coordinates: bool - :return: An iterator like instance of Insight - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Insight] + :return: An iterator like instance of RedTeam + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.RedTeam] :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.INSIGHTS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Insight]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.RedTeam]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -7053,13 +7807,8 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_beta_insights_list_request( + 
_request = build_beta_red_teams_list_request( foundry_features=_foundry_features, - type=type, - eval_id=eval_id, - run_id=run_id, - agent_name=agent_name, - include_coordinates=include_coordinates, api_version=self._config.api_version, headers=_headers, params=_params, @@ -7099,7 +7848,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Insight], + List[_models.RedTeam], deserialized.get("value", []), ) if cls: @@ -7123,116 +7872,63 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) - -class BetaMemoryStoresOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`memory_stores` attribute. - """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - @overload def create( - self, - *, - name: str, - definition: _models.MemoryStoreDefinition, - content_type: str = "application/json", - description: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, - **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Create a memory store. + self, red_team: _models.RedTeam, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.RedTeam: + """Creates a redteam run. - :keyword name: The name of the memory store. Required. - :paramtype name: str - :keyword definition: The memory store definition. Required. 
- :paramtype definition: ~azure.ai.projects.models.MemoryStoreDefinition + :param red_team: Redteam to be run. Required. + :type red_team: ~azure.ai.projects.models.RedTeam :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword description: A human-readable description of the memory store. Default value is None. - :paramtype description: str - :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default - value is None. - :paramtype metadata: dict[str, str] - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Create a memory store. + def create(self, red_team: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.RedTeam: + """Creates a redteam run. - :param body: Required. - :type body: JSON + :param red_team: Redteam to be run. Required. + :type red_team: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Create a memory store. 
+ def create(self, red_team: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.RedTeam: + """Creates a redteam run. - :param body: Required. - :type body: IO[bytes] + :param red_team: Redteam to be run. Required. + :type red_team: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def create( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - name: str = _Unset, - definition: _models.MemoryStoreDefinition = _Unset, - description: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, - **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Create a memory store. + def create(self, red_team: Union[_models.RedTeam, JSON, IO[bytes]], **kwargs: Any) -> _models.RedTeam: + """Creates a redteam run. - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The name of the memory store. Required. - :paramtype name: str - :keyword definition: The memory store definition. Required. - :paramtype definition: ~azure.ai.projects.models.MemoryStoreDefinition - :keyword description: A human-readable description of the memory store. Default value is None. - :paramtype description: str - :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default - value is None. - :paramtype metadata: dict[str, str] - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails + :param red_team: Redteam to be run. 
Is one of the following types: RedTeam, JSON, IO[bytes] + Required. + :type red_team: ~azure.ai.projects.models.RedTeam or JSON or IO[bytes] + :return: RedTeam. The RedTeam is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RedTeam :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -7246,23 +7942,16 @@ def create( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) + cls: ClsType[_models.RedTeam] = kwargs.pop("cls", None) - if body is _Unset: - if name is _Unset: - raise TypeError("missing required argument: name") - if definition is _Unset: - raise TypeError("missing required argument: definition") - body = {"definition": definition, "description": description, "metadata": metadata, "name": name} - body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(red_team, (IOBase, bytes)): + _content = red_team else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(red_team, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_memory_stores_create_request( + _request = build_beta_red_teams_create_request( foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -7283,7 +7972,7 @@ def create( response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code 
not in [201]: if _stream: try: response.read() # Load the body in memory and close the socket @@ -7299,827 +7988,43 @@ def create( if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) + deserialized = _deserialize(_models.RedTeam, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - def update( - self, - name: str, - *, - content_type: str = "application/json", - description: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, - **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Update a memory store. - :param name: The name of the memory store to update. Required. - :type name: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword description: A human-readable description of the memory store. Default value is None. - :paramtype description: str - :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default - value is None. - :paramtype metadata: dict[str, str] - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Update a memory store. - - :param name: The name of the memory store to update. Required. - :type name: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDetails. 
The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Update a memory store. - - :param name: The name of the memory store to update. Required. - :type name: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update( - self, - name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - description: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, - **kwargs: Any - ) -> _models.MemoryStoreDetails: - """Update a memory store. - - :param name: The name of the memory store to update. Required. - :type name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword description: A human-readable description of the memory store. Default value is None. - :paramtype description: str - :keyword metadata: Arbitrary key-value metadata to associate with the memory store. Default - value is None. - :paramtype metadata: dict[str, str] - :return: MemoryStoreDetails. 
The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"description": description, "metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_beta_memory_stores_update_request( - name=name, - foundry_features=_foundry_features, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the 
body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get(self, name: str, **kwargs: Any) -> _models.MemoryStoreDetails: - """Retrieve a memory store. - - :param name: The name of the memory store to retrieve. Required. - :type name: str - :return: MemoryStoreDetails. The MemoryStoreDetails is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDetails - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.MemoryStoreDetails] = kwargs.pop("cls", None) - - _request = build_beta_memory_stores_get_request( - name=name, - foundry_features=_foundry_features, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - 
_stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.MemoryStoreDetails, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.PageOrder]] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> ItemPaged["_models.MemoryStoreDetails"]: - """List all memory stores. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the - default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for - ascending order and``desc`` - for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.projects.models.PageOrder - :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your - place in the list. - For instance, if you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page of the list. - Default value is None. 
- :paramtype before: str - :return: An iterator like instance of MemoryStoreDetails - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.MemoryStoreDetails] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[List[_models.MemoryStoreDetails]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(_continuation_token=None): - - _request = build_beta_memory_stores_list_request( - foundry_features=_foundry_features, - limit=limit, - order=order, - after=_continuation_token, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize( - List[_models.MemoryStoreDetails], - deserialized.get("data", []), - ) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("last_id") or None, iter(list_of_elem) - - def get_next(_continuation_token=None): - _request = prepare_request(_continuation_token) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - @distributed_trace - def delete(self, name: str, **kwargs: Any) -> _models.DeleteMemoryStoreResult: - """Delete a memory store. - - :param name: The name of the memory store to delete. Required. - :type name: str - :return: DeleteMemoryStoreResult. The DeleteMemoryStoreResult is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.DeleteMemoryStoreResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.DeleteMemoryStoreResult] = kwargs.pop("cls", None) - - _request = build_beta_memory_stores_delete_request( - name=name, - foundry_features=_foundry_features, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: 
- response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.DeleteMemoryStoreResult, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def _search_memories( - self, - name: str, - *, - scope: str, - content_type: str = "application/json", - items: Optional[List[dict[str, Any]]] = None, - previous_search_id: Optional[str] = None, - options: Optional[_models.MemorySearchOptions] = None, - **kwargs: Any - ) -> _models.MemoryStoreSearchResult: ... - @overload - def _search_memories( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreSearchResult: ... - @overload - def _search_memories( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreSearchResult: ... - - @distributed_trace - def _search_memories( - self, - name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - scope: str = _Unset, - items: Optional[List[dict[str, Any]]] = None, - previous_search_id: Optional[str] = None, - options: Optional[_models.MemorySearchOptions] = None, - **kwargs: Any - ) -> _models.MemoryStoreSearchResult: - """Search for relevant memories from a memory store based on conversation context. - - :param name: The name of the memory store to search. Required. - :type name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. 
- :type body: JSON or IO[bytes] - :keyword scope: The namespace that logically groups and isolates memories, such as a user ID. - Required. - :paramtype scope: str - :keyword items: Items for which to search for relevant memories. Default value is None. - :paramtype items: list[dict[str, any]] - :keyword previous_search_id: The unique ID of the previous search request, enabling incremental - memory search from where the last operation left off. Default value is None. - :paramtype previous_search_id: str - :keyword options: Memory search options. Default value is None. - :paramtype options: ~azure.ai.projects.models.MemorySearchOptions - :return: MemoryStoreSearchResult. The MemoryStoreSearchResult is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreSearchResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreSearchResult] = kwargs.pop("cls", None) - - if body is _Unset: - if scope is _Unset: - raise TypeError("missing required argument: scope") - body = { - "items": items, - "options": options, - "previous_search_id": previous_search_id, - "scope": scope, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - 
- _request = build_beta_memory_stores_search_memories_request( - name=name, - foundry_features=_foundry_features, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.MemoryStoreSearchResult, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - def _update_memories_initial( - self, - name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - scope: str = _Unset, - items: Optional[List[dict[str, Any]]] = None, - previous_update_id: Optional[str] = None, - update_delay: Optional[int] = None, - **kwargs: Any - ) -> Iterator[bytes]: - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: 
ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) - - if body is _Unset: - if scope is _Unset: - raise TypeError("missing required argument: scope") - body = { - "items": items, - "previous_update_id": previous_update_id, - "scope": scope, - "update_delay": update_delay, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_beta_memory_stores_update_memories_request( - name=name, - foundry_features=_foundry_features, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = True - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [202]: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, 
model=error) - - response_headers = {} - response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) - - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - def _begin_update_memories( - self, - name: str, - *, - scope: str, - content_type: str = "application/json", - items: Optional[List[dict[str, Any]]] = None, - previous_update_id: Optional[str] = None, - update_delay: Optional[int] = None, - **kwargs: Any - ) -> LROPoller[_models.MemoryStoreUpdateCompletedResult]: ... - @overload - def _begin_update_memories( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> LROPoller[_models.MemoryStoreUpdateCompletedResult]: ... - @overload - def _begin_update_memories( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> LROPoller[_models.MemoryStoreUpdateCompletedResult]: ... - - @distributed_trace - def _begin_update_memories( - self, - name: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - scope: str = _Unset, - items: Optional[List[dict[str, Any]]] = None, - previous_update_id: Optional[str] = None, - update_delay: Optional[int] = None, - **kwargs: Any - ) -> LROPoller[_models.MemoryStoreUpdateCompletedResult]: - """Update memory store with conversation memories. - - :param name: The name of the memory store to update. Required. - :type name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword scope: The namespace that logically groups and isolates memories, such as a user ID. - Required. - :paramtype scope: str - :keyword items: Conversation items to be stored in memory. Default value is None. 
- :paramtype items: list[dict[str, any]] - :keyword previous_update_id: The unique ID of the previous update request, enabling incremental - memory updates from where the last operation left off. Default value is None. - :paramtype previous_update_id: str - :keyword update_delay: Timeout period before processing the memory update in seconds. - If a new update request is received during this period, it will cancel the current request and - reset the timeout. - Set to 0 to immediately trigger the update without delay. - Defaults to 300 (5 minutes). Default value is None. - :paramtype update_delay: int - :return: An instance of LROPoller that returns MemoryStoreUpdateCompletedResult. The - MemoryStoreUpdateCompletedResult is compatible with MutableMapping - :rtype: - ~azure.core.polling.LROPoller[~azure.ai.projects.models.MemoryStoreUpdateCompletedResult] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW - ) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreUpdateCompletedResult] = kwargs.pop("cls", None) - polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) - lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) - cont_token: Optional[str] = kwargs.pop("continuation_token", None) - if cont_token is None: - raw_result = self._update_memories_initial( - name=name, - body=body, - foundry_features=_foundry_features, - scope=scope, - items=items, - previous_update_id=previous_update_id, - update_delay=update_delay, - content_type=content_type, - cls=lambda x, y, z: x, - headers=_headers, - params=_params, - **kwargs - ) - raw_result.http_response.read() # type: ignore - kwargs.pop("error_map", None) - - def 
get_long_running_output(pipeline_response): - response_headers = {} - response = pipeline_response.http_response - response_headers["Operation-Location"] = self._deserialize( - "str", response.headers.get("Operation-Location") - ) - - deserialized = _deserialize(_models.MemoryStoreUpdateCompletedResult, response.json().get("result", {})) - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - return deserialized - - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - if polling is True: - polling_method: PollingMethod = cast( - PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) - ) - elif polling is False: - polling_method = cast(PollingMethod, NoPolling()) - else: - polling_method = polling - if cont_token: - return LROPoller[_models.MemoryStoreUpdateCompletedResult].from_continuation_token( - polling_method=polling_method, - continuation_token=cont_token, - client=self._client, - deserialization_callback=get_long_running_output, - ) - return LROPoller[_models.MemoryStoreUpdateCompletedResult]( - self._client, raw_result, get_long_running_output, polling_method # type: ignore - ) - - @overload - def delete_scope( - self, name: str, *, scope: str, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDeleteScopeResult: - """Delete all memories associated with a specific scope from a memory store. - - :param name: The name of the memory store. Required. - :type name: str - :keyword scope: The namespace that logically groups and isolates memories to delete, such as a - user ID. Required. - :paramtype scope: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDeleteScopeResult. 
The MemoryStoreDeleteScopeResult is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def delete_scope( - self, name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDeleteScopeResult: - """Delete all memories associated with a specific scope from a memory store. - - :param name: The name of the memory store. Required. - :type name: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ +class BetaSchedulesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. - @overload - def delete_scope( - self, name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.MemoryStoreDeleteScopeResult: - """Delete all memories associated with a specific scope from a memory store. + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`schedules` attribute. + """ - :param name: The name of the memory store. Required. - :type name: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: MemoryStoreDeleteScopeResult. 
The MemoryStoreDeleteScopeResult is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def delete_scope( - self, name: str, body: Union[JSON, IO[bytes]] = _Unset, *, scope: str = _Unset, **kwargs: Any - ) -> _models.MemoryStoreDeleteScopeResult: - """Delete all memories associated with a specific scope from a memory store. + def delete(self, schedule_id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Delete a schedule. - :param name: The name of the memory store. Required. - :type name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword scope: The namespace that logically groups and isolates memories to delete, such as a - user ID. Required. - :paramtype scope: str - :return: MemoryStoreDeleteScopeResult. The MemoryStoreDeleteScopeResult is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models.MemoryStoreDeleteScopeResult + :param schedule_id: Identifier of the schedule. Required. 
+ :type schedule_id: str + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -8129,30 +8034,15 @@ def delete_scope( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.MemoryStoreDeleteScopeResult] = kwargs.pop("cls", None) - - if body is _Unset: - if scope is _Unset: - raise TypeError("missing required argument: scope") - body = {"scope": scope} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - _request = build_beta_memory_stores_delete_scope_request( - name=name, + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_beta_schedules_delete_request( + schedule_id=schedule_id, foundry_features=_foundry_features, - content_type=content_type, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -8161,67 +8051,32 @@ def delete_scope( } _request.url = self._client.format_url(_request.url, **path_format_arguments) - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) + _stream = False pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: 
disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.MemoryStoreDeleteScopeResult, response.json()) + raise HttpResponseError(response=response) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class BetaRedTeamsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`red_teams` attribute. - """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + return cls(pipeline_response, None, {}) # type: ignore @distributed_trace - def get(self, name: str, **kwargs: Any) -> _models.RedTeam: - """Get a redteam by name. + def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: + """Get a schedule by id. - :param name: Identifier of the red team run. Required. - :type name: str - :return: RedTeam. 
The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam + :param schedule_id: Identifier of the schedule. Required. + :type schedule_id: str + :return: Schedule. The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -8234,10 +8089,10 @@ def get(self, name: str, **kwargs: Any) -> _models.RedTeam: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.RedTeam] = kwargs.pop("cls", None) + cls: ClsType[_models.Schedule] = kwargs.pop("cls", None) - _request = build_beta_red_teams_get_request( - name=name, + _request = build_beta_schedules_get_request( + schedule_id=schedule_id, foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, @@ -8268,7 +8123,7 @@ def get(self, name: str, **kwargs: Any) -> _models.RedTeam: if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.RedTeam, response.json()) + deserialized = _deserialize(_models.Schedule, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -8276,20 +8131,31 @@ def get(self, name: str, **kwargs: Any) -> _models.RedTeam: return deserialized # type: ignore @distributed_trace - def list(self, **kwargs: Any) -> ItemPaged["_models.RedTeam"]: - """List a redteam by name. + def list( + self, + *, + type: Optional[Union[str, _models.ScheduleTaskType]] = None, + enabled: Optional[bool] = None, + **kwargs: Any + ) -> ItemPaged["_models.Schedule"]: + """List all schedules. 
- :return: An iterator like instance of RedTeam - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.RedTeam] + :keyword type: Filter by the type of schedule. Known values are: "Evaluation" and "Insight". + Default value is None. + :paramtype type: str or ~azure.ai.projects.models.ScheduleTaskType + :keyword enabled: Filter by the enabled status. Default value is None. + :paramtype enabled: bool + :return: An iterator like instance of Schedule + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Schedule] :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.RedTeam]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.Schedule]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -8302,8 +8168,10 @@ def list(self, **kwargs: Any) -> ItemPaged["_models.RedTeam"]: def prepare_request(next_link=None): if not next_link: - _request = build_beta_red_teams_list_request( + _request = build_beta_schedules_list_request( foundry_features=_foundry_features, + type=type, + enabled=enabled, api_version=self._config.api_version, headers=_headers, params=_params, @@ -8343,7 +8211,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.RedTeam], + List[_models.Schedule], deserialized.get("value", []), ) if cls: @@ -8368,154 +8236,72 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) @overload - def create( - self, red_team: _models.RedTeam, *, content_type: str = "application/json", **kwargs: Any - ) -> 
_models.RedTeam: - """Creates a redteam run. + def create_or_update( + self, schedule_id: str, schedule: _models.Schedule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Schedule: + """Create or update operation template. - :param red_team: Redteam to be run. Required. - :type red_team: ~azure.ai.projects.models.RedTeam + :param schedule_id: Identifier of the schedule. Required. + :type schedule_id: str + :param schedule: The resource instance. Required. + :type schedule: ~azure.ai.projects.models.Schedule :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: RedTeam. The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam + :return: Schedule. The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create(self, red_team: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.RedTeam: - """Creates a redteam run. + def create_or_update( + self, schedule_id: str, schedule: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Schedule: + """Create or update operation template. - :param red_team: Redteam to be run. Required. - :type red_team: JSON + :param schedule_id: Identifier of the schedule. Required. + :type schedule_id: str + :param schedule: The resource instance. Required. + :type schedule: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: RedTeam. The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam + :return: Schedule. 
The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create(self, red_team: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.RedTeam: - """Creates a redteam run. - - :param red_team: Redteam to be run. Required. - :type red_team: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: RedTeam. The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create(self, red_team: Union[_models.RedTeam, JSON, IO[bytes]], **kwargs: Any) -> _models.RedTeam: - """Creates a redteam run. - - :param red_team: Redteam to be run. Is one of the following types: RedTeam, JSON, IO[bytes] - Required. - :type red_team: ~azure.ai.projects.models.RedTeam or JSON or IO[bytes] - :return: RedTeam. 
The RedTeam is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.RedTeam - :raises ~azure.core.exceptions.HttpResponseError: - """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.RED_TEAMS_V1_PREVIEW - ) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.RedTeam] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(red_team, (IOBase, bytes)): - _content = red_team - else: - _content = json.dumps(red_team, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_beta_red_teams_create_request( - foundry_features=_foundry_features, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _decompress = kwargs.pop("decompress", True) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - 
error = _failsafe_deserialize( - _models.ApiErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if _stream: - deserialized = response.iter_bytes() if _decompress else response.iter_raw() - else: - deserialized = _deserialize(_models.RedTeam, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class BetaSchedulesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`schedules` attribute. - """ + def create_or_update( + self, schedule_id: str, schedule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Schedule: + """Create or update operation template. - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") - self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + :param schedule_id: Identifier of the schedule. Required. + :type schedule_id: str + :param schedule: The resource instance. Required. + :type schedule: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Schedule. The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule + :raises ~azure.core.exceptions.HttpResponseError: + """ @distributed_trace - def delete(self, schedule_id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Delete a schedule. 
+ def create_or_update( + self, schedule_id: str, schedule: Union[_models.Schedule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Schedule: + """Create or update operation template. :param schedule_id: Identifier of the schedule. Required. :type schedule_id: str - :return: None - :rtype: None + :param schedule: The resource instance. Is one of the following types: Schedule, JSON, + IO[bytes] Required. + :type schedule: ~azure.ai.projects.models.Schedule or JSON or IO[bytes] + :return: Schedule. The Schedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Schedule :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( @@ -8529,15 +8315,25 @@ def delete(self, schedule_id: str, **kwargs: Any) -> None: # pylint: disable=in } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[None] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Schedule] = kwargs.pop("cls", None) - _request = build_beta_schedules_delete_request( + content_type = content_type or "application/json" + _content = None + if isinstance(schedule, (IOBase, bytes)): + _content = schedule + else: + _content = json.dumps(schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_schedules_create_or_update_request( schedule_id=schedule_id, foundry_features=_foundry_features, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -8546,28 +8342,43 @@ def delete(self, schedule_id: str, **kwargs: Any) -> None: # pylint: disable=in } _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = False + _decompress = 
kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.Schedule, response.json()) + if cls: - return cls(pipeline_response, None, {}) # type: ignore + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore @distributed_trace - def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: - """Get a schedule by id. + def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models.ScheduleRun: + """Get a schedule run by id. - :param schedule_id: Identifier of the schedule. Required. + :param schedule_id: The unique identifier of the schedule. Required. :type schedule_id: str - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :param run_id: The unique identifier of the schedule run. Required. + :type run_id: str + :return: ScheduleRun. 
The ScheduleRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ScheduleRun :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( @@ -8584,10 +8395,11 @@ def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Schedule] = kwargs.pop("cls", None) + cls: ClsType[_models.ScheduleRun] = kwargs.pop("cls", None) - _request = build_beta_schedules_get_request( + _request = build_beta_schedules_get_run_request( schedule_id=schedule_id, + run_id=run_id, foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, @@ -8613,12 +8425,16 @@ def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Schedule, response.json()) + deserialized = _deserialize(_models.ScheduleRun, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -8626,22 +8442,25 @@ def get(self, schedule_id: str, **kwargs: Any) -> _models.Schedule: return deserialized # type: ignore @distributed_trace - def list( + def list_runs( self, + schedule_id: str, *, type: Optional[Union[str, _models.ScheduleTaskType]] = None, enabled: Optional[bool] = None, **kwargs: Any - ) -> ItemPaged["_models.Schedule"]: - """List all schedules. + ) -> ItemPaged["_models.ScheduleRun"]: + """List all schedule runs. + :param schedule_id: Identifier of the schedule. Required. 
+ :type schedule_id: str :keyword type: Filter by the type of schedule. Known values are: "Evaluation" and "Insight". Default value is None. :paramtype type: str or ~azure.ai.projects.models.ScheduleTaskType :keyword enabled: Filter by the enabled status. Default value is None. :paramtype enabled: bool - :return: An iterator like instance of Schedule - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Schedule] + :return: An iterator like instance of ScheduleRun + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.ScheduleRun] :raises ~azure.core.exceptions.HttpResponseError: """ _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( @@ -8650,7 +8469,7 @@ def list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.Schedule]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.ScheduleRun]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -8663,7 +8482,8 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_beta_schedules_list_request( + _request = build_beta_schedules_list_runs_request( + schedule_id=schedule_id, foundry_features=_foundry_features, type=type, enabled=enabled, @@ -8706,7 +8526,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.Schedule], + List[_models.ScheduleRun], deserialized.get("value", []), ) if cls: @@ -8730,77 +8550,283 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) + +class BetaToolsetsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`toolsets` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @overload - def create_or_update( - self, schedule_id: str, schedule: _models.Schedule, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Schedule: - """Create or update operation template. + def create( + self, + *, + name: str, + tools: List[_models.Tool], + content_type: str = "application/json", + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.ToolsetObject: + """Create a toolset. + + :keyword name: The name of the toolset. Required. + :paramtype name: str + :keyword tools: The list of tools to include in the toolset. Required. + :paramtype tools: list[~azure.ai.projects.models.Tool] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword description: A human-readable description of the toolset. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the toolset. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.ToolsetObject: + """Create a toolset. + + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ToolsetObject: + """Create a toolset. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: str = _Unset, + tools: List[_models.Tool] = _Unset, + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.ToolsetObject: + """Create a toolset. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: The name of the toolset. Required. + :paramtype name: str + :keyword tools: The list of tools to include in the toolset. Required. + :paramtype tools: list[~azure.ai.projects.models.Tool] + :keyword description: A human-readable description of the toolset. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the toolset. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ToolsetObject. 
The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ToolsetObject] = kwargs.pop("cls", None) + + if body is _Unset: + if name is _Unset: + raise TypeError("missing required argument: name") + if tools is _Unset: + raise TypeError("missing required argument: tools") + body = {"description": description, "metadata": metadata, "name": name, "tools": tools} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_beta_toolsets_create_request( + foundry_features=_foundry_features, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + 
response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.ToolsetObject, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update( + self, + tool_set_name: str, + *, + tools: List[_models.Tool], + content_type: str = "application/json", + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.ToolsetObject: + """Update a toolset. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :param schedule: The resource instance. Required. - :type schedule: ~azure.ai.projects.models.Schedule + :param tool_set_name: The name of the toolset to update. Required. + :type tool_set_name: str + :keyword tools: The list of tools to include in the toolset. Required. + :paramtype tools: list[~azure.ai.projects.models.Tool] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :keyword description: A human-readable description of the toolset. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the toolset. Default value is + None. 
+ :paramtype metadata: dict[str, str] + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_or_update( - self, schedule_id: str, schedule: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Schedule: - """Create or update operation template. + def update( + self, tool_set_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ToolsetObject: + """Update a toolset. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :param schedule: The resource instance. Required. - :type schedule: JSON + :param tool_set_name: The name of the toolset to update. Required. + :type tool_set_name: str + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_or_update( - self, schedule_id: str, schedule: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Schedule: - """Create or update operation template. + def update( + self, tool_set_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ToolsetObject: + """Update a toolset. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :param schedule: The resource instance. Required. - :type schedule: IO[bytes] + :param tool_set_name: The name of the toolset to update. Required. + :type tool_set_name: str + :param body: Required. 
+ :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :return: ToolsetObject. The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def create_or_update( - self, schedule_id: str, schedule: Union[_models.Schedule, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Schedule: - """Create or update operation template. + def update( + self, + tool_set_name: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tools: List[_models.Tool] = _Unset, + description: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, + **kwargs: Any + ) -> _models.ToolsetObject: + """Update a toolset. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :param schedule: The resource instance. Is one of the following types: Schedule, JSON, - IO[bytes] Required. - :type schedule: ~azure.ai.projects.models.Schedule or JSON or IO[bytes] - :return: Schedule. The Schedule is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Schedule + :param tool_set_name: The name of the toolset to update. Required. + :type tool_set_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tools: The list of tools to include in the toolset. Required. + :paramtype tools: list[~azure.ai.projects.models.Tool] + :keyword description: A human-readable description of the toolset. Default value is None. + :paramtype description: str + :keyword metadata: Arbitrary key-value metadata to associate with the toolset. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ToolsetObject. 
The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -8814,17 +8840,22 @@ def create_or_update( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Schedule] = kwargs.pop("cls", None) + cls: ClsType[_models.ToolsetObject] = kwargs.pop("cls", None) + if body is _Unset: + if tools is _Unset: + raise TypeError("missing required argument: tools") + body = {"description": description, "metadata": metadata, "tools": tools} + body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None - if isinstance(schedule, (IOBase, bytes)): - _content = schedule + if isinstance(body, (IOBase, bytes)): + _content = body else: - _content = json.dumps(schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_beta_schedules_create_or_update_request( - schedule_id=schedule_id, + _request = build_beta_toolsets_update_request( + tool_set_name=tool_set_name, foundry_features=_foundry_features, content_type=content_type, api_version=self._config.api_version, @@ -8845,19 +8876,23 @@ def create_or_update( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.Schedule, response.json()) + deserialized = _deserialize(_models.ToolsetObject, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -8865,19 +8900,17 @@ def create_or_update( return deserialized # type: ignore @distributed_trace - def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models.ScheduleRun: - """Get a schedule run by id. + def get(self, tool_set_name: str, **kwargs: Any) -> _models.ToolsetObject: + """Retrieve a toolset. - :param schedule_id: The unique identifier of the schedule. Required. - :type schedule_id: str - :param run_id: The unique identifier of the schedule run. Required. - :type run_id: str - :return: ScheduleRun. The ScheduleRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ScheduleRun + :param tool_set_name: The name of the toolset to retrieve. Required. + :type tool_set_name: str + :return: ToolsetObject. 
The ToolsetObject is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ToolsetObject :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW ) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -8890,11 +8923,10 @@ def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models.Sched _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ScheduleRun] = kwargs.pop("cls", None) + cls: ClsType[_models.ToolsetObject] = kwargs.pop("cls", None) - _request = build_beta_schedules_get_run_request( - schedule_id=schedule_id, - run_id=run_id, + _request = build_beta_toolsets_get_request( + tool_set_name=tool_set_name, foundry_features=_foundry_features, api_version=self._config.api_version, headers=_headers, @@ -8929,7 +8961,7 @@ def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models.Sched if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models.ScheduleRun, response.json()) + deserialized = _deserialize(_models.ToolsetObject, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -8937,34 +8969,41 @@ def get_run(self, schedule_id: str, run_id: str, **kwargs: Any) -> _models.Sched return deserialized # type: ignore @distributed_trace - def list_runs( + def list( self, - schedule_id: str, *, - type: Optional[Union[str, _models.ScheduleTaskType]] = None, - enabled: Optional[bool] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.PageOrder]] = None, + before: Optional[str] = None, **kwargs: Any - ) -> ItemPaged["_models.ScheduleRun"]: - """List all schedule runs. 
+ ) -> ItemPaged["_models.ToolsetObject"]: + """List all toolsets. - :param schedule_id: Identifier of the schedule. Required. - :type schedule_id: str - :keyword type: Filter by the type of schedule. Known values are: "Evaluation" and "Insight". + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the + default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the ``created_at`` timestamp of the objects. ``asc`` for + ascending order and``desc`` + for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.PageOrder + :keyword before: A cursor for use in pagination. ``before`` is an object ID that defines your + place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. Default value is None. - :paramtype type: str or ~azure.ai.projects.models.ScheduleTaskType - :keyword enabled: Filter by the enabled status. Default value is None. 
- :paramtype enabled: bool - :return: An iterator like instance of ScheduleRun - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.ScheduleRun] + :paramtype before: str + :return: An iterator like instance of ToolsetObject + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.ToolsetObject] :raises ~azure.core.exceptions.HttpResponseError: """ - _foundry_features: Literal[_FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW] = ( - _FoundryFeaturesOptInKeys.SCHEDULES_V1_PREVIEW + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW ) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.ScheduleRun]] = kwargs.pop("cls", None) + cls: ClsType[List[_models.ToolsetObject]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -8974,62 +9013,36 @@ def list_runs( } error_map.update(kwargs.pop("error_map", {}) or {}) - def prepare_request(next_link=None): - if not next_link: - - _request = build_beta_schedules_list_runs_request( - schedule_id=schedule_id, - foundry_features=_foundry_features, - type=type, - enabled=enabled, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", - urllib.parse.urljoin(next_link, _parsed_next_link.path), - 
params=_next_request_params, - headers={"Foundry-Features": _SERIALIZER.header("foundry_features", _foundry_features, "str")}, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + def prepare_request(_continuation_token=None): + _request = build_beta_toolsets_list_request( + foundry_features=_foundry_features, + limit=limit, + order=order, + after=_continuation_token, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) return _request def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() list_of_elem = _deserialize( - List[_models.ScheduleRun], - deserialized.get("value", []), + List[_models.ToolsetObject], + deserialized.get("data", []), ) if cls: list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) + return deserialized.get("last_id") or None, iter(list_of_elem) - def get_next(next_link=None): - _request = prepare_request(next_link) + def get_next(_continuation_token=None): + _request = prepare_request(_continuation_token) _stream = False pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access @@ -9039,8 +9052,81 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + error = _failsafe_deserialize( + _models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) return pipeline_response return 
ItemPaged(get_next, extract_data) + + @distributed_trace + def delete(self, tool_set_name: str, **kwargs: Any) -> _models.DeleteToolsetResponse: + """Delete a toolset. + + :param tool_set_name: The name of the toolset to delete. Required. + :type tool_set_name: str + :return: DeleteToolsetResponse. The DeleteToolsetResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.DeleteToolsetResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + _foundry_features: Literal[_FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW] = ( + _FoundryFeaturesOptInKeys.TOOLSET_V1_PREVIEW + ) + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.DeleteToolsetResponse] = kwargs.pop("cls", None) + + _request = build_beta_toolsets_delete_request( + tool_set_name=tool_set_name, + foundry_features=_foundry_features, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _decompress = kwargs.pop("decompress", True) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + 
_models.ApiErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() if _decompress else response.iter_raw() + else: + deserialized = _deserialize(_models.DeleteToolsetResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-projects/post-emitter-fixes.cmd b/sdk/ai/azure-ai-projects/post-emitter-fixes.cmd index 028d41916f63..885fc6b2348f 100644 --- a/sdk/ai/azure-ai-projects/post-emitter-fixes.cmd +++ b/sdk/ai/azure-ai-projects/post-emitter-fixes.cmd @@ -55,4 +55,4 @@ REM Add: REM _SERIALIZER = Serializer() REM _SERIALIZER.client_side_validation = False REM just before the definition of the class BetaOperations (the first class defined in the file) - + diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py index c604386b5cf7..2a15ce981d7b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py @@ -49,9 +49,7 @@ # Upload file to vector store with open(asset_file_path, "rb") as f: - file = openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, file=f - ) + file = openai_client.vector_stores.files.upload_and_poll(vector_store_id=vector_store.id, file=f) print(f"File uploaded to vector store (id: {file.id})") tool = FileSearchTool(vector_store_ids=[vector_store.id]) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py index eb17555821be..49501c4a05b7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py +++ 
b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py @@ -51,9 +51,7 @@ # Upload file to vector store try: with open(asset_file_path, "rb") as f: - file = openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, file=f - ) + file = openai_client.vector_stores.files.upload_and_poll(vector_store_id=vector_store.id, file=f) print(f"File uploaded to vector store (id: {file.id})") except FileNotFoundError: print(f"Warning: Asset file not found at {asset_file_path}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py index 0be162b59062..889d8a6b24b8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py @@ -52,9 +52,7 @@ async def main() -> None: # pylint: disable=too-many-statements # Upload file to vector store try: with open(asset_file_path, "rb") as f: - file = await openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, file=f - ) + file = await openai_client.vector_stores.files.upload_and_poll(vector_store_id=vector_store.id, file=f) print(f"File uploaded to vector store (id: {file.id})") except FileNotFoundError: print(f"Warning: Asset file not found at {asset_file_path}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py index bf0a0120299b..13f695562540 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_synthetic_data_model_evaluation.py @@ -50,7 +50,7 @@ endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"] model_deployment_name = 
os.environ["FOUNDRY_MODEL_NAME"] -with( +with ( DefaultAzureCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client, project_client.get_openai_client() as client, diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index 4125772e8a42..6d3b54a6dc97 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -204,7 +204,7 @@ async def test_async_workflow_non_streaming_with_content_recording( assert AIProjectInstrumentor().is_content_recording_enabled() assert AIProjectInstrumentor().is_instrumented() - project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) + project_client = self.create_async_client(operation_group="tracing", allow_preview=True, **kwargs) deployment_name = kwargs.get("foundry_model_name") assert deployment_name is not None diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py index 5416b3269e7b..626b9931c279 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py @@ -73,7 +73,7 @@ def test_agent_tools_samples(self, sample_path: str, **kwargs) -> None: samples_to_skip=[ "sample_memory_advanced.py", "sample_memory_basic.py", - "sample_memory_crud.py", # Sample works fine. But AI thinks something is wrong. + "sample_memory_crud.py", # Sample works fine. But AI thinks something is wrong. 
], ), ) @@ -95,7 +95,9 @@ def test_memory_samples(self, sample_path: str, **kwargs) -> None: "sample_path", get_sample_paths( "agents", - samples_to_skip=["sample_workflow_multi_agent.py"], # I see in sample spew: "Event 10 type 'response.failed'" with error message in payload "The specified agent was not found. Please verify that the agent name and version are correct". + samples_to_skip=[ + "sample_workflow_multi_agent.py" + ], # I see in sample spew: "Event 10 type 'response.failed'" with error message in payload "The specified agent was not found. Please verify that the agent name and version are correct". ), ) @servicePreparer() diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py index 18336f6122a9..14b0f827d0a4 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples_evaluations.py @@ -170,7 +170,7 @@ class TestSamplesEvaluations(AzureRecordedTestCase): "sample_evaluations_builtin_with_csv.py", # Requires CSV file upload prerequisite "sample_synthetic_data_agent_evaluation.py", # Synthetic data gen is long-running preview feature "sample_synthetic_data_model_evaluation.py", # Synthetic data gen is long-running preview feature - "sample_eval_catalog_prompt_based_evaluators.py", # For some reason fails with 500 (Internal server error) + "sample_eval_catalog_prompt_based_evaluators.py", # For some reason fails with 500 (Internal server error) ], ), ) From 090b9231addb09baba9c8c9a8d5e5bd16f62ebdd Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 12 Mar 2026 15:08:42 -0700 Subject: [PATCH 14/16] Remove --- sdk/ai/azure-ai-projects/.env.save | 79 ------------------------------ 1 file changed, 79 deletions(-) delete mode 100644 sdk/ai/azure-ai-projects/.env.save diff --git a/sdk/ai/azure-ai-projects/.env.save 
b/sdk/ai/azure-ai-projects/.env.save deleted file mode 100644 index 6e722704dada..000000000000 --- a/sdk/ai/azure-ai-projects/.env.save +++ /dev/null @@ -1,79 +0,0 @@ -# -# Environment variables that define secrets required for running tests and samples. -# -# All values should be empty by default in this template. -# -# To run tests locally on your device: -# 1. Rename the file to `.env.template` to `.env` -# 2. Fill in the values for the environment variables below (do not commit these changes to the repository!) -# 3. Run the tests (`pytest`) or run samples in the `samples` folder -# - -AZURE_AI_PROJECTS_CONSOLE_LOGGING=true - -####################################################################### -# -# Used in samples -# -# Project endpoint has the format: -# `https://.services.ai.azure.com/api/projects/` - -# Foundry link: https://ai.azure.com/nextgen/r/v9nzDiUtT8iCPPJ9mU_Itw,rg-agentsv2-ga-bugbash,,agentsv2-ga-bugbash-tip,project2/home -#FOUNDRY_PROJECT_ENDPOINT=https://agentsv2-ga-bugbash-tip.services.ai.azure.com/api/projects/project2 -#FOUNDRY_PROJECT_ENDPOINT=https://rovins-sweden.services.ai.azure.com/api/projects/swedenProject -#FOUNDRY_PROJECT_ENDPOINT=https://aoai-jep6bl5hlacma.services.ai.azure.com/api/projects/proj-jep6bl5hlacma -#FOUNDRY_PROJECT_ENDPOINT=https://openai-sdk-aiservices-tip.services.ai.azure.com/api/projects/test-project -FOUNDRY_PROJECT_ENDPOINT=https://balapv-1dp-prod-account1.services.ai.azure.com/api/projects/project1 -#FOUNDRY_PROJECT_ENDPOINT=https://aoai-3vdvhhtwxyahm.services.ai.azure.com/api/projects/proj-3vdvhhtwxyahm -#FOUNDRY_MODEL_NAME=gpt-4.1-mini -FOUNDRY_MODEL_NAME=gpt-4o-mini -FOUNDRY_AGENT_NAME=My-Agent -CONVERSATION_ID= -CONNECTION_NAME= -AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID= -AZURE_AI_PROJECTS_AZURE_RESOURCE_GROUP= -AZURE_AI_PROJECTS_AZURE_AOAI_ACCOUNT= -FABRIC_USER_INPUT= -A2A_USER_INPUT= -BING_CUSTOM_USER_INPUT= - -# Used in Memory Store samples -#MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME=gpt-4.1-mini 
-MEMORY_STORE_CHAT_MODEL_DEPLOYMENT_NAME=gpt-4o-mini -MEMORY_STORE_EMBEDDING_MODEL_DEPLOYMENT_NAME=text-embedding-3-large - -# Used in Agent tools samples -IMAGE_GENERATION_MODEL_DEPLOYMENT_NAME=gpt-image-1 -BING_PROJECT_CONNECTION_ID= -MCP_PROJECT_CONNECTION_ID= -FABRIC_PROJECT_CONNECTION_ID= -AI_SEARCH_PROJECT_CONNECTION_ID= -AI_SEARCH_INDEX_NAME= -BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID= -BING_CUSTOM_SEARCH_INSTANCE_NAME= -SHAREPOINT_PROJECT_CONNECTION_ID= -A2A_PROJECT_CONNECTION_ID= -BROWSER_AUTOMATION_PROJECT_CONNECTION_ID= -OPENAPI_PROJECT_CONNECTION_ID= -AI_SEARCH_USER_INPUT= -SHAREPOINT_USER_INPUT= - - -####################################################################### -# -# Used in tests -####################################################################### - -# Used for recording or playback -AZURE_TEST_RUN_LIVE=true -AZURE_SKIP_LIVE_RECORDING=true - -# Used in Fine-tuning tests -COMPLETED_OAI_MODEL_SFT_FINE_TUNING_JOB_ID= -COMPLETED_OAI_MODEL_RFT_FINE_TUNING_JOB_ID= -COMPLETED_OAI_MODEL_DPO_FINE_TUNING_JOB_ID= -COMPLETED_OSS_MODEL_SFT_FINE_TUNING_JOB_ID= -RUNNING_FINE_TUNING_JOB_ID= -PAUSED_FINE_TUNING_JOB_ID= -AZURE_SUBSCRIPTION_ID= -AZURE_RESOURCE_GROUP= \ No newline at end of file From d27b152054483ee533e773e8ce1c4358e0aeea67 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 13 Mar 2026 09:27:14 -0700 Subject: [PATCH 15/16] Update project status to Production/Stable --- sdk/ai/azure-ai-projects/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/pyproject.toml b/sdk/ai/azure-ai-projects/pyproject.toml index e2ae27e26534..f5babde606a2 100644 --- a/sdk/ai/azure-ai-projects/pyproject.toml +++ b/sdk/ai/azure-ai-projects/pyproject.toml @@ -17,7 +17,7 @@ authors = [ description = "Microsoft Corporation Azure AI Projects Client Library for Python" license = "MIT" classifiers = [ - "Development Status :: 4 - Beta", + "Development Status :: 5 - 
Production/Stable", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", From b70d34967c8aa1071c5b2b3f97aa7b68b47a6bdb Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 13 Mar 2026 15:14:57 -0700 Subject: [PATCH 16/16] Classes `UpdateMemoriesLROPollingMethod` and `AsyncUpdateMemoriesLROPollingMethod` should be private (#45685) --- .../aio/operations/_patch_memories_async.py | 16 +++++++++------- .../azure/ai/projects/models/_patch.py | 10 ++++------ .../ai/projects/operations/_patch_memories.py | 16 +++++++++------- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_memories_async.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_memories_async.py index 31c1b98eea09..86fa98998195 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_memories_async.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_memories_async.py @@ -20,8 +20,8 @@ ResponseUsageOutputTokensDetails, MemoryStoreUpdateCompletedResult, AsyncUpdateMemoriesLROPoller, - AsyncUpdateMemoriesLROPollingMethod, ) +from ...models._patch import _AsyncUpdateMemoriesLROPollingMethod from ...models._enums import _FoundryFeaturesOptInKeys from ._operations import JSON, _Unset, ClsType, BetaMemoryStoresOperations as GenerateBetaMemoryStoresOperations from ...operations._patch_memories import _serialize_memory_input_items @@ -296,7 +296,9 @@ async def begin_update_memories( content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.MemoryStoreUpdateCompletedResult] = kwargs.pop("cls", None) - polling: Union[bool, AsyncUpdateMemoriesLROPollingMethod] = kwargs.pop("polling", True) + polling = kwargs.pop("polling", True) + if not isinstance(polling, bool): + raise TypeError("polling must be of type bool.") 
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token: Optional[str] = kwargs.pop("continuation_token", None) if cont_token is None: @@ -348,17 +350,16 @@ def get_long_running_output(pipeline_response): "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), } - if polling is True: - polling_method: AsyncUpdateMemoriesLROPollingMethod = AsyncUpdateMemoriesLROPollingMethod( + if polling: + polling_method: _AsyncUpdateMemoriesLROPollingMethod = _AsyncUpdateMemoriesLROPollingMethod( lro_delay, path_format_arguments=path_format_arguments, headers={"Foundry-Features": _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW.value}, **kwargs, ) - elif polling is False: - polling_method = cast(AsyncUpdateMemoriesLROPollingMethod, AsyncNoPolling()) else: - polling_method = polling + polling_method = cast(_AsyncUpdateMemoriesLROPollingMethod, AsyncNoPolling()) + if cont_token: return AsyncUpdateMemoriesLROPoller.from_continuation_token( polling_method=polling_method, @@ -366,6 +367,7 @@ def get_long_running_output(pipeline_response): client=self._client, deserialization_callback=get_long_running_output, ) + return AsyncUpdateMemoriesLROPoller( self._client, raw_result, # type: ignore[possibly-undefined] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index eb69fd4e6836..ed7f7e804883 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -68,7 +68,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: _FAILED = frozenset(["failed"]) -class UpdateMemoriesLROPollingMethod(LROBasePolling): +class _UpdateMemoriesLROPollingMethod(LROBasePolling): """A custom polling method implementation for Memory Store updates.""" @property @@ -139,7 +139,7 @@ def _poll(self) -> None: 
_raise_if_bad_http_status_and_method(self._pipeline_response.http_response) -class AsyncUpdateMemoriesLROPollingMethod(AsyncLROBasePolling): +class _AsyncUpdateMemoriesLROPollingMethod(AsyncLROBasePolling): """A custom polling method implementation for Memory Store updates.""" @property @@ -213,7 +213,7 @@ async def _poll(self) -> None: class UpdateMemoriesLROPoller(LROPoller[MemoryStoreUpdateCompletedResult]): """Custom LROPoller for Memory Store update operations.""" - _polling_method: "UpdateMemoriesLROPollingMethod" + _polling_method: "_UpdateMemoriesLROPollingMethod" @property def update_id(self) -> str: @@ -263,7 +263,7 @@ def from_continuation_token( class AsyncUpdateMemoriesLROPoller(AsyncLROPoller[MemoryStoreUpdateCompletedResult]): """Custom AsyncLROPoller for Memory Store update operations.""" - _polling_method: "AsyncUpdateMemoriesLROPollingMethod" + _polling_method: "_AsyncUpdateMemoriesLROPollingMethod" @property def update_id(self) -> str: @@ -315,8 +315,6 @@ def from_continuation_token( __all__: List[str] = [ "CustomCredential", - "UpdateMemoriesLROPollingMethod", - "AsyncUpdateMemoriesLROPollingMethod", "UpdateMemoriesLROPoller", "AsyncUpdateMemoriesLROPoller", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_memories.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_memories.py index 100919e7e40a..a946cfd11080 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_memories.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_memories.py @@ -20,8 +20,8 @@ ResponseUsageOutputTokensDetails, MemoryStoreUpdateCompletedResult, UpdateMemoriesLROPoller, - UpdateMemoriesLROPollingMethod, ) +from ..models._patch import _UpdateMemoriesLROPollingMethod from ..models._enums import _FoundryFeaturesOptInKeys from ._operations import JSON, _Unset, ClsType, BetaMemoryStoresOperations as 
GenerateBetaMemoryStoresOperations from .._validation import api_version_validation @@ -331,7 +331,9 @@ def begin_update_memories( content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[MemoryStoreUpdateCompletedResult] = kwargs.pop("cls", None) - polling: Union[bool, UpdateMemoriesLROPollingMethod] = kwargs.pop("polling", True) + polling = kwargs.pop("polling", True) + if not isinstance(polling, bool): + raise TypeError("polling must be of type bool.") lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token: Optional[str] = kwargs.pop("continuation_token", None) if cont_token is None: @@ -383,17 +385,16 @@ def get_long_running_output(pipeline_response): "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), } - if polling is True: - polling_method: UpdateMemoriesLROPollingMethod = UpdateMemoriesLROPollingMethod( + if polling: + polling_method: _UpdateMemoriesLROPollingMethod = _UpdateMemoriesLROPollingMethod( lro_delay, path_format_arguments=path_format_arguments, headers={"Foundry-Features": _FoundryFeaturesOptInKeys.MEMORY_STORES_V1_PREVIEW.value}, **kwargs, ) - elif polling is False: - polling_method = cast(UpdateMemoriesLROPollingMethod, NoPolling()) else: - polling_method = polling + polling_method = cast(_UpdateMemoriesLROPollingMethod, NoPolling()) + if cont_token: return UpdateMemoriesLROPoller.from_continuation_token( polling_method=polling_method, @@ -401,6 +402,7 @@ def get_long_running_output(pipeline_response): client=self._client, deserialization_callback=get_long_running_output, ) + return UpdateMemoriesLROPoller( self._client, raw_result, # type: ignore[possibly-undefined]