@@ -1,11 +1,11 @@
# pylint: disable=line-too-long,useless-suppression,too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long,useless-suppression,too-many-lines,duplicate-code,missing-module-docstring,missing-class-docstring,missing-function-docstring,consider-using-f-string,invalid-name,too-many-locals,too-many-branches

# pyright: reportUnnecessaryTypeIgnoreComment=false

@@ -787,7 +787,7 @@ def serialize_data(self, data, data_type, **kwargs):

# If dependencies is empty, try with current data class
# It has to be a subclass of Enum anyway
enum_type = self.dependencies.get(data_type, data.__class__)
enum_type = self.dependencies.get(data_type, cast(type, data.__class__))
if issubclass(enum_type, Enum):
return Serializer.serialize_enum(data, enum_obj=enum_type)
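
A note on the `cast` in the hunk above: it is presumably there to satisfy a type checker. `data.__class__` is not statically known to be a plain `type`, so pinning the `dict.get` fallback with `cast(type, ...)` keeps the lookup result typed as `type` for the `issubclass` call that follows. A minimal sketch of the pattern (names are hypothetical):

```python
from enum import Enum
from typing import Any, Dict, cast


def resolve_enum_type(dependencies: Dict[str, type], data_type: str, data: Any) -> type:
    """Look up a registered type by name, falling back to the value's own class."""
    # cast() has no runtime effect; it only tells the checker the fallback is a
    # plain `type`, so dict.get() returns `type` rather than a looser union.
    return dependencies.get(data_type, cast(type, data.__class__))


class Color(Enum):
    RED = "red"


print(resolve_enum_type({}, "Color", Color.RED))  # <enum 'Color'>
```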

@@ -821,13 +821,20 @@ def serialize_basic(cls, data, data_type, **kwargs):
:param str data_type: Type of object in the iterable.
:rtype: str, int, float, bool
:return: serialized object
:raises TypeError: raise if data_type is not one of str, int, float, bool.
"""
custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
if custom_serializer:
return custom_serializer(data)
if data_type == "str":
return cls.serialize_unicode(data)
return eval(data_type)(data) # nosec # pylint: disable=eval-used
if data_type == "int":
return int(data)
if data_type == "float":
return float(data)
if data_type == "bool":
return bool(data)
raise TypeError("Unknown basic data type: {}".format(data_type))

@classmethod
def serialize_unicode(cls, data):
@@ -1757,7 +1764,7 @@ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return
:param str data_type: deserialization data type.
:return: Deserialized basic type.
:rtype: str, int, float or bool
:raises TypeError: if string format is not valid.
:raises TypeError: if string format is not valid or data_type is not one of str, int, float, bool.
"""
# If we're here, data is supposed to be a basic type.
# If it's still an XML node, take the text
@@ -1783,7 +1790,11 @@ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return

if data_type == "str":
return self.deserialize_unicode(attr)
return eval(data_type)(attr) # nosec # pylint: disable=eval-used
if data_type == "int":
return int(attr)
if data_type == "float":
return float(attr)
raise TypeError("Unknown basic data type: {}".format(data_type))

@staticmethod
def deserialize_unicode(data):
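
The substantive change in this file: `serialize_basic` and `deserialize_basic` previously finished with `eval(data_type)(value)`, resolving a builtin by name at runtime, which required both the `# nosec` marker and a pylint suppression. The regenerated code dispatches explicitly over the supported type names and raises `TypeError` for anything else (`bool` is absent from the deserialize side because it already gets dedicated handling earlier in that function). An equivalent table-driven formulation of the same idea, offered as a sketch rather than the generated code's exact shape:

```python
def convert_basic(value, data_type: str):
    """Convert `value` to one of the four supported basic types, without eval()."""
    converters = {"str": str, "int": int, "float": float, "bool": bool}
    try:
        return converters[data_type](value)
    except KeyError:
        raise TypeError("Unknown basic data type: {}".format(data_type)) from None


print(convert_basic("3", "int"))   # 3
print(convert_basic(0, "bool"))    # False
# convert_basic("x", "complex")    # raises TypeError instead of eval'ing "complex"
```

Either shape keeps the conversion set closed, which is friendlier to bandit and static analysis than an `eval` on a caller-supplied string.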
@@ -1,4 +1,4 @@
directory: specification/cognitiveservices/Language.AnalyzeText-authoring
commit: a525e37a2b1546c47a778eda7106cfd7c04051e1
directory: specification/cognitiveservices/data-plane/LanguageAnalyzeTextAuthoring
commit: 483301e475684d809bdc7e81aa786e84cfb4236f
repo: Azure/azure-rest-api-specs
additionalDirectories:
25 changes: 5 additions & 20 deletions sdk/cognitivelanguage/azure-ai-textanalytics/README.md
@@ -69,7 +69,7 @@ from azure.ai.textanalytics import TextAnalysisClient
endpoint = os.environ["AZURE_TEXT_ENDPOINT"]
key = os.environ["AZURE_TEXT_KEY"]

text_client = TextAnalysisClient(endpoint, AzureKeyCredential(key))
text_client = TextAnalysisClient(endpoint, AzureKeyCredential(key)) # pylint:disable=unused-variable
```

<!-- END SNIPPET -->
@@ -126,7 +126,7 @@ from azure.ai.textanalytics import TextAnalysisClient
endpoint = os.environ["AZURE_TEXT_ENDPOINT"]
key = os.environ["AZURE_TEXT_KEY"]

text_client = TextAnalysisClient(endpoint, AzureKeyCredential(key))
text_client = TextAnalysisClient(endpoint, AzureKeyCredential(key)) # pylint:disable=unused-variable
```

<!-- END SNIPPET -->
@@ -164,7 +164,7 @@ from azure.identity import DefaultAzureCredential
endpoint = os.environ["AZURE_TEXT_ENDPOINT"]
credential = DefaultAzureCredential()

text_client = TextAnalysisClient(endpoint, credential=credential)
text_client = TextAnalysisClient(endpoint, credential=credential) # pylint:disable=unused-variable
```

<!-- END SNIPPET -->
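
The three snippets above now end with `# pylint:disable=unused-variable` because README snippets are extracted from linted sample files, where a client that is constructed but never used would otherwise fail the lint run. For orientation, a hedged sketch of exercising the client afterwards through the JSON-body overload of `begin_analyze_text_job` that appears later in this diff; the payload shape follows the public Language analyze-text jobs schema and is illustrative, not verified against this exact version:

```python
# Sketch only: assumes `text_client` from the snippet above.
poller = text_client.begin_analyze_text_job(
    {
        "analysisInput": {
            "documents": [{"id": "1", "language": "en", "text": "Hello world"}]
        },
        "tasks": [{"kind": "KeyPhraseExtraction", "taskName": "kp"}],
    }
)
for action_result in poller.result():  # the patched client pages TextActions
    print(action_result)
```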
@@ -803,19 +803,12 @@ def sample_analyze_healthcare_entities():
print(f" Offset: {entity.offset}")
print(f" Length: {entity.length}")
print(f" Confidence score: {entity.confidence_score}")
if entity.links:
for link in entity.links:
print(f" Link ID: {link.id}")
print(f" Data source: {link.data_source}")
print()

# Relations
print("Relations:")
for relation in doc.relations or []:
print(f" Relation type: {relation.relation_type}")
for rel_entity in relation.entities or []:
print(f" Role: {rel_entity.role}")
print(f" Ref: {rel_entity.ref}")
print()
else:
# Other action kinds, if present
Expand All @@ -824,8 +817,8 @@ def sample_analyze_healthcare_entities():
f"\n[Non-healthcare action] name={op_result.task_name}, "
f"status={op_result.status}, kind={op_result.kind}"
)
except Exception:
print("\n[Non-healthcare action present]")
except (AttributeError, TypeError) as e:
print(f"\n[Non-healthcare action present] Error: {e}")
```

<!-- END SNIPPET -->
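
A small robustness fix in this sample: the bare `except Exception` with a fixed message is narrowed to `except (AttributeError, TypeError) as e`, the two failures actually expected when an action result lacks the attributes being read, and the message now surfaces the error. A minimal sketch of the pattern with a hypothetical stand-in object:

```python
class UnknownActionResult:
    """Hypothetical stand-in for an action result of an unanticipated kind."""

    status = "succeeded"  # note: no task_name or kind attributes


def describe(op_result):
    try:
        print(f"name={op_result.task_name}, status={op_result.status}, kind={op_result.kind}")
    except (AttributeError, TypeError) as e:
        # Catch only shape mismatches; anything else (auth, network, ...) propagates.
        print(f"[Non-healthcare action present] Error: {e}")


describe(UnknownActionResult())  # takes the AttributeError path
```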
@@ -854,7 +847,6 @@ Note: Healthcare Entities Analysis is only available with API version v3.1 and n
import os

from azure.identity import DefaultAzureCredential
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalysisClient
from azure.ai.textanalytics.models import (
MultiLanguageTextInput,
@@ -863,7 +855,6 @@ from azure.ai.textanalytics.models import (
KeyPhraseLROTask,
EntityRecognitionOperationResult,
KeyPhraseExtractionOperationResult,
EntityTag,
)


@@ -931,12 +922,6 @@ def sample_analyze():
print(f" Type: {entity.type}")
if hasattr(entity, "subcategory") and entity.subcategory:
print(f" Subcategory: {entity.subcategory}")
if hasattr(entity, "tags") and entity.tags:
print(" Tags:")
for tag in entity.tags:
if isinstance(tag, EntityTag):
print(f" TagName: {tag.name}")
print(f" TagConfidenceScore: {tag.confidence_score}")
print(f" Confidence score: {entity.confidence_score}")
print()
for err in action_result.results.errors:
2 changes: 1 addition & 1 deletion sdk/cognitivelanguage/azure-ai-textanalytics/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "python",
"TagPrefix": "python/cognitivelanguage/azure-ai-textanalytics",
"Tag": "python/cognitivelanguage/azure-ai-textanalytics_c820b61b7e"
"Tag": "python/cognitivelanguage/azure-ai-textanalytics_95abc8a0dd"
}
@@ -280,7 +280,10 @@ def analyze_text(
except (StreamConsumedError, StreamClosedError):
pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = _failsafe_deserialize(_models.ErrorResponse, response)
error = _failsafe_deserialize(
_models.ErrorResponse,
response,
)
raise HttpResponseError(response=response, model=error)

if _stream:
@@ -365,7 +368,10 @@ def get_job_status(
except (StreamConsumedError, StreamClosedError):
pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = _failsafe_deserialize(_models.ErrorResponse, response)
error = _failsafe_deserialize(
_models.ErrorResponse,
response,
)
raise HttpResponseError(response=response, model=error)

if _stream:
@@ -448,7 +454,10 @@ def _analyze_text_job_initial(
except (StreamConsumedError, StreamClosedError):
pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = _failsafe_deserialize(_models.ErrorResponse, response)
error = _failsafe_deserialize(
_models.ErrorResponse,
response,
)
raise HttpResponseError(response=response, model=error)

response_headers = {}
@@ -462,7 +471,7 @@ def _analyze_text_job_initial(
return deserialized # type: ignore

@overload
def _begin_analyze_text_job(
def begin_analyze_text_job(
self,
*,
text_input: _models.MultiLanguageTextInput,
@@ -497,7 +506,7 @@ def _begin_analyze_text_job(
"""

@overload
def _begin_analyze_text_job(
def begin_analyze_text_job(
self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
) -> LROPoller[None]:
"""Submit a collection of text documents for analysis. Specify one or more unique tasks to be
@@ -514,7 +523,7 @@ def _begin_analyze_text_job(
"""

@overload
def _begin_analyze_text_job(
def begin_analyze_text_job(
self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> LROPoller[None]:
"""Submit a collection of text documents for analysis. Specify one or more unique tasks to be
@@ -531,7 +540,7 @@ def _begin_analyze_text_job(
"""

@distributed_trace
def _begin_analyze_text_job(
def begin_analyze_text_job(
self,
body: Union[JSON, IO[bytes]] = _Unset,
*,
@@ -651,7 +660,10 @@ def _cancel_job_initial(self, job_id: str, **kwargs: Any) -> Iterator[bytes]:
except (StreamConsumedError, StreamClosedError):
pass
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = _failsafe_deserialize(_models.ErrorResponse, response)
error = _failsafe_deserialize(
_models.ErrorResponse,
response,
)
raise HttpResponseError(response=response, model=error)

response_headers = {}
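
Two things happen in the operations file above. First, the one-line `_failsafe_deserialize(_models.ErrorResponse, response)` call sites are merely re-wrapped by the formatter; behavior is unchanged. Second, the `_begin_analyze_text_job` overloads and implementation lose their leading underscore, making the long-running operation public on the generated client; the hand-written client below overrides it, which is what forces the `# type: ignore[override]` annotations there. For readers new to the generated error path, a simplified sketch of the recurring `map_error` / `_failsafe_deserialize` / `HttpResponseError` sequence; the deserializer is passed in here because its real signature lives in the generated utilities, so treat that parameter as an assumption:

```python
from azure.core.exceptions import HttpResponseError, map_error


def raise_for_error_response(response, error_map, failsafe_deserialize, error_model):
    """Sketch of the generated error path; names mirror the diff above."""
    # 1. Raise a specific mapped exception if the status code is registered.
    map_error(status_code=response.status_code, response=response, error_map=error_map)
    # 2. Best-effort parse of the error body; a "failsafe" deserializer returns
    #    None instead of raising when the payload does not match the model.
    error = failsafe_deserialize(error_model, response)
    # 3. Fall through to the generic HTTP error, attaching the typed model.
    raise HttpResponseError(response=response, model=error)
```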
@@ -141,7 +141,7 @@ def __init__(
kwargs["api_version"] = api_version
super().__init__(endpoint=endpoint, credential=credential, **kwargs)

@overload
@overload # type: ignore[override]
def begin_analyze_text_job(
self,
*,
@@ -178,7 +178,7 @@ def begin_analyze_text_job(
"""

@overload
def begin_analyze_text_job(
def begin_analyze_text_job( # type: ignore[override]
self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
) -> AnalyzeTextLROPoller[ItemPaged["TextActions"]]:
"""Submit a collection of text documents for analysis. Specify one or more unique tasks to be
@@ -196,7 +196,7 @@ def begin_analyze_text_job(
"""

@overload
def begin_analyze_text_job(
def begin_analyze_text_job( # type: ignore[override]
self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> AnalyzeTextLROPoller[ItemPaged["TextActions"]]:
"""Submit a collection of text documents for analysis. Specify one or more unique tasks to be
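
The `# type: ignore[override]` markers above exist because this hand-written client overrides the now-public `begin_analyze_text_job` with a richer return type, `AnalyzeTextLROPoller[ItemPaged["TextActions"]]`, where the generated base method returns `LROPoller[None]`; since `None` admits no other subtype, mypy reports an incompatible override. A self-contained reproduction of the complaint, with stand-in names:

```python
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class LROPoller(Generic[T]):
    """Stand-in for azure.core.polling.LROPoller."""


class Generated:
    def begin_job(self, **kwargs: Any) -> LROPoller[None]:
        return LROPoller()


class Patched(Generated):
    # LROPoller[dict] is not a subtype of LROPoller[None], so mypy flags this
    # override as incompatible unless it is suppressed, as in the diff above.
    def begin_job(self, **kwargs: Any) -> LROPoller[dict]:  # type: ignore[override]
        return LROPoller()
```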