@@ -7,8 +7,16 @@

from azure.ai.evaluation._common._experimental import experimental
from azure.ai.evaluation._constants import DEFAULT_AOAI_API_VERSION, TokenScope
from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
from azure.ai.evaluation._exceptions import (
ErrorBlame,
ErrorCategory,
ErrorTarget,
EvaluationException,
)
from azure.ai.evaluation._model_configurations import (
AzureOpenAIModelConfiguration,
OpenAIModelConfiguration,
)
from azure.ai.evaluation._user_agent import UserAgentSingleton
from azure.core.credentials import TokenCredential

@@ -63,14 +71,18 @@ def _validate_model_config(self) -> None:
"""Validate the model configuration that this grader wrapper is using."""
msg = None
if self._is_azure_model_config(self._model_config):
if not any(auth for auth in (self._model_config.get("api_key"), self._credential)):
if not any(
auth for auth in (self._model_config.get("api_key"), self._credential)
):
msg = (
f"{type(self).__name__}: Requires an api_key in the supplied model_config, "
+ "or providing a credential to the grader's __init__ method. "
)

else:
if "api_key" not in self._model_config or not self._model_config.get("api_key"):
if "api_key" not in self._model_config or not self._model_config.get(
"api_key"
):
msg = f"{type(self).__name__}: Requires an api_key in the supplied model_config."

if msg is None:
@@ -103,7 +115,9 @@ def get_client(self) -> Any:
:rtype: [~openai.OpenAI, ~openai.AzureOpenAI]
"""
default_headers = {"User-Agent": UserAgentSingleton().value}
model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration] = self._model_config
model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration] = (
self._model_config
)
api_key: Optional[str] = model_config.get("api_key")

if self._is_azure_model_config(model_config):
@@ -115,7 +129,9 @@ def get_client(self) -> Any:
api_key=api_key, # Default-style access to appease linters.
api_version=DEFAULT_AOAI_API_VERSION, # Force a known working version
azure_deployment=model_config.get("azure_deployment", ""),
azure_ad_token_provider=self._get_token_provider(self._credential) if not api_key else None,
azure_ad_token_provider=(
self._get_token_provider(self._credential) if not api_key else None
),
default_headers=default_headers,
)
from openai import OpenAI
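
The reflowed _validate_model_config and get_client hunks above keep the existing contract: an Azure model config needs either an api_key or a credential passed to the grader, and get_client only wires up azure_ad_token_provider when no key is present. A minimal sketch of the two paths follows; it assumes AzureOpenAIGrader is re-exported from azure.ai.evaluation, and the endpoint, deployment, key, and grader payload values are placeholders, not values from this PR.

from azure.identity import DefaultAzureCredential
from openai.types.graders import StringCheckGrader
from azure.ai.evaluation import AzureOpenAIGrader  # assumed public re-export

# Placeholder grader payload; any supported OpenAI grader config would do here.
check = StringCheckGrader(
    name="exact-match",
    operation="eq",
    input="{{ sample.output_text }}",
    reference="{{ item.expected }}",
    type="string_check",
)

# Path 1: api_key supplied -> validation passes, get_client authenticates with the key.
keyed = AzureOpenAIGrader(
    model_config={
        "azure_endpoint": "https://<resource>.openai.azure.com",  # placeholder
        "azure_deployment": "gpt-4o",                             # placeholder
        "api_key": "<key>",                                       # placeholder
    },
    grader_config=check,
)

# Path 2: no api_key but a TokenCredential -> validation passes and get_client
# builds the AzureOpenAI client with azure_ad_token_provider instead of a key.
keyless = AzureOpenAIGrader(
    model_config={
        "azure_endpoint": "https://<resource>.openai.azure.com",  # placeholder
        "azure_deployment": "gpt-4o",                             # placeholder
    },
    grader_config=check,
    credential=DefaultAzureCredential(),
)
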
@@ -6,7 +6,10 @@
from openai.types.graders import LabelModelGrader

from azure.ai.evaluation._common._experimental import experimental
from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
from azure.ai.evaluation._model_configurations import (
AzureOpenAIModelConfiguration,
OpenAIModelConfiguration,
)
from azure.core.credentials import TokenCredential

from .aoai_grader import AzureOpenAIGrader
@@ -65,4 +68,9 @@ def __init__(
passing_labels=passing_labels,
type=AzureOpenAILabelGrader._type,
)
super().__init__(model_config=model_config, grader_config=grader, credential=credential, **kwargs)
super().__init__(
model_config=model_config,
grader_config=grader,
credential=credential,
**kwargs
)
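
For context on the reflowed constructor call above, here is a hedged usage sketch. Only model_config, passing_labels, and credential are visible in this hunk; the input, labels, model, and name parameters are assumptions based on the label grader's public signature, and the endpoint and deployment values are placeholders.

from azure.identity import DefaultAzureCredential
from azure.ai.evaluation import AzureOpenAILabelGrader  # assumed public re-export

grader = AzureOpenAILabelGrader(
    model_config={
        "azure_endpoint": "https://<resource>.openai.azure.com",  # placeholder
        "azure_deployment": "gpt-4o",                             # placeholder
    },
    credential=DefaultAzureCredential(),  # accepted in place of api_key per the base-class change
    input=[{"role": "user", "content": "Classify the sentiment of: {{ item.response }}"}],  # assumed parameter
    labels=["positive", "neutral", "negative"],                                             # assumed parameter
    passing_labels=["positive"],
    model="gpt-4o",          # assumed parameter
    name="sentiment-label",  # assumed parameter
)
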
@@ -6,7 +6,10 @@
from openai.types.graders import PythonGrader

from azure.ai.evaluation._common._experimental import experimental
from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
from azure.ai.evaluation._model_configurations import (
AzureOpenAIModelConfiguration,
OpenAIModelConfiguration,
)
from azure.core.credentials import TokenCredential

from .aoai_grader import AzureOpenAIGrader
@@ -83,4 +86,9 @@ def __init__(
type=AzureOpenAIPythonGrader._type,
)

super().__init__(model_config=model_config, grader_config=grader, credential=credential, **kwargs)
super().__init__(
model_config=model_config,
grader_config=grader,
credential=credential,
**kwargs,
)
@@ -6,7 +6,10 @@
from openai.types.graders import ScoreModelGrader

from azure.ai.evaluation._common._experimental import experimental
from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
from azure.ai.evaluation._model_configurations import (
AzureOpenAIModelConfiguration,
OpenAIModelConfiguration,
)
from azure.core.credentials import TokenCredential

from .aoai_grader import AzureOpenAIGrader
@@ -67,21 +70,30 @@ def __init__(
# Validate range and pass_threshold
if range is not None:
if len(range) != 2 or range[0] >= range[1]:
raise ValueError("range must be a list of two numbers [min, max] where min < max")
raise ValueError(
"range must be a list of two numbers [min, max] where min < max"
)
else:
range = [0.0, 1.0] # Default range

if pass_threshold is not None:
if range and (pass_threshold < range[0] or pass_threshold > range[1]):
raise ValueError(f"pass_threshold {pass_threshold} must be within range {range}")
raise ValueError(
f"pass_threshold {pass_threshold} must be within range {range}"
)
else:
pass_threshold = (range[0] + range[1]) / 2 # Default to midpoint

# Store pass_threshold as instance attribute
self.pass_threshold = pass_threshold

# Create OpenAI ScoreModelGrader instance
grader_kwargs = {"input": input, "model": model, "name": name, "type": AzureOpenAIScoreModelGrader._type}
grader_kwargs = {
"input": input,
"model": model,
"name": name,
"type": AzureOpenAIScoreModelGrader._type,
}

if range is not None:
grader_kwargs["range"] = range
@@ -91,4 +103,9 @@

grader = ScoreModelGrader(**grader_kwargs)

super().__init__(model_config=model_config, grader_config=grader, credential=credential, **kwargs)
super().__init__(
model_config=model_config,
grader_config=grader,
credential=credential,
**kwargs,
)
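
A worked example of the defaults encoded in the reflowed validation block above. The keyword names input, model, name, range, and pass_threshold come from the grader_kwargs and checks shown in this hunk; the endpoint, deployment, key, and message values are placeholders, and the input shape is assumed.

from azure.ai.evaluation import AzureOpenAIScoreModelGrader  # assumed public re-export

grader = AzureOpenAIScoreModelGrader(
    model_config={
        "azure_endpoint": "https://<resource>.openai.azure.com",  # placeholder
        "azure_deployment": "gpt-4o",                             # placeholder
        "api_key": "<key>",                                       # placeholder
    },
    input=[{"role": "user", "content": "Score this answer from 0 to 4: {{ item.response }}"}],  # shape assumed
    model="gpt-4o",
    name="answer-quality",
    range=[0.0, 4.0],
    # pass_threshold omitted -> defaults to the midpoint: (0.0 + 4.0) / 2 == 2.0
)
assert grader.pass_threshold == 2.0

# Omitting range as well falls back to [0.0, 1.0] (midpoint 0.5), and a
# pass_threshold outside the supplied range raises ValueError.
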
@@ -7,7 +7,10 @@
from typing_extensions import Literal

from azure.ai.evaluation._common._experimental import experimental
from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
from azure.ai.evaluation._model_configurations import (
AzureOpenAIModelConfiguration,
OpenAIModelConfiguration,
)
from azure.core.credentials import TokenCredential

from .aoai_grader import AzureOpenAIGrader
@@ -63,4 +66,9 @@ def __init__(
reference=reference,
type=AzureOpenAIStringCheckGrader._type,
)
super().__init__(model_config=model_config, grader_config=grader, credential=credential, **kwargs)
super().__init__(
model_config=model_config,
grader_config=grader,
credential=credential,
**kwargs
)
@@ -7,7 +7,10 @@
from typing_extensions import Literal

from azure.ai.evaluation._common._experimental import experimental
from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
from azure.ai.evaluation._model_configurations import (
AzureOpenAIModelConfiguration,
OpenAIModelConfiguration,
)
from azure.core.credentials import TokenCredential

from .aoai_grader import AzureOpenAIGrader
@@ -77,4 +80,9 @@ def __init__(
reference=reference,
type=AzureOpenAITextSimilarityGrader._type,
)
super().__init__(model_config=model_config, grader_config=grader, credential=credential, **kwargs)
super().__init__(
model_config=model_config,
grader_config=grader,
credential=credential,
**kwargs
)
@@ -10,7 +10,12 @@

from azure.core.credentials import TokenCredential, AzureSasCredential, AccessToken
from azure.core.rest import HttpResponse
from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
from azure.ai.evaluation._exceptions import (
ErrorBlame,
ErrorCategory,
ErrorTarget,
EvaluationException,
)
from azure.ai.evaluation._http_utils import HttpPipeline, get_http_client
from azure.ai.evaluation._azure._token_manager import AzureMLTokenManager
from azure.ai.evaluation._constants import TokenScope
@@ -83,7 +88,12 @@ def workspace_get_default_datastore(
stores_response = self._http_client.request(
method="GET",
url=url,
params={QUERY_KEY_API_VERSION: self._api_version, "isDefault": True, "count": 1, "orderByAsc": "false"},
params={
QUERY_KEY_API_VERSION: self._api_version,
"isDefault": True,
"count": 1,
"orderByAsc": "false",
},
headers=headers,
)
self._throw_on_http_error(stores_response, "list default workspace datastore")
@@ -108,7 +118,11 @@ def workspace_get_default_datastore(
blob_store_credential = self.get_credential()
else:
url = self._generate_path(
*PATH_ML_WORKSPACES, workspace_name, "datastores", "workspaceblobstore", "listSecrets"
*PATH_ML_WORKSPACES,
workspace_name,
"datastores",
"workspaceblobstore",
"listSecrets",
)
secrets_response = self._http_client.request(
method="POST",
@@ -145,7 +159,9 @@ def workspace_get_default_datastore(
blame=ErrorBlame.SYSTEM_ERROR,
)

return BlobStoreInfo(name, account_name, endpoint, container_name, blob_store_credential)
return BlobStoreInfo(
name, account_name, endpoint, container_name, blob_store_credential
)

def workspace_get_info(self, workspace_name: str) -> Workspace:
# https://learn.microsoft.com/rest/api/azureml/workspaces/get?view=rest-azureml-2024-10-01
@@ -156,7 +172,9 @@ def workspace_get_info(self, workspace_name: str) -> Workspace:
headers=self._get_headers(),
)

self._throw_on_http_error(workspace_response, f"get '{workspace_name}' workspace")
self._throw_on_http_error(
workspace_response, f"get '{workspace_name}' workspace"
)
workspace = Workspace.deserialize(workspace_response)
return workspace

@@ -166,14 +184,20 @@ def _get_token_manager(self) -> AzureMLTokenManager:
with self._lock:
if self._token_manager is None:
self._token_manager = AzureMLTokenManager(
TokenScope.DEFAULT_AZURE_MANAGEMENT.value, self._logger, credential=self._credential
TokenScope.DEFAULT_AZURE_MANAGEMENT.value,
self._logger,
credential=self._credential,
)
self._credential = self._token_manager.credential

return self._token_manager

@staticmethod
def _throw_on_http_error(response: HttpResponse, description: str, valid_status: Optional[Set[int]] = None) -> None:
def _throw_on_http_error(
response: HttpResponse,
description: str,
valid_status: Optional[Set[int]] = None,
) -> None:
if valid_status and (response.status_code in valid_status):
return
if response.status_code >= 200 and response.status_code < 300:
Expand Down Expand Up @@ -201,4 +225,7 @@ def _generate_path(self, *paths: str) -> str:
return url

def _get_headers(self) -> Dict[str, str]:
return {"Authorization": f"Bearer {self.get_token().token}", "Content-Type": "application/json"}
return {
"Authorization": f"Bearer {self.get_token().token}",
"Content-Type": "application/json",
}
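
Finally, the reflowed _throw_on_http_error signature keeps the same contract, sketched below. Treat the module and class names as assumptions: this diff shows the client's methods but never names the class, and the raise on the failure path sits just below the lines shown, so EvaluationException is inferred from this module's imports.

from unittest.mock import Mock

from azure.ai.evaluation._exceptions import EvaluationException
from azure.ai.evaluation._azure._clients import LiteMLClient  # assumed module/class name

ok = Mock(status_code=204, text=lambda: "")
missing = Mock(status_code=404, text=lambda: "not found")

# 2xx responses and explicitly whitelisted status codes return silently.
LiteMLClient._throw_on_http_error(ok, "get workspace")
LiteMLClient._throw_on_http_error(missing, "get datastore", valid_status={404})

# Anything else raises, presumably EvaluationException (raise not shown in this hunk).
try:
    LiteMLClient._throw_on_http_error(missing, "get datastore")
except EvaluationException as exc:
    print(f"request failed as expected: {exc}")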