Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 3 additions & 6 deletions vertexai/_genai/_evals_metric_handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1026,11 +1026,9 @@ def _build_request_payload(
"instance": instance_payload,
}

autorater_config = _get_autorater_config(self.metric)
if autorater_config:
request_payload["autorater_config"] = genai_types.AutoraterConfig(
**autorater_config
)
# Note: autorater_config is intentionally not passed for predefined
# metrics. The server uses its own model configuration for predefined
# metrics and ignores the autorater_config field.
return request_payload

@override
Expand All @@ -1045,7 +1043,6 @@ def get_metric_result(
lambda: self.module._evaluate_instances(
metrics=[self.metric],
instance=payload.get("instance"),
autorater_config=payload.get("autorater_config"),
),
metric_name,
)
Expand Down
6 changes: 3 additions & 3 deletions vertexai/_genai/types/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -2392,7 +2392,7 @@ class EvaluationRunConfig(_common.BaseModel):
default=None, description="""The output config for the evaluation run."""
)
autorater_config: Optional[genai_types.AutoraterConfig] = Field(
default=None, description="""The autorater config for the evaluation run."""
default=None, description="""The autorater config for the evaluation run. Not applicable for predefined metrics; the server uses its own model configuration for predefined metrics and ignores this field."""
)
prompt_template: Optional[EvaluationRunPromptTemplate] = Field(
default=None, description="""The prompt template used for inference."""
Expand All @@ -2413,7 +2413,7 @@ class EvaluationRunConfigDict(TypedDict, total=False):
"""The output config for the evaluation run."""

autorater_config: Optional[genai_types.AutoraterConfigDict]
"""The autorater config for the evaluation run."""
"""The autorater config for the evaluation run. Not applicable for predefined metrics; the server uses its own model configuration for predefined metrics and ignores this field."""

prompt_template: Optional[EvaluationRunPromptTemplateDict]
"""The prompt template used for inference."""
Expand Down Expand Up @@ -4533,7 +4533,7 @@ class _EvaluateInstancesRequestParameters(_common.BaseModel):
default=None, description=""""""
)
autorater_config: Optional[genai_types.AutoraterConfig] = Field(
default=None, description=""""""
default=None, description="""The autorater config for the evaluation. Not applicable for predefined metrics; the server uses its own model configuration for predefined metrics and ignores this field."""
)
metrics: Optional[list[Metric]] = Field(
default=None,
Expand Down
Loading