60 changes: 58 additions & 2 deletions vertexai/_genai/evals.py
@@ -94,7 +94,14 @@ def _CreateEvaluationRunParameters_to_vertex(
     setv(to_object, ["labels"], getv(from_object, ["labels"]))

   if getv(from_object, ["inference_configs"]) is not None:
-    setv(to_object, ["inferenceConfigs"], getv(from_object, ["inference_configs"]))
+    setv(
+        to_object,
+        ["inferenceConfigs"],
+        {
+            k: _EvaluationRunInferenceConfig_to_vertex(v, to_object)
+            for k, v in getv(from_object, ["inference_configs"]).items()
+        },
+    )

   if getv(from_object, ["config"]) is not None:
     setv(to_object, ["config"], getv(from_object, ["config"]))
@@ -331,6 +338,52 @@ def _EvaluationRunConfig_to_vertex(
   return to_object


+def _EvaluationRunInferenceConfig_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ["agentConfig"]) is not None:
+    setv(to_object, ["agent_config"], getv(from_object, ["agentConfig"]))
+
+  if getv(from_object, ["model"]) is not None:
+    setv(to_object, ["model"], getv(from_object, ["model"]))
+
+  if getv(from_object, ["promptTemplate"]) is not None:
+    setv(to_object, ["prompt_template"], getv(from_object, ["promptTemplate"]))
+
+  if getv(from_object, ["agentRunConfig"]) is not None:
+    setv(to_object, ["agent_run_config"], getv(from_object, ["agentRunConfig"]))
+
+  if getv(from_object, ["agents"]) is not None:
+    setv(to_object, ["agent_configs"], getv(from_object, ["agents"]))
+
+  return to_object
+
+
+def _EvaluationRunInferenceConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ["agent_config"]) is not None:
+    setv(to_object, ["agentConfig"], getv(from_object, ["agent_config"]))
+
+  if getv(from_object, ["model"]) is not None:
+    setv(to_object, ["model"], getv(from_object, ["model"]))
+
+  if getv(from_object, ["prompt_template"]) is not None:
+    setv(to_object, ["promptTemplate"], getv(from_object, ["prompt_template"]))
+
+  if getv(from_object, ["agent_run_config"]) is not None:
+    setv(to_object, ["agentRunConfig"], getv(from_object, ["agent_run_config"]))
+
+  if getv(from_object, ["agent_configs"]) is not None:
+    setv(to_object, ["agents"], getv(from_object, ["agent_configs"]))
+
+  return to_object
+
+
 def _EvaluationRunMetric_from_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -423,7 +476,10 @@ def _EvaluationRun_from_vertex(
     setv(
         to_object,
         ["inference_configs"],
-        {k: v for k, v in getv(from_object, ["inferenceConfigs"]).items()},
+        {
+            k: _EvaluationRunInferenceConfig_from_vertex(v, to_object)
+            for k, v in getv(from_object, ["inferenceConfigs"]).items()
+        },
     )

   if getv(from_object, ["labels"]) is not None:
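
A minimal round-trip sketch of the converter pair added above (illustrative only, not part of the PR). It assumes getv/setv accept plain dicts, as the Union[dict[str, Any], object] parameter type suggests, and that missing keys simply read as None; the model resource name and agent id are placeholders. Note that only the top-level keys are renamed; nested values are copied through unchanged.

from vertexai._genai import evals

# Snake_case SDK-style inference config (placeholder values).
sdk_config = {
    "model": "publishers/google/models/gemini-2.0-flash",
    "agent_configs": {"my_agent": {}},  # agent id -> AgentConfig payload (placeholder)
}

# snake_case keys become the wire-format keys (agent_config -> agentConfig,
# prompt_template -> promptTemplate, agent_run_config -> agentRunConfig,
# agent_configs -> agents); "model" is unchanged.
wire_config = evals._EvaluationRunInferenceConfig_to_vertex(sdk_config)

# The _from_vertex converter reverses the key mapping.
assert evals._EvaluationRunInferenceConfig_from_vertex(wire_config) == sdk_config
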
18 changes: 9 additions & 9 deletions vertexai/_genai/types/__init__.py
@@ -1346,6 +1346,15 @@
     "EvaluationRunConfig",
     "EvaluationRunConfigDict",
     "EvaluationRunConfigOrDict",
+    "EvaluationRunAgentConfig",
+    "EvaluationRunAgentConfigDict",
+    "EvaluationRunAgentConfigOrDict",
+    "AgentRunConfig",
+    "AgentRunConfigDict",
+    "AgentRunConfigOrDict",
+    "EvaluationRunInferenceConfig",
+    "EvaluationRunInferenceConfigDict",
+    "EvaluationRunInferenceConfigOrDict",
     "CreateEvaluationRunConfig",
     "CreateEvaluationRunConfigDict",
     "CreateEvaluationRunConfigOrDict",
@@ -1391,15 +1400,6 @@
     "EvaluationResult",
     "EvaluationResultDict",
     "EvaluationResultOrDict",
-    "EvaluationRunAgentConfig",
-    "EvaluationRunAgentConfigDict",
-    "EvaluationRunAgentConfigOrDict",
-    "AgentRunConfig",
-    "AgentRunConfigDict",
-    "AgentRunConfigOrDict",
-    "EvaluationRunInferenceConfig",
-    "EvaluationRunInferenceConfigDict",
-    "EvaluationRunInferenceConfigOrDict",
     "EvaluationRun",
     "EvaluationRunDict",
     "EvaluationRunOrDict",
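
The two hunks above only move these entries earlier in __all__; the exported surface is unchanged, so an import such as the following (illustrative, not from the PR) resolves the same before and after this change:

from vertexai._genai.types import (
    AgentRunConfig,
    EvaluationRunAgentConfig,
    EvaluationRunInferenceConfig,
)
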
237 changes: 122 additions & 115 deletions vertexai/_genai/types/common.py
@@ -2576,6 +2576,126 @@ class EvaluationRunConfigDict(TypedDict, total=False):
 EvaluationRunConfigOrDict = Union[EvaluationRunConfig, EvaluationRunConfigDict]


+class EvaluationRunAgentConfig(_common.BaseModel):
+  """This field is experimental and may change in future versions.
+
+  Agent config for an evaluation run.
+  """
+
+  developer_instruction: Optional[genai_types.Content] = Field(
+      default=None, description="""The developer instruction for the agent."""
+  )
+  tools: Optional[list[genai_types.Tool]] = Field(
+      default=None, description="""The tools available to the agent."""
+  )
+
+
+class EvaluationRunAgentConfigDict(TypedDict, total=False):
+  """This field is experimental and may change in future versions.
+
+  Agent config for an evaluation run.
+  """
+
+  developer_instruction: Optional[genai_types.ContentDict]
+  """The developer instruction for the agent."""
+
+  tools: Optional[list[genai_types.ToolDict]]
+  """The tools available to the agent."""
+
+
+EvaluationRunAgentConfigOrDict = Union[
+    EvaluationRunAgentConfig, EvaluationRunAgentConfigDict
+]
+
+
+class AgentRunConfig(_common.BaseModel):
+  """Configuration for an Agent Run."""
+
+  session_input: Optional[evals_types.SessionInput] = Field(
+      default=None, description="""The session input to get agent running results."""
+  )
+  agent_engine: Optional[str] = Field(
+      default=None, description="""The resource name of the Agent Engine."""
+  )
+  user_simulator_config: Optional[evals_types.UserSimulatorConfig] = Field(
+      default=None,
+      description="""Used for multi-turn agent run.
+      Contains configuration for a user simulator that
+      uses an LLM to generate messages on behalf of the user.""",
+  )
+
+
+class AgentRunConfigDict(TypedDict, total=False):
+  """Configuration for an Agent Run."""
+
+  session_input: Optional[evals_types.SessionInput]
+  """The session input to get agent running results."""
+
+  agent_engine: Optional[str]
+  """The resource name of the Agent Engine."""
+
+  user_simulator_config: Optional[evals_types.UserSimulatorConfig]
+  """Used for multi-turn agent run.
+  Contains configuration for a user simulator that
+  uses an LLM to generate messages on behalf of the user."""
+
+
+AgentRunConfigOrDict = Union[AgentRunConfig, AgentRunConfigDict]
+
+
+class EvaluationRunInferenceConfig(_common.BaseModel):
+  """This field is experimental and may change in future versions.
+
+  Configuration that describes an agent.
+  """
+
+  agent_config: Optional[EvaluationRunAgentConfig] = Field(
+      default=None, description="""The agent config."""
+  )
+  model: Optional[str] = Field(
+      default=None,
+      description="""The fully qualified name of the publisher model or endpoint to use for inference.""",
+  )
+  prompt_template: Optional[EvaluationRunPromptTemplate] = Field(
+      default=None, description="""The prompt template used for inference."""
+  )
+  agent_run_config: Optional[AgentRunConfig] = Field(
+      default=None,
+      description="""Configuration for Agent Run in evaluation management service.""",
+  )
+  agent_configs: Optional[dict[str, evals_types.AgentConfig]] = Field(
+      default=None,
+      description="""A map of agent IDs to their respective agent config.""",
+  )
+
+
+class EvaluationRunInferenceConfigDict(TypedDict, total=False):
+  """This field is experimental and may change in future versions.
+
+  Configuration that describes an agent.
+  """
+
+  agent_config: Optional[EvaluationRunAgentConfigDict]
+  """The agent config."""
+
+  model: Optional[str]
+  """The fully qualified name of the publisher model or endpoint to use for inference."""
+
+  prompt_template: Optional[EvaluationRunPromptTemplateDict]
+  """The prompt template used for inference."""
+
+  agent_run_config: Optional[AgentRunConfigDict]
+  """Configuration for Agent Run in evaluation management service."""
+
+  agent_configs: Optional[dict[str, evals_types.AgentConfig]]
+  """A map of agent IDs to their respective agent config."""
+
+
+EvaluationRunInferenceConfigOrDict = Union[
+    EvaluationRunInferenceConfig, EvaluationRunInferenceConfigDict
+]
+
+
 class CreateEvaluationRunConfig(_common.BaseModel):
   """Config to create an evaluation run."""

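
A hypothetical construction of the relocated EvaluationRunInferenceConfig, exercising the new agent_configs field (illustrative, not part of the PR). It assumes the class is importable from vertexai._genai.types, per the __all__ entries above, and that pydantic coerces the plain dict value into evals_types.AgentConfig; the model resource name and agent id are placeholders.

from vertexai._genai import types

inference_config = types.EvaluationRunInferenceConfig(
    model="projects/my-project/locations/us-central1/publishers/google/models/gemini-2.0-flash",
    agent_configs={"my_agent": {}},  # agent id -> AgentConfig payload (placeholder)
)
print(inference_config.model_dump(exclude_none=True))
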
@@ -2608,7 +2728,7 @@ class _CreateEvaluationRunParameters(_common.BaseModel):
       default=None, description=""""""
   )
   labels: Optional[dict[str, str]] = Field(default=None, description="""""")
-  inference_configs: Optional[dict[str, "EvaluationRunInferenceConfig"]] = Field(
+  inference_configs: Optional[dict[str, EvaluationRunInferenceConfig]] = Field(
       default=None, description=""""""
   )
   config: Optional[CreateEvaluationRunConfig] = Field(
@@ -2634,7 +2754,7 @@ class _CreateEvaluationRunParametersDict(TypedDict, total=False):
   labels: Optional[dict[str, str]]
   """"""

-  inference_configs: Optional[dict[str, "EvaluationRunInferenceConfigDict"]]
+  inference_configs: Optional[dict[str, EvaluationRunInferenceConfigDict]]
   """"""

   config: Optional[CreateEvaluationRunConfigDict]
@@ -3252,119 +3372,6 @@ class EvaluationResultDict(TypedDict, total=False):
 EvaluationResultOrDict = Union[EvaluationResult, EvaluationResultDict]


-class EvaluationRunAgentConfig(_common.BaseModel):
-  """This field is experimental and may change in future versions.
-
-  Agent config for an evaluation run.
-  """
-
-  developer_instruction: Optional[genai_types.Content] = Field(
-      default=None, description="""The developer instruction for the agent."""
-  )
-  tools: Optional[list[genai_types.Tool]] = Field(
-      default=None, description="""The tools available to the agent."""
-  )
-
-
-class EvaluationRunAgentConfigDict(TypedDict, total=False):
-  """This field is experimental and may change in future versions.
-
-  Agent config for an evaluation run.
-  """
-
-  developer_instruction: Optional[genai_types.ContentDict]
-  """The developer instruction for the agent."""
-
-  tools: Optional[list[genai_types.ToolDict]]
-  """The tools available to the agent."""
-
-
-EvaluationRunAgentConfigOrDict = Union[
-    EvaluationRunAgentConfig, EvaluationRunAgentConfigDict
-]
-
-
-class AgentRunConfig(_common.BaseModel):
-  """Configuration for an Agent Run."""
-
-  session_input: Optional[evals_types.SessionInput] = Field(
-      default=None, description="""The session input to get agent running results."""
-  )
-  agent_engine: Optional[str] = Field(
-      default=None, description="""The resource name of the Agent Engine."""
-  )
-  user_simulator_config: Optional[evals_types.UserSimulatorConfig] = Field(
-      default=None,
-      description="""Used for multi-turn agent run.
-      Contains configuration for a user simulator that
-      uses an LLM to generate messages on behalf of the user.""",
-  )
-
-
-class AgentRunConfigDict(TypedDict, total=False):
-  """Configuration for an Agent Run."""
-
-  session_input: Optional[evals_types.SessionInput]
-  """The session input to get agent running results."""
-
-  agent_engine: Optional[str]
-  """The resource name of the Agent Engine."""
-
-  user_simulator_config: Optional[evals_types.UserSimulatorConfig]
-  """Used for multi-turn agent run.
-  Contains configuration for a user simulator that
-  uses an LLM to generate messages on behalf of the user."""
-
-
-AgentRunConfigOrDict = Union[AgentRunConfig, AgentRunConfigDict]
-
-
-class EvaluationRunInferenceConfig(_common.BaseModel):
-  """This field is experimental and may change in future versions.
-
-  Configuration that describes an agent.
-  """
-
-  agent_config: Optional[EvaluationRunAgentConfig] = Field(
-      default=None, description="""The agent config."""
-  )
-  model: Optional[str] = Field(
-      default=None,
-      description="""The fully qualified name of the publisher model or endpoint to use for inference.""",
-  )
-  prompt_template: Optional[EvaluationRunPromptTemplate] = Field(
-      default=None, description="""The prompt template used for inference."""
-  )
-  agent_run_config: Optional[AgentRunConfig] = Field(
-      default=None,
-      description="""Configuration for Agent Run in evaluation management service.""",
-  )
-
-
-class EvaluationRunInferenceConfigDict(TypedDict, total=False):
-  """This field is experimental and may change in future versions.
-
-  Configuration that describes an agent.
-  """
-
-  agent_config: Optional[EvaluationRunAgentConfigDict]
-  """The agent config."""
-
-  model: Optional[str]
-  """The fully qualified name of the publisher model or endpoint to use for inference."""
-
-  prompt_template: Optional[EvaluationRunPromptTemplateDict]
-  """The prompt template used for inference."""
-
-  agent_run_config: Optional[AgentRunConfigDict]
-  """Configuration for Agent Run in evaluation management service."""
-
-
-EvaluationRunInferenceConfigOrDict = Union[
-    EvaluationRunInferenceConfig, EvaluationRunInferenceConfigDict
-]
-
-
 class EvaluationRun(_common.BaseModel):
   """Represents an evaluation run."""
