Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
ede184e
Fix: Use App (with plugins) for eval when available
ishanrajsingh Dec 5, 2025
d0db7fd
Merge branch 'main' into fix-eval-use-app-with-plugins
ishanrajsingh Dec 5, 2025
cf77b85
Fix critical issues from code review
ishanrajsingh Dec 5, 2025
2311528
feat: Support App plugins in evaluation framework
ishanrajsingh Dec 5, 2025
de93f9f
feat: Support App plugins in evaluation framework
ishanrajsingh Dec 5, 2025
884b8df
Update src/google/adk/evaluation/evaluation_generator.py
ishanrajsingh Dec 5, 2025
1380758
feat: Support App plugins in evaluation framework
ishanrajsingh Dec 5, 2025
9bd1433
feat: Support App plugins in evaluation framework
ishanrajsingh Dec 5, 2025
379c9bd
refactor: Address Gemini Code Assist feedback
ishanrajsingh Dec 5, 2025
3c1c7a4
Update src/google/adk/evaluation/evaluation_generator.py
ishanrajsingh Dec 5, 2025
dae85d3
Update src/google/adk/evaluation/evaluation_generator.py
ishanrajsingh Dec 5, 2025
d03e414
refactor: Address Gemini Code Assist feedback
ishanrajsingh Dec 5, 2025
c1babc6
Update src/google/adk/evaluation/evaluation_generator.py
ishanrajsingh Dec 5, 2025
7909bee
Update src/google/adk/cli/cli_tools_click.py
ishanrajsingh Dec 6, 2025
1f41633
Merge branch 'main' into fix-eval-use-app-with-plugins
ishanrajsingh Dec 6, 2025
03919bd
fix: address CI/CD check failures
ishanrajsingh Dec 11, 2025
5f6b1ca
Merge branch 'main' into fix-eval-use-app-with-plugins
ishanrajsingh Dec 11, 2025
40b879e
Merge branch 'main' into fix-eval-use-app-with-plugins
ishanrajsingh Dec 11, 2025
72f6639
Merge branch 'main' into fix-eval-use-app-with-plugins
ishanrajsingh Dec 12, 2025
e71bdc2
fix: Add AppInferenceAdapter to remove cli dependency and enable App-…
ishanrajsingh Dec 12, 2025
c04a97e
Merge branch 'fix-eval-use-app-with-plugins' of https://github.com/is…
ishanrajsingh Dec 12, 2025
81fc2b7
fix: Move app loader to utils to fix CI check
ishanrajsingh Dec 12, 2025
e858f72
Merge branch 'main' into fix-eval-use-app-with-plugins
ishanrajsingh Jan 20, 2026
5315506
Merge branch 'main' into fix-eval-use-app-with-plugins
ishanrajsingh Jan 20, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 42 additions & 2 deletions src/google/adk/cli/cli_tools_click.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,12 @@
import os
from pathlib import Path
import tempfile
import textwrap
from typing import Optional
from typing import Optional, TYPE_CHECKING

if TYPE_CHECKING:
from ..apps.app import App

import textwrap
import click
from click.core import ParameterSource
from fastapi import FastAPI
Expand Down Expand Up @@ -515,6 +518,34 @@ def cli_run(
)
)

def _load_app_from_module(module_path: str) -> Optional['App']:
  """Tries to load an `App` instance from the given agent module.

  Looks for a module-level attribute named ``app`` (the common convention)
  and returns it only if it is an actual ``App`` instance. Any import or
  attribute failure is treated as "no App available" rather than an error,
  so callers can transparently fall back to using the root agent directly.

  Args:
    module_path: Python module path (e.g., 'my_package.my_agent').

  Returns:
    The `App` instance if found, `None` otherwise.
  """
  try:
    import importlib

    module = importlib.import_module(module_path)

    # Single lookup instead of hasattr() + getattr().
    candidate = getattr(module, 'app', None)
    if candidate is not None:
      # Imported lazily to avoid a hard dependency at CLI import time.
      from ..apps.app import App

      if isinstance(candidate, App):
        # Lazy %-style args so formatting is skipped when INFO is disabled.
        logger.info('Loaded App instance from %s', module_path)
        return candidate

    logger.debug('No App instance found in %s', module_path)

  except (ImportError, AttributeError) as e:
    # Best-effort: a missing/broken module simply means no App is available.
    logger.debug('Could not load App from module %s: %s', module_path, e)

  return None
Comment thread
ishanrajsingh marked this conversation as resolved.
Outdated


def eval_options():
"""Decorator to add common eval options to click commands."""
Expand Down Expand Up @@ -733,10 +764,19 @@ def cli_eval(
)

try:
# Try to load App if available (for plugin support like ReflectAndRetryToolPlugin)
app = _load_app_from_module(agent_module_file_path)

if app:
logger.info("Using App instance for evaluation (plugins will be applied)")
else:
logger.info("No App found, using root_agent directly")

Comment thread
ishanrajsingh marked this conversation as resolved.
eval_service = LocalEvalService(
root_agent=root_agent,
eval_sets_manager=eval_sets_manager,
eval_set_results_manager=eval_set_results_manager,
app=app, # NEW: Pass app if available
user_simulator_provider=user_simulator_provider,
)

Expand Down
75 changes: 72 additions & 3 deletions src/google/adk/evaluation/evaluation_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,11 @@

import copy
import importlib
from typing import Any
from typing import AsyncGenerator
from typing import Optional
from typing import Any, Optional, TYPE_CHECKING
Comment thread
ishanrajsingh marked this conversation as resolved.
Outdated

if TYPE_CHECKING:
from ..apps.app import App

import uuid

from google.genai.types import Content
Expand Down Expand Up @@ -324,6 +326,72 @@ def convert_events_to_eval_invocations(
)
)

return response_invocations

@staticmethod
async def _generate_inferences_from_app(
    invocations: list['Invocation'],
    app: 'App',
    initial_session: Optional['SessionInput'],
    session_id: str,
    session_service: 'BaseSessionService',
    artifact_service: 'BaseArtifactService',
) -> list['Invocation']:
  """Generates inferences by invoking each turn through the App.

  Running through the App (instead of the root agent directly) ensures that
  App-level plugins are applied during inference.

  Args:
    invocations: Expected invocations whose `user_content` is replayed.
    app: The App to run each user message through.
    initial_session: Optional session seed (app name, user id, state).
    session_id: Id of the session used for all invocations.
    session_service: Service used to create the seed session, if any.
    artifact_service: Currently unused; kept for signature parity with
      `_generate_inferences_from_root_agent`.

  Returns:
    One actual `Invocation` per input invocation, capturing the final
    response and any tool calls observed in the event stream.
  """
  actual_invocations = []

  # Default user id; overridden when the initial session specifies one.
  user_id = 'test_user_id'
  if initial_session and initial_session.user_id is not None:
    user_id = initial_session.user_id

  # Seed the session only when an initial session was provided.
  if initial_session:
    app_name = initial_session.app_name if initial_session.app_name else app.name
    await session_service.create_session(
        app_name=app_name,
        user_id=user_id,
        session_id=session_id,
        state=initial_session.state if initial_session.state else {},
    )

  for expected_invocation in invocations:
    user_content = expected_invocation.user_content

    # NOTE(review): assumes `app.run` returns an awaitable that yields an
    # async iterator of events — confirm against the App API.
    response = await app.run(
        user_id=user_id,
        session_id=session_id,
        new_message=user_content,
    )

    final_response = None
    tool_uses = []
    invocation_id = ""

    async for event in response:
      # Keep the first non-empty invocation id seen in the stream.
      invocation_id = invocation_id or event.invocation_id

      if event.is_final_response() and event.content and event.content.parts:
        final_response = event.content
      elif event.get_function_calls():
        tool_uses.extend(event.get_function_calls())

    actual_invocations.append(
        Invocation(
            invocation_id=invocation_id,
            user_content=user_content,
            final_response=final_response,
            intermediate_data=IntermediateData(tool_uses=tool_uses),
        )
    )

  # Fixed: the original had an unreachable duplicate `return invocations`
  # after this return; it has been removed.
  return actual_invocations

@staticmethod
Expand Down Expand Up @@ -413,3 +481,4 @@ def _process_query_with_session(session_data, data):
responses[index]["actual_tool_use"] = actual_tool_uses
responses[index]["response"] = response
return responses

49 changes: 35 additions & 14 deletions src/google/adk/evaluation/local_eval_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@
from typing import AsyncGenerator
from typing import Callable
from typing import Optional
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from ..apps.app import App
import uuid

from typing_extensions import override
Expand All @@ -41,6 +45,7 @@
from .base_eval_service import InferenceResult
from .base_eval_service import InferenceStatus
from .eval_case import Invocation
from .eval_case import SessionInput
from .eval_metrics import EvalMetric
from .eval_metrics import EvalMetricResult
from .eval_metrics import EvalMetricResultDetails
Expand Down Expand Up @@ -79,11 +84,13 @@ def __init__(
artifact_service: Optional[BaseArtifactService] = None,
eval_set_results_manager: Optional[EvalSetResultsManager] = None,
session_id_supplier: Callable[[], str] = _get_session_id,
app: Optional['App'] = None,
user_simulator_provider: UserSimulatorProvider = UserSimulatorProvider(),
memory_service: Optional[BaseMemoryService] = None,
):
self._root_agent = root_agent
self._eval_sets_manager = eval_sets_manager
self._app = app
metric_evaluator_registry = (
metric_evaluator_registry or DEFAULT_METRIC_EVALUATOR_REGISTRY
)
Expand Down Expand Up @@ -406,25 +413,39 @@ async def _perform_inference_single_eval_item(
)

try:
with client_label_context(EVAL_CLIENT_LABEL):
inferences = (
await EvaluationGenerator._generate_inferences_from_root_agent(
root_agent=root_agent,
user_simulator=self._user_simulator_provider.provide(eval_case),
initial_session=initial_session,
session_id=session_id,
session_service=self._session_service,
artifact_service=self._artifact_service,
memory_service=self._memory_service,
)
)
# Use App if available (so plugins like ReflectAndRetryToolPlugin run)
if self._app is not None:
inferences = (
await EvaluationGenerator._generate_inferences_from_app(
invocations=eval_case.conversation,
app=self._app,
initial_session=initial_session,
session_id=session_id,
session_service=self._session_service,
artifact_service=self._artifact_service,
)
)
Comment thread
ishanrajsingh marked this conversation as resolved.
Outdated
else:
# Fallback to direct root_agent usage (existing behavior)
with client_label_context(EVAL_CLIENT_LABEL):
inferences = (
await EvaluationGenerator._generate_inferences_from_root_agent(
root_agent=root_agent,
user_simulator=self._user_simulator_provider.provide(eval_case),
initial_session=initial_session,
session_id=session_id,
session_service=self._session_service,
artifact_service=self._artifact_service,
memory_service=self._memory_service,
)
)
Comment thread
ishanrajsingh marked this conversation as resolved.

inference_result.inferences = inferences
inference_result.status = InferenceStatus.SUCCESS

return inference_result
except Exception as e:
# We intentionally catch the Exception as we don't failures to affect
# We intentionally catch the Exception as we don't want failures to affect
# other inferences.
logger.error(
'Inference failed for eval case `%s` with error %s.',
Expand All @@ -434,4 +455,4 @@ async def _perform_inference_single_eval_item(
)
inference_result.status = InferenceStatus.FAILURE
inference_result.error_message = str(e)
return inference_result
return inference_result