From 7d6a8d11f58eba3ce02c77d7ea43f1589b4fdee9 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 20:03:36 -0700 Subject: [PATCH 1/9] make projects and openai models private, fix pylint and mypy --- .../agentserver/core/checkpoints/__init__.py | 2 +- .../core/checkpoints/client/__init__.py | 14 +-- .../core/checkpoints/client/_client.py | 1 + .../azure/ai/agentserver/core/logger.py | 96 ++++++++++++++----- .../ai/agentserver/core/models/__init__.py | 3 +- .../core/models/_create_response.py | 7 +- .../models/{openai => _openai}/__init__.py | 0 .../{projects => _projects}/__init__.py | 0 .../models/{projects => _projects}/_enums.py | 0 .../models/{projects => _projects}/_models.py | 0 .../models/{projects => _projects}/_patch.py | 0 .../_patch_evaluations.py | 0 .../_utils/__init__.py | 0 .../_utils/model_base.py | 0 .../_utils/serialization.py | 0 .../core/server/_response_metadata.py | 2 +- .../azure/ai/agentserver/core/server/base.py | 87 +++++++++-------- .../core/server/common/agent_run_context.py | 16 ++-- .../id_generator/foundry_id_generator.py | 86 +++++++++++++---- .../agentserver/core/tools/client/_client.py | 6 +- .../core/tools/runtime/_catalog.py | 2 +- .../core/tools/runtime/_starlette.py | 3 +- .../ai/agentserver/core/utils/_credential.py | 4 +- .../samples/bilingual_weekend_planner/main.py | 2 +- .../samples/mcp_simple/mcp_simple.py | 2 +- .../custom_mock_agent_test.py | 2 +- .../server/test_response_metadata.py | 2 +- .../tests/unit_tests/test_logger.py | 14 +-- 28 files changed, 229 insertions(+), 122 deletions(-) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{openai => _openai}/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_enums.py (100%) rename 
sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_models.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_patch.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_patch_evaluations.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_utils/__init__.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_utils/model_base.py (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/{projects => _projects}/_utils/serialization.py (100%) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py index f9d6ed3d8aa8..0ca387146579 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- """Checkpoint storage module for Azure AI Agent Server.""" -from .client import FoundryCheckpointClient +from .client._client import FoundryCheckpointClient from .client._models import ( CheckpointItem, CheckpointItemId, diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py index 34f30f16c5d9..901cbb3d70a8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py @@ -3,16 +3,4 @@ # 
--------------------------------------------------------- """Checkpoint client module for Azure AI Agent Server.""" -from ._client import FoundryCheckpointClient -from ._models import ( - CheckpointItem, - CheckpointItemId, - CheckpointSession, -) - -__all__ = [ - "CheckpointItem", - "CheckpointItemId", - "CheckpointSession", - "FoundryCheckpointClient", -] +__path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py index f7e178d758b4..fc2f45321968 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # pylint: disable=client-method-missing-kwargs,client-accepts-api-version-keyword,missing-client-constructor-parameter-kwargs +# ^^^ azure-sdk pylint rules: internal client not intended as a public Azure SDK client """Asynchronous client for Azure AI Foundry checkpoint storage API.""" from typing import Any, AsyncContextManager, List, Optional diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py index f15e98986470..2b5f39e964b4 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py @@ -1,4 +1,3 @@ -# pylint: disable=broad-exception-caught,dangerous-default-value # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- @@ -12,13 +11,12 @@ from .constants import Constants def _get_default_log_config() -> dict[str, Any]: - """ - Build default log config with level from environment. - + """Build default log config with level from environment. + :return: A dictionary containing logging configuration. - :rtype: dict + :rtype: dict[str, Any] """ - log_level = get_log_level() + log_level = _get_log_level() return { "version": 1, "disable_existing_loggers": False, @@ -40,7 +38,14 @@ def _get_default_log_config() -> dict[str, Any]: } -def get_log_level(): +def _get_log_level() -> str: + """Read log level from the ``AGENT_LOG_LEVEL`` environment variable. + + Falls back to ``"INFO"`` if the variable is unset or contains an invalid value. + + :return: A valid Python logging level name. + :rtype: str + """ log_level = os.getenv(Constants.AGENT_LOG_LEVEL, "INFO").upper() valid_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] if log_level not in valid_levels: @@ -54,7 +59,12 @@ def get_log_level(): APPINSIGHT_CONNSTR_ENV_NAME = "APPLICATIONINSIGHTS_CONNECTION_STRING" -def get_dimensions(): +def _get_dimensions() -> dict[str, str]: + """Collect environment-based dimensions for structured logging. + + :return: A mapping of dimension keys to their runtime values. + :rtype: dict[str, str] + """ env_values = {name: value for name, value in vars(Constants).items() if not name.startswith("_")} res = {"azure.ai.agentserver.version": VERSION} for name, env_name in env_values.items(): @@ -65,11 +75,25 @@ def get_dimensions(): return res -def get_project_endpoint(logger=None): +def get_project_endpoint(logger: Optional[logging.Logger] = None) -> Optional[str]: + """Resolve the project endpoint from environment variables. + + Checks ``AZURE_AI_PROJECT_ENDPOINT`` first, then falls back to deriving + an endpoint from ``AGENT_PROJECT_NAME``. + + :param logger: Optional logger for diagnostic messages. 
+ :type logger: Optional[logging.Logger] + :return: The resolved project endpoint URL, or ``None`` if unavailable. + :rtype: Optional[str] + """ project_endpoint = os.environ.get(Constants.AZURE_AI_PROJECT_ENDPOINT) if project_endpoint: if logger: - logger.info(f"Using project endpoint from {Constants.AZURE_AI_PROJECT_ENDPOINT}: {project_endpoint}") + logger.info( + "Using project endpoint from %s: %s", + Constants.AZURE_AI_PROJECT_ENDPOINT, + project_endpoint, + ) return project_endpoint project_resource_id = os.environ.get(Constants.AGENT_PROJECT_RESOURCE_ID) if project_resource_id: @@ -78,18 +102,32 @@ def get_project_endpoint(logger=None): parts = last_part.split("@") if len(parts) < 2: if logger: - logger.warning(f"Invalid project resource id format: {project_resource_id}") + logger.warning("Invalid project resource id format: %s", project_resource_id) return None account = parts[0] project = parts[1] endpoint = f"https://{account}.services.ai.azure.com/api/projects/{project}" if logger: - logger.info(f"Using project endpoint derived from {Constants.AGENT_PROJECT_RESOURCE_ID}: {endpoint}") + logger.info( + "Using project endpoint derived from %s: %s", + Constants.AGENT_PROJECT_RESOURCE_ID, + endpoint, + ) return endpoint return None -def get_application_insights_connstr(logger=None): +def _get_application_insights_connstr(logger: Optional[logging.Logger] = None) -> Optional[str]: + """Retrieve or derive the Application Insights connection string. + + Looks in the ``APPLICATIONINSIGHTS_CONNECTION_STRING`` environment variable first, + then attempts to fetch it from the project endpoint. + + :param logger: Optional logger for diagnostic messages. + :type logger: Optional[logging.Logger] + :return: The connection string, or ``None`` if unavailable. 
+ :rtype: Optional[str] + """ try: conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) if not conn_str: @@ -101,22 +139,36 @@ def get_application_insights_connstr(logger=None): project_client = AIProjectClient(credential=DefaultAzureCredential(), endpoint=project_endpoint) conn_str = project_client.telemetry.get_application_insights_connection_string() if not conn_str and logger: - logger.info(f"No Application Insights connection found for project: {project_endpoint}") + logger.info( + "No Application Insights connection found for project: %s", + project_endpoint, + ) elif conn_str: os.environ[APPINSIGHT_CONNSTR_ENV_NAME] = conn_str elif logger: logger.info("Application Insights not configured, telemetry export disabled.") return conn_str - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught # bootstrap: many failure modes possible if logger: - logger.warning(f"Failed to get Application Insights connection string, telemetry export disabled: {e}") + logger.warning( + "Failed to get Application Insights connection string, telemetry export disabled: %s", + e, + ) return None class CustomDimensionsFilter(logging.Filter): - def filter(self, record): - # Add custom dimensions to every log record - dimensions = get_dimensions() + """Logging filter that attaches environment dimensions and request context to log records.""" + + def filter(self, record: logging.LogRecord) -> bool: + """Inject custom dimensions into *record* and allow it through. + + :param record: The log record to enrich. + :type record: logging.LogRecord + :return: Always ``True`` so the record is never discarded. 
+ :rtype: bool + """ + dimensions = _get_dimensions() for key, value in dimensions.items(): setattr(record, key, value) cur_request_context = request_context.get() @@ -140,7 +192,7 @@ def configure(log_config: Optional[dict[str, Any]] = None): config.dictConfig(log_config) app_logger = logging.getLogger("azure.ai.agentserver") - application_insights_connection_string = get_application_insights_connstr(logger=app_logger) + application_insights_connection_string = _get_application_insights_connstr(logger=app_logger) enable_application_insights_logger = ( os.environ.get(Constants.ENABLE_APPLICATION_INSIGHTS_LOGGER, "true").lower() == "true" ) @@ -169,10 +221,10 @@ def configure(log_config: Optional[dict[str, Any]] = None): handler.addFilter(custom_filter) # Only add to azure.ai.agentserver namespace to avoid infrastructure logs - app_logger.setLevel(get_log_level()) + app_logger.setLevel(_get_log_level()) app_logger.addHandler(handler) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught print(f"Failed to configure logging: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py index d5622ebe7732..b6a1895a3868 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py @@ -1,7 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +# TypedDict module; __all__ cannot be statically typed because the list is built at runtime. 
from ._create_response import CreateResponse # type: ignore -from .projects import Response, ResponseStreamEvent +from ._projects import Response, ResponseStreamEvent __all__ = ["CreateResponse", "Response", "ResponseStreamEvent"] # type: ignore[var-annotated] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py index 820d54c6cea0..5ec72115734a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py @@ -1,11 +1,12 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=no-name-in-module +# pylint: disable=no-name-in-module # openai re-exports are dynamically generated from typing import Optional -from .openai import response_create_params # type: ignore -from . import projects as _azure_ai_projects_models +# ResponseCreateParamsBase is a TypedDict — mypy cannot verify total=False on mixed bases. +from ._openai import response_create_params # type: ignore +from . 
import _projects as _azure_ai_projects_models class CreateResponse(response_create_params.ResponseCreateParamsBase, total=False): # type: ignore agent: Optional[_azure_ai_projects_models.AgentReference] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/openai/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_openai/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/openai/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_openai/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_enums.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_enums.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_enums.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_enums.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_models.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_models.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_models.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_models.py diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch_evaluations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch_evaluations.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch_evaluations.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch_evaluations.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/__init__.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/__init__.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/model_base.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/model_base.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/model_base.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/serialization.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/serialization.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/serialization.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/serialization.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py index 352dfdc9d27b..9b13cfedd636 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py @@ -9,7 +9,7 @@ from ..application._metadata import get_current_app from ..models import Response as OpenAIResponse, ResponseStreamEvent -from ..models.projects import ( +from ..models._projects import ( ResponseCompletedEvent, ResponseCreatedEvent, ResponseInProgressEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 7a9f488227a7..994d9045dd38 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -1,9 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements -# mypy: ignore-errors -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name (false positive on module) import contextlib import inspect import json @@ -13,6 +11,7 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn +from openai import AsyncOpenAI from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -40,7 +39,11 @@ from .common.agent_run_context import AgentRunContext from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context -from ..models import Response as OpenAIResponse, ResponseStreamEvent, projects as project_models +from ..models import ( + Response as OpenAIResponse, + ResponseStreamEvent, + _projects as project_models +) from ..tools import UserInfoContextMiddleware, create_tool_runtime from ..utils._credential import AsyncTokenCredentialAdapter @@ -53,19 +56,19 @@ def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None): super().__init__(app) self.agent = agent - async def dispatch(self, request: Request, call_next): + async def dispatch(self, request: Request, call_next): # type: ignore[override] if request.url.path in ("/runs", "/responses"): try: self.set_request_id_to_context_var(request) payload = await request.json() - except Exception as e: - logger.error(f"Invalid JSON payload: {e}") + except Exception as e: # pylint: disable=broad-exception-caught # middleware catch-all for bad payload + logger.error("Invalid JSON payload: %s", e) return JSONResponse({"error": f"Invalid JSON payload: {e}"}, status_code=400) try: 
request.state.agent_run_context = AgentRunContext(payload) self.set_run_context_to_context_var(request.state.agent_run_context) - except Exception as e: - logger.error(f"Context build failed: {e}.", exc_info=True) + except Exception as e: # pylint: disable=broad-exception-caught # middleware catch-all for context build + logger.error("Context build failed: %s.", e, exc_info=True) return JSONResponse({"error": f"Context build failed: {e}"}, status_code=500) return await call_next(request) @@ -99,7 +102,8 @@ def set_run_context_to_context_var(self, run_context): class FoundryCBAgent: - def __init__(self, + def __init__( # pylint: disable=too-many-statements # Starlette app setup requires sequential route/middleware wiring + self, credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None, project_endpoint: Optional[str] = None) -> None: self.credentials = AsyncTokenCredentialAdapter(credentials) if credentials else AsyncDefaultTokenCredential() @@ -129,9 +133,9 @@ async def runs_endpoint(request): ex = None resp = await self.agent_run(context) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught # top-level agent_run catch-all # TODO: extract status code from exception - logger.error(f"Error processing CreateResponse request: {e}", exc_info=True) + logger.error("Error processing CreateResponse request: %s", e, exc_info=True) ex = e if not context.stream: @@ -172,7 +176,7 @@ async def gen_async(ex): if self._should_store(context): logger.debug("Storing output to conversation.") await self._save_output_events_to_conversation(context, output_events) - except Exception as e: # noqa: BLE001 + except Exception as e: # noqa: BLE001 # pylint: disable=broad-exception-caught logger.error("Error in async generator: %s", e, exc_info=True) ex = e finally: @@ -207,12 +211,12 @@ async def readiness_endpoint(request): ] @contextlib.asynccontextmanager - async def _lifespan(app): + async def _lifespan(app): # pylint: 
disable=unused-argument import logging # Log server started successfully port = getattr(self, '_port', 'unknown') - logger.info(f"FoundryCBAgent server started successfully on port {port}") + logger.info("FoundryCBAgent server started successfully on port %s", port) # Attach App Insights handler to uvicorn loggers for handler in logger.handlers: @@ -234,9 +238,9 @@ async def _lifespan(app): allow_methods=["*"], allow_headers=["*"], ) - self.app.add_middleware(AgentRunContextMiddleware, agent=self) + self.app.add_middleware(AgentRunContextMiddleware, agent=self) # type: ignore[arg-type] - self.tracer = None + self.tracer: trace.Tracer = trace.get_tracer(__name__) def _should_store(self, context: AgentRunContext) -> bool: """Determine whether conversation artifacts should be persisted. @@ -246,7 +250,7 @@ def _should_store(self, context: AgentRunContext) -> bool: :return: ``True`` when storage is requested and the conversation is scoped to a project. :rtype: bool """ - return context.request.get("store", False) and context.conversation_id and self._project_endpoint + return bool(context.request.get("store", False) and context.conversation_id and self._project_endpoint) def _items_are_equal(self, item1: dict, item2: dict) -> bool: """Compare two conversation items for equality based on type and content. @@ -274,7 +278,7 @@ def _items_are_equal(self, item1: dict, item2: dict) -> bool: return text1 == text2 return content1 == content2 - async def _create_openai_client(self) -> "AsyncOpenAI": + async def _create_openai_client(self) -> AsyncOpenAI: """Create an AsyncOpenAI client for conversation operations. :return: Configured AsyncOpenAI client scoped to the Foundry project endpoint. 
@@ -303,7 +307,7 @@ async def _save_input_to_conversation(self, context: AgentRunContext) -> None: try: conversation_id = context.conversation_id input_items = context.request.get("input", []) - if not input_items: + if not input_items or not conversation_id: return # Handle string input as a single item @@ -349,19 +353,22 @@ async def _save_input_to_conversation(self, context: AgentRunContext) -> None: all_match = False break if all_match: - logger.debug(f"All {n} input items already exist in " + - f"conversation {conversation_id}, skipping save") + logger.debug( + "All %d input items already exist in conversation %s, skipping save", + n, + conversation_id, + ) return - except Exception as e: - logger.debug(f"Could not check for duplicates: {e}") + except Exception as e: # pylint: disable=broad-exception-caught # best-effort duplicate check + logger.debug("Could not check for duplicates: %s", e) await openai_client.conversations.items.create( conversation_id=conversation_id, items=items_to_save, ) - logger.debug(f"Saved {len(items_to_save)} input items to conversation {conversation_id}") - except Exception as e: - logger.warning(f"Failed to save input items to conversation: {e}", exc_info=True) + logger.debug("Saved %d input items to conversation %s", len(items_to_save), conversation_id) + except Exception as e: # pylint: disable=broad-exception-caught # best-effort conversation persistence + logger.warning("Failed to save input items to conversation: %s", e, exc_info=True) async def _save_output_to_conversation( self, context: AgentRunContext, response: project_models.Response) -> None: @@ -396,9 +403,9 @@ async def _save_output_to_conversation( conversation_id=conversation_id, items=items_to_save, ) - logger.debug(f"Saved {len(items_to_save)} output items to conversation {conversation_id}") - except Exception as e: - logger.warning(f"Failed to save output items to conversation: {e}", exc_info=True) + logger.debug("Saved %d output items to conversation %s", 
len(items_to_save), conversation_id) + except Exception as e: # pylint: disable=broad-exception-caught # best-effort conversation persistence + logger.warning("Failed to save output items to conversation: %s", e, exc_info=True) async def _save_output_events_to_conversation(self, context: AgentRunContext, events: list) -> None: """Persist streaming output events for later retrieval. @@ -433,9 +440,9 @@ async def _save_output_events_to_conversation(self, context: AgentRunContext, ev conversation_id=conversation_id, items=items_to_save, ) - logger.debug(f"Saved {len(items_to_save)} output items to conversation {conversation_id}") - except Exception as e: - logger.warning(f"Failed to save output items to conversation: {e}", exc_info=True) + logger.debug("Saved %d output items to conversation %s", len(items_to_save), conversation_id) + except Exception as e: # pylint: disable=broad-exception-caught # best-effort conversation persistence + logger.warning("Failed to save output items to conversation: %s", e, exc_info=True) @abstractmethod async def agent_run( @@ -557,10 +564,10 @@ async def respond_with_oauth_consent_astream(self, context, error) -> AsyncGener }) yield project_models.ResponseCompletedEvent(sequence_number=sequence_number, response=response) - async def agent_liveness(self, request) -> Union[Response, dict]: + async def agent_liveness(self, request) -> Union[Response, dict]: # pylint: disable=unused-argument return Response(status_code=200) - async def agent_readiness(self, request) -> Union[Response, dict]: + async def agent_readiness(self, request) -> Union[Response, dict]: # pylint: disable=unused-argument return {"status": "ready"} async def run_async( @@ -577,7 +584,7 @@ async def run_async( config = uvicorn.Config(self.app, host="0.0.0.0", port=port, loop="asyncio") server = uvicorn.Server(config) self._port = port - logger.info(f"Starting FoundryCBAgent server async on port {port}") + logger.info("Starting FoundryCBAgent server async on port %s", 
port) await server.serve() def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None: @@ -593,7 +600,7 @@ def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None: """ self.init_tracing() self._port = port - logger.info(f"Starting FoundryCBAgent server on port {port}") + logger.info("Starting FoundryCBAgent server on port %s", port) uvicorn.run(self.app, host="0.0.0.0", port=port) def init_tracing(self): @@ -618,7 +625,9 @@ def get_trace_attributes(self): "service.name": "azure.ai.agentserver", } - def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): + def init_tracing_internal( # pylint: disable=unused-argument # base class hook, params used by subclasses + self, exporter_endpoint=None, app_insights_conn_str=None + ): pass def setup_application_insights_exporter(self, connection_string, provider): @@ -638,7 +647,7 @@ def setup_otlp_exporter(self, endpoint, provider): exporter_instance = OTLPSpanExporter(endpoint=endpoint) processor = BatchSpanProcessor(exporter_instance) provider.add_span_processor(processor) - logger.info(f"Tracing setup with OTLP exporter: {endpoint}") + logger.info("Tracing setup with OTLP exporter: %s", endpoint) def create_response_headers(self) -> dict[str, str]: headers = {} diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 87c32926bde4..174685f652fe 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -7,7 +7,7 @@ from .id_generator.id_generator import IdGenerator from ...logger import get_logger from ...models import CreateResponse -from ...models.projects import AgentId, AgentReference, ResponseConversation1 +from 
...models._projects import AgentId, AgentReference, ResponseConversation1 logger = get_logger() @@ -48,10 +48,10 @@ def conversation_id(self) -> Optional[str]: def stream(self) -> bool: return self._stream - def get_agent_id_object(self) -> AgentId: + def get_agent_id_object(self) -> Optional[AgentId]: agent = self.request.get("agent") if not agent: - return None # type: ignore + return None return AgentId( { "type": agent.type, @@ -60,9 +60,9 @@ def get_agent_id_object(self) -> AgentId: } ) - def get_conversation_object(self) -> ResponseConversation1: + def get_conversation_object(self) -> Optional[ResponseConversation1]: if not self._conversation_id: - return None # type: ignore + return None return ResponseConversation1(id=self._conversation_id) @@ -75,11 +75,11 @@ def _deserialize_create_response(payload: dict) -> CreateResponse: tools = payload.get("tools") if tools: - _deserialized["tools"] = [tool for tool in tools] # pylint: disable=unnecessary-comprehension + _deserialized["tools"] = list(tools) return _deserialized -def _deserialize_agent_reference(payload: dict) -> AgentReference: +def _deserialize_agent_reference(payload: dict) -> Optional[AgentReference]: if not payload: - return None # type: ignore + return None return AgentReference(**payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index 01ac72289e4e..4d9cc741ec81 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -1,4 +1,3 @@ -# pylint: disable=docstring-missing-return,docstring-missing-param,docstring-missing-rtype # --------------------------------------------------------- # 
Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- @@ -26,6 +25,13 @@ class FoundryIdGenerator(IdGenerator): """ def __init__(self, response_id: Optional[str], conversation_id: Optional[str]): + """Initialize the ID generator. + + :param response_id: An existing response ID, or ``None`` to generate one. + :type response_id: Optional[str] + :param conversation_id: An existing conversation ID, or ``None``. + :type conversation_id: Optional[str] + """ self.response_id = response_id or self._new_id("resp") self.conversation_id = conversation_id partition_source = self.conversation_id or self.response_id @@ -36,6 +42,13 @@ def __init__(self, response_id: Optional[str], conversation_id: Optional[str]): @classmethod def from_request(cls, payload: dict) -> "FoundryIdGenerator": + """Create a generator from an incoming request payload. + + :param payload: The raw request payload dictionary. + :type payload: dict + :return: A configured :class:`FoundryIdGenerator` instance. + :rtype: FoundryIdGenerator + """ response_id = payload.get("metadata", {}).get("response_id", None) conv_id_raw = payload.get("conversation", None) if isinstance(conv_id_raw, str): @@ -47,6 +60,13 @@ def from_request(cls, payload: dict) -> "FoundryIdGenerator": return cls(response_id, conv_id) def generate(self, category: Optional[str] = None) -> str: + """Generate a new unique ID for the given category. + + :param category: Optional prefix category (e.g. ``"msg"``, ``"func"``). Defaults to ``"id"``. + :type category: Optional[str] + :return: The generated unique identifier string. + :rtype: str + """ prefix = "id" if not category else category return self._new_id(prefix, partition_key=self._partition_id) @@ -63,12 +83,29 @@ def _new_id( partition_key: Optional[str] = None, partition_key_hint: str = "", ) -> str: - """ - Generates a new ID. 
- - Format matches the C# logic: - f"{prefix}{delimiter}{infix}{partitionKey}{entropy}" - (i.e., exactly one delimiter after prefix; no delimiter between entropy and partition key) + """Generate a new ID matching the C# FoundryIdGenerator format. + + Format: ``"{prefix}{delimiter}{infix}{partitionKey}{entropy}"`` + + :param prefix: The ID prefix (e.g. ``"resp"``, ``"msg"``). + :type prefix: str + :param string_length: Length of the random entropy portion. + :type string_length: int + :param partition_key_length: Length of the partition key. + :type partition_key_length: int + :param infix: Optional infix inserted between delimiter and partition key. + :type infix: Optional[str] + :param watermark: Optional alphanumeric watermark inserted mid-entropy. + :type watermark: str + :param delimiter: Delimiter between prefix and the rest of the ID. + :type delimiter: str + :param partition_key: Explicit partition key; if ``None``, derived or generated. + :type partition_key: Optional[str] + :param partition_key_hint: ID string to extract a partition key from. + :type partition_key_hint: str + :return: The generated ID string. + :rtype: str + :raises ValueError: If the watermark contains non-alphanumeric characters. """ entropy = FoundryIdGenerator._secure_entropy(string_length) @@ -96,10 +133,16 @@ def _new_id( @staticmethod def _secure_entropy(string_length: int) -> str: - """ - Generates a secure random alphanumeric string of exactly `string_length`. - Re-tries whole generation until the filtered base64 string is exactly the desired length, - matching the C# behavior. + """Generate a cryptographically secure alphanumeric string. + + Uses :func:`os.urandom` and base64 encoding, filtering to alphanumeric + characters and retrying until the exact length is reached. + + :param string_length: Desired length of the output string. + :type string_length: int + :return: A random alphanumeric string of exactly *string_length* characters. 
+ :rtype: str + :raises ValueError: If *string_length* is less than 1. """ if string_length < 1: raise ValueError("Must be greater than or equal to 1") @@ -120,11 +163,22 @@ def _extract_partition_id( partition_key_length: int = 18, delimiter: str = "_", ) -> str: - """ - Extracts partition key from an existing ID. - - Expected shape (per C# logic): "_" - We take the last `partition_key_length` characters from the *second* segment. + """Extract the partition key from an existing ID. + + Expected shape: ``"_"``. + Returns the first *partition_key_length* characters of the second segment. + + :param id_str: The ID string to extract from. + :type id_str: str + :param string_length: Expected entropy length used for validation. + :type string_length: int + :param partition_key_length: Number of characters to extract as partition key. + :type partition_key_length: int + :param delimiter: The delimiter separating ID segments. + :type delimiter: str + :return: The extracted partition key. + :rtype: str + :raises ValueError: If the ID format is invalid. """ if not id_str: raise ValueError("Id cannot be null or empty") diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py index 12b647d7adc7..0efcf1c6f20b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name import itertools from collections import defaultdict from typing import ( @@ -37,7 +37,7 @@ from .._exceptions import ToolInvocationError -class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: disable=C4748 +class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: disable=C4748 # azure-sdk: client-paging-methods-use-list """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. This client provides access to tools from both MCP (Model Context Protocol) servers @@ -55,7 +55,7 @@ class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: di :type api_version: str or None """ - def __init__( # pylint: disable=C4718 + def __init__( # pylint: disable=C4718 # azure-sdk: client-method-name-no-double-underscore self, endpoint: str, credential: "AsyncTokenCredential", diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py index 2d50089fef8f..c75532f0d3e4 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name from abc import ABC, abstractmethod from typing import Any, Awaitable, Collection, List, Mapping, MutableMapping, Optional, Union diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py index 80b25d78b20e..9604124cde9b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py @@ -40,8 +40,9 @@ def install(cls, :type user_resolver: Optional[Callable[[Request], Awaitable[Optional[UserInfo]]]] """ + user_info_var : _UserContextType = user_context or ContextVarUserProvider.default_user_info_context app.add_middleware(UserInfoContextMiddleware, # type: ignore[arg-type] - user_info_var=user_context or ContextVarUserProvider.default_user_info_context, + user_info_var=user_info_var, user_resolver=user_resolver or cls._default_user_resolver) @staticmethod diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py index 398a8c46fd5d..0b6600de7d6a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from __future__ import annotations -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name import inspect from types import TracebackType from typing import Any, Type, cast @@ -12,7 +12,7 @@ from 
azure.core.credentials_async import AsyncTokenCredential -async def _to_thread(func, *args, **kwargs): # pylint: disable=C4743 +async def _to_thread(func, *args, **kwargs): # pylint: disable=C4743 # azure-sdk: client-method-should-not-use-static-method """Compatibility wrapper for asyncio.to_thread (Python 3.8+). :param func: The function to run in a thread. diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py index 099d8dc45181..2cf533eb33fb 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py @@ -33,7 +33,7 @@ CreateResponse, Response as OpenAIResponse, ) -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponseCompletedEvent, ResponseCreatedEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py b/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py index af9812826941..3831f702564d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py @@ -29,7 +29,7 @@ from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, MCPListToolsItemResource, MCPListToolsTool, diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py index f6d2c08bb0b9..f4298d21d39c 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py @@ -3,7 +3,7 @@ from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponseCompletedEvent, ResponseCreatedEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py index c2e3bea53287..f01c4977cfb0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py @@ -10,7 +10,7 @@ set_current_app, ) from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ResponseCreatedEvent, ResponseErrorEvent +from azure.ai.agentserver.core.models._projects import ResponseCreatedEvent, ResponseErrorEvent from azure.ai.agentserver.core.server._response_metadata import ( METADATA_KEY, attach_foundry_metadata_to_response, diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py index 771ca0a0eb0c..35639ea8ae2c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py @@ -78,31 +78,31 @@ def test_logs_warning_for_invalid_resource_id(self): @pytest.mark.unit class TestGetApplicationInsightsConnstr: - """Tests for get_application_insights_connstr function.""" + """Tests for _get_application_insights_connstr 
function.""" def test_returns_connstr_from_env_var(self): """Test that connection string is returned from environment variable.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr with patch.dict(os.environ, {"APPLICATIONINSIGHTS_CONNECTION_STRING": "InstrumentationKey=test123"}, clear=False): - result = get_application_insights_connstr() + result = _get_application_insights_connstr() assert result == "InstrumentationKey=test123" def test_returns_none_when_no_connstr_and_no_project(self): """Test that None is returned when no connection string and no project endpoint.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr with patch.dict(os.environ, { "APPLICATIONINSIGHTS_CONNECTION_STRING": "", "AZURE_AI_PROJECT_ENDPOINT": "", "AGENT_PROJECT_RESOURCE_ID": "", }, clear=False): - result = get_application_insights_connstr() + result = _get_application_insights_connstr() assert result is None or result == "" def test_logs_debug_when_not_configured(self): """Test that debug message is logged when not configured.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr mock_logger = MagicMock() @@ -111,7 +111,7 @@ def test_logs_debug_when_not_configured(self): "AZURE_AI_PROJECT_ENDPOINT": "", "AGENT_PROJECT_RESOURCE_ID": "", }, clear=False): - result = get_application_insights_connstr(logger=mock_logger) + result = _get_application_insights_connstr(logger=mock_logger) # Debug should be called when not configured, or result should be None assert mock_logger.debug.called or result is None or result == "" From f724547333cb2004bc8f0153bdf87e8a374d9550 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 20:04:14 -0700 Subject: [PATCH 2/9] 
updated doc rst files --- ...ver.core.checkpoints.client.operations.rst | 7 + ...ai.agentserver.core.checkpoints.client.rst | 15 ++ .../azure.ai.agentserver.core.checkpoints.rst | 15 ++ ...zure.ai.agentserver.core.models.openai.rst | 8 -- ...re.ai.agentserver.core.models.projects.rst | 8 -- .../doc/azure.ai.agentserver.core.models.rst | 9 -- .../doc/azure.ai.agentserver.core.rst | 1 + ...azure.ai.agentserver.core.tools.client.rst | 2 +- .../doc/azure.ai.agentserver.core.tools.rst | 3 +- .../azure-ai-agentserver-core/doc/index.md | 136 ++++++++++++++++++ 10 files changed, 176 insertions(+), 28 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst delete mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst create mode 100644 sdk/agentserver/azure-ai-agentserver-core/doc/index.md diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst new file mode 100644 index 000000000000..3076ff010e1b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.core.checkpoints.client.operations package +=============================================================== + +.. 
automodule:: azure.ai.agentserver.core.checkpoints.client.operations + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst new file mode 100644 index 000000000000..cd6763335948 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst @@ -0,0 +1,15 @@ +azure.ai.agentserver.core.checkpoints.client package +==================================================== + +.. automodule:: azure.ai.agentserver.core.checkpoints.client + :inherited-members: + :members: + :undoc-members: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + azure.ai.agentserver.core.checkpoints.client.operations diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst new file mode 100644 index 000000000000..99b9dfa2ef50 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst @@ -0,0 +1,15 @@ +azure.ai.agentserver.core.checkpoints package +============================================= + +.. automodule:: azure.ai.agentserver.core.checkpoints + :inherited-members: + :members: + :undoc-members: + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 4 + + azure.ai.agentserver.core.checkpoints.client diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst deleted file mode 100644 index dd1cce6eecca..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst +++ /dev/null @@ -1,8 +0,0 @@ -azure.ai.agentserver.core.models.openai package -=============================================== - -.. automodule:: azure.ai.agentserver.core.models.openai - :inherited-members: - :members: - :undoc-members: - :ignore-module-all: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst deleted file mode 100644 index 38e0be4f331b..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst +++ /dev/null @@ -1,8 +0,0 @@ -azure.ai.agentserver.core.models.projects package -================================================= - -.. automodule:: azure.ai.agentserver.core.models.projects - :inherited-members: - :members: - :undoc-members: - :ignore-module-all: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst index 008b280c64de..120b01cccc5a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst @@ -6,12 +6,3 @@ azure.ai.agentserver.core.models package :members: :undoc-members: :ignore-module-all: - -Subpackages ------------ - -.. 
toctree:: - :maxdepth: 4 - - azure.ai.agentserver.core.models.openai - azure.ai.agentserver.core.models.projects diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst index b8f1dadf3a73..60005f2b04cc 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst @@ -13,6 +13,7 @@ Subpackages :maxdepth: 4 azure.ai.agentserver.core.application + azure.ai.agentserver.core.checkpoints azure.ai.agentserver.core.models azure.ai.agentserver.core.server azure.ai.agentserver.core.tools diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst index 8182914f69f9..14304731f5e7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst @@ -2,6 +2,6 @@ azure.ai.agentserver.core.tools.client package ============================================== .. automodule:: azure.ai.agentserver.core.tools.client - :inherited-members: + :inherited-members: BaseModel :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst index c112ec2beabd..6b798851fed2 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst @@ -2,10 +2,9 @@ azure.ai.agentserver.core.tools package ======================================= .. 
automodule:: azure.ai.agentserver.core.tools - :inherited-members: + :inherited-members: BaseModel :members: :undoc-members: - :exclude-members: BaseModel,model_json_schema Subpackages ----------- diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/index.md b/sdk/agentserver/azure-ai-agentserver-core/doc/index.md new file mode 100644 index 000000000000..bfce99e40065 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/index.md @@ -0,0 +1,136 @@ +# Azure AI Agent Server Adapter for Python + + +## Getting started + +```bash +pip install azure-ai-agentserver-core +``` + +## Key concepts + +This is the core package for Azure AI Agent server. It hosts your agent as a container on the cloud. + +You can talk to your agent using azure-ai-project sdk. + + +## Examples + +If your agent is not built using a supported framework such as LangGraph and Agent-framework, you can still make it compatible with Microsoft AI Foundry by manually implementing the predefined interface. + +```python +import datetime + +from azure.ai.agentserver.core import FoundryCBAgent +from azure.ai.agentserver.core.models import ( + CreateResponse, + Response as OpenAIResponse, +) +from azure.ai.agentserver.core.models._projects import ( + ItemContentOutputText, + ResponsesAssistantMessageItemResource, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, +) + + +def stream_events(text: str): + assembled = "" + for i, token in enumerate(text.split(" ")): + piece = token if i == len(text.split(" ")) - 1 else token + " " + assembled += piece + yield ResponseTextDeltaEvent(delta=piece) + # Done with text + yield ResponseTextDoneEvent(text=assembled) + + +async def agent_run(request_body: CreateResponse): + agent = request_body.agent + print(f"agent:{agent}") + + if request_body.stream: + return stream_events("I am mock agent with no intelligence in stream mode.") + + # Build assistant output content + output_content = [ + ItemContentOutputText( + text="I am mock agent with no 
intelligence.", + annotations=[], + ) + ] + + response = OpenAIResponse( + metadata={}, + temperature=0.0, + top_p=0.0, + user="me", + id="id", + created_at=datetime.datetime.now(), + output=[ + ResponsesAssistantMessageItemResource( + status="completed", + content=output_content, + ) + ], + ) + return response + + +my_agent = FoundryCBAgent() +my_agent.agent_run = agent_run + +if __name__ == "__main__": + my_agent.run() + +``` + +## Troubleshooting + +First run your agent with azure-ai-agentserver-core locally. + +If it works locally but fails in the cloud, check your logs in the Application Insights resource connected to your Azure AI Foundry Project. + + +### Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-agentserver-core" in the title or content. + + +## Next steps + +Please visit the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-core/samples) folder. There are several examples showing how to build your agent with azure-ai-agentserver. + + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments.
+ + +## Indices and tables + +- {ref}`genindex` +- {ref}`modindex` +- {ref}`search` + +```{toctree} +:caption: Developer Documentation +:glob: true +:maxdepth: 5 + +azure.ai.agentserver.core.rst + +``` + From ce4b89ecabe37d43a71bcd424e2dbf9fa72078aa Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 20:05:04 -0700 Subject: [PATCH 3/9] updated pyproject --- sdk/agentserver/azure-ai-agentserver-core/README.md | 2 +- sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/README.md b/sdk/agentserver/azure-ai-agentserver-core/README.md index ff60cf460196..cc420579e5fe 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/README.md +++ b/sdk/agentserver/azure-ai-agentserver-core/README.md @@ -26,7 +26,7 @@ from azure.ai.agentserver.core.models import ( CreateResponse, Response as OpenAIResponse, ) -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponsesAssistantMessageItemResource, ResponseTextDeltaEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 3829a7356919..dca59dffef43 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -75,5 +75,5 @@ combine-as-imports = true breaking = false # incompatible python version pyright = false verifytypes = false -latestdependency = false -dependencies = false \ No newline at end of file +# latestdependency = false +# dependencies = false \ No newline at end of file From 9c9ea78d34279d825da4296510a1bcd0d49a5ee1 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 21:30:53 -0700 Subject: [PATCH 4/9] fix -core related changes in af --- .../azure/ai/agentserver/agentframework/_agent_framework.py | 2 +- 
.../models/agent_framework_output_non_streaming_converter.py | 2 +- .../models/agent_framework_output_streaming_converter.py | 2 +- .../ai/agentserver/agentframework/models/agent_id_generator.py | 2 +- .../persistence/_foundry_checkpoint_repository.py | 2 +- .../agentframework/persistence/_foundry_checkpoint_storage.py | 2 +- .../tests/unit_tests/mocks/mock_checkpoint_client.py | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py index 837d1675ba05..92d243d46764 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py @@ -18,7 +18,7 @@ Response as OpenAIResponse, ResponseStreamEvent, ) -from azure.ai.agentserver.core.models.projects import ResponseErrorEvent, ResponseFailedEvent +from azure.ai.agentserver.core.models._projects import ResponseErrorEvent, ResponseFailedEvent from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # pylint: disable=import-error from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index aac9b24c445c..86094a617991 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ 
b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -12,7 +12,7 @@ from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponsesAssistantMessageItemResource, ) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 22f144a4b7a4..02d11958cf24 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -16,7 +16,7 @@ Response as OpenAIResponse, ResponseStreamEvent, ) -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( FunctionToolCallItemResource, FunctionToolCallOutputItemResource, ItemContentOutputText, diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py index abd2dd2c02ef..ca429683a1be 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py @@ -8,7 +8,7 @@ from typing import 
Optional from azure.ai.agentserver.core import AgentRunContext -from azure.ai.agentserver.core.models import projects +from azure.ai.agentserver.core.models import _projects as projects def generate_agent_id(context: AgentRunContext) -> Optional[projects.AgentId]: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py index acd89f1baef0..3e84763f4e68 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py @@ -8,7 +8,7 @@ from agent_framework import CheckpointStorage -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointSession, FoundryCheckpointClient, ) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py index 833c3647149a..63ba16dcd1ed 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py @@ -9,7 +9,7 @@ from agent_framework import WorkflowCheckpoint -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, FoundryCheckpointClient, diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py index ffc1e2fcc4c1..50a4458856ec 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py @@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, CheckpointSession, From d6aded2f3ddb8f8ef02287294e4a8055467f2a3c Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 11 Mar 2026 22:16:45 -0700 Subject: [PATCH 5/9] fix langgraph unit tests --- .../langgraph/checkpointer/_foundry_checkpoint_saver.py | 2 +- .../agentserver/langgraph/models/human_in_the_loop_helper.py | 4 ++-- .../langgraph/models/human_in_the_loop_json_helper.py | 4 ++-- .../models/response_api_non_stream_response_converter.py | 2 +- .../langgraph/models/response_api_request_converter.py | 4 +++- .../models/response_event_generators/item_content_helpers.py | 2 +- .../models/response_event_generators/item_resource_helpers.py | 2 +- .../response_content_part_event_generator.py | 2 +- .../response_event_generators/response_event_generator.py | 2 +- .../response_function_call_argument_event_generator.py | 2 +- .../response_output_item_event_generator.py | 2 +- .../response_output_text_event_generator.py | 2 +- .../response_stream_event_generator.py | 2 +- .../tests/unit_tests/mocks/mock_checkpoint_client.py | 2 +- .../tests/unit_tests/test_langgraph_request_converter.py | 2 +- 15 files changed, 19 insertions(+), 17 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py 
b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py index 999b87dc8fe8..82215640d60c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py @@ -21,7 +21,7 @@ from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, CheckpointSession, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py index 9f3c693800a1..bc8890339dca 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py @@ -11,8 +11,8 @@ ) from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.models.openai import (ResponseInputItemParam, ResponseInputParam) +from azure.ai.agentserver.core.models import _projects as project_models +from azure.ai.agentserver.core.models._openai import (ResponseInputItemParam, ResponseInputParam) from .._context import LanggraphRunContext INTERRUPT_NODE_NAME = "__interrupt__" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py index 
e1396ba90577..e3bf49a5ad4b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py @@ -11,8 +11,8 @@ ) from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.models.openai import ( +from azure.ai.agentserver.core.models import _projects as project_models +from azure.ai.agentserver.core.models._openai import ( ResponseInputItemParam, ) from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py index 7ec8bdf14f1a..cb3ecefb60f5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py @@ -11,7 +11,7 @@ from langchain_core.messages import AnyMessage from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from .human_in_the_loop_helper import ( HumanInTheLoopHelper, INTERRUPT_NODE_NAME, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py index 486545ef078a..f718695dbc1e 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py @@ -17,7 +17,9 @@ from langchain_core.messages.tool import ToolCall from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import CreateResponse, openai as openai_models, projects as project_models +from azure.ai.agentserver.core.models import ( + CreateResponse, _openai as openai_models, _projects as project_models +) logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py index ae169d866ee5..807d759cb151 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models class ItemContentHelper: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py index 8502ec13069b..9f5f1ac6d55b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py @@ -6,7 +6,7 @@ from langgraph.types import Interrupt -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from ..human_in_the_loop_helper import HumanInTheLoopHelper from ..utils import extract_function_call diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py index 4823de4411ae..8b989bc20da3 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py @@ -7,7 +7,7 @@ from langchain_core import messages as langgraph_messages -from azure.ai.agentserver.core.models import projects as project_models +from 
azure.ai.agentserver.core.models import _projects as project_models from . import item_content_helpers from .response_event_generator import ResponseEventGenerator, StreamEventState diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py index cd161b99d152..843cf18fe2dc 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py @@ -7,7 +7,7 @@ from langchain_core.messages import AnyMessage -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from ..._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py index 56c3bde68632..3a556fb70e7b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py @@ -9,7 +9,7 @@ from langchain_core.messages import AnyMessage from langgraph.types import Interrupt -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import 
_projects as project_models from . import ResponseEventGenerator, StreamEventState from ..human_in_the_loop_helper import HumanInTheLoopHelper from ..utils import extract_function_call diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py index 14eee3c571b2..181952077875 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py @@ -9,7 +9,7 @@ from langchain_core.messages import AnyMessage from langgraph.types import Interrupt -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from azure.ai.agentserver.core.server.common.id_generator.id_generator import IdGenerator from . 
import ResponseEventGenerator, StreamEventState, item_resource_helpers from .response_content_part_event_generator import ResponseContentPartEventGenerator diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py index 8d0e62650a2d..dc64f37733bc 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py @@ -5,7 +5,7 @@ # mypy: disable-error-code="return-value,assignment" from typing import List -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from .response_event_generator import ( ResponseEventGenerator, StreamEventState, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py index f19629eba94b..896e35829d98 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py @@ -8,7 +8,7 @@ from langchain_core import messages as langgraph_messages -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import 
_projects as project_models from .response_event_generator import ( ResponseEventGenerator, StreamEventState, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py index ffc1e2fcc4c1..50a4458856ec 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py @@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, CheckpointSession, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py index b1894f7350d5..056780cc9903 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py @@ -2,7 +2,7 @@ from langchain_core import messages as langgraph_messages from azure.ai.agentserver.core import models -from azure.ai.agentserver.core.models import projects as project_models +from azure.ai.agentserver.core.models import _projects as project_models from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIMessageRequestConverter From b4ce92df0aa7b88b44ccf736a6d3af0fc70deed9 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 12 Mar 2026 14:49:02 -0700 Subject: [PATCH 6/9] update to private python files --- .../models/human_in_the_loop_helper.py | 2 +- .../unit_tests/test_human_in_the_loop_helper.py | 2 +- .../azure/ai/agentserver/core/__init__.py | 4 ++-- 
.../ai/agentserver/core/server/{base.py => _base.py} | 2 +- .../{agent_run_context.py => _agent_run_context.py} | 4 ++-- .../server/common/{constants.py => _constants.py} | 0 ...ndry_id_generator.py => _foundry_id_generator.py} | 2 +- .../{id_generator.py => _id_generator.py} | 0 ...i.agentserver.core.server.common.id_generator.rst | 12 ++++++------ .../doc/azure.ai.agentserver.core.server.common.rst | 12 ++++++------ .../doc/azure.ai.agentserver.core.server.rst | 6 +++--- .../server/common/test_foundry_id_generator.py | 2 +- .../server/test_conversation_persistence.py | 2 +- .../azure/ai/agentserver/langgraph/langgraph.py | 2 +- .../models/human_in_the_loop_json_helper.py | 8 ++++---- .../response_output_item_event_generator.py | 2 +- .../tests/unit_tests/tools/conftest.py | 2 +- .../tests/unit_tests/tools/test_agent_integration.py | 2 +- 18 files changed, 33 insertions(+), 33 deletions(-) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/{base.py => _base.py} (99%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/{agent_run_context.py => _agent_run_context.py} (95%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/{constants.py => _constants.py} (100%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/{foundry_id_generator.py => _foundry_id_generator.py} (99%) rename sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/{id_generator.py => _id_generator.py} (100%) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py index 89bac4ca76c5..0b054dfe9e08 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py @@ -8,7 +8,7 @@ from agent_framework import Content, Message, WorkflowCheckpoint, WorkflowEvent from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME +from azure.ai.agentserver.core.server.common._constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py index 4b59922cce9a..c26e716ed4dc 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py @@ -2,7 +2,7 @@ import pytest from agent_framework import Content, Message, WorkflowEvent -from azure.ai.agentserver.core.server.common.constants import ( +from azure.ai.agentserver.core.server.common._constants import ( HUMAN_IN_THE_LOOP_FUNCTION_NAME, ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py index 88a13741bbac..39de11cefe55 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py @@ -5,8 +5,8 @@ from ._version import VERSION from .logger import configure as config_logging -from .server.base import FoundryCBAgent -from .server.common.agent_run_context import AgentRunContext +from .server._base import FoundryCBAgent +from 
.server.common._agent_run_context import AgentRunContext from .server._context import AgentServerContext config_logging() diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py similarity index 99% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py index 994d9045dd38..e1ce45188c34 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py @@ -36,7 +36,7 @@ build_foundry_agents_metadata_headers, try_attach_foundry_metadata_to_event, ) -from .common.agent_run_context import AgentRunContext +from .common._agent_run_context import AgentRunContext from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context from ..models import ( diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py similarity index 95% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py index 174685f652fe..750e4209d9e5 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py @@ -3,8 +3,8 @@ # --------------------------------------------------------- from typing import Optional -from .id_generator.foundry_id_generator import FoundryIdGenerator 
-from .id_generator.id_generator import IdGenerator +from .id_generator._foundry_id_generator import FoundryIdGenerator +from .id_generator._id_generator import IdGenerator from ...logger import get_logger from ...models import CreateResponse from ...models._projects import AgentId, AgentReference, ResponseConversation1 diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_constants.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_constants.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py similarity index 99% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py index 4d9cc741ec81..0c0f91cbb36d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py @@ -8,7 +8,7 @@ import re from typing import Optional -from .id_generator import IdGenerator +from ._id_generator import IdGenerator _WATERMARK_RE = re.compile(r"^[A-Za-z0-9]*$") diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_id_generator.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_id_generator.py diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst index cf935aa1d1ed..68f155131f5c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst @@ -9,18 +9,18 @@ azure.ai.agentserver.core.server.common.id\_generator package Submodules ---------- -azure.ai.agentserver.core.server.common.id\_generator.foundry\_id\_generator module ------------------------------------------------------------------------------------ +azure.ai.agentserver.core.server.common.id\_generator.\_foundry\_id\_generator module +------------------------------------------------------------------------------------ -.. automodule:: azure.ai.agentserver.core.server.common.id_generator.foundry_id_generator +.. automodule:: azure.ai.agentserver.core.server.common.id_generator._foundry_id_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.core.server.common.id\_generator.id\_generator module --------------------------------------------------------------------------- +azure.ai.agentserver.core.server.common.id\_generator.\_id\_generator module +--------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.id_generator.id_generator +.. 
automodule:: azure.ai.agentserver.core.server.common.id_generator._id_generator :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst index 8fb5b52e4465..fd02e856642c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst @@ -17,18 +17,18 @@ Subpackages Submodules ---------- -azure.ai.agentserver.core.server.common.agent\_run\_context module ------------------------------------------------------------------- +azure.ai.agentserver.core.server.common.\_agent\_run\_context module +------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.agent_run_context +.. automodule:: azure.ai.agentserver.core.server.common._agent_run_context :inherited-members: :members: :undoc-members: -azure.ai.agentserver.core.server.common.constants module --------------------------------------------------------- +azure.ai.agentserver.core.server.common.\_constants module +---------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.constants +.. 
automodule:: azure.ai.agentserver.core.server.common._constants :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst index b82fa765b839..8363ec9e32d8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst @@ -17,10 +17,10 @@ Subpackages Submodules ---------- -azure.ai.agentserver.core.server.base module --------------------------------------------- +azure.ai.agentserver.core.server.\_base module +---------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.base +.. automodule:: azure.ai.agentserver.core.server._base :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py index a46f45f7c739..fb6dc8858c86 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from azure.ai.agentserver.core.server.common.id_generator.foundry_id_generator import FoundryIdGenerator +from azure.ai.agentserver.core.server.common.id_generator._foundry_id_generator import FoundryIdGenerator def test_conversation_id_none_uses_response_partition(): diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py index a38871197cba..00137abecf15 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py @@ -42,7 +42,7 @@ async def __anext__(self): def create_mock_agent(): """Create a mock FoundryCBAgent without calling __init__.""" - from azure.ai.agentserver.core.server.base import FoundryCBAgent + from azure.ai.agentserver.core.server._base import FoundryCBAgent # Create instance without calling __init__ agent = object.__new__(FoundryCBAgent) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 37f7080ba81f..14b268056a6a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -11,7 +11,7 @@ from azure.ai.agentserver.core.constants import Constants from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.server.base import FoundryCBAgent +from azure.ai.agentserver.core.server._base import FoundryCBAgent from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # 
pylint:disable=import-error,no-name-in-module from ._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py index e3bf49a5ad4b..eed2be358235 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py @@ -15,7 +15,7 @@ from azure.ai.agentserver.core.models._openai import ( ResponseInputItemParam, ) -from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME +from azure.ai.agentserver.core.server.common._constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME from .human_in_the_loop_helper import HumanInTheLoopHelper @@ -43,7 +43,7 @@ def convert_interrupt(self, interrupt_info: Interrupt) -> Optional[project_model status="in_progress", ) - def interrupt_to_function_call(self, interrupt: Interrupt) : + def interrupt_to_function_call(self, interrupt: Interrupt) -> tuple[Optional[str], Optional[str], Optional[str]]: """ Convert an Interrupt to a function call tuple. @@ -51,7 +51,7 @@ def interrupt_to_function_call(self, interrupt: Interrupt) : :type interrupt: Interrupt :return: A tuple of (name, call_id, argument). 
- :rtype: tuple[str | None, str | None, str | None] + :rtype: tuple[Optional[str], Optional[str], Optional[str]] """ if isinstance(interrupt.value, str): arguments = interrupt.value @@ -63,7 +63,7 @@ def interrupt_to_function_call(self, interrupt: Interrupt) : arguments = str(interrupt.value) return HUMAN_IN_THE_LOOP_FUNCTION_NAME, interrupt.id, arguments - def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Union[Command, None]: + def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Optional[Command]: output_str = input_item.get("output") if not isinstance(output_str, str): logger.error("Invalid output type in function call output: %s", input_item) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py index 181952077875..5facf7206bda 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py @@ -10,7 +10,7 @@ from langgraph.types import Interrupt from azure.ai.agentserver.core.models import _projects as project_models -from azure.ai.agentserver.core.server.common.id_generator.id_generator import IdGenerator +from azure.ai.agentserver.core.server.common.id_generator._id_generator import IdGenerator from . 
import ResponseEventGenerator, StreamEventState, item_resource_helpers from .response_content_part_event_generator import ResponseContentPartEventGenerator from .response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py index 7efc298559c1..f4e962d9d4c5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py @@ -21,7 +21,7 @@ SchemaProperty, SchemaType, ) -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.core.server.common._agent_run_context import AgentRunContext from azure.ai.agentserver.langgraph._context import LanggraphRunContext from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext from azure.ai.agentserver.langgraph.tools._resolver import ResolvedTools diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py index eea917e54fd4..5f9115db66cf 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py @@ -23,7 +23,7 @@ SchemaProperty, SchemaType, ) -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.core.server.common._agent_run_context import AgentRunContext from azure.ai.agentserver.langgraph._context import LanggraphRunContext from azure.ai.agentserver.langgraph.tools import use_foundry_tools from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext From 
57768dccd54518747dc29706ab2b27fea1e5955d Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 12 Mar 2026 16:53:55 -0700 Subject: [PATCH 7/9] update private file path --- .../ai/agentserver/langgraph/__init__.py | 4 +- .../langgraph/{langgraph.py => _langgraph.py} | 6 +-- .../agentserver/langgraph/models/__init__.py | 17 +++++++ ...helper.py => _human_in_the_loop_helper.py} | 0 ...r.py => _human_in_the_loop_json_helper.py} | 2 +- ...onverter.py => _response_api_converter.py} | 0 ....py => _response_api_default_converter.py} | 14 +++--- ...onse_api_non_stream_response_converter.py} | 4 +- ....py => _response_api_request_converter.py} | 0 ...response_api_stream_response_converter.py} | 2 +- .../langgraph/models/{utils.py => _utils.py} | 0 .../response_event_generators/__init__.py | 4 +- ...nt_helpers.py => _item_content_helpers.py} | 0 ...e_helpers.py => _item_resource_helpers.py} | 4 +- ..._response_content_part_event_generator.py} | 6 +-- ...erator.py => _response_event_generator.py} | 0 ...function_call_argument_event_generator.py} | 4 +- ... _response_output_item_event_generator.py} | 8 ++-- ... 
_response_output_text_event_generator.py} | 2 +- ...py => _response_stream_event_generator.py} | 4 +- ...graph.models.response_event_generators.rst | 48 +++++++++---------- .../azure.ai.agentserver.langgraph.models.rst | 48 +++++++++---------- .../doc/azure.ai.agentserver.langgraph.rst | 6 +-- .../samples/custom_state/main.py | 3 +- .../test_conversation_id_optional.py | 2 +- .../unit_tests/test_historical_items_fetch.py | 12 ++--- .../test_langgraph_request_converter.py | 2 +- 27 files changed, 109 insertions(+), 93 deletions(-) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/{langgraph.py => _langgraph.py} (97%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{human_in_the_loop_helper.py => _human_in_the_loop_helper.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{human_in_the_loop_json_helper.py => _human_in_the_loop_json_helper.py} (98%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_converter.py => _response_api_converter.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_default_converter.py => _response_api_default_converter.py} (97%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_non_stream_response_converter.py => _response_api_non_stream_response_converter.py} (99%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_request_converter.py => _response_api_request_converter.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{response_api_stream_response_converter.py => _response_api_stream_response_converter.py} (98%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/{utils.py => _utils.py} (100%) 
rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{item_content_helpers.py => _item_content_helpers.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{item_resource_helpers.py => _item_resource_helpers.py} (97%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_content_part_event_generator.py => _response_content_part_event_generator.py} (96%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_event_generator.py => _response_event_generator.py} (100%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_function_call_argument_event_generator.py => _response_function_call_argument_event_generator.py} (98%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_output_item_event_generator.py => _response_output_item_event_generator.py} (95%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_output_text_event_generator.py => _response_output_text_event_generator.py} (99%) rename sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/{response_stream_event_generator.py => _response_stream_event_generator.py} (97%) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 18e4d6bfbdc2..fc9aaf43880c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ 
b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -10,11 +10,11 @@ from ._context import LanggraphRunContext from ._version import VERSION -from .langgraph import LangGraphAdapter +from ._langgraph import LangGraphAdapter if TYPE_CHECKING: # pragma: no cover from langgraph.graph.state import CompiledStateGraph - from .models.response_api_converter import ResponseAPIConverter + from .models._response_api_converter import ResponseAPIConverter from azure.core.credentials_async import AsyncTokenCredential from azure.core.credentials import TokenCredential diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py similarity index 97% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py index 14b268056a6a..c05185692972 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py @@ -15,9 +15,9 @@ from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # pylint:disable=import-error,no-name-in-module from ._context import LanggraphRunContext -from .models.response_api_converter import GraphInputArguments, ResponseAPIConverter -from .models.response_api_default_converter import ResponseAPIDefaultConverter -from .models.utils import is_state_schema_valid +from .models._response_api_converter import GraphInputArguments, ResponseAPIConverter +from .models._response_api_default_converter import ResponseAPIDefaultConverter +from .models._utils import is_state_schema_valid from .tools._context import FoundryToolContext from .tools._resolver 
import FoundryLangChainToolResolver diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py index d540fd20468c..c4a276af508a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py @@ -1,3 +1,20 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- + +from ._response_api_converter import GraphInputArguments, ResponseAPIConverter +from ._response_api_default_converter import ResponseAPIDefaultConverter +from ._response_api_request_converter import ( + ResponseAPIMessageRequestConverter, + ResponseAPIRequestConverter, + convert_item_resource_to_message, +) + +__all__ = [ + "ResponseAPIConverter", + "GraphInputArguments", + "ResponseAPIDefaultConverter", + "ResponseAPIRequestConverter", + "ResponseAPIMessageRequestConverter", + "convert_item_resource_to_message", +] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py similarity index 98% 
rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py index eed2be358235..ea32d6683232 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py @@ -17,7 +17,7 @@ ) from azure.ai.agentserver.core.server.common._constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME -from .human_in_the_loop_helper import HumanInTheLoopHelper +from ._human_in_the_loop_helper import HumanInTheLoopHelper logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py similarity index 97% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py index b64afc900f9d..448374038f86 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py @@ -15,17 +15,17 @@ from azure.ai.agentserver.core.logger import get_logger, get_project_endpoint from azure.ai.agentserver.core.models import Response, ResponseStreamEvent -from .human_in_the_loop_helper import HumanInTheLoopHelper -from .human_in_the_loop_json_helper import HumanInTheLoopJsonHelper -from .response_api_converter import GraphInputArguments, ResponseAPIConverter -from .response_api_non_stream_response_converter import (ResponseAPIMessagesNonStreamResponseConverter, - ResponseAPINonStreamResponseConverter) -from .response_api_request_converter import ( +from ._human_in_the_loop_helper import HumanInTheLoopHelper +from ._human_in_the_loop_json_helper import HumanInTheLoopJsonHelper +from ._response_api_converter import GraphInputArguments, ResponseAPIConverter +from ._response_api_non_stream_response_converter import (ResponseAPIMessagesNonStreamResponseConverter, + ResponseAPINonStreamResponseConverter) +from ._response_api_request_converter import ( ResponseAPIMessageRequestConverter, ResponseAPIRequestConverter, convert_item_resource_to_message, ) -from .response_api_stream_response_converter import ResponseAPIMessagesStreamResponseConverter +from ._response_api_stream_response_converter import ResponseAPIMessagesStreamResponseConverter from .._context import LanggraphRunContext logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py similarity index 99% rename from 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py index cb3ecefb60f5..5e688e42664c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py @@ -12,11 +12,11 @@ from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import _projects as project_models -from .human_in_the_loop_helper import ( +from ._human_in_the_loop_helper import ( HumanInTheLoopHelper, INTERRUPT_NODE_NAME, ) -from .utils import extract_function_call +from ._utils import extract_function_call from .._context import LanggraphRunContext logger = get_logger() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py similarity index 98% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py rename to 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py index 02f79c589a96..0c6f772dece5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py @@ -10,7 +10,7 @@ from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import ResponseStreamEvent -from .human_in_the_loop_helper import HumanInTheLoopHelper +from ._human_in_the_loop_helper import HumanInTheLoopHelper from .response_event_generators import ( ResponseEventGenerator, ResponseStreamEventGenerator, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/utils.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_utils.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/utils.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_utils.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py index 7b9f0362e4ba..83c9590a3b58 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py @@ -1,8 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from .response_event_generator import ResponseEventGenerator, StreamEventState -from .response_stream_event_generator import ResponseStreamEventGenerator +from ._response_event_generator import ResponseEventGenerator, StreamEventState +from ._response_stream_event_generator import ResponseStreamEventGenerator __all__ = [ "ResponseEventGenerator", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py similarity index 97% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py index 9f5f1ac6d55b..7c97d7adee0a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py @@ -8,8 +8,8 @@ from azure.ai.agentserver.core.models 
import _projects as project_models -from ..human_in_the_loop_helper import HumanInTheLoopHelper -from ..utils import extract_function_call +from .._human_in_the_loop_helper import HumanInTheLoopHelper +from .._utils import extract_function_call class ItemResourceHelper: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py similarity index 96% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py index 8b989bc20da3..213a9f78e348 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py @@ -9,9 +9,9 @@ from azure.ai.agentserver.core.models import _projects as project_models -from . import item_content_helpers -from .response_event_generator import ResponseEventGenerator, StreamEventState -from .response_output_text_event_generator import ResponseOutputTextEventGenerator +from . 
import _item_content_helpers as item_content_helpers +from ._response_event_generator import ResponseEventGenerator, StreamEventState +from ._response_output_text_event_generator import ResponseOutputTextEventGenerator class ResponseContentPartEventGenerator(ResponseEventGenerator): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py similarity index 100% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py similarity index 98% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py index 3a556fb70e7b..0f2e93fe0ef1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py @@ -11,8 
+11,8 @@ from azure.ai.agentserver.core.models import _projects as project_models from . import ResponseEventGenerator, StreamEventState -from ..human_in_the_loop_helper import HumanInTheLoopHelper -from ..utils import extract_function_call +from .._human_in_the_loop_helper import HumanInTheLoopHelper +from .._utils import extract_function_call from ..._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py similarity index 95% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py index 5facf7206bda..4c98b4b1a18d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py @@ -11,10 +11,10 @@ from azure.ai.agentserver.core.models import _projects as project_models from azure.ai.agentserver.core.server.common.id_generator._id_generator import IdGenerator -from . import ResponseEventGenerator, StreamEventState, item_resource_helpers -from .response_content_part_event_generator import ResponseContentPartEventGenerator -from .response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator -from ..human_in_the_loop_helper import HumanInTheLoopHelper +from . 
import ResponseEventGenerator, StreamEventState, _item_resource_helpers as item_resource_helpers +from ._response_content_part_event_generator import ResponseContentPartEventGenerator +from ._response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator +from .._human_in_the_loop_helper import HumanInTheLoopHelper from ..._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py similarity index 99% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py index dc64f37733bc..21772abd1ea7 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py @@ -6,7 +6,7 @@ from typing import List from azure.ai.agentserver.core.models import _projects as project_models -from .response_event_generator import ( +from ._response_event_generator import ( ResponseEventGenerator, StreamEventState, ) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py similarity index 97% rename from 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py index 896e35829d98..bb361639601d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py @@ -9,11 +9,11 @@ from langchain_core import messages as langgraph_messages from azure.ai.agentserver.core.models import _projects as project_models -from .response_event_generator import ( +from ._response_event_generator import ( ResponseEventGenerator, StreamEventState, ) -from .response_output_item_event_generator import ResponseOutputItemEventGenerator +from ._response_output_item_event_generator import ResponseOutputItemEventGenerator from ..._context import LanggraphRunContext diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst index af7cc69bd859..bf53fee7e8fa 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst @@ -9,66 +9,66 @@ azure.ai.agentserver.langgraph.models.response\_event\_generators package Submodules ---------- -azure.ai.agentserver.langgraph.models.response\_event\_generators.item\_content\_helpers module ------------------------------------------------------------------------------------------------ 
+azure.ai.agentserver.langgraph.models.response\_event\_generators._item\_content\_helpers module +------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.item_content_helpers +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._item_content_helpers :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.item\_resource\_helpers module ------------------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models.response\_event\_generators._item\_resource\_helpers module +-------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.item_resource_helpers +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._item_resource_helpers :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_content\_part\_event\_generator module ------------------------------------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_content\_part\_event\_generator module +-------------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_content_part_event_generator +.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_content_part_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_event\_generator module ---------------------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_event\_generator module +----------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_event_generator +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_function\_call\_argument\_event\_generator module ------------------------------------------------------------------------------------------------------------------------------ +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_function\_call\_argument\_event\_generator module +------------------------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_function_call_argument_event_generator +.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_function_call_argument_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_output\_item\_event\_generator module ------------------------------------------------------------------------------------------------------------------ +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_output\_item\_event\_generator module +------------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_output_item_event_generator +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_output_item_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_output\_text\_event\_generator module ------------------------------------------------------------------------------------------------------------------ +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_output\_text\_event\_generator module +------------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_output_text_event_generator +.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_output_text_event_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_stream\_event\_generator module ------------------------------------------------------------------------------------------------------------ +azure.ai.agentserver.langgraph.models.response\_event\_generators._response\_stream\_event\_generator module +------------------------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_stream_event_generator +.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators._response_stream_event_generator :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst index aba857c3b64a..e5d72b41cd42 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst @@ -17,66 +17,66 @@ Subpackages Submodules ---------- -azure.ai.agentserver.langgraph.models.human\_in\_the\_loop\_helper module -------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._human\_in\_the\_loop\_helper module +------------------------------------------------------------------------------ -.. automodule:: azure.ai.agentserver.langgraph.models.human_in_the_loop_helper +.. 
automodule:: azure.ai.agentserver.langgraph.models._human_in_the_loop_helper :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.human\_in\_the\_loop\_json\_helper module -------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._human\_in\_the\_loop\_json\_helper module +--------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.human_in_the_loop_json_helper +.. automodule:: azure.ai.agentserver.langgraph.models._human_in_the_loop_json_helper :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_converter module ---------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_converter module +----------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_converter +.. automodule:: azure.ai.agentserver.langgraph.models._response_api_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_default\_converter module ------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_default\_converter module +-------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_default_converter +.. 
automodule:: azure.ai.agentserver.langgraph.models._response_api_default_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_non\_stream\_response\_converter module --------------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_non\_stream\_response\_converter module +---------------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_non_stream_response_converter +.. automodule:: azure.ai.agentserver.langgraph.models._response_api_non_stream_response_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_request\_converter module ------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_request\_converter module +-------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_request_converter +.. automodule:: azure.ai.agentserver.langgraph.models._response_api_request_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.response\_api\_stream\_response\_converter module ---------------------------------------------------------------------------------------- +azure.ai.agentserver.langgraph.models._response\_api\_stream\_response\_converter module +----------------------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_stream_response_converter +.. 
automodule:: azure.ai.agentserver.langgraph.models._response_api_stream_response_converter :inherited-members: :members: :undoc-members: -azure.ai.agentserver.langgraph.models.utils module --------------------------------------------------- +azure.ai.agentserver.langgraph.models._utils module +--------------------------------------------------- -.. automodule:: azure.ai.agentserver.langgraph.models.utils +.. automodule:: azure.ai.agentserver.langgraph.models._utils :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst index deefeb67fa96..1ca9d33660a9 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst @@ -18,10 +18,10 @@ Subpackages Submodules ---------- -azure.ai.agentserver.langgraph.langgraph module ------------------------------------------------ +azure.ai.agentserver.langgraph._langgraph module +------------------------------------------------ -.. automodule:: azure.ai.agentserver.langgraph.langgraph +.. 
automodule:: azure.ai.agentserver.langgraph._langgraph :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py index ec45dceccfc8..5a5b22993a19 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py @@ -12,8 +12,7 @@ from azure.ai.agentserver.core.models import Response, ResponseStreamEvent from azure.ai.agentserver.langgraph import LanggraphRunContext, from_langgraph -from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter -from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIRequestConverter +from azure.ai.agentserver.langgraph.models import ResponseAPIDefaultConverter, ResponseAPIRequestConverter load_dotenv() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py index 6f8ff173e6c3..727b501b9b3d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py @@ -5,7 +5,7 @@ import pytest -from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter +from azure.ai.agentserver.langgraph.models import ResponseAPIDefaultConverter class DummyGraphState: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py index bc7cfbee93e4..2b28ae158c4f 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py @@ -4,7 +4,7 @@ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage -from azure.ai.agentserver.langgraph.models.response_api_request_converter import convert_item_resource_to_message +from azure.ai.agentserver.langgraph.models import convert_item_resource_to_message @pytest.mark.unit @@ -113,7 +113,7 @@ def test_convert_message_with_empty_content_list(self): def _create_converter(): """Helper to create a ResponseAPIDefaultConverter with mocked graph.""" - from azure.ai.agentserver.langgraph.models.response_api_default_converter import ( + from azure.ai.agentserver.langgraph.models import ( ResponseAPIDefaultConverter, ) @@ -122,7 +122,7 @@ def _create_converter(): mock_graph.checkpointer = None with patch( - "azure.ai.agentserver.langgraph.models.utils.is_state_schema_valid", + "azure.ai.agentserver.langgraph.models._utils.is_state_schema_valid", return_value=True, ): return ResponseAPIDefaultConverter(graph=mock_graph) @@ -381,7 +381,7 @@ async def test_fetch_returns_empty_when_no_endpoint(self): converter = _create_converter() with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value=None, ): result = await converter._fetch_historical_items("conv_123") @@ -393,7 +393,7 @@ async def test_fetch_returns_empty_on_import_error(self): converter = _create_converter() with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value="https://test.endpoint.com", ): with patch.dict("sys.modules", {"openai": None}): @@ -421,7 +421,7 @@ async 
def mock_list(*args, **kwargs): mock_client.conversations.items.list = mock_list with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value="https://test.endpoint.com", ): with patch( diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py index 056780cc9903..a68e5be13f13 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py @@ -3,7 +3,7 @@ from azure.ai.agentserver.core import models from azure.ai.agentserver.core.models import _projects as project_models -from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIMessageRequestConverter +from azure.ai.agentserver.langgraph.models import ResponseAPIMessageRequestConverter @pytest.mark.unit From b0ea657a972060bf375b840875d36c80123cb6a5 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 12 Mar 2026 20:02:26 -0700 Subject: [PATCH 8/9] fix pylint --- .../ai/agentserver/langgraph/_langgraph.py | 29 ++++----- .../models/_human_in_the_loop_json_helper.py | 6 +- .../models/_response_api_converter.py | 1 - .../models/_response_api_default_converter.py | 64 ++++++++++++------- ...ponse_api_non_stream_response_converter.py | 21 +++--- .../models/_response_api_request_converter.py | 32 +++++----- ..._response_api_stream_response_converter.py | 29 +++++---- .../_item_resource_helpers.py | 19 +++--- .../_response_content_part_event_generator.py | 26 ++++---- .../_response_event_generator.py | 42 +++++++----- ..._function_call_argument_event_generator.py | 23 +++---- .../_response_output_item_event_generator.py | 18 +++--- 
.../_response_output_text_event_generator.py | 22 +++---- .../_response_stream_event_generator.py | 22 ++++--- 14 files changed, 193 insertions(+), 161 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py index c05185692972..6e57f3037cdf 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py @@ -1,11 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,broad-exception-caught,no-member -# mypy: disable-error-code="assignment,arg-type" import os import re -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, List, Optional, cast from langgraph.graph.state import CompiledStateGraph @@ -49,7 +47,7 @@ def __init__( :param converter: custom response converter. 
:type converter: Optional[ResponseAPIConverter] """ - super().__init__(credentials=credentials) # pylint: disable=unexpected-keyword-arg + super().__init__(credentials=credentials) # pylint: disable=unexpected-keyword-arg self._graph = graph self._tool_resolver = FoundryLangChainToolResolver() self.azure_ai_tracer = None @@ -103,8 +101,8 @@ def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=No name=self.get_agent_identifier(), ) logger.info("AzureAIOpenTelemetryTracer initialized successfully.") - except Exception as e: - logger.error(f"Failed to import AzureAIOpenTelemetryTracer, ignore: {e}") + except Exception as error: # pylint: disable=broad-except + logger.error("Failed to initialize AzureAIOpenTelemetryTracer, ignore: %s", error) def setup_otlp_exporter(self, endpoint, provider): endpoint = self.format_otlp_endpoint(endpoint) @@ -129,8 +127,8 @@ async def agent_run_non_stream(self, input_arguments: GraphInputArguments): result = await self._graph.ainvoke(**input_arguments) output = await self.converter.convert_response_non_stream(result, input_arguments["context"]) return output - except Exception as e: - logger.error(f"Error during agent run: {e}", exc_info=True) + except Exception as e: # pylint: disable=broad-except + logger.error("Error during agent run: %s", e, exc_info=True) raise e async def agent_run_astream(self, @@ -145,14 +143,15 @@ async def agent_run_astream(self, :rtype: AsyncGenerator[dict] """ try: - logger.info(f"Starting streaming agent run {input_arguments['context'].agent_run.response_id}") + logger.info("Starting streaming agent run %s", input_arguments["context"].agent_run.response_id) stream = self._graph.astream(**input_arguments) async for output_event in self.converter.convert_response_stream( - stream, - input_arguments["context"]): + stream, + input_arguments["context"], + ): yield output_event - except Exception as e: - logger.error(f"Error during streaming agent run: {e}", exc_info=True) + except 
Exception as e: # pylint: disable=broad-except + logger.error("Error during streaming agent run: %s", e, exc_info=True) raise e def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: LanggraphRunContext): @@ -171,11 +170,11 @@ def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: configurable["thread_id"] = thread_id else: configurable["thread_id"] = f"langgraph-{input_arguments['context'].agent_run.response_id}" - logger.debug(f"Conversation ID not provided, generate one: thread_id={configurable['thread_id']}") + logger.debug("Conversation ID not provided, generate one: thread_id=%s", configurable["thread_id"]) config["configurable"] = configurable context.attach_to_config(config) - callbacks = config.get("callbacks", []) # mypy: ignore-errors + callbacks = cast(List[object], config.get("callbacks") or []) if self.azure_ai_tracer and self.azure_ai_tracer not in callbacks: callbacks.append(self.azure_ai_tracer) config["callbacks"] = callbacks diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py index ea32d6683232..a4e868bee152 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- import json -from typing import Optional, Union +from typing import Optional from langgraph.types import ( Command, @@ -58,8 +58,8 @@ def interrupt_to_function_call(self, interrupt: Interrupt) -> tuple[Optional[str else: try: arguments = json.dumps(interrupt.value) - except Exception as e: # pragma: no cover - fallback # pylint: disable=broad-exception-caught - 
logger.error("Failed to serialize interrupt value to JSON: %s, error: %s", interrupt.value, e) + except (TypeError, ValueError) as error: # pragma: no cover - fallback + logger.error("Failed to serialize interrupt value to JSON: %s, error: %s", interrupt.value, error) arguments = str(interrupt.value) return HUMAN_IN_THE_LOOP_FUNCTION_NAME, interrupt.id, arguments diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py index 32cbf93a4bfb..caf3c95d5994 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# mypy: disable-error-code="call-overload,override" """Base interface for converting between LangGraph internal state and OpenAI-style responses. A ResponseAPIConverter implementation bridges: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py index 448374038f86..d18cde162fd8 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation from __future__ import annotations import time @@ -180,7 +179,7 @@ async def _convert_request_input_with_history( prev_state, context.agent_run.request.get("input") ) if command is not None: - logger.info(f"HITL command detected for conversation {conversation_id}") + logger.info("HITL command detected for conversation %s", conversation_id) return command # Convert current request input @@ -190,7 +189,7 @@ async def _convert_request_input_with_history( # Check if checkpoint exists has_checkpoint = prev_state is not None and prev_state.values is not None and len(prev_state.values) > 0 if has_checkpoint: - logger.info(f"Checkpoint found for conversation {conversation_id}, using existing state") + logger.info("Checkpoint found for conversation %s, using existing state", conversation_id) return current_input # No checkpoint - try to fetch historical items from AIProjectClient @@ -198,11 +197,11 @@ async def _convert_request_input_with_history( logger.debug("No conversation_id provided, skipping historical items fetch") return current_input - logger.info(f"No checkpoint found for conversation {conversation_id}, fetching historical items") + logger.info("No checkpoint found for conversation %s, fetching historical items", conversation_id) historical_messages = await self._fetch_historical_items(conversation_id) if not historical_messages: - logger.info(f"No historical items found for conversation {conversation_id}") + logger.info("No historical items found for conversation %s", conversation_id) return current_input # Merge historical messages with current input, avoiding duplicates @@ -238,7 +237,7 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage from openai import AsyncOpenAI from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider - logger.debug(f"Creating AsyncOpenAI client for endpoint: 
{endpoint}/openai") + logger.debug("Creating AsyncOpenAI client for endpoint: %s/openai", endpoint) credential = DefaultAzureCredential() token_provider = get_bearer_token_provider(credential, "https://ai.azure.com/.default") @@ -252,7 +251,7 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage items.append(item) items.reverse() - logger.info(f"Fetched {len(items)} historical items from conversation {conversation_id}") + logger.info("Fetched %s historical items from conversation %s", len(items), conversation_id) # Convert items to LangGraph messages messages = [] @@ -267,11 +266,20 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage return messages - except ImportError as e: - logger.warning(f"OpenAI or Azure Identity not available, cannot fetch historical items: {e}", exc_info=True) + except ImportError as error: + logger.warning( + "OpenAI or Azure Identity not available, cannot fetch historical items: %s", + error, + exc_info=True, + ) return [] - except Exception as e: # pylint: disable=broad-except - logger.warning(f"Failed to fetch historical items for conversation {conversation_id}: {e}", exc_info=True) + except Exception as error: # pylint: disable=broad-except + logger.warning( + "Failed to fetch historical items for conversation %s: %s", + conversation_id, + error, + exc_info=True, + ) return [] def _merge_messages_without_duplicates( @@ -298,8 +306,10 @@ def _merge_messages_without_duplicates( if not current_messages or not historical_messages: merged = list(historical_messages) + list(current_messages) logger.info( - f"Merged {len(historical_messages)} historical items with {len(current_messages)} " - f"current items for conversation {conversation_id}" + "Merged %s historical items with %s current items for conversation %s", + len(historical_messages), + len(current_messages), + conversation_id, ) return merged @@ -322,31 +332,37 @@ def _merge_messages_without_duplicates( curr_content = 
self._normalize_content(curr_msg.content if hasattr(curr_msg, 'content') else "") logger.debug( - f"Comparing message {i}: historical({hist_type}, '{hist_content}') " - f"vs current({curr_type}, '{curr_content}')" + "Comparing message %s: historical(%s, '%s') vs current(%s, '%s')", + i, + hist_type, + hist_content, + curr_type, + curr_content, ) # Compare type and content if hist_type != curr_type: - logger.debug(f"Message {i} type mismatch: {hist_type} != {curr_type}") + logger.debug("Message %s type mismatch: %s != %s", i, hist_type, curr_type) all_match = False break if hist_content != curr_content: - logger.debug(f"Message {i} content mismatch") + logger.debug("Message %s content mismatch", i) all_match = False break if all_match: # Remove the last N historical messages (they're duplicates) filtered_historical = filtered_historical[:-n] - logger.info(f"Filtered {n} duplicate items from end of historical items") + logger.info("Filtered %s duplicate items from end of historical items", n) # Prepend historical messages to current messages merged = filtered_historical + list(current_messages) logger.info( - f"Merged {len(filtered_historical)} historical items with {len(current_messages)} " - f"current items for conversation {conversation_id}" + "Merged %s historical items with %s current items for conversation %s", + len(filtered_historical), + len(current_messages), + conversation_id, ) return merged @@ -360,12 +376,12 @@ async def _aget_state(self, context: LanggraphRunContext) -> Optional[StateSnaps configurable={"thread_id": thread_id}, ) if self._graph.checkpointer: - logger.debug(f"Checking for existing checkpoint for conversation {thread_id}") + logger.debug("Checking for existing checkpoint for conversation %s", thread_id) state = await self._graph.aget_state(config=config) if state and state.values: - logger.debug(f"Checkpoint state retrieved for conversation {thread_id}") + logger.debug("Checkpoint state retrieved for conversation %s", thread_id) else: - 
logger.debug(f"No checkpoint state found for conversation {thread_id}") + logger.debug("No checkpoint state found for conversation %s", thread_id) return state logger.debug("No checkpointer configured for graph, skipping checkpoint lookup") return None @@ -420,7 +436,7 @@ def _filter_incomplete_tool_calls(self, messages: List[AnyMessage]) -> List[AnyM result.append(msg) if removed_count > 0: - logger.info(f"Filtered {removed_count} messages with incomplete tool call sequences") + logger.info("Filtered %s messages with incomplete tool call sequences", removed_count) return result diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py index 5e688e42664c..9a050a57a9f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py @@ -1,11 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,broad-exception-caught,logging-not-lazy -# mypy: disable-error-code="valid-type,call-overload,attr-defined" import copy from abc import ABC, abstractmethod -from typing import Any, Collection, Iterable, List, Union +from typing import Any, Collection, Iterable, List, Optional, Union from langchain_core import messages from langchain_core.messages import AnyMessage @@ -54,7 +52,7 @@ def __init__(self, def convert(self, output: Union[dict[str, Any], Any]) -> list[project_models.ItemResource]: res: list[project_models.ItemResource] = [] if not isinstance(output, list): - logger.error(f"Expected output to be a list, got {type(output)}: {output}") + logger.error("Expected output to be a list, got %s: %s", type(output), output) raise ValueError(f"Invalid output format. Expected a list, got {type(output)}.") for step in output: for node_name, node_output in step.items(): @@ -70,7 +68,7 @@ def _convert_node_output( else: message_arr = node_output.get("messages") if not message_arr or not isinstance(message_arr, Collection): - logger.warning(f"No messages found in node {node_name} output: {node_output}") + logger.warning("No messages found in node %s output: %s", node_name, node_output) return for message in message_arr: @@ -78,10 +76,10 @@ def _convert_node_output( converted = self.convert_output_message(message) if converted: yield converted - except Exception as e: - logger.error(f"Error converting message {message}: {e}") + except (AttributeError, TypeError, ValueError) as error: + logger.error("Error converting message %s: %s", message, error) - def convert_output_message(self, output_message: AnyMessage): # pylint: disable=inconsistent-return-statements + def convert_output_message(self, output_message: AnyMessage) -> Optional[project_models.ItemResource]: # Implement the conversion logic for inner inputs if isinstance(output_message, messages.HumanMessage): 
return project_models.ResponsesUserMessageItemResource( @@ -104,8 +102,8 @@ def convert_output_message(self, output_message: AnyMessage): # pylint: disable # If there are tool calls, we assume there is only ONE function call if len(output_message.tool_calls) > 1: logger.warning( - f"There are {len(output_message.tool_calls)} tool calls found. " - + "Only the first one will be processed." + "There are %s tool calls found. Only the first one will be processed.", + len(output_message.tool_calls), ) tool_call = output_message.tool_calls[0] name, call_id, argument = extract_function_call(tool_call) @@ -129,7 +127,8 @@ def convert_output_message(self, output_message: AnyMessage): # pylint: disable output=output_message.content, id=self.context.agent_run.id_generator.generate_function_output_id(), ) - logger.warning(f"Unsupported message type: {type(output_message)}, {output_message}") + logger.warning("Unsupported message type: %s, %s", type(output_message), output_message) + return None def convert_MessageContent( self, content, role: project_models.ResponsesMessageRole diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py index f718695dbc1e..1cf94210f543 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py @@ -1,11 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation -# mypy: ignore-errors from abc import ABC, abstractmethod import json -from typing import Dict, List +from typing import Dict, List, Optional from langchain_core.messages import ( AIMessage, @@ -40,7 +38,7 @@ } -def convert_item_resource_to_message(item: Dict) -> AnyMessage: +def convert_item_resource_to_message(item: Dict) -> Optional[AnyMessage]: """ Convert an ItemResource (from AIProjectClient conversation items) to a LangGraph message. @@ -69,13 +67,11 @@ def convert_item_resource_to_message(item: Dict) -> AnyMessage: # Fallback: try to get any text field text_content = content[0].get("text", "") content = text_content - elif isinstance(content, str): - pass # content is already a string - else: + elif not isinstance(content, str): content = str(content) if content else "" if role not in role_mapping: - logger.warning(f"Unknown role '{role}' in item resource, defaulting to USER") + logger.warning("Unknown role '%s' in item resource, defaulting to USER", role) role = project_models.ResponsesMessageRole.USER return role_mapping[role](content=content) @@ -102,8 +98,8 @@ def convert_item_resource_to_message(item: Dict) -> AnyMessage: output = " ".join(text_parts) return ToolMessage(content=output, tool_call_id=call_id) - logger.warning(f"Unsupported item type '{item_type}' in item resource, skipping") - return None # type: ignore + logger.warning("Unsupported item type '%s' in item resource, skipping", item_type) + return None class ResponseAPIRequestConverter(ABC): @@ -189,17 +185,19 @@ def convert_function_call(self, item: dict) -> AnyMessage: item = openai_models.ResponseFunctionToolCallParam(**item) argument = item.get("arguments", None) args = json.loads(argument) if argument else {} - except json.JSONDecodeError as e: - raise ValueError(f"Invalid JSON in function call arguments: {item}") from e - except Exception as e: - raise ValueError(f"Invalid 
function call item: {item}") from e + except json.JSONDecodeError as error: + raise ValueError(f"Invalid JSON in function call arguments: {item}") from error + except (TypeError, ValueError) as error: + raise ValueError(f"Invalid function call item: {item}") from error return AIMessage(tool_calls=[ToolCall(id=item.get("call_id"), name=item.get("name"), args=args)], content="") def convert_function_call_output(self, item: dict) -> ToolMessage: try: - item = openai_models.response_input_item_param.FunctionCallOutput(**item) # pylint: disable=no-member - except Exception as e: - raise ValueError(f"Invalid function call output item: {item}") from e + item_namespace = getattr(openai_models, "response_input_item_param") + function_call_output = getattr(item_namespace, "FunctionCallOutput") + item = function_call_output(**item) + except (AttributeError, TypeError, ValueError) as error: + raise ValueError(f"Invalid function call output item: {item}") from error output = item.get("output", None) if isinstance(output, str): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py index 0c6f772dece5..71b4c7fbceaa 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py @@ -1,10 +1,9 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,C4751 -# mypy: disable-error-code="assignment,valid-type" +# pylint: disable=C4751 from abc import ABC, abstractmethod -from typing import Any, List, Union +from typing import Any, List, Optional, Union from langchain_core.messages import AnyMessage @@ -57,20 +56,20 @@ def __init__(self, context: LanggraphRunContext, *, hitl_helper: HumanInTheLoopH self.hitl_helper = hitl_helper self.stream_state = StreamEventState() - self.current_generator: ResponseEventGenerator = None + self.current_generator: Optional[ResponseEventGenerator] = None def convert(self, event: Union[AnyMessage, dict, Any, None]): try: if self.current_generator is None: self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) - if event is None or not hasattr(event, '__getitem__'): + if event is None or not hasattr(event, "__getitem__"): raise ValueError(f"Event is not indexable: {event}") message = event[0] # expect a tuple converted = self.try_process_message(message, self.context) return converted - except Exception as e: - logger.error(f"Error converting message {event}: {e}") - raise ValueError(f"Error converting message {event}") from e + except (IndexError, KeyError, TypeError, ValueError) as error: + logger.error("Error converting message %s: %s", event, error) + raise ValueError(f"Error converting message {event}") from error def finalize(self, graph_state=None): logger.info("Stream ended, finalizing response.") @@ -91,6 +90,9 @@ def try_process_message( if event and not self.current_generator: self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) + if self.current_generator is None: + return [] + is_processed = False next_processor = self.current_generator returned_events = [] @@ -101,14 +103,17 @@ def try_process_message( returned_events.extend(processed_events) if not is_processed and 
next_processor == self.current_generator: logger.warning( - f"Message can not be processed by current generator {type(self.current_generator).__name__}:" - + f" {type(event)}: {event}" + "Message can not be processed by current generator %s: %s: %s", + type(self.current_generator).__name__, + type(event), + event, ) break if next_processor != self.current_generator: logger.info( - f"Switching processor from {type(self.current_generator).__name__} " - + f"to {type(next_processor).__name__}" + "Switching processor from %s to %s", + type(self.current_generator).__name__, + type(next_processor).__name__ if next_processor is not None else "NoneType", ) self.current_generator = next_processor return returned_events diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py index 7c97d7adee0a..8e95d241e23c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# mypy: disable-error-code="assignment" +from abc import ABC, abstractmethod from typing import Optional from langgraph.types import Interrupt @@ -12,19 +12,22 @@ from .._utils import extract_function_call -class ItemResourceHelper: +class ItemResourceHelper(ABC): def __init__(self, item_type: str, item_id: Optional[str] = None): self.item_type = item_type self.item_id = item_id + @abstractmethod def create_item_resource(self, is_done: bool): - pass + raise NotImplementedError + @abstractmethod def add_aggregate_content(self, item): - pass + raise NotImplementedError + @abstractmethod def get_aggregated_content(self): - pass + raise NotImplementedError class FunctionCallItemResourceHelper(ItemResourceHelper): @@ -77,12 +80,12 @@ def create_item_resource(self, is_done: bool): return None item_resource = self.hitl_helper.convert_interrupt(self.interrupt) if item_resource is not None and not is_done: - if hasattr(item_resource, 'arguments'): - item_resource.arguments = "" # type: ignore[union-attr] + if getattr(item_resource, "arguments", None) is not None: + item_resource.arguments = "" return item_resource def add_aggregate_content(self, item): - pass + return None def get_aggregated_content(self): return self.create_item_resource(is_done=True) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py index 213a9f78e348..432fac54a4a3 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py +++ 
b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py @@ -1,8 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=unused-argument,consider-using-in,consider-merging-isinstance -# mypy: ignore-errors from typing import List from langchain_core import messages as langgraph_messages @@ -41,7 +39,7 @@ def try_process_message( if not self.item_content_helper: if not self.try_create_item_content_helper(message): # cannot create item content, skip this message - self.logger.warning(f"Cannot create item content helper for message: {message}") + self.logger.warning("Cannot create item content helper for message: %s", message) return True, self, [] if self.item_content_helper and not self.started: self.started, start_events = self.on_start(message, context, stream_state) @@ -63,8 +61,8 @@ def try_process_message( return is_processed, next_processor, events - def on_start( # mypy: ignore[override] - self, event, run_details, stream_state: StreamEventState + def on_start( + self, _event, _run_details, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: return False, [] @@ -82,8 +80,8 @@ def on_start( # mypy: ignore[override] return True, [start_event] def on_end( - self, message, context, stream_state: StreamEventState - ) -> List[project_models.ResponseStreamEvent]: # mypy: ignore[override] + self, _message, _context, stream_state: StreamEventState + ) -> List[project_models.ResponseStreamEvent]: aggregated_content = self.item_content_helper.create_item_content() done_event = project_models.ResponseContentPartDoneEvent( item_id=self.item_id, @@ -98,13 +96,11 @@ def on_end( return [done_event] def try_create_item_content_helper(self, message): - if isinstance(message, 
langgraph_messages.AIMessage) or isinstance(message, langgraph_messages.ToolMessage): + if isinstance(message, (langgraph_messages.AIMessage, langgraph_messages.ToolMessage)): if self.is_text_content(message.content): self.item_content_helper = item_content_helpers.OutputTextItemContentHelper() return True - if isinstance(message, langgraph_messages.HumanMessage) or isinstance( - message, langgraph_messages.SystemMessage - ): + if isinstance(message, (langgraph_messages.HumanMessage, langgraph_messages.SystemMessage)): if self.is_text_content(message.content): self.item_content_helper = item_content_helpers.InputTextItemContentHelper() return True @@ -120,10 +116,10 @@ def is_text_content(self, content): return True return False - def create_child_processor(self, message) -> ResponseEventGenerator: - if ( - self.item_content_helper.content_type == project_models.ItemContentType.INPUT_TEXT - or self.item_content_helper.content_type == project_models.ItemContentType.OUTPUT_TEXT + def create_child_processor(self, _message) -> ResponseEventGenerator: + if self.item_content_helper.content_type in ( + project_models.ItemContentType.INPUT_TEXT, + project_models.ItemContentType.OUTPUT_TEXT, ): return ResponseOutputTextEventGenerator( logger=self.logger, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py index 843cf18fe2dc..649664f357bd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py @@ -1,8 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft 
Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=unused-argument,unnecessary-pass -# mypy: disable-error-code="valid-type" +from abc import ABC, abstractmethod from typing import List from langchain_core.messages import AnyMessage @@ -20,7 +19,7 @@ class StreamEventState: sequence_number: int = 0 -class ResponseEventGenerator: +class ResponseEventGenerator(ABC): """ :meta private: Abstract base class for response event generators. @@ -32,12 +31,13 @@ def __init__(self, logger, parent): self.logger = logger self.parent = parent # parent generator + @abstractmethod def try_process_message( self, - message: AnyMessage, # mypy: ignore[valid-type] + message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState, - ): # mypy: ignore[empty-body] + ) -> tuple[bool, "ResponseEventGenerator | None", List[project_models.ResponseStreamEvent]]: """ Try to process the incoming message. @@ -51,30 +51,42 @@ def try_process_message( :return: tuple of (is_processed, next_processor, events) :rtype: tuple[bool, ResponseEventGenerator, List[ResponseStreamEvent]] """ - pass + raise NotImplementedError - def on_start(self) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + def on_start( + self, + _message: AnyMessage, + _context: LanggraphRunContext, + _stream_state: StreamEventState, + ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: """ Generate the starting events for this layer. + :param _message: The incoming message to process. + :type _message: AnyMessage + :param _context: The agent run context. + :type _context: LanggraphRunContext + :param _stream_state: The current stream event state. 
+ :type _stream_state: StreamEventState + :return: tuple of (started, events) :rtype: tuple[bool, List[ResponseStreamEvent]] """ return False, [] def on_end( - self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState + self, _message: AnyMessage, _context: LanggraphRunContext, _stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: """ Generate the ending events for this layer. TODO: handle different end conditions, e.g. normal end, error end, etc. - :param message: The incoming message to process. - :type message: AnyMessage - :param context: The agent run context. - :type context: LanggraphRunContext - :param stream_state: The current stream event state. - :type stream_state: StreamEventState + :param _message: The incoming message to process. + :type _message: AnyMessage + :param _context: The agent run context. + :type _context: LanggraphRunContext + :param _stream_state: The current stream event state. + :type _stream_state: StreamEventState :return: tuple of (started, events) :rtype: tuple[bool, List[ResponseStreamEvent]] @@ -89,4 +101,4 @@ def aggregate_content(self): :return: content from child processor :rtype: str | dict """ - pass + return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py index 0f2e93fe0ef1..685074e668c4 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py @@ -1,8 +1,6 @@ # 
--------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=unused-argument,name-too-long -# mypy: ignore-errors from typing import List, Union from langchain_core import messages as langgraph_messages @@ -16,7 +14,7 @@ from ..._context import LanggraphRunContext -class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator): +class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator): # pylint: disable=C4751 def __init__( self, logger, @@ -45,7 +43,7 @@ def try_process_message( is_processed, next_processor, processed_events = self.process(message, context, stream_state) if not is_processed: - self.logger.warning(f"FunctionCallArgumentEventGenerator did not process message: {message}") + self.logger.warning("FunctionCallArgumentEventGenerator did not process message: %s", message) events.extend(processed_events) if self.should_end(message): @@ -58,7 +56,7 @@ def try_process_message( return is_processed, next_processor, events def on_start( - self, event: AnyMessage, run_details, stream_state: StreamEventState + self, _event: AnyMessage, _run_details, _stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: return True, [] @@ -66,7 +64,10 @@ def on_start( return True, [] def process( - self, message: Union[langgraph_messages.AnyMessage, Interrupt], run_details, stream_state: StreamEventState + self, + message: Union[langgraph_messages.AnyMessage, Interrupt], + _run_details, + stream_state: StreamEventState, ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: if self.should_end(message): return False, self, [] @@ -115,7 +116,7 @@ def should_end(self, event: AnyMessage) -> bool: return False def on_end( - self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState + self, message: AnyMessage, 
context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: done_event = project_models.ResponseFunctionCallArgumentsDoneEvent( item_id=self.item_id, @@ -132,16 +133,16 @@ def get_tool_call_info(self, message: Union[langgraph_messages.AnyMessage, Inter if message.tool_call_chunks: if len(message.tool_call_chunks) > 1: self.logger.warning( - f"There are {len(message.tool_call_chunks)} tool calls found. " - + "Only the first one will be processed." + "There are %s tool calls found. Only the first one will be processed.", + len(message.tool_call_chunks), ) return message.tool_call_chunks[0] elif isinstance(message, langgraph_messages.AIMessage): if message.tool_calls: if len(message.tool_calls) > 1: self.logger.warning( - f"There are {len(message.tool_calls)} tool calls found. " - + "Only the first one will be processed." + "There are %s tool calls found. Only the first one will be processed.", + len(message.tool_calls), ) return message.tool_calls[0] return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py index 4c98b4b1a18d..ddc61c6e43e8 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py @@ -1,8 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: ignore-errors from typing import List, Union from langchain_core import messages as langgraph_messages @@ -37,14 +35,14 @@ def try_process_message( if self.item_resource_helper is None: if not self.try_create_item_resource_helper(message, context.agent_run.id_generator): # cannot create item resource, skip this message - self.logger.warning(f"Cannot create item resource helper for message: {message}, skipping.") + self.logger.warning("Cannot create item resource helper for message: %s, skipping.", message) return True, self, [] if self.item_resource_helper and not self.started: self.started, start_events = self.on_start(message, context, stream_state) if not self.started: # could not start processing, skip this message - self.logger.warning(f"Cannot create start events for message: {message}, skipping.") + self.logger.warning("Cannot create start events for message: %s, skipping.", message) return True, self, [] events.extend(start_events) @@ -58,7 +56,7 @@ def try_process_message( child_processor = self.create_child_processor(message) if child_processor: - self.logger.info(f"Created child processor: {child_processor}") + self.logger.info("Created child processor: %s", child_processor) return False, child_processor, events if message: @@ -69,7 +67,7 @@ def try_process_message( return is_processed, next_processor, events def on_start( - self, event: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState + self, _event: Union[AnyMessage, Interrupt], _context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: return True, [] @@ -77,7 +75,7 @@ def on_start( item_resource = self.item_resource_helper.create_item_resource(is_done=False) if item_resource is None: # cannot know what item resource to create - return False, None + return False, [] 
item_added_event = project_models.ResponseOutputItemAddedEvent( output_index=self.output_index, sequence_number=stream_state.sequence_number, @@ -96,8 +94,8 @@ def should_end(self, event: Union[AnyMessage, Interrupt]) -> bool: return False def on_end( - self, message: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + self, message: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument + ) -> List[project_models.ResponseStreamEvent]: if not self.started: # should not happen return [] @@ -112,7 +110,7 @@ def on_end( self.parent.aggregate_content(item_resource) # pass aggregated content to parent return [done_event] - def aggregate_content(self, content): + def aggregate_content(self, content) -> None: # aggregate content from child processor self.item_resource_helper.add_aggregate_content(content) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py index 21772abd1ea7..61f5652a6d46 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py @@ -1,10 +1,10 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: disable-error-code="return-value,assignment" from typing import List +from langchain_core.messages import AnyMessage + from azure.ai.agentserver.core.models import _projects as project_models from ._response_event_generator import ( ResponseEventGenerator, @@ -31,7 +31,7 @@ def __init__( self.aggregated_content = "" def try_process_message( - self, message, context, stream_state: StreamEventState + self, message: AnyMessage, _context, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: is_processed = False events = [] @@ -40,27 +40,27 @@ def try_process_message( self.started = True if message: - is_processed, next_processor, processed_events = self.process(message, context, stream_state) + is_processed, next_processor, processed_events = self.process(message, stream_state) if not is_processed: - self.logger.warning(f"OutputTextEventGenerator did not process message: {message}") + self.logger.warning("OutputTextEventGenerator did not process message: %s", message) events.extend(processed_events) if self.should_end(message): - is_processed, complete_events = self.on_end(message, context, stream_state) + is_processed, complete_events = self.on_end(message, _context, stream_state) events.extend(complete_events) next_processor = self.parent return is_processed, next_processor, events def process( - self, message, run_details, stream_state: StreamEventState + self, message: AnyMessage, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: if message and message.content: content = [message.content] if isinstance(message.content, str) else message.content res = [] for item in content: if not isinstance(item, str): - self.logger.warning(f"Skipping non-string content item: {item}") + self.logger.warning("Skipping non-string content item: %s", item) 
continue # create an event for each content item chunk_event = project_models.ResponseTextDeltaEvent( @@ -73,7 +73,7 @@ def process( self.aggregated_content += item stream_state.sequence_number += 1 res.append(chunk_event) - return True, self, res # mypy: ignore[return-value] + return True, self, res return False, self, [] def has_finish_reason(self, message) -> bool: @@ -91,8 +91,8 @@ def should_end(self, message) -> bool: return True return False - def on_end( # mypy: ignore[override] - self, message, context: LanggraphRunContext, stream_state: StreamEventState + def on_end( + self, message, context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if not self.started: return False, [] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py index bb361639601d..c186dbd329e6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py @@ -1,10 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: ignore-errors import time -from typing import List +from typing import List, Optional, Union from langchain_core import messages as langgraph_messages @@ -76,8 +74,11 @@ def should_complete(self, event: langgraph_messages.AnyMessage) -> bool: return False def try_process_message( - self, message: langgraph_messages.AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + self, + message: Optional[langgraph_messages.AnyMessage], + context: LanggraphRunContext, + stream_state: StreamEventState, + ) -> tuple[bool, Optional[ResponseEventGenerator], List[project_models.ResponseStreamEvent]]: is_processed = False next_processor = self events = [] @@ -108,8 +109,12 @@ def should_end(self, event: langgraph_messages.AnyMessage) -> bool: return True return False - def on_end(self, message: langgraph_messages.AnyMessage, context: LanggraphRunContext, - stream_state: StreamEventState): + def on_end( + self, + _message: Optional[langgraph_messages.AnyMessage], + context: LanggraphRunContext, + stream_state: StreamEventState, + ) -> List[project_models.ResponseStreamEvent]: agent_id = context.agent_run.get_agent_id_object() conversation = context.agent_run.get_conversation_object() response_dict = { @@ -130,11 +135,12 @@ def on_end(self, message: langgraph_messages.AnyMessage, context: LanggraphRunCo self.parent.aggregate_content(self.aggregated_contents) return [done_event] - def aggregate_content(self, content): + def aggregate_content(self, content: Union[List[project_models.ItemResource], project_models.ItemResource]) -> None: # aggregate content from children if isinstance(content, list): for c in content: self.aggregate_content(c) + return if isinstance(content, project_models.ItemResource): self.aggregated_contents.append(content) else: From 
234bb7748a737b3f35ea9db6064aa377d15e6ce3 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 13 Mar 2026 13:13:34 -0700 Subject: [PATCH 9/9] fix docstring and pylint --- .../ai/agentserver/langgraph/__init__.py | 5 +- .../ai/agentserver/langgraph/_context.py | 39 +++++ .../ai/agentserver/langgraph/_langgraph.py | 51 +++++++ .../models/_human_in_the_loop_helper.py | 16 ++ .../models/_human_in_the_loop_json_helper.py | 16 ++ .../models/_response_api_default_converter.py | 81 ++++++++++ ...ponse_api_non_stream_response_converter.py | 54 ++++++- .../models/_response_api_request_converter.py | 28 ++++ ..._response_api_stream_response_converter.py | 35 +++++ .../_item_content_helpers.py | 38 +++++ .../_item_resource_helpers.py | 142 +++++++++++++++++- .../_response_content_part_event_generator.py | 99 +++++++++++- .../_response_event_generator.py | 7 + ..._function_call_argument_event_generator.py | 95 +++++++++++- .../_response_output_item_event_generator.py | 128 +++++++++++++--- .../_response_output_text_event_generator.py | 70 ++++++++- .../_response_stream_event_generator.py | 67 ++++++++- .../langgraph/tools/_chat_model.py | 95 +++++++++++- .../langgraph/tools/_middleware.py | 10 ++ .../agentserver/langgraph/tools/_resolver.py | 33 ++++ .../agentserver/langgraph/tools/_tool_node.py | 14 ++ 21 files changed, 1078 insertions(+), 45 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index fc9aaf43880c..959432ada6af 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -1,7 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=docstring-should-be-keyword +"""Public entry points for the Azure AI Agent Server LangGraph adapter.""" + __path__ = __import__("pkgutil").extend_path(__path__, __name__) from typing import Optional, Union, TYPE_CHECKING @@ -19,7 +20,7 @@ from azure.core.credentials import TokenCredential -def from_langgraph( +def from_langgraph( # pylint: disable=docstring-should-be-keyword agent: "CompiledStateGraph", /, credentials: Optional[Union["AsyncTokenCredential", "TokenCredential"]] = None, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py index d037088b18a5..354cf42fa06d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py @@ -1,6 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +"""Execution context helpers for the LangGraph adapter.""" + import sys from dataclasses import dataclass from typing import Optional, Union @@ -15,11 +17,24 @@ @dataclass class LanggraphRunContext: + """Holds per-run state shared across LangGraph adapter components. + + :param agent_run: The current agent run context. + :type agent_run: AgentRunContext + :param tools: The resolved Foundry tool context for the run. + :type tools: FoundryToolContext + """ + agent_run: AgentRunContext tools: FoundryToolContext def attach_to_config(self, config: RunnableConfig): + """Attach this run context to a LangChain runnable config. + + :param config: The runnable config to enrich. 
+ :type config: RunnableConfig + """ config["configurable"]["__foundry_hosted_agent_langgraph_run_context__"] = self @classmethod @@ -46,6 +61,14 @@ def resolve(cls, @staticmethod def _resolve_runtime( runtime: Optional[Union[Runtime, ToolRuntime]] = None) -> Optional[Union[Runtime, ToolRuntime]]: + """Resolve the active runtime from the explicit runtime or thread-local state. + + :param runtime: An explicitly supplied runtime, if available. + :type runtime: Optional[Union[Runtime, ToolRuntime]] + + :return: The resolved runtime, if one is available. + :rtype: Optional[Union[Runtime, ToolRuntime]] + """ if runtime: return runtime if sys.version_info >= (3, 11): @@ -54,6 +77,14 @@ def _resolve_runtime( @staticmethod def from_config(config: RunnableConfig) -> Optional["LanggraphRunContext"]: + """Extract the run context from a runnable config. + + :param config: The runnable config carrying the context. + :type config: RunnableConfig + + :return: The extracted run context, if present. + :rtype: Optional[LanggraphRunContext] + """ context = config["configurable"].get("__foundry_hosted_agent_langgraph_run_context__") if isinstance(context, LanggraphRunContext): return context @@ -61,6 +92,14 @@ def from_config(config: RunnableConfig) -> Optional["LanggraphRunContext"]: @staticmethod def from_runtime(runtime: Union[Runtime, ToolRuntime]) -> Optional["LanggraphRunContext"]: + """Extract the run context from a LangGraph runtime wrapper. + + :param runtime: The runtime to inspect. + :type runtime: Union[Runtime, ToolRuntime] + + :return: The extracted run context, if present. 
+ :rtype: Optional[LanggraphRunContext] + """ context = runtime.context if isinstance(context, LanggraphRunContext): return context diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py index 6e57f3037cdf..a8b24bad360d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py @@ -61,6 +61,14 @@ def __init__( self.converter = converter async def agent_run(self, context: AgentRunContext): + """Execute a LangGraph-backed agent run. + + :param context: The agent run context supplied by Agent Server. + :type context: AgentRunContext + + :return: A response object or an async response stream. + :rtype: Any + """ # Resolve graph - always resolve if it's a factory function to get fresh graph each time # For factories, get a new graph instance per request to avoid concurrency issues try: @@ -80,12 +88,27 @@ async def agent_run(self, context: AgentRunContext): return self.respond_with_oauth_consent_astream(context, e) async def setup_lg_run_context(self, agent_run_context: AgentRunContext) -> LanggraphRunContext: + """Build the LangGraph run context for the current request. + + :param agent_run_context: The agent run context from the server layer. + :type agent_run_context: AgentRunContext + + :return: The run context used by the adapter and tools. + :rtype: LanggraphRunContext + """ resolved = await self._tool_resolver.resolve_from_registry() return LanggraphRunContext( agent_run_context, FoundryToolContext(resolved)) def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): + """Initialize LangSmith and Azure AI tracing hooks for the adapter. + + :param exporter_endpoint: Optional OTLP exporter endpoint. 
+ :type exporter_endpoint: Optional[str] + :param app_insights_conn_str: Optional Application Insights connection string. + :type app_insights_conn_str: Optional[str] + """ # set env vars for langsmith os.environ["LANGSMITH_OTEL_ENABLED"] = "true" os.environ["LANGSMITH_TRACING"] = "true" @@ -105,10 +128,25 @@ def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=No logger.error("Failed to initialize AzureAIOpenTelemetryTracer, ignore: %s", error) def setup_otlp_exporter(self, endpoint, provider): + """Normalize the OTLP endpoint before delegating exporter setup. + + :param endpoint: The configured exporter endpoint. + :type endpoint: str + :param provider: The tracer provider receiving the exporter. + :type provider: Any + + :return: The configured exporter registration result. + :rtype: Any + """ endpoint = self.format_otlp_endpoint(endpoint) return super().setup_otlp_exporter(endpoint, provider) def get_trace_attributes(self): + """Return base tracing attributes for LangGraph spans. + + :return: The trace attributes for this adapter. + :rtype: dict + """ attrs = super().get_trace_attributes() attrs["service.namespace"] = "azure.ai.agentserver.langgraph" return attrs @@ -181,12 +219,25 @@ def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: input_arguments["config"] = config def format_otlp_endpoint(self, endpoint: str) -> str: + """Ensure the OTLP endpoint includes the traces ingestion path. + + :param endpoint: The configured exporter endpoint. + :type endpoint: str + + :return: The normalized traces endpoint. + :rtype: str + """ m = re.match(r"^(https?://[^/]+)", endpoint) if m: return f"{m.group(1)}/v1/traces" return endpoint def get_agent_identifier(self) -> str: + """Resolve the agent identifier used by tracing integrations. + + :return: The configured agent name or identifier. 
+ :rtype: str + """ agent_name = os.getenv(Constants.AGENT_NAME) if agent_name: return agent_name diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py index bc8890339dca..9628720247f0 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py @@ -21,7 +21,13 @@ class HumanInTheLoopHelper: """Helper class for managing human-in-the-loop interactions in LangGraph.""" + def __init__(self, context: LanggraphRunContext): + """Initialize the helper with the current LangGraph run context. + + :param context: The current run context. + :type context: LanggraphRunContext + """ self.context = context def has_interrupt(self, state: Optional[StateSnapshot]) -> bool: @@ -104,6 +110,16 @@ def validate_and_convert_human_feedback( def _validate_input_format( self, input_data: Union[str, ResponseInputParam], interrupt_obj: Interrupt ) -> Optional[ResponseInputItemParam]: + """Validate the interrupt feedback payload format. + + :param input_data: The request input payload to validate. + :type input_data: Union[str, ResponseInputParam] + :param interrupt_obj: The interrupt that the feedback must match. + :type interrupt_obj: Interrupt + + :return: The validated function call output item, if valid. 
+ :rtype: Optional[ResponseInputItemParam] + """ if isinstance(input_data, str): logger.warning("Expecting function call output item, got string: %s", input_data) return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py index a4e868bee152..3f621bc2c402 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py @@ -31,6 +31,14 @@ class HumanInTheLoopJsonHelper(HumanInTheLoopHelper): """ def convert_interrupt(self, interrupt_info: Interrupt) -> Optional[project_models.ItemResource]: + """Convert an interrupt into an in-progress function-call item resource. + + :param interrupt_info: The interrupt emitted by LangGraph. + :type interrupt_info: Interrupt + + :return: The corresponding function-call item resource, if conversion succeeds. + :rtype: Optional[project_models.ItemResource] + """ if not isinstance(interrupt_info, Interrupt): logger.warning("Interrupt is not of type Interrupt: %s", interrupt_info) return None @@ -64,6 +72,14 @@ def interrupt_to_function_call(self, interrupt: Interrupt) -> tuple[Optional[str return HUMAN_IN_THE_LOOP_FUNCTION_NAME, interrupt.id, arguments def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Optional[Command]: + """Convert a function-call-output item into a LangGraph resume command. + + :param input_item: The function call output item supplied by the client. + :type input_item: ResponseInputItemParam + + :return: The parsed LangGraph command, if valid. 
+ :rtype: Optional[Command] + """ output_str = input_item.get("output") if not isinstance(output_str, str): logger.error("Invalid output type in function call output: %s", input_item) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py index d18cde162fd8..6cff796fc121 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py @@ -47,6 +47,21 @@ def __init__(self, ResponseAPINonStreamResponseConverter ] | None = None, create_human_in_the_loop_helper: Callable[[LanggraphRunContext], HumanInTheLoopHelper] | None = None): + """Initialize the default LangGraph response converter. + + :param graph: The compiled LangGraph state graph. + :type graph: CompiledStateGraph + :param create_request_converter: Optional factory for request converters. + :type create_request_converter: Optional[Callable[[LanggraphRunContext], ResponseAPIRequestConverter]] + :param create_stream_response_converter: Optional factory for streaming converters. + :type create_stream_response_converter: + Optional[Callable[[LanggraphRunContext], ResponseAPIMessagesStreamResponseConverter]] + :param create_non_stream_response_converter: Optional factory for non-stream converters. + :type create_non_stream_response_converter: + Optional[Callable[[LanggraphRunContext], ResponseAPINonStreamResponseConverter]] + :param create_human_in_the_loop_helper: Optional factory for HITL helpers. 
+ :type create_human_in_the_loop_helper: Optional[Callable[[LanggraphRunContext], HumanInTheLoopHelper]] + """ self._graph = graph self._custom_request_converter_factory = create_request_converter self._custom_stream_response_converter_factory = create_stream_response_converter @@ -54,6 +69,14 @@ def __init__(self, self._custom_human_in_the_loop_helper_factory = create_human_in_the_loop_helper async def convert_request(self, context: LanggraphRunContext) -> GraphInputArguments: + """Convert the incoming request into graph input arguments. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The graph invocation arguments. + :rtype: GraphInputArguments + """ prev_state = await self._aget_state(context) input_data = await self._convert_request_input_with_history(context, prev_state) stream_mode = self.get_stream_mode(context) @@ -66,6 +89,16 @@ async def convert_request(self, context: LanggraphRunContext) -> GraphInputArgum async def convert_response_non_stream( self, output: Union[dict[str, Any], Any], context: LanggraphRunContext) -> Response: + """Convert non-stream graph output into a final response object. + + :param output: The graph output to convert. + :type output: Union[dict[str, Any], Any] + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The final response object. + :rtype: Response + """ agent_run_context = context.agent_run converter = self._create_non_stream_response_converter(context) converted_output = converter.convert(output) @@ -100,11 +133,27 @@ async def convert_response_stream( # type: ignore[override] yield event def get_stream_mode(self, context: LanggraphRunContext) -> StreamMode: + """Select the graph stream mode for the current request. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The stream mode to use for execution. 
+ :rtype: StreamMode + """ if context.agent_run.stream: return "messages" return "updates" def _create_request_converter(self, context: LanggraphRunContext) -> ResponseAPIRequestConverter: + """Create the request converter for the current run. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The request converter. + :rtype: ResponseAPIRequestConverter + """ if self._custom_request_converter_factory: return self._custom_request_converter_factory(context) data = context.agent_run.request @@ -113,6 +162,14 @@ def _create_request_converter(self, context: LanggraphRunContext) -> ResponseAPI def _create_stream_response_converter( self, context: LanggraphRunContext ) -> ResponseAPIMessagesStreamResponseConverter: + """Create the stream response converter for the current run. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The stream response converter. + :rtype: ResponseAPIMessagesStreamResponseConverter + """ if self._custom_stream_response_converter_factory: return self._custom_stream_response_converter_factory(context) hitl_helper = self._create_human_in_the_loop_helper(context) @@ -121,12 +178,28 @@ def _create_stream_response_converter( def _create_non_stream_response_converter( self, context: LanggraphRunContext ) -> ResponseAPINonStreamResponseConverter: + """Create the non-stream response converter for the current run. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The non-stream response converter. 
+ :rtype: ResponseAPINonStreamResponseConverter + """ if self._custom_non_stream_response_converter_factory: return self._custom_non_stream_response_converter_factory(context) hitl_helper = self._create_human_in_the_loop_helper(context) return ResponseAPIMessagesNonStreamResponseConverter(context, hitl_helper) def _create_human_in_the_loop_helper(self, context: LanggraphRunContext) -> HumanInTheLoopHelper: + """Create the human-in-the-loop helper for the current run. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The human-in-the-loop helper. + :rtype: HumanInTheLoopHelper + """ if self._custom_human_in_the_loop_helper_factory: return self._custom_human_in_the_loop_helper_factory(context) return HumanInTheLoopJsonHelper(context) @@ -368,6 +441,14 @@ def _merge_messages_without_duplicates( return merged async def _aget_state(self, context: LanggraphRunContext) -> Optional[StateSnapshot]: + """Fetch the persisted checkpoint state for the current conversation. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The persisted state snapshot, if available. 
+ :rtype: Optional[StateSnapshot] + """ thread_id = context.agent_run.conversation_id if not thread_id: logger.debug("No conversation_id provided, skipping checkpoint lookup") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py index 9a050a57a9f1..1658c66e4b65 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py @@ -46,10 +46,25 @@ class ResponseAPIMessagesNonStreamResponseConverter(ResponseAPINonStreamResponse def __init__(self, context: LanggraphRunContext, hitl_helper: HumanInTheLoopHelper): + """Initialize the non-stream response converter. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param hitl_helper: The helper used for interrupt conversion. + :type hitl_helper: HumanInTheLoopHelper + """ self.context = context self.hitl_helper = hitl_helper def convert(self, output: Union[dict[str, Any], Any]) -> list[project_models.ItemResource]: + """Convert graph output into response item resources. + + :param output: The graph output to convert. + :type output: Union[dict[str, Any], Any] + + :return: The converted response item resources. 
+ :rtype: list[project_models.ItemResource] + """ res: list[project_models.ItemResource] = [] if not isinstance(output, list): logger.error("Expected output to be a list, got %s: %s", type(output), output) @@ -63,6 +78,16 @@ def convert(self, output: Union[dict[str, Any], Any]) -> list[project_models.Ite def _convert_node_output( self, node_name: str, node_output: Any ) -> Iterable[project_models.ItemResource]: + """Convert a single node update into response item resources. + + :param node_name: The name of the node that produced the output. + :type node_name: str + :param node_output: The node output payload. + :type node_output: Any + + :return: An iterable of converted item resources. + :rtype: Iterable[project_models.ItemResource] + """ if node_name == INTERRUPT_NODE_NAME: yield from self.hitl_helper.convert_interrupts(node_output) else: @@ -80,7 +105,14 @@ def _convert_node_output( logger.error("Error converting message %s: %s", message, error) def convert_output_message(self, output_message: AnyMessage) -> Optional[project_models.ItemResource]: - # Implement the conversion logic for inner inputs + """Convert a single LangChain message into a response item resource. + + :param output_message: The message to convert. + :type output_message: AnyMessage + + :return: The converted item resource, if supported. + :rtype: Optional[project_models.ItemResource] + """ if isinstance(output_message, messages.HumanMessage): return project_models.ResponsesUserMessageItemResource( content=self.convert_MessageContent( @@ -133,6 +165,16 @@ def convert_output_message(self, output_message: AnyMessage) -> Optional[project def convert_MessageContent( self, content, role: project_models.ResponsesMessageRole ) -> List[project_models.ItemContent]: + """Convert message content into response item content objects. + + :param content: The content payload to convert. + :type content: Any + :param role: The role associated with the content. 
+ :type role: project_models.ResponsesMessageRole + + :return: The converted item content list. + :rtype: List[project_models.ItemContent] + """ if isinstance(content, str): return [self.convert_MessageContentItem(content, role)] return [self.convert_MessageContentItem(item, role) for item in content] @@ -140,6 +182,16 @@ def convert_MessageContent( def convert_MessageContentItem( self, content, role: project_models.ResponsesMessageRole ) -> project_models.ItemContent: + """Convert one content item into a response item content model. + + :param content: The content item to convert. + :type content: Any + :param role: The role associated with the content item. + :type role: project_models.ResponsesMessageRole + + :return: The converted content model. + :rtype: project_models.ItemContent + """ content_dict = copy.deepcopy(content) if isinstance(content, dict) else {"text": content} content_type = None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py index 1cf94210f543..f4e1e5d3d18f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py @@ -118,10 +118,22 @@ def convert(self) -> dict: class ResponseAPIMessageRequestConverter(ResponseAPIRequestConverter): + """Convert Response API input items into LangGraph message inputs.""" + def __init__(self, data: CreateResponse): + """Initialize the request converter. + + :param data: The incoming create-response payload. + :type data: CreateResponse + """ self.data: CreateResponse = data def convert(self) -> dict: + """Convert the request payload into LangGraph message input. 
+ + :return: A LangGraph-compatible input dictionary. + :rtype: dict + """ # Convert the CreateRunRequest input to a format suitable for LangGraph langgraph_input = {"messages": []} @@ -181,6 +193,14 @@ def convert_message(self, message: dict) -> AnyMessage: raise ValueError(f"Unsupported ResponseMessagesItemParam content type: {type(content)}, {content}") def convert_function_call(self, item: dict) -> AnyMessage: + """Convert a function call input item into an AI message. + + :param item: The function call item payload. + :type item: dict + + :return: The converted AI message. + :rtype: AnyMessage + """ try: item = openai_models.ResponseFunctionToolCallParam(**item) argument = item.get("arguments", None) @@ -192,6 +212,14 @@ def convert_function_call(self, item: dict) -> AnyMessage: return AIMessage(tool_calls=[ToolCall(id=item.get("call_id"), name=item.get("name"), args=args)], content="") def convert_function_call_output(self, item: dict) -> ToolMessage: + """Convert a function call output item into a tool message. + + :param item: The function call output payload. + :type item: dict + + :return: The converted tool message. 
+ :rtype: ToolMessage + """ try: item_namespace = getattr(openai_models, "response_input_item_param") function_call_output = getattr(item_namespace, "FunctionCallOutput") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py index 71b4c7fbceaa..8eb72e3986c6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py @@ -50,7 +50,16 @@ def finalize(self, graph_state=None): class ResponseAPIMessagesStreamResponseConverter(ResponseAPIStreamResponseConverter): + """Convert LangGraph streaming message events into Responses API stream events.""" + def __init__(self, context: LanggraphRunContext, *, hitl_helper: HumanInTheLoopHelper): + """Initialize the stream response converter. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + :keyword hitl_helper: The helper used for interrupt conversion. + :type hitl_helper: HumanInTheLoopHelper + """ # self.stream = stream self.context = context self.hitl_helper = hitl_helper @@ -59,6 +68,14 @@ def __init__(self, context: LanggraphRunContext, *, hitl_helper: HumanInTheLoopH self.current_generator: Optional[ResponseEventGenerator] = None def convert(self, event: Union[AnyMessage, dict, Any, None]): + """Convert a single streamed LangGraph event. + + :param event: The event to convert. + :type event: Union[AnyMessage, dict, Any, None] + + :return: The converted response stream events. 
+ :rtype: List[ResponseStreamEvent] + """ try: if self.current_generator is None: self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) @@ -72,6 +89,14 @@ def convert(self, event: Union[AnyMessage, dict, Any, None]): raise ValueError(f"Error converting message {event}") from error def finalize(self, graph_state=None): + """Emit final stream events after graph execution completes. + + :param graph_state: The final graph state snapshot. + :type graph_state: Any + + :return: The final response stream events. + :rtype: List[ResponseStreamEvent] + """ logger.info("Stream ended, finalizing response.") res = [] # check and convert interrupts @@ -87,6 +112,16 @@ def finalize(self, graph_state=None): def try_process_message( self, event: Union[AnyMessage, Any, None], context: LanggraphRunContext ) -> List[ResponseStreamEvent]: + """Process one message through the current event-generator chain. + + :param event: The message or interrupt to process. + :type event: Union[AnyMessage, Any, None] + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The generated response stream events. 
+ :rtype: List[ResponseStreamEvent] + """ if event and not self.current_generator: self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py index 807d759cb151..a953a15154cd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py @@ -5,25 +5,50 @@ class ItemContentHelper: + """Base helper for building response item content during streaming.""" + def __init__(self, content_type: str): + """Initialize the content helper. + + :param content_type: The response item content type handled by this helper. + :type content_type: str + """ self.content_type = content_type self.has_aggregated_content = False def create_item_content(self) -> project_models.ItemContent: + """Create the current response item content model. + + :return: The current item content model. + :rtype: project_models.ItemContent + """ return project_models.ItemContent( type=self.content_type, ) class InputTextItemContentHelper(ItemContentHelper): + """Helper for aggregating input-text content parts.""" + def __init__(self): + """Initialize the input-text content helper.""" super().__init__(project_models.ItemContentType.INPUT_TEXT) self.text = "" def create_item_content(self): + """Create the aggregated input-text content model. + + :return: The aggregated input-text item content. 
+ :rtype: project_models.ItemContentInputText + """ return project_models.ItemContentInputText(text=self.text) def aggregate_content(self, item): + """Accumulate additional input-text content. + + :param item: The content fragment to aggregate. + :type item: Any + """ self.has_aggregated_content = True if isinstance(item, str): self.text += item @@ -36,13 +61,21 @@ def aggregate_content(self, item): class OutputTextItemContentHelper(ItemContentHelper): + """Helper for aggregating output-text content parts.""" + def __init__(self): + """Initialize the output-text content helper.""" super().__init__(project_models.ItemContentType.OUTPUT_TEXT) self.text = "" self.annotations = [] self.logprobs = [] def create_item_content(self): + """Create the aggregated output-text content model. + + :return: The aggregated output-text item content. + :rtype: project_models.ItemContentOutputText + """ return project_models.ItemContentOutputText( text=self.text, annotations=self.annotations, @@ -50,6 +83,11 @@ def create_item_content(self): ) def aggregate_content(self, item): + """Accumulate additional output-text content. + + :param item: The content fragment to aggregate. 
+ :type item: Any + """ self.has_aggregated_content = True if isinstance(item, str): self.text += item diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py index 8e95d241e23c..a2912b8d2f62 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py @@ -13,25 +13,62 @@ class ItemResourceHelper(ABC): + """Base helper for constructing response item resources during streaming.""" + def __init__(self, item_type: str, item_id: Optional[str] = None): + """Initialize the item-resource helper. + + :param item_type: The response item type handled by this helper. + :type item_type: str + :param item_id: The optional response item identifier. + :type item_id: Optional[str] + """ self.item_type = item_type self.item_id = item_id @abstractmethod - def create_item_resource(self, is_done: bool): + def create_item_resource(self, is_done: bool) -> Optional[project_models.ItemResource]: + """Create the current item resource representation. + + :param is_done: Whether the created item should be marked completed. + :type is_done: bool + :return: The current item resource or None if not applicable. + :rtype: Optional[project_models.ItemResource] + """ raise NotImplementedError @abstractmethod def add_aggregate_content(self, item): + """Accumulate child content into the helper state. + + :param item: The child content to aggregate. 
+ :type item: Any + :return: None + :rtype: None + """ raise NotImplementedError @abstractmethod - def get_aggregated_content(self): + def get_aggregated_content(self) -> Optional[project_models.ItemResource]: + """Return the aggregated item resource representation. + + :return: The aggregated item resource or None if not applicable. + :rtype: Optional[project_models.ItemResource] + """ raise NotImplementedError class FunctionCallItemResourceHelper(ItemResourceHelper): + """Helper for streaming function-call item resources.""" + def __init__(self, item_id: str = None, tool_call: dict = None): + """Initialize the function-call item helper. + + :param item_id: The response item identifier. + :type item_id: str + :param tool_call: The initial tool-call payload, if available. + :type tool_call: dict + """ super().__init__(project_models.ItemType.FUNCTION_CALL, item_id) self.call_id = None self.name = None @@ -40,6 +77,14 @@ def __init__(self, item_id: str = None, tool_call: dict = None): self.name, self.call_id, _ = extract_function_call(tool_call) def create_item_resource(self, is_done: bool): + """Create the current function-call item resource. + + :param is_done: Whether the item is complete. + :type is_done: bool + + :return: The current item resource. + :rtype: project_models.ItemResource + """ content = { "id": self.item_id, "type": self.item_type, @@ -51,6 +96,11 @@ def create_item_resource(self, is_done: bool): return project_models.ItemResource(content) def add_aggregate_content(self, item): + """Accumulate additional function-call arguments. + + :param item: The content fragment to aggregate. + :type item: Any + """ if isinstance(item, str): self.arguments += item return @@ -63,19 +113,43 @@ def add_aggregate_content(self, item): self.arguments += argument def get_aggregated_content(self): + """Return the completed function-call item resource. + + :return: The completed item resource. 
+ :rtype: project_models.ItemResource + """ return self.create_item_resource(is_done=True) class FunctionCallInterruptItemResourceHelper(ItemResourceHelper): + """Helper for converting interrupt payloads into function-call resources.""" + def __init__(self, item_id: Optional[str] = None, hitl_helper: Optional[HumanInTheLoopHelper] = None, interrupt: Optional[Interrupt] = None): + """Initialize the interrupt item helper. + + :param item_id: The response item identifier. + :type item_id: Optional[str] + :param hitl_helper: The helper used to convert interrupts. + :type hitl_helper: Optional[HumanInTheLoopHelper] + :param interrupt: The interrupt being represented. + :type interrupt: Optional[Interrupt] + """ super().__init__(project_models.ItemType.FUNCTION_CALL, item_id) self.hitl_helper = hitl_helper self.interrupt = interrupt def create_item_resource(self, is_done: bool): + """Create the interrupt-backed item resource. + + :param is_done: Whether the item is complete. + :type is_done: bool + + :return: The current interrupt item resource, if available. + :rtype: Optional[project_models.ItemResource] + """ if self.hitl_helper is None or self.interrupt is None: return None item_resource = self.hitl_helper.convert_interrupt(self.interrupt) @@ -85,19 +159,46 @@ def create_item_resource(self, is_done: bool): return item_resource def add_aggregate_content(self, item): + """Ignore aggregated content for interrupt-backed items. + + :param item: The content fragment to aggregate. + :type item: Any + """ return None def get_aggregated_content(self): + """Return the completed interrupt-backed item resource. + + :return: The completed item resource, if available. 
+ :rtype: Optional[project_models.ItemResource] + """ return self.create_item_resource(is_done=True) class FunctionCallOutputItemResourceHelper(ItemResourceHelper): + """Helper for streaming function-call-output item resources.""" + def __init__(self, item_id: str = None, call_id: str = None): + """Initialize the function-call-output helper. + + :param item_id: The response item identifier. + :type item_id: str + :param call_id: The function call identifier. + :type call_id: str + """ super().__init__(project_models.ItemType.FUNCTION_CALL_OUTPUT, item_id) self.call_id = call_id self.content = "" def create_item_resource(self, is_done: bool): + """Create the current function-call-output item resource. + + :param is_done: Whether the item is complete. + :type is_done: bool + + :return: The current item resource. + :rtype: project_models.ItemResource + """ content = { "id": self.item_id, "type": self.item_type, @@ -108,6 +209,11 @@ def create_item_resource(self, is_done: bool): return project_models.ItemResource(content) def add_aggregate_content(self, item): + """Accumulate additional function-call-output content. + + :param item: The content fragment to aggregate. + :type item: Any + """ if isinstance(item, str): self.content += item return @@ -118,16 +224,38 @@ def add_aggregate_content(self, item): self.content += content def get_aggregated_content(self): + """Return the completed function-call-output item resource. + + :return: The completed item resource. + :rtype: project_models.ItemResource + """ return self.create_item_resource(is_done=True) class MessageItemResourceHelper(ItemResourceHelper): + """Helper for streaming message item resources.""" + def __init__(self, item_id: str, role: project_models.ResponsesMessageRole): + """Initialize the message item helper. + + :param item_id: The response item identifier. + :type item_id: str + :param role: The response message role. 
+ :type role: project_models.ResponsesMessageRole + """ super().__init__(project_models.ItemType.MESSAGE, item_id) self.role = role self.content: list[project_models.ItemContent] = [] def create_item_resource(self, is_done: bool): + """Create the current message item resource. + + :param is_done: Whether the item is complete. + :type is_done: bool + + :return: The current item resource. + :rtype: project_models.ItemResource + """ content = { "id": self.item_id, "type": self.item_type, @@ -138,10 +266,20 @@ def create_item_resource(self, is_done: bool): return project_models.ItemResource(content) def add_aggregate_content(self, item): + """Accumulate additional message content. + + :param item: The content fragment to aggregate. + :type item: Any + """ if isinstance(item, dict): item = project_models.ItemContent(item) if isinstance(item, project_models.ItemContent): self.content.append(item) def get_aggregated_content(self): + """Return the completed message item resource. + + :return: The completed item resource. 
+ :rtype: project_models.ItemResource + """ return self.create_item_resource(is_done=True) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py index 432fac54a4a3..a15b31a7319a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py @@ -13,6 +13,8 @@ class ResponseContentPartEventGenerator(ResponseEventGenerator): + """Generate content-part events for a single response item.""" + def __init__( self, logger, @@ -22,6 +24,21 @@ def __init__( output_index: int, content_index: int, ): + """Initialize the content-part event generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator in the event chain. + :type parent: ResponseEventGenerator + :param item_id: The response item identifier. + :type item_id: str + :param message_id: The originating message identifier. + :type message_id: str + :param output_index: The output item index. + :type output_index: int + :param content_index: The content part index within the item. + :type content_index: int + """ super().__init__(logger, parent) self.output_index = output_index self.content_index = content_index @@ -33,6 +50,18 @@ def __init__( def try_process_message( self, message, context, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + """Process a message into content-part events. + + :param message: The message to process. 
+ :type message: Any + :param context: The run context for the current request. + :type context: Any + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, next generator, and emitted events. + :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ is_processed = False events = [] next_processor = self @@ -64,6 +93,18 @@ def try_process_message( def on_start( self, _event, _run_details, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + """Emit the content-part-added event. + + :param _event: The current message. + :type _event: Any + :param _run_details: The run context, unused by this generator. + :type _run_details: Any + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Start status and emitted events. + :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ if self.started: return False, [] @@ -82,6 +123,18 @@ def on_start( def on_end( self, _message, _context, stream_state: StreamEventState ) -> List[project_models.ResponseStreamEvent]: + """Emit the content-part-done event. + + :param _message: The terminal message. + :type _message: Any + :param _context: The run context, unused by this generator. + :type _context: Any + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: The completion events. + :rtype: List[project_models.ResponseStreamEvent] + """ aggregated_content = self.item_content_helper.create_item_content() done_event = project_models.ResponseContentPartDoneEvent( item_id=self.item_id, @@ -96,6 +149,14 @@ def on_end( return [done_event] def try_create_item_content_helper(self, message): + """Create the content helper that matches the message payload. + + :param message: The message to inspect. + :type message: Any + + :return: True when a helper was created. 
+ :rtype: bool + """ if isinstance(message, (langgraph_messages.AIMessage, langgraph_messages.ToolMessage)): if self.is_text_content(message.content): self.item_content_helper = item_content_helpers.OutputTextItemContentHelper() @@ -107,9 +168,22 @@ def try_create_item_content_helper(self, message): return False def aggregate_content(self, content): + """Aggregate child content into the current content helper. + + :param content: The child content to aggregate. + :type content: Any + """ return self.item_content_helper.aggregate_content(content) def is_text_content(self, content): + """Check whether the message content can be treated as plain text. + + :param content: The content payload to inspect. + :type content: Any + + :return: True when the content is representable as text. + :rtype: bool + """ if isinstance(content, str): return True if isinstance(content, list) and all(isinstance(c, str) for c in content): @@ -117,6 +191,14 @@ def is_text_content(self, content): return False def create_child_processor(self, _message) -> ResponseEventGenerator: + """Create the child generator for the current content helper. + + :param _message: The originating message, unused by this generator. + :type _message: Any + + :return: The child generator. + :rtype: ResponseEventGenerator + """ if self.item_content_helper.content_type in ( project_models.ItemContentType.INPUT_TEXT, project_models.ItemContentType.OUTPUT_TEXT, @@ -132,6 +214,14 @@ def create_child_processor(self, _message) -> ResponseEventGenerator: raise ValueError(f"Unsupported item content type for child processor: {self.item_content_helper.content_type}") def has_finish_reason(self, message) -> bool: + """Check whether the message contains a finish reason. + + :param message: The message to inspect. + :type message: Any + + :return: True when a finish reason is present. 
+ :rtype: bool + """ if not isinstance(message, langgraph_messages.BaseMessageChunk): return False if message.response_metadata and message.response_metadata.get("finish_reason"): @@ -139,7 +229,14 @@ def has_finish_reason(self, message) -> bool: return False def should_end(self, event) -> bool: - # Determine if the event indicates end of the stream for this item + """Determine whether content generation for this item should end. + + :param event: The current message or chunk. + :type event: Any + + :return: True when the generator should stop. + :rtype: bool + """ if event is None: return True if event.id != self.message_id: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py index 649664f357bd..05e8be6670b4 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py @@ -28,6 +28,13 @@ class ResponseEventGenerator(ABC): started: bool = False def __init__(self, logger, parent): + """Initialize the response event generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator in the event chain. 
+ :type parent: ResponseEventGenerator | None + """ self.logger = logger self.parent = parent # parent generator diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py index 685074e668c4..7dd5e780d3ef 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py @@ -15,6 +15,8 @@ class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator): # pylint: disable=C4751 + """Generate function-call-argument delta and done events.""" + def __init__( self, logger, @@ -25,6 +27,21 @@ def __init__( *, hitl_helper: HumanInTheLoopHelper = None, ): + """Initialize the function-call-argument generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator in the event chain. + :type parent: ResponseEventGenerator + :param item_id: The response item identifier. + :type item_id: str + :param message_id: The originating message identifier. + :type message_id: str + :param output_index: The output item index. + :type output_index: int + :param hitl_helper: Optional helper for interrupt conversion. 
+ :type hitl_helper: HumanInTheLoopHelper + """ super().__init__(logger, parent) self.item_id = item_id self.output_index = output_index @@ -35,6 +52,18 @@ def __init__( def try_process_message( self, message, context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + """Process one message into function-call argument events. + + :param message: The message or interrupt to process. + :type message: Union[langgraph_messages.AnyMessage, Interrupt] + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, next generator, and emitted events. + :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ is_processed = False events = [] next_processor = self @@ -48,7 +77,7 @@ def try_process_message( if self.should_end(message): has_finish_reason = self.has_finish_reason(message) - complete_events = self.on_end(message, context, stream_state) + is_processed, complete_events = self.on_end(message, context, stream_state) events.extend(complete_events) next_processor = self.parent is_processed = has_finish_reason # if has finish reason, mark as processed and stop further processing @@ -58,6 +87,18 @@ def try_process_message( def on_start( self, _event: AnyMessage, _run_details, _stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + """Start argument generation for the current function call. + + :param _event: The current message. + :type _event: AnyMessage + :param _run_details: The run context, unused by this generator. + :type _run_details: LanggraphRunContext + :param _stream_state: The mutable stream state, unused on start. + :type _stream_state: StreamEventState + + :return: Start status and emitted events. 
+ :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ if self.started: return True, [] self.started = True @@ -69,6 +110,18 @@ def process( _run_details, stream_state: StreamEventState, ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + """Convert one message into function-call argument delta events. + + :param message: The message or interrupt to process. + :type message: Union[langgraph_messages.AnyMessage, Interrupt] + :param _run_details: The run context, unused by this generator. + :type _run_details: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, current generator, and emitted events. + :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ if self.should_end(message): return False, self, [] @@ -95,6 +148,14 @@ def process( return False, self, [] def has_finish_reason(self, message: AnyMessage) -> bool: + """Check whether the message marks completion for this argument stream. + + :param message: The message to inspect. + :type message: AnyMessage + + :return: True when the argument stream should finish. + :rtype: bool + """ if not message or message.id != self.message_id: return False if isinstance(message, langgraph_messages.AIMessageChunk): @@ -109,6 +170,14 @@ def has_finish_reason(self, message: AnyMessage) -> bool: return False def should_end(self, event: AnyMessage) -> bool: + """Determine whether this generator should stop processing. + + :param event: The current event. + :type event: AnyMessage + + :return: True when processing should stop. 
+ :rtype: bool + """ if event is None: return True if event.id != self.message_id: @@ -116,8 +185,20 @@ def should_end(self, event: AnyMessage) -> bool: return False def on_end( - self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument + self, _message: AnyMessage, _context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + """Emit the final function-call-arguments-done event. + + :param _message: The terminal message for the argument stream. + :type _message: AnyMessage + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Completion status and final events. + :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ done_event = project_models.ResponseFunctionCallArgumentsDoneEvent( item_id=self.item_id, output_index=self.output_index, @@ -126,9 +207,17 @@ def on_end( ) stream_state.sequence_number += 1 self.parent.aggregate_content(self.aggregated_content) # pass aggregated content to parent - return [done_event] + return True, [done_event] def get_tool_call_info(self, message: Union[langgraph_messages.AnyMessage, Interrupt]): + """Extract the first tool call from a message when present. + + :param message: The message to inspect. + :type message: Union[langgraph_messages.AnyMessage, Interrupt] + + :return: The first tool call payload, if any. 
+ :rtype: Optional[dict] + """ if isinstance(message, langgraph_messages.AIMessageChunk): if message.tool_call_chunks: if len(message.tool_call_chunks) > 1: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py index ddc61c6e43e8..e0fd92660309 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py @@ -17,9 +17,24 @@ class ResponseOutputItemEventGenerator(ResponseEventGenerator): + """Generate output-item added and done events for one streamed message.""" + def __init__(self, logger, parent: ResponseEventGenerator, output_index: int, message_id: str = None, *, hitl_helper: HumanInTheLoopHelper = None): + """Initialize the output-item event generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator in the event chain. + :type parent: ResponseEventGenerator + :param output_index: The output item index. + :type output_index: int + :param message_id: The originating message identifier. + :type message_id: str + :param hitl_helper: Optional helper for human-in-the-loop interrupts. 
+ :type hitl_helper: HumanInTheLoopHelper + """ super().__init__(logger, parent) self.output_index = output_index self.message_id = message_id @@ -29,6 +44,18 @@ def __init__(self, logger, parent: ResponseEventGenerator, def try_process_message( self, message: Union[AnyMessage, Interrupt, None], context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + """Process one streamed message into output-item events. + + :param message: The message or interrupt to process. + :type message: Union[AnyMessage, Interrupt, None] + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, next generator, and emitted events. + :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ is_processed = False next_processor = self events = [] @@ -69,6 +96,18 @@ def try_process_message( def on_start( self, _event: Union[AnyMessage, Interrupt], _context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + """Emit the output-item-added event for this message. + + :param _event: The current message or interrupt. + :type _event: Union[AnyMessage, Interrupt] + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Start status and emitted events. + :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ if self.started: return True, [] @@ -86,6 +125,14 @@ def on_start( return True, [item_added_event] def should_end(self, event: Union[AnyMessage, Interrupt]) -> bool: + """Determine whether this output-item generator should end. + + :param event: The current message or interrupt. 
+ :type event: Union[AnyMessage, Interrupt] + + :return: True when the generator should end. + :rtype: bool + """ if event is None: self.logger.info("Received None event, ending processor.") return True @@ -94,8 +141,20 @@ def should_end(self, event: Union[AnyMessage, Interrupt]) -> bool: return False def on_end( - self, message: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument + self, _message: Union[AnyMessage, Interrupt], _context: LanggraphRunContext, stream_state: StreamEventState ) -> List[project_models.ResponseStreamEvent]: + """Emit the output-item-done event for this generator. + + :param _message: The terminal message or interrupt. + :type _message: Union[AnyMessage, Interrupt] + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: The emitted completion events. + :rtype: List[project_models.ResponseStreamEvent] + """ if not self.started: # should not happen return [] @@ -111,50 +170,71 @@ def on_end( return [done_event] def aggregate_content(self, content) -> None: - # aggregate content from child processor + """Aggregate child content into the current item resource helper. + + :param content: The child content to aggregate. + :type content: Any + """ self.item_resource_helper.add_aggregate_content(content) - def try_create_item_resource_helper(self, event: Union[AnyMessage, Interrupt], id_generator: IdGenerator): # pylint: disable=too-many-return-statements + def try_create_item_resource_helper(self, event: Union[AnyMessage, Interrupt], id_generator: IdGenerator): + """Create the item-resource helper for the current message type. + + :param event: The message or interrupt to inspect. + :type event: Union[AnyMessage, Interrupt] + :param id_generator: The identifier generator for new item ids. 
+ :type id_generator: IdGenerator + + :return: True when a helper was created. + :rtype: bool + """ + helper = None if isinstance(event, langgraph_messages.AIMessageChunk) and event.tool_call_chunks: - self.item_resource_helper = item_resource_helpers.FunctionCallItemResourceHelper( + helper = item_resource_helpers.FunctionCallItemResourceHelper( item_id=id_generator.generate_function_call_id(), tool_call=event.tool_call_chunks[0] ) - return True - if isinstance(event, langgraph_messages.AIMessage) and event.tool_calls: - self.item_resource_helper = item_resource_helpers.FunctionCallItemResourceHelper( + elif isinstance(event, langgraph_messages.AIMessage) and event.tool_calls: + helper = item_resource_helpers.FunctionCallItemResourceHelper( item_id=id_generator.generate_function_call_id(), tool_call=event.tool_calls[0] ) - return True - if isinstance(event, langgraph_messages.AIMessage) and event.content: - self.item_resource_helper = item_resource_helpers.MessageItemResourceHelper( + elif isinstance(event, langgraph_messages.AIMessage) and event.content: + helper = item_resource_helpers.MessageItemResourceHelper( item_id=id_generator.generate_message_id(), role=project_models.ResponsesMessageRole.ASSISTANT ) - return True - if isinstance(event, langgraph_messages.HumanMessage) and event.content: - self.item_resource_helper = item_resource_helpers.MessageItemResourceHelper( + elif isinstance(event, langgraph_messages.HumanMessage) and event.content: + helper = item_resource_helpers.MessageItemResourceHelper( item_id=id_generator.generate_message_id(), role=project_models.ResponsesMessageRole.USER ) - return True - if isinstance(event, langgraph_messages.SystemMessage) and event.content: - self.item_resource_helper = item_resource_helpers.MessageItemResourceHelper( + elif isinstance(event, langgraph_messages.SystemMessage) and event.content: + helper = item_resource_helpers.MessageItemResourceHelper( item_id=id_generator.generate_message_id(), 
role=project_models.ResponsesMessageRole.SYSTEM ) - return True - if isinstance(event, langgraph_messages.ToolMessage): - self.item_resource_helper = item_resource_helpers.FunctionCallOutputItemResourceHelper( + elif isinstance(event, langgraph_messages.ToolMessage): + helper = item_resource_helpers.FunctionCallOutputItemResourceHelper( item_id=id_generator.generate_function_output_id(), call_id=event.tool_call_id ) - return True - if isinstance(event, Interrupt): - self.item_resource_helper = item_resource_helpers.FunctionCallInterruptItemResourceHelper( + elif isinstance(event, Interrupt): + helper = item_resource_helpers.FunctionCallInterruptItemResourceHelper( item_id=id_generator.generate_function_output_id(), hitl_helper=self.hitl_helper, interrupt=event, ) - return True - return False + + if helper is None: + return False + + self.item_resource_helper = helper + return True def create_child_processor(self, message: Union[AnyMessage, Interrupt]): + """Create the child generator for the current item resource type. + + :param message: The originating message or interrupt. + :type message: Union[AnyMessage, Interrupt] + + :return: The child generator, if one is required. 
+ :rtype: Optional[ResponseEventGenerator] + """ if self.item_resource_helper is None: return None if self.item_resource_helper.item_type == project_models.ItemType.FUNCTION_CALL: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py index 61f5652a6d46..f62723cb8337 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py @@ -14,6 +14,8 @@ class ResponseOutputTextEventGenerator(ResponseEventGenerator): + """Generate text delta and done events for one response content part.""" + def __init__( self, logger, @@ -23,6 +25,21 @@ def __init__( item_id: str, message_id: str, ): + """Initialize the output-text event generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator in the event chain. + :type parent: ResponseEventGenerator + :param content_index: The content index within the output item. + :type content_index: int + :param output_index: The output item index. + :type output_index: int + :param item_id: The response item identifier. + :type item_id: str + :param message_id: The originating message identifier. 
+ :type message_id: str + """ super().__init__(logger, parent) self.output_index = output_index self.content_index = content_index @@ -33,6 +50,18 @@ def __init__( def try_process_message( self, message: AnyMessage, _context, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + """Process a message into text delta and completion events. + + :param message: The message chunk to process. + :type message: AnyMessage + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, next generator, and emitted events. + :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ is_processed = False events = [] next_processor = self @@ -55,6 +84,16 @@ def try_process_message( def process( self, message: AnyMessage, stream_state: StreamEventState ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + """Convert message content into text delta events. + + :param message: The message containing text content. + :type message: AnyMessage + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, current generator, and emitted events. + :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ if message and message.content: content = [message.content] if isinstance(message.content, str) else message.content res = [] @@ -77,6 +116,14 @@ def process( return False, self, [] def has_finish_reason(self, message) -> bool: + """Check whether the message marks completion for this text stream. + + :param message: The message to inspect. + :type message: AnyMessage + + :return: True when the message carries a finish reason. 
+ :rtype: bool + """ if not message or message.id != self.message_id: return False if message.response_metadata and message.response_metadata.get("finish_reason"): @@ -84,7 +131,14 @@ def has_finish_reason(self, message) -> bool: return False def should_end(self, message) -> bool: - # Determine if the message indicates end of the stream for this item + """Determine whether text streaming for this item should end. + + :param message: The message to inspect. + :type message: AnyMessage + + :return: True when the generator should end. + :rtype: bool + """ if message is None: return True if message.id != self.message_id: @@ -92,8 +146,20 @@ def should_end(self, message) -> bool: return False def on_end( - self, message, context: LanggraphRunContext, stream_state: StreamEventState # pylint: disable=unused-argument + self, message, _context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + """Emit the final text-done event for the current content part. + + :param message: The terminal message for this text stream. + :type message: AnyMessage + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Completion status and final events. 
+ :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ if not self.started: return False, [] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py index c186dbd329e6..85f73715605e 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py @@ -22,6 +22,15 @@ class ResponseStreamEventGenerator(ResponseEventGenerator): """ def __init__(self, logger, parent, *, hitl_helper=None): + """Initialize the top-level response stream generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator, if any. + :type parent: ResponseEventGenerator | None + :keyword hitl_helper: Optional helper for human-in-the-loop interrupts. + :type hitl_helper: Any + """ super().__init__(logger, parent) self.hitl_helper = hitl_helper self.aggregated_contents: List[project_models.ItemResource] = [] @@ -29,6 +38,16 @@ def __init__(self, logger, parent, *, hitl_helper=None): def on_start( self, context: LanggraphRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + """Emit the initial response-created and in-progress stream events. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Whether generation started and the emitted events. 
+ :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ if self.started: return True, [] agent_id = context.agent_run.get_agent_id_object() @@ -68,7 +87,14 @@ def on_start( return True, [created_event, in_progress_event] def should_complete(self, event: langgraph_messages.AnyMessage) -> bool: - # Determine if the event indicates completion + """Determine whether the current event represents stream completion. + + :param event: The current stream event. + :type event: langgraph_messages.AnyMessage + + :return: True when the stream should be considered complete. + :rtype: bool + """ if event is None: return True return False @@ -79,6 +105,18 @@ def try_process_message( context: LanggraphRunContext, stream_state: StreamEventState, ) -> tuple[bool, Optional[ResponseEventGenerator], List[project_models.ResponseStreamEvent]]: + """Process a streamed message or transition to a child generator. + + :param message: The streamed message to process. + :type message: Optional[langgraph_messages.AnyMessage] + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, next generator, and emitted events. + :rtype: tuple[bool, Optional[ResponseEventGenerator], List[project_models.ResponseStreamEvent]] + """ is_processed = False next_processor = self events = [] @@ -104,7 +142,14 @@ def try_process_message( return is_processed, next_processor, events def should_end(self, event: langgraph_messages.AnyMessage) -> bool: - # Determine if the event indicates end of the stream + """Determine whether the stream should end for the current event. + + :param event: The current stream event. + :type event: langgraph_messages.AnyMessage + + :return: True when the generator should end. 
+ :rtype: bool + """ if event is None: return True return False @@ -115,6 +160,18 @@ def on_end( context: LanggraphRunContext, stream_state: StreamEventState, ) -> List[project_models.ResponseStreamEvent]: + """Emit the final response-completed event for the stream. + + :param _message: The terminal message for the stream. + :type _message: Optional[langgraph_messages.AnyMessage] + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: The final stream events. + :rtype: List[project_models.ResponseStreamEvent] + """ agent_id = context.agent_run.get_agent_id_object() conversation = context.agent_run.get_conversation_object() response_dict = { @@ -136,7 +193,11 @@ def on_end( return [done_event] def aggregate_content(self, content: Union[List[project_models.ItemResource], project_models.ItemResource]) -> None: - # aggregate content from children + """Collect item resources produced by child generators. + + :param content: The child content to aggregate. + :type content: Union[List[project_models.ItemResource], project_models.ItemResource] + """ if isinstance(content, list): for c in content: self.aggregate_content(c) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py index 4ca422b88c41..08d7e5cf3a32 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -31,6 +31,15 @@ class FoundryToolLateBindingChatModel(BaseChatModel): """ def __init__(self, delegate: BaseChatModel, runtime: Optional[Runtime], foundry_tools: List[FoundryToolLike]): + """Initialize the late-binding chat model wrapper. 
+ + :param delegate: The underlying chat model. + :type delegate: BaseChatModel + :param runtime: The active LangGraph runtime, if available. + :type runtime: Optional[Runtime] + :param foundry_tools: The Foundry tools to resolve and bind at call time. + :type foundry_tools: List[FoundryToolLike] + """ super().__init__() self._delegate = delegate self._runtime = runtime @@ -75,8 +84,6 @@ def bind_tools(self, # pylint: disable=C4758 :type tools: Sequence[Dict[str, Any] | type | Callable | BaseTool] :keyword tool_choice: Optional tool choice strategy. :type tool_choice: str | None - :keyword kwargs: Additional keyword arguments for tool binding. - :type kwargs: Any :return: A Runnable with the tools bound for later invocation. :rtype: Runnable[LanguageModelInput, AIMessage] """ @@ -89,6 +96,14 @@ def bind_tools(self, # pylint: disable=C4758 return self def _bound_delegate_for_call(self, config: Optional[RunnableConfig]) -> Runnable[LanguageModelInput, AIMessage]: + """Resolve and bind all tools before invoking the delegate model. + + :param config: The runnable config carrying the LangGraph run context. + :type config: Optional[RunnableConfig] + + :return: The delegate model with all applicable tools bound. + :rtype: Runnable[LanguageModelInput, AIMessage] + """ from .._context import LanggraphRunContext foundry_tools: Iterable[BaseTool] = [] @@ -108,24 +123,90 @@ def _bound_delegate_for_call(self, config: Optional[RunnableConfig]) -> Runnable bound_kwargs = self._bound_kwargs or {} return self._delegate.bind_tools(all_tools, **bound_kwargs) - def invoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: + def invoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: # pylint: disable=C4758 + """Invoke the wrapped chat model with late-bound tools. + + :param input: The model input. + :type input: Any + :param config: Optional runnable config. 
+ :type config: Optional[RunnableConfig] + + :return: The model result. + :rtype: Any + """ return self._bound_delegate_for_call(config).invoke(input, config=config, **kwargs) - async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: + async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: # pylint: disable=C4758 + """Asynchronously invoke the wrapped chat model with late-bound tools. + + :param input: The model input. + :type input: Any + :param config: Optional runnable config. + :type config: Optional[RunnableConfig] + :keyword kwargs: Additional invocation keyword arguments. + :type kwargs: Any + + :return: The model result. + :rtype: Any + """ return await self._bound_delegate_for_call(config).ainvoke(input, config=config, **kwargs) - def stream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): + def stream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): # pylint: disable=C4758 + """Stream results from the wrapped chat model with late-bound tools. + + :param input: The model input. + :type input: Any + :param config: Optional runnable config. + :type config: Optional[RunnableConfig] + :keyword kwargs: Additional streaming keyword arguments. + :type kwargs: Any + + :return: A synchronous iterator of streamed outputs. + :rtype: Iterator[Any] + """ yield from self._bound_delegate_for_call(config).stream(input, config=config, **kwargs) - async def astream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): + async def astream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): # pylint: disable=C4758 + """Asynchronously stream results from the wrapped chat model. + + :param input: The model input. + :type input: Any + :param config: Optional runnable config. + :type config: Optional[RunnableConfig] + :keyword kwargs: Additional streaming keyword arguments. 
+ :type kwargs: Any + + :return: An async iterator of streamed outputs. + :rtype: AsyncIterator[Any] + """ async for x in self._bound_delegate_for_call(config).astream(input, config=config, **kwargs): yield x @property def _llm_type(self) -> str: + """Return the descriptive model type for LangChain integrations. + + :return: The logical model type name. + :rtype: str + """ return f"foundry_tool_binding_model({getattr(self._delegate, '_llm_type', type(self._delegate).__name__)})" - def _generate(self, messages: list[BaseMessage], stop: list[str] | None = None, + def _generate(self, messages: list[BaseMessage], stop: list[str] | None = None, # pylint: disable=C4758 run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any) -> ChatResult: + """Disallow direct LangChain generation on the wrapper. + + :param messages: The prompt messages. + :type messages: list[BaseMessage] + :param stop: Optional stop sequences. + :type stop: list[str] | None + :param run_manager: Optional LangChain run manager. + :type run_manager: CallbackManagerForLLMRun | None + :keyword kwargs: Additional generation keyword arguments. + :type kwargs: Any + + :raises NotImplementedError: Always raised because calls should route through the delegate methods. + :return: This method never returns. 
+ :rtype: ChatResult + """ # should never be called as invoke/ainvoke/stream/astream are redirected to delegate raise NotImplementedError() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py index c226e51e72ac..019d13c6c254 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py @@ -27,6 +27,11 @@ class FoundryToolBindingMiddleware(AgentMiddleware): _DummyToolName: ClassVar[str] = "__dummy_tool_by_foundry_middleware__" def __init__(self, foundry_tools: List[FoundryToolLike]): + """Initialize the middleware with the Foundry tools to bind. + + :param foundry_tools: The Foundry tools that should be bound at runtime. + :type foundry_tools: List[FoundryToolLike] + """ super().__init__() # to ensure `create_agent()` will create a tool node when there are foundry tools to bind @@ -38,6 +43,11 @@ def __init__(self, foundry_tools: List[FoundryToolLike]): @classmethod def _dummy_tool(cls) -> BaseTool: + """Create a placeholder tool so agent creation wires a tool node. + + :return: The placeholder tool instance. 
+ :rtype: BaseTool + """ return Tool(name=cls._DummyToolName, func=lambda x: None, description="__dummy_tool_by_foundry_middleware__") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py index 5c77b1339132..c6da0220e5be 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py @@ -21,6 +21,11 @@ class ResolvedTools(Iterable[BaseTool]): :type tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]] """ def __init__(self, tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]]): + """Initialize the resolved-tools view. + + :param tools: The resolved tool pairs to index by Foundry tool id. + :type tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]] + """ self._by_source_id: Dict[str, List[BaseTool]] = defaultdict(list) for rt, t in tools: self._by_source_id[rt.definition.id].append(t) @@ -74,6 +79,11 @@ def get(self, tool: Union[FoundryToolLike, Iterable[FoundryToolLike], None] = No yield from self._by_source_id.get(ft.id, []) def __iter__(self): + """Iterate over all resolved LangChain tools. + + :return: An iterator over the resolved tools. + :rtype: Iterator[BaseTool] + """ for tool_list in self._by_source_id.values(): yield from tool_list @@ -85,6 +95,11 @@ class FoundryLangChainToolResolver: :type name_resolver: Optional[ToolNameResolver] """ def __init__(self, name_resolver: Optional[ToolNameResolver] = None): + """Initialize the Foundry-to-LangChain tool resolver. + + :param name_resolver: Optional resolver for stable tool names. 
+ :type name_resolver: Optional[ToolNameResolver] + """ self._name_resolver = name_resolver or ToolNameResolver() async def resolve_from_registry(self) -> ResolvedTools: @@ -108,6 +123,14 @@ async def resolve(self, foundry_tools: List[FoundryToolLike]) -> ResolvedTools: return ResolvedTools(tools=((tool, self._create_structured_tool(tool)) for tool in resolved_foundry_tools)) def _create_structured_tool(self, resolved_tool: ResolvedFoundryTool) -> StructuredTool: + """Create a LangChain structured tool from a resolved Foundry tool. + + :param resolved_tool: The resolved Foundry tool descriptor. + :type resolved_tool: ResolvedFoundryTool + + :return: The structured tool wrapper. + :rtype: StructuredTool + """ name = self._name_resolver.resolve(resolved_tool) args_schema = self._create_pydantic_model(name, resolved_tool.input_schema) @@ -127,6 +150,16 @@ async def _tool_func(**kwargs: Any) -> str: @classmethod def _create_pydantic_model(cls, tool_name: str, input_schema: SchemaDefinition) -> type[BaseModel]: + """Create a Pydantic model for a Foundry tool input schema. + + :param tool_name: The tool name used to derive the model name. + :type tool_name: str + :param input_schema: The Foundry schema definition. + :type input_schema: SchemaDefinition + + :return: The generated Pydantic model type. 
+ :rtype: type[BaseModel] + """ field_definitions: Dict[str, Any] = {} required_fields = input_schema.required or set() for prop_name, prop in input_schema.properties.items(): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py index 1bfef8c39f81..ffcb8de2e911 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py @@ -35,7 +35,13 @@ class FoundryToolNodeWrappers(TypedDict): class FoundryToolCallWrapper: """A ToolCallWrapper that tries to resolve invokable foundry tools from context if tool is not resolved yet.""" + def __init__(self, foundry_tools: List[FoundryToolLike]): + """Initialize the wrapper with the allowed Foundry tools. + + :param foundry_tools: The Foundry tools that may be resolved at runtime. + :type foundry_tools: List[FoundryToolLike] + """ self._allowed_foundry_tools = foundry_tools def as_wrappers(self) -> FoundryToolNodeWrappers: @@ -74,6 +80,14 @@ async def call_tool_async(self, request: ToolCallRequest, invocation: AsyncToolI return await invocation(self._maybe_calling_foundry_tool(request)) def _maybe_calling_foundry_tool(self, request: ToolCallRequest) -> ToolCallRequest: + """Attach a resolved Foundry tool to the tool call request when possible. + + :param request: The incoming tool call request. + :type request: ToolCallRequest + + :return: The request with a resolved tool when a matching Foundry tool is available. + :rtype: ToolCallRequest + """ from .._context import LanggraphRunContext if (request.tool