diff --git a/python/packages/bedrock/tests/test_bedrock_client.py b/python/packages/bedrock/tests/test_bedrock_client.py index 48d1847655..d226943256 100644 --- a/python/packages/bedrock/tests/test_bedrock_client.py +++ b/python/packages/bedrock/tests/test_bedrock_client.py @@ -139,7 +139,6 @@ def test_prepare_options_tool_choice_required_includes_any() -> None: assert request["toolConfig"]["toolChoice"] == {"any": {}} - def test_prepare_options_tool_choice_auto_without_tools_omits_tool_config() -> None: """When tool_choice='auto' but no tools are provided, toolConfig must be omitted. diff --git a/python/packages/core/agent_framework/_feature_stage.py b/python/packages/core/agent_framework/_feature_stage.py index 818439cf86..90235b0232 100644 --- a/python/packages/core/agent_framework/_feature_stage.py +++ b/python/packages/core/agent_framework/_feature_stage.py @@ -52,7 +52,6 @@ class ExperimentalFeature(str, Enum): FUNCTIONAL_WORKFLOWS = "FUNCTIONAL_WORKFLOWS" HARNESS = "HARNESS" SKILLS = "SKILLS" - TOOLBOXES = "TOOLBOXES" class ReleaseCandidateFeature(str, Enum): diff --git a/python/packages/core/agent_framework/foundry/__init__.py b/python/packages/core/agent_framework/foundry/__init__.py index 01bb1d90c3..82a476ddff 100644 --- a/python/packages/core/agent_framework/foundry/__init__.py +++ b/python/packages/core/agent_framework/foundry/__init__.py @@ -30,7 +30,6 @@ "FoundryEmbeddingOptions": ("agent_framework_foundry", "agent-framework-foundry"), "FoundryEmbeddingSettings": ("agent_framework_foundry", "agent-framework-foundry"), "FoundryEvals": ("agent_framework_foundry", "agent-framework-foundry"), - "FoundryHostedToolType": ("agent_framework_foundry", "agent-framework-foundry"), "FoundryMemoryProvider": ("agent_framework_foundry", "agent-framework-foundry"), "FoundryLocalChatOptions": ("agent_framework_foundry_local", "agent-framework-foundry-local"), "FoundryLocalClient": ("agent_framework_foundry_local", "agent-framework-foundry-local"), @@ -42,9 +41,6 @@ 
"RawFoundryEmbeddingClient": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_foundry_target": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_traces": ("agent_framework_foundry", "agent-framework-foundry"), - "get_toolbox_tool_name": ("agent_framework_foundry", "agent-framework-foundry"), - "get_toolbox_tool_type": ("agent_framework_foundry", "agent-framework-foundry"), - "select_toolbox_tools": ("agent_framework_foundry", "agent-framework-foundry"), } diff --git a/python/packages/core/agent_framework/foundry/__init__.pyi b/python/packages/core/agent_framework/foundry/__init__.pyi index 85ab6c267e..7deb709c2a 100644 --- a/python/packages/core/agent_framework/foundry/__init__.pyi +++ b/python/packages/core/agent_framework/foundry/__init__.pyi @@ -19,7 +19,6 @@ from agent_framework_foundry import ( FoundryEmbeddingOptions, FoundryEmbeddingSettings, FoundryEvals, - FoundryHostedToolType, FoundryMemoryProvider, RawFoundryAgent, RawFoundryAgentChatClient, @@ -27,9 +26,6 @@ from agent_framework_foundry import ( RawFoundryEmbeddingClient, evaluate_foundry_target, evaluate_traces, - get_toolbox_tool_name, - get_toolbox_tool_type, - select_toolbox_tools, ) from agent_framework_foundry_local import ( FoundryLocalChatOptions, @@ -51,7 +47,6 @@ __all__ = [ "FoundryEmbeddingOptions", "FoundryEmbeddingSettings", "FoundryEvals", - "FoundryHostedToolType", "FoundryLocalChatOptions", "FoundryLocalClient", "FoundryLocalSettings", @@ -63,7 +58,4 @@ __all__ = [ "RawFoundryEmbeddingClient", "evaluate_foundry_target", "evaluate_traces", - "get_toolbox_tool_name", - "get_toolbox_tool_type", - "select_toolbox_tools", ] diff --git a/python/packages/foundry/README.md b/python/packages/foundry/README.md index 26f9a6e309..188535cd17 100644 --- a/python/packages/foundry/README.md +++ b/python/packages/foundry/README.md @@ -21,32 +21,7 @@ For hosted `FoundryAgent`, the toolbox must already be attached to the agent in ### Using toolboxes with 
`FoundryChatClient` -There are two patterns for wiring a toolbox into a `FoundryChatClient`-backed agent. - -**1. Fetch, optionally filter, and pass the tools directly** - -Load the toolbox from the Microsoft Foundry project, optionally select a subset of its tools, and hand them to an `Agent` alongside any other tools you own: - -```python -from agent_framework import Agent -from agent_framework.foundry import FoundryChatClient, select_toolbox_tools - -client = FoundryChatClient(...) -toolbox = await client.get_toolbox("my-toolbox", version="3") - -# Pass the whole toolbox: -agent = Agent(client=client, tools=toolbox) - -# Or filter to a subset first: -selected = select_toolbox_tools(toolbox, include_types=["code_interpreter", "mcp"]) -agent = Agent(client=client, tools=selected) -``` - -See [`foundry_chat_client_with_toolbox.py`](../../samples/02-agents/providers/foundry/foundry_chat_client_with_toolbox.py) for a full example, including combining multiple toolboxes. - -**2. Connect to the toolbox's MCP endpoint with `MCPStreamableHTTPTool`** - -Each toolbox is reachable as an MCP server. Instead of fetching and fanning out its individual tool definitions, you can point a MAF `MCPStreamableHTTPTool` at the toolbox's MCP endpoint — the agent then discovers and calls its tools over MCP at runtime: +Each toolbox is reachable as an MCP server. 
Connect to the toolbox's MCP endpoint with `MCPStreamableHTTPTool` — the agent then discovers and calls its tools over MCP at runtime: ```python from agent_framework import Agent, MCPStreamableHTTPTool diff --git a/python/packages/foundry/agent_framework_foundry/__init__.py b/python/packages/foundry/agent_framework_foundry/__init__.py index 93953d667a..002e63f8a6 100644 --- a/python/packages/foundry/agent_framework_foundry/__init__.py +++ b/python/packages/foundry/agent_framework_foundry/__init__.py @@ -16,7 +16,6 @@ evaluate_traces, ) from ._memory_provider import FoundryMemoryProvider -from ._tools import FoundryHostedToolType, get_toolbox_tool_name, get_toolbox_tool_type, select_toolbox_tools try: __version__ = importlib.metadata.version(__name__) @@ -32,7 +31,6 @@ "FoundryEmbeddingOptions", "FoundryEmbeddingSettings", "FoundryEvals", - "FoundryHostedToolType", "FoundryMemoryProvider", "RawFoundryAgent", "RawFoundryAgentChatClient", @@ -41,7 +39,4 @@ "__version__", "evaluate_foundry_target", "evaluate_traces", - "get_toolbox_tool_name", - "get_toolbox_tool_type", - "select_toolbox_tools", ] diff --git a/python/packages/foundry/agent_framework_foundry/_agent.py b/python/packages/foundry/agent_framework_foundry/_agent.py index c47ba7dc41..8b737694e3 100644 --- a/python/packages/foundry/agent_framework_foundry/_agent.py +++ b/python/packages/foundry/agent_framework_foundry/_agent.py @@ -418,12 +418,6 @@ def _prepare_tools_for_openai( self, tools: ToolTypes | Callable[..., Any] | Sequence[ToolTypes | Callable[..., Any]] | None, ) -> list[Any]: - """Prepare tools for Foundry agent Responses API calls. - - Mirrors ``RawFoundryChatClient`` sanitization so toolbox-fetched MCP - tools with extra read-model fields continue to work through the agent - surface. 
- """ response_tools = super()._prepare_tools_for_openai(tools) return [_sanitize_foundry_response_tool(tool_item) for tool_item in response_tools] diff --git a/python/packages/foundry/agent_framework_foundry/_chat_client.py b/python/packages/foundry/agent_framework_foundry/_chat_client.py index fc2b29e1e4..614efcad15 100644 --- a/python/packages/foundry/agent_framework_foundry/_chat_client.py +++ b/python/packages/foundry/agent_framework_foundry/_chat_client.py @@ -16,7 +16,6 @@ load_settings, ) from agent_framework._compaction import CompactionStrategy, TokenizerProtocol -from agent_framework._feature_stage import ExperimentalFeature, experimental from agent_framework._telemetry import get_user_agent from agent_framework.observability import ChatTelemetryLayer from agent_framework_openai._chat_client import OpenAIChatOptions, RawOpenAIChatClient @@ -36,7 +35,7 @@ from agent_framework_foundry._oauth_helpers import try_parse_oauth_consent_event -from ._tools import _sanitize_foundry_response_tool, fetch_toolbox # pyright: ignore[reportPrivateUsage] +from ._tools import _sanitize_foundry_response_tool # pyright: ignore[reportPrivateUsage] if sys.version_info >= (3, 13): from typing import TypeVar # type: ignore # pragma: no cover @@ -53,7 +52,6 @@ if TYPE_CHECKING: from agent_framework import ChatAndFunctionMiddlewareTypes, ToolTypes - from azure.ai.projects.models import ToolboxVersionObject logger: logging.Logger = logging.getLogger("agent_framework.foundry") @@ -234,13 +232,6 @@ def _prepare_tools_for_openai( self, tools: ToolTypes | Callable[..., Any] | Sequence[ToolTypes | Callable[..., Any]] | None, ) -> list[Any]: - """Prepare tools for Foundry Responses API calls. - - Foundry toolbox reads can surface MCP tool objects with extra fields - (for example ``name``) that are accepted by the toolbox API but rejected - by the Responses API. Sanitize those hosted-tool payloads before sending - them downstream. 
- """ response_tools = super()._prepare_tools_for_openai(tools) return [_sanitize_foundry_response_tool(tool_item) for tool_item in response_tools] @@ -510,37 +501,6 @@ def get_mcp_tool( # endregion - # region Toolbox methods (instance methods — these hit the network) - - @experimental(feature_id=ExperimentalFeature.TOOLBOXES) - async def get_toolbox( - self, - name: str, - *, - version: str | None = None, - ) -> ToolboxVersionObject: - """Fetch a Foundry toolbox by name. - - If ``version`` is omitted, resolves the toolbox's current default version - (two requests). If ``version`` is specified, fetches that version directly - (single request). - - Args: - name: The name of the toolbox. - - Keyword Args: - version: Optional immutable version identifier to pin to. - - Returns: - A ``ToolboxVersionObject``. Pass its ``tools`` attribute to - ``Agent(tools=toolbox.tools)``. - - Raises: - azure.core.exceptions.ResourceNotFoundError: If the toolbox or - the requested version does not exist. - """ - return await fetch_toolbox(self.project_client, name, version) - class FoundryChatClient( # type: ignore[misc] FunctionInvocationLayer[FoundryChatOptionsT], diff --git a/python/packages/foundry/agent_framework_foundry/_tools.py b/python/packages/foundry/agent_framework_foundry/_tools.py index 40b8bf0905..b392ab2dc5 100644 --- a/python/packages/foundry/agent_framework_foundry/_tools.py +++ b/python/packages/foundry/agent_framework_foundry/_tools.py @@ -2,179 +2,54 @@ """Shared tool helpers for Foundry chat clients. -Includes: - -* *Toolbox* helpers — a *toolbox* is a named, versioned bundle of tool - definitions stored in an Azure AI Foundry project. -* Responses-API payload sanitization for Foundry hosted tools. +Includes Responses-API payload sanitization for Foundry hosted tools. 
""" from __future__ import annotations -from collections.abc import Callable, Collection, Mapping, Sequence -from typing import TYPE_CHECKING, Any, Literal, TypeAlias, cast +from collections.abc import Mapping +from typing import Any, cast -from agent_framework._feature_stage import ExperimentalFeature, experimental from azure.ai.projects.models import MCPTool as FoundryMCPTool -if TYPE_CHECKING: - from azure.ai.projects.aio import AIProjectClient - from azure.ai.projects.models import Tool, ToolboxVersionObject - -FoundryHostedToolType: TypeAlias = ( - Literal[ - "code_interpreter", - "file_search", - "image_generation", - "mcp", - "web_search", - ] - | str -) -ToolboxToolSelectionInput: TypeAlias = "ToolboxVersionObject | Sequence[Tool | dict[str, Any]]" - - -@experimental(feature_id=ExperimentalFeature.TOOLBOXES) -async def fetch_toolbox( - project_client: AIProjectClient, - name: str, - version: str | None = None, -) -> ToolboxVersionObject: - """Fetch a toolbox version via an ``AIProjectClient``. - - If ``version`` is omitted, resolves the toolbox's current default - version (two requests: one to ``.get(name)`` for the default version - pointer, one to ``.get_version(name, version)`` for the tools). If - ``version`` is specified, fetches that version directly (single request). - """ - if version is None: - handle = await project_client.beta.toolboxes.get(name) - version = handle.default_version - return await project_client.beta.toolboxes.get_version(name, version) - - -@experimental(feature_id=ExperimentalFeature.TOOLBOXES) -def get_toolbox_tool_name(tool: Tool | dict[str, Any]) -> str | None: - """Return the best-effort display/selection name for a toolbox tool. - - Selection precedence: - 1. MCP ``server_label`` - 2. Generic tool ``name`` - 3. 
Tool ``type`` - """ - if isinstance(tool, dict): - if server_label := tool.get("server_label"): - return str(server_label) - if name := tool.get("name"): - return str(name) - if tool_type := tool.get("type"): - return str(tool_type) - return None - - if server_label := getattr(tool, "server_label", None): - return str(server_label) - if name := getattr(tool, "name", None): - return str(name) - if tool_type := getattr(tool, "type", None): - return str(tool_type) - return None - - -@experimental(feature_id=ExperimentalFeature.TOOLBOXES) -def get_toolbox_tool_type(tool: Tool | dict[str, Any]) -> str | None: - """Return the raw tool ``type`` if present.""" - tool_type = tool.get("type") if isinstance(tool, dict) else getattr(tool, "type", None) - return str(tool_type) if tool_type is not None else None - - -@experimental(feature_id=ExperimentalFeature.TOOLBOXES) -def select_toolbox_tools( - tools: ToolboxToolSelectionInput, - *, - include_names: Collection[str] | None = None, - exclude_names: Collection[str] | None = None, - include_types: Collection[FoundryHostedToolType] | None = None, - exclude_types: Collection[FoundryHostedToolType] | None = None, - predicate: Callable[[Tool | dict[str, Any]], bool] | None = None, -) -> list[Tool | dict[str, Any]]: - """Filter toolbox tools by normalized name, raw type, and/or predicate. - - Normalized name precedence: - 1. ``server_label`` for MCP tools - 2. ``name`` - 3. 
``type`` - """ - tool_items: Sequence[Tool | dict[str, Any]] = ( - tools if isinstance(tools, Sequence) else cast("Sequence[Tool | dict[str, Any]]", tools.tools) - ) - include_name_set = {str(item) for item in include_names} if include_names is not None else None - exclude_name_set = {str(item) for item in exclude_names} if exclude_names is not None else None - include_type_set = {str(item) for item in include_types} if include_types is not None else None - exclude_type_set = {str(item) for item in exclude_types} if exclude_types is not None else None - - selected: list[Tool | dict[str, Any]] = [] - for tool in tool_items: - tool_name = get_toolbox_tool_name(tool) - tool_type = get_toolbox_tool_type(tool) - - if include_name_set is not None and tool_name not in include_name_set: - continue - if exclude_name_set is not None and tool_name in exclude_name_set: - continue - if include_type_set is not None and tool_type not in include_type_set: - continue - if exclude_type_set is not None and tool_type in exclude_type_set: - continue - if predicate is not None and not predicate(tool): - continue - - selected.append(tool) - - return selected - def _validate_hosted_tool_payload(sanitized: Mapping[str, Any]) -> None: """Fail fast on hosted tool payloads that would always be rejected by the Responses API. These mismatches are not injectable defaults — the caller must supply the - missing information — so surfacing a clear error here points at the toolbox + missing information — so surfacing a clear error here points at the tool definition instead of letting the API return a generic 400. """ tool_type = sanitized.get("type") if tool_type == "file_search" and not sanitized.get("vector_store_ids"): raise ValueError( "'file_search' tool is missing required 'vector_store_ids'. " - "If this came from a Foundry toolbox, update the toolbox definition " - "to include at least one vector store ID." + "Update the tool definition to include at least one vector store ID." 
) if tool_type == "mcp" and not sanitized.get("server_url") and not sanitized.get("project_connection_id"): raise ValueError( "'mcp' tool is missing both 'server_url' and 'project_connection_id'. " - "If this came from a Foundry toolbox, update the toolbox definition " - "to include one of these." + "Update the tool definition to include one of these." ) def _sanitize_foundry_response_tool(tool_item: Any) -> Any: # pyright: ignore[reportUnusedFunction] """Return a Responses-API-safe tool payload for Foundry hosted tools. - Reconciles known mismatches between toolbox reads and the Responses API: + Reconciles known mismatches between hosted tool definitions and the Responses API: - 1. Toolbox reads can return hosted tool objects decorated with read-model - fields such as top-level ``name`` and ``description``. The Responses API - rejects at least ``name`` with ``Unknown parameter: 'tools[0].name'``. - These fields are stripped from non-function hosted tool payloads. - 2. ``code_interpreter`` tools stored in a toolbox without a ``container`` - field (the Azure SDK treats it as optional) are rejected by the Responses - API with ``Missing required parameter: 'tools[N].container'``. A default + 1. Hosted tool objects may carry read-model fields such as top-level ``name`` + and ``description``. The Responses API rejects at least ``name`` with + ``Unknown parameter: 'tools[0].name'``. These fields are stripped from + non-function hosted tool payloads. + 2. ``code_interpreter`` tools without a ``container`` field (the Azure SDK + treats it as optional) are rejected by the Responses API with + ``Missing required parameter: 'tools[N].container'``. A default ``{"type": "auto"}`` container is injected when absent. 3. 
Hosted tools that are structurally incomplete in ways that cannot be defaulted (``file_search`` without ``vector_store_ids``, ``mcp`` without either ``server_url`` or ``project_connection_id``) raise ``ValueError`` - with a message that points at the toolbox definition. - - These are workarounds until the toolbox/Responses proxy normalizes payloads - server-side. + with a message that points at the tool definition. """ if isinstance(tool_item, FoundryMCPTool): sanitized: dict[str, Any] = dict(cast("Mapping[str, Any]", tool_item)) diff --git a/python/packages/foundry/tests/foundry/test_foundry_chat_client.py b/python/packages/foundry/tests/foundry/test_foundry_chat_client.py index 2ef5ca04ee..5dd0806604 100644 --- a/python/packages/foundry/tests/foundry/test_foundry_chat_client.py +++ b/python/packages/foundry/tests/foundry/test_foundry_chat_client.py @@ -641,8 +641,8 @@ def test_prepare_tools_for_openai_strips_extraneous_name_from_foundry_mcp_tool() assert "name" not in prepared -def test_prepare_tools_for_openai_strips_read_model_fields_from_toolbox_code_interpreter() -> None: - """Toolbox-returned code interpreter tools may carry read-model-only name/description.""" +def test_prepare_tools_for_openai_strips_read_model_fields_from_hosted_code_interpreter() -> None: + """Hosted code interpreter tools may carry read-model-only name/description.""" project_client = MagicMock() project_client.get_openai_client.return_value = _make_mock_openai_client() client = FoundryChatClient(project_client=project_client, model="test-model") @@ -650,7 +650,7 @@ def test_prepare_tools_for_openai_strips_read_model_fields_from_toolbox_code_int tool = { "type": "code_interpreter", "name": "code_interpreter_t6bbtm", - "description": "Toolbox read model description", + "description": "Hosted tool read model description", "container": {"file_ids": [], "type": "auto"}, } @@ -665,7 +665,7 @@ def test_prepare_tools_for_openai_strips_read_model_fields_from_toolbox_code_int def 
test_prepare_tools_for_openai_injects_default_container_for_code_interpreter_dict() -> None: - """Toolbox-returned code_interpreter without a container must get a default injected. + """Hosted code_interpreter without a container must get a default injected. The Azure SDK treats ``container`` as optional, but the Responses API rejects ``code_interpreter`` entries without one. The sanitizer backfills ``{"type": "auto"}``. @@ -691,7 +691,7 @@ def test_prepare_tools_for_openai_injects_default_container_for_code_interpreter def test_prepare_tools_for_openai_injects_default_container_for_code_interpreter_sdk_instance() -> None: """SDK ``CodeInterpreterTool`` instances without a container must also be backfilled. - Reproduces the toolbox creation path that calls + Reproduces the hosted tool creation path that calls ``CodeInterpreterTool(name="code_interpreter")`` without a container. """ from azure.ai.projects.models import CodeInterpreterTool @@ -771,13 +771,13 @@ def test_prepare_tools_for_openai_strips_name_from_non_function_hosted_tool_dict { "type": "file_search", "name": "file_search_tool_123", - "description": "toolbox decoration", + "description": "hosted tool decoration", "vector_store_ids": ["vs_123"], }, { "type": "web_search", "name": "web_search_tool_456", - "description": "toolbox decoration", + "description": "hosted tool decoration", }, ]) diff --git a/python/packages/foundry/tests/test_toolbox.py b/python/packages/foundry/tests/test_toolbox.py deleted file mode 100644 index 1933084e10..0000000000 --- a/python/packages/foundry/tests/test_toolbox.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Unit tests for toolbox helpers on FoundryChatClient. - -Return types are the raw azure-ai-projects SDK models (ToolboxVersionObject, -ToolboxObject) — no custom wrapper. Tests verify the chat-client get path and -tool-selection ergonomics. 
-""" - -from __future__ import annotations - -import datetime as dt -import os -from typing import Any -from unittest.mock import AsyncMock, MagicMock - -import pytest - -try: - from azure.ai.projects.models import ( - AutoCodeInterpreterToolParam, - CodeInterpreterTool, - Tool, - ToolboxObject, - ToolboxVersionObject, - ) -except ImportError: - pytest.skip( - "Toolbox types require azure-ai-projects>=2.1.0 (unreleased).", - allow_module_level=True, - ) - -from azure.core.exceptions import ResourceNotFoundError -from azure.identity import AzureCliCredential - -# --------------------------------------------------------------------------- # -# Helpers # -# --------------------------------------------------------------------------- # - - -class _AsyncIter: - """Minimal async-iterable for mocking ``AsyncItemPaged`` in tests.""" - - def __init__(self, items: list[Any]) -> None: - self._items = items - - def __aiter__(self) -> _AsyncIter: - self._iter = iter(self._items) - return self - - async def __anext__(self) -> Any: - try: - return next(self._iter) - except StopIteration: - raise StopAsyncIteration from None - - -def _make_code_interpreter() -> CodeInterpreterTool: - return CodeInterpreterTool(container=AutoCodeInterpreterToolParam()) - - -def _make_version_object( - *, - name: str = "research_tools", - version: str = "v1", - tools: list[Tool] | None = None, - description: str | None = None, -) -> ToolboxVersionObject: - return ToolboxVersionObject( - id=f"tbv_{name}_{version}", - name=name, - version=version, - metadata={}, - created_at=dt.datetime(2026, 4, 10, tzinfo=dt.timezone.utc), - tools=tools if tools is not None else [_make_code_interpreter()], - description=description, - ) - - -def _make_mock_foundry_client(*, project_client: MagicMock) -> Any: - """Build a FoundryChatClient wired to a mock project_client.""" - from agent_framework_foundry import FoundryChatClient - - project_client.get_openai_client = MagicMock(return_value=MagicMock()) - return 
FoundryChatClient(project_client=project_client, model="test-model") - - -# --------------------------------------------------------------------------- # -# get_toolbox — explicit version path # -# --------------------------------------------------------------------------- # - - -async def test_get_toolbox_with_explicit_version_makes_single_request() -> None: - project_client = MagicMock() - version_obj = _make_version_object(name="research_tools", version="v3") - project_client.beta.toolboxes.get_version = AsyncMock(return_value=version_obj) - project_client.beta.toolboxes.get = AsyncMock( - side_effect=AssertionError("get() must not be called when version is explicit") - ) - - client = _make_mock_foundry_client(project_client=project_client) - - toolbox = await client.get_toolbox("research_tools", version="v3") - - assert isinstance(toolbox, ToolboxVersionObject) - assert toolbox.name == "research_tools" - assert toolbox.version == "v3" - project_client.beta.toolboxes.get_version.assert_awaited_once_with("research_tools", "v3") - project_client.beta.toolboxes.get.assert_not_called() - - -# --------------------------------------------------------------------------- # -# get_toolbox — default-version path + error + passthrough + smoke # -# --------------------------------------------------------------------------- # - - -async def test_get_toolbox_default_version_resolves_then_fetches() -> None: - project_client = MagicMock() - handle = ToolboxObject(id="tb_1", name="research_tools", default_version="v5") - version_obj = _make_version_object(name="research_tools", version="v5") - - project_client.beta.toolboxes.get = AsyncMock(return_value=handle) - project_client.beta.toolboxes.get_version = AsyncMock(return_value=version_obj) - - client = _make_mock_foundry_client(project_client=project_client) - - toolbox = await client.get_toolbox("research_tools") - - assert toolbox.version == "v5" - project_client.beta.toolboxes.get.assert_awaited_once_with("research_tools") 
- project_client.beta.toolboxes.get_version.assert_awaited_once_with("research_tools", "v5") - - -async def test_get_toolbox_propagates_resource_not_found() -> None: - project_client = MagicMock() - project_client.beta.toolboxes.get = AsyncMock(side_effect=ResourceNotFoundError("no such toolbox")) - - client = _make_mock_foundry_client(project_client=project_client) - - with pytest.raises(ResourceNotFoundError): - await client.get_toolbox("missing_toolbox") - - -async def test_get_toolbox_tool_passthrough_preserves_heterogeneous_types() -> None: - """Ensure all Tool subclasses pass through unchanged — critical for MCP tools - with project_connection_id, which must reach the runtime untouched.""" - from azure.ai.projects.models import MCPTool as FoundryMCPTool - - mcp_tool = FoundryMCPTool( - server_label="github_oauth", - server_url="https://api.githubcopilot.com/mcp", - ) - mcp_tool["project_connection_id"] = "conn_abc" - - project_client = MagicMock() - version_obj = _make_version_object( - name="mixed", - version="v1", - tools=[_make_code_interpreter(), mcp_tool], - ) - project_client.beta.toolboxes.get_version = AsyncMock(return_value=version_obj) - - client = _make_mock_foundry_client(project_client=project_client) - - toolbox = await client.get_toolbox("mixed", version="v1") - - assert len(toolbox.tools) == 2 - assert isinstance(toolbox.tools[0], CodeInterpreterTool) - assert isinstance(toolbox.tools[1], FoundryMCPTool) - assert toolbox.tools[1]["project_connection_id"] == "conn_abc" - - -async def test_toolbox_tools_can_be_passed_to_agent() -> None: - """Integration smoke: toolbox.tools can be passed directly to Agent(tools=...) 
.""" - from agent_framework import Agent - - project_client = MagicMock() - version_obj = _make_version_object(name="research_tools", version="v1", tools=[_make_code_interpreter()]) - project_client.beta.toolboxes.get_version = AsyncMock(return_value=version_obj) - - client = _make_mock_foundry_client(project_client=project_client) - - toolbox = await client.get_toolbox("research_tools", version="v1") - - agent = Agent( - client=client, - instructions="You are a test agent.", - tools=toolbox.tools, - ) - - agent_tools = agent.default_options["tools"] - assert len(agent_tools) == 1 - assert agent_tools[0]["type"] == "code_interpreter" - - -async def test_multiple_toolbox_tool_lists_can_be_combined_in_agent() -> None: - """Nested toolbox ``.tools`` lists flatten into one tool list on Agent construction.""" - from agent_framework import Agent - - project_client = MagicMock() - project_client.get_openai_client = MagicMock(return_value=MagicMock()) - client = _make_mock_foundry_client(project_client=project_client) - - toolbox_a = _make_version_object(name="research_tools", version="v1", tools=[_make_code_interpreter()]) - toolbox_b = _make_version_object(name="some_other_tools", version="v3", tools=[_make_code_interpreter()]) - - agent = Agent( - client=client, - instructions="You are a test agent.", - tools=[toolbox_a.tools, toolbox_b.tools], - ) - - agent_tools = agent.default_options["tools"] - assert len(agent_tools) == 2 - assert agent_tools[0]["type"] == "code_interpreter" - assert agent_tools[1]["type"] == "code_interpreter" - - -# --------------------------------------------------------------------------- # -# toolbox tool selection helpers # -# --------------------------------------------------------------------------- # - - -def test_get_toolbox_tool_name_prefers_server_label_then_name_then_type() -> None: - from azure.ai.projects.models import MCPTool as FoundryMCPTool - - from agent_framework_foundry import get_toolbox_tool_name - - mcp_tool = 
FoundryMCPTool( - server_label="githubmcp", - server_url="https://api.githubcopilot.com/mcp", - ) - assert get_toolbox_tool_name(mcp_tool) == "githubmcp" - - named_tool = {"type": "code_interpreter", "name": "ci_tool"} - assert get_toolbox_tool_name(named_tool) == "ci_tool" - - unnamed_tool = {"type": "web_search"} - assert get_toolbox_tool_name(unnamed_tool) == "web_search" - - -def test_select_toolbox_tools_filters_by_names() -> None: - from azure.ai.projects.models import MCPTool as FoundryMCPTool - - from agent_framework_foundry import select_toolbox_tools - - tools: list[Tool | dict[str, Any]] = [ - FoundryMCPTool(server_label="githubmcp", server_url="https://api.githubcopilot.com/mcp"), - {"type": "code_interpreter", "name": "python_runner"}, - {"type": "web_search"}, - ] - - selected = select_toolbox_tools(tools, include_names=["githubmcp", "python_runner"]) - - assert len(selected) == 2 - assert selected[0] is tools[0] - assert selected[1] is tools[1] - - -def test_select_toolbox_tools_filters_by_typed_tool_types() -> None: - from agent_framework_foundry import select_toolbox_tools - - tools: list[Tool | dict[str, Any]] = [ - {"type": "mcp", "server_label": "githubmcp"}, - {"type": "code_interpreter", "name": "python_runner"}, - {"type": "web_search"}, - ] - - selected = select_toolbox_tools(tools, include_types=["mcp", "code_interpreter"]) - - assert len(selected) == 2 - assert selected[0]["type"] == "mcp" - assert selected[1]["type"] == "code_interpreter" - - -def test_select_toolbox_tools_accepts_toolbox_object_directly() -> None: - from agent_framework_foundry import select_toolbox_tools - - toolbox = _make_version_object( - name="research_tools", - version="v1", - tools=[ - {"type": "mcp", "server_label": "githubmcp"}, # type: ignore[list-item] - {"type": "code_interpreter", "name": "python_runner"}, # type: ignore[list-item] - {"type": "web_search"}, # type: ignore[list-item] - ], - ) - - selected = select_toolbox_tools(toolbox, include_types=["mcp", 
"code_interpreter"]) - - assert len(selected) == 2 - assert selected[0]["type"] == "mcp" - assert selected[1]["type"] == "code_interpreter" - - -async def test_fetched_toolbox_can_be_combined_with_function_tool() -> None: - from agent_framework import Agent, FunctionTool, tool - - project_client = MagicMock() - version_obj = _make_version_object(name="research_tools", version="v1", tools=[_make_code_interpreter()]) - project_client.beta.toolboxes.get_version = AsyncMock(return_value=version_obj) - - client = _make_mock_foundry_client(project_client=project_client) - toolbox = await client.get_toolbox("research_tools", version="v1") - - @tool(name="local_lookup", description="A local helper tool") - def local_lookup(query: str) -> str: - return query - - agent = Agent( - client=client, - instructions="You are a test agent.", - tools=[toolbox, local_lookup], - ) - - agent_tools = agent.default_options["tools"] - assert len(agent_tools) == 2 - assert agent_tools[0]["type"] == "code_interpreter" - assert isinstance(agent_tools[1], FunctionTool) - assert agent_tools[1].name == "local_lookup" - - -def test_select_toolbox_tools_supports_excludes_and_predicate() -> None: - from agent_framework_foundry import select_toolbox_tools - - tools: list[Tool | dict[str, Any]] = [ - {"type": "mcp", "server_label": "githubmcp"}, - {"type": "mcp", "server_label": "learnmcp"}, - {"type": "web_search"}, - ] - - selected = select_toolbox_tools( - tools, - exclude_names=["learnmcp"], - predicate=lambda tool: tool.get("type") == "mcp", # type: ignore[union-attr] - ) - - assert len(selected) == 1 - assert selected[0]["server_label"] == "githubmcp" - - -async def test_selected_toolbox_subset_can_be_combined_with_function_tool() -> None: - from agent_framework import Agent, FunctionTool, tool - - from agent_framework_foundry import select_toolbox_tools - - project_client = MagicMock() - version_obj = _make_version_object( - name="research_tools", - version="v1", - tools=[ - {"type": "mcp", 
"server_label": "githubmcp"}, # type: ignore[list-item] - {"type": "code_interpreter", "name": "python_runner"}, # type: ignore[list-item] - {"type": "web_search"}, # type: ignore[list-item] - ], - ) - project_client.beta.toolboxes.get_version = AsyncMock(return_value=version_obj) - - client = _make_mock_foundry_client(project_client=project_client) - toolbox = await client.get_toolbox("research_tools", version="v1") - selected_tools = select_toolbox_tools(toolbox, include_types=["mcp", "code_interpreter"]) - - @tool(name="local_lookup", description="A local helper tool") - def local_lookup(query: str) -> str: - return query - - agent = Agent( - client=client, - instructions="You are a test agent.", - tools=[selected_tools, local_lookup], - ) - - agent_tools = agent.default_options["tools"] - assert len(agent_tools) == 3 - assert agent_tools[0]["type"] == "mcp" - assert agent_tools[1]["type"] == "code_interpreter" - assert isinstance(agent_tools[2], FunctionTool) - assert agent_tools[2].name == "local_lookup" - - -# --------------------------------------------------------------------------- # -# Integration # -# --------------------------------------------------------------------------- # - - -skip_if_foundry_integration_tests_disabled = pytest.mark.skipif( - os.getenv("FOUNDRY_PROJECT_ENDPOINT", "") in ("", "https://test-project.services.ai.azure.com/") - or os.getenv("FOUNDRY_MODEL", "") == "", - reason="No real FOUNDRY_PROJECT_ENDPOINT or FOUNDRY_MODEL provided; skipping integration tests.", -) - - -@pytest.mark.flaky -@pytest.mark.integration -@skip_if_foundry_integration_tests_disabled -async def test_integration_get_toolbox_round_trip_against_real_project() -> None: - """Create a toolbox via the raw SDK, fetch via FoundryChatClient, then delete. - - Self-contained to avoid depending on toolboxes that may be cleaned up - externally. Exercises both the default-version resolution path - (``get`` + ``get_version``) and the explicit-version path. 
- """ - from uuid import uuid4 - - from agent_framework import Agent - - from agent_framework_foundry import FoundryChatClient - - client = FoundryChatClient(credential=AzureCliCredential()) - project_client = client.project_client - - toolbox_name = f"af-int-toolbox-{uuid4().hex[:12]}" - created = await project_client.beta.toolboxes.create_version( - name=toolbox_name, - tools=[CodeInterpreterTool()], - description=f"{toolbox_name} integration test", - ) - assert isinstance(created, ToolboxVersionObject) - try: - toolbox_default = await client.get_toolbox(toolbox_name) - assert toolbox_default.name == toolbox_name - assert toolbox_default.tools, "Default-version fetch returned no tools" - - toolbox_pinned = await client.get_toolbox(toolbox_name, version=created.version) - assert toolbox_pinned.version == created.version - assert toolbox_pinned.tools - - agent = Agent( - client=client, - instructions="You are a test agent.", - tools=toolbox_pinned.tools, - ) - assert len(agent.default_options["tools"]) == len(toolbox_pinned.tools) - finally: - await project_client.beta.toolboxes.delete(toolbox_name) diff --git a/python/samples/02-agents/context_providers/README.md b/python/samples/02-agents/context_providers/README.md index 7c34e10518..04f3a1395f 100644 --- a/python/samples/02-agents/context_providers/README.md +++ b/python/samples/02-agents/context_providers/README.md @@ -7,7 +7,6 @@ These samples demonstrate how to use context providers to enrich agent conversat | File / Folder | Description | |---------------|-------------| | [`simple_context_provider.py`](simple_context_provider.py) | Implement a custom context provider by extending `ContextProvider` to extract and inject structured user information across turns. 
| -| [`foundry_toolbox_context_provider.py`](foundry_toolbox_context_provider.py) | Compose a Microsoft Foundry toolbox with a `ContextProvider` that caches the toolbox once and picks a subset of its tools per-turn via `select_toolbox_tools`, driven by keywords in the latest user message. | | [`azure_ai_foundry_memory.py`](azure_ai_foundry_memory.py) | Use `FoundryMemoryProvider` to add semantic memory — automatically retrieves, searches, and stores memories via Azure AI Foundry. | | [`azure_ai_search/`](azure_ai_search/) | Retrieval Augmented Generation (RAG) with Azure AI Search in semantic and agentic modes. See its own [README](azure_ai_search/README.md). | | [`mem0/`](mem0/) | Memory-powered context using the Mem0 integration (open-source and managed). See its own [README](mem0/README.md). | @@ -20,12 +19,6 @@ These samples demonstrate how to use context providers to enrich agent conversat - `FOUNDRY_MODEL`: Model deployment name - Azure CLI authentication (`az login`) -**For `foundry_toolbox_context_provider.py`:** -- `FOUNDRY_PROJECT_ENDPOINT`: Your Microsoft Foundry project endpoint -- `FOUNDRY_MODEL`: Model deployment name -- A toolbox already configured in that project; set `TOOLBOX_NAME` / `TOOLBOX_VERSION` at the top of the sample -- Azure CLI authentication (`az login`) - **For `azure_ai_foundry_memory.py`:** - `FOUNDRY_PROJECT_ENDPOINT`: Your Azure AI Foundry project endpoint - `FOUNDRY_MODEL`: Chat/responses model deployment name diff --git a/python/samples/02-agents/context_providers/foundry_toolbox_context_provider.py b/python/samples/02-agents/context_providers/foundry_toolbox_context_provider.py deleted file mode 100644 index d889c7c1ac..0000000000 --- a/python/samples/02-agents/context_providers/foundry_toolbox_context_provider.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -import asyncio -import os -from typing import Any - -from agent_framework import Agent, AgentSession, ContextProvider, Message, SessionContext -from agent_framework.foundry import ( - FoundryChatClient, - get_toolbox_tool_name, - get_toolbox_tool_type, - select_toolbox_tools, -) -from azure.identity import AzureCliCredential -from dotenv import load_dotenv -from pydantic import BaseModel - -# Load environment variables from .env file -load_dotenv() - -""" -Foundry Toolbox + Context Provider Example - -This sample composes a Foundry toolbox with a ContextProvider so the agent's -tool list is chosen dynamically per-turn. It uses the chat client itself as a lightweight "tool router": the -latest user message plus a short menu of toolbox tools is sent to the model -with a Pydantic ``response_format``, and the returned tool names drive -``select_toolbox_tools``. The toolbox is fetched once and cached on the -provider's state dict; subsequent turns reuse the cache. - -Prerequisites: -- A Microsoft Foundry project -- A toolbox already configured in that project (set TOOLBOX_NAME below) -- FOUNDRY_PROJECT_ENDPOINT and FOUNDRY_MODEL environment variables set -- Azure CLI authentication (`az login`) -""" - -# Replace with your own Foundry toolbox name and version. -TOOLBOX_NAME = "research_toolbox" -# Set to None to resolve the toolbox's current default version at fetch time. -TOOLBOX_VERSION: str | None = None - -# Generic queries that exercise the router without assuming any specific tool -# types are configured. The first is introspective, the second forces a -# non-empty pick for whichever tools the toolbox actually contains, and the -# third should route to nothing. 
-QUERIES: list[str] = [ - "Introduce yourself and briefly describe the tools you can use to help me.", - "Pick the tool you think is most useful and demonstrate it with a short example.", - "Say hi in one short sentence - no tools needed.", -] - - -def create_sample_toolbox(name: str) -> str: - """Create (or replace) a toolbox version in the Foundry project. - - Toolboxes are normally configured in the Foundry portal or a deployment - script, not the application itself. This helper exists so the sample can - be run end-to-end without first setting a toolbox up by hand — delete any - existing toolbox under ``name``, then create a fresh version containing a - single MCP tool. Returns the created version identifier. - """ - from azure.ai.projects import AIProjectClient - from azure.ai.projects.models import MCPTool, Tool - from azure.core.exceptions import ResourceNotFoundError - - with ( - AzureCliCredential() as credential, - AIProjectClient(credential=credential, endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"]) as project_client, - ): - try: - project_client.beta.toolboxes.delete(name) - print(f"Toolbox `{name}` deleted") - except ResourceNotFoundError: - pass - - tools: list[Tool] = [ - MCPTool( - server_label="api_specs", - server_url="https://gitmcp.io/Azure/azure-rest-api-specs", - require_approval="never", - ) - ] - - created = project_client.beta.toolboxes.create_version( - name=name, - description="Toolbox version with MCP require_approval set to 'never'.", - tools=tools, - ) - print(f"Created toolbox {created.name}@{created.version} ({len(created.tools)} tool(s))") - return created.version - - -class ToolSelection(BaseModel): - """Structured output for the per-turn tool router.""" - - tool_names: list[str] - - -ROUTER_INSTRUCTIONS = ( - "You are a tool router. Given the user's latest message and a menu of " - "available tools (one per line, formatted as 'NAME - TYPE'), return the " - "NAMES of the tools that would plausibly help answer the message. 
Return " - "an empty list if no tool is needed." -) - - -class DynamicToolboxProvider(ContextProvider): - """Fetches a Foundry toolbox once and lets the model pick tools per-turn.""" - - DEFAULT_SOURCE_ID = "foundry_toolbox" - - def __init__( - self, - source_id: str = DEFAULT_SOURCE_ID, - *, - client: FoundryChatClient, - toolbox_name: str, - toolbox_version: str | None = None, - ) -> None: - super().__init__(source_id) - self._client = client - self._toolbox_name = toolbox_name - self._toolbox_version = toolbox_version - - async def before_run( - self, - *, - agent: Any, - session: AgentSession | None, - context: SessionContext, - state: dict[str, Any], - ) -> None: - """Cache the toolbox on first call, then let the model pick tools per-turn.""" - toolbox = state.get("toolbox") - if toolbox is None: - toolbox = await self._client.get_toolbox(self._toolbox_name, version=self._toolbox_version) - state["toolbox"] = toolbox - print(f"[{self.source_id}] Loaded toolbox {toolbox.name}@{toolbox.version} ({len(toolbox.tools)} tool(s))") - - user_messages = [m for m in context.get_messages(include_input=True) if getattr(m, "role", None) == "user"] - if not user_messages: - context.extend_tools(self.source_id, list(toolbox.tools)) - return - - picks = await self._route_tools(user_messages[-1].text, toolbox.tools) - if picks: - tools = select_toolbox_tools(toolbox, include_names=picks) - print(f"[{self.source_id}] Router picked {sorted(picks)} - surfacing {len(tools)} tool(s)") - else: - tools = list(toolbox.tools) - print(f"[{self.source_id}] Router picked nothing - surfacing all {len(tools)} tool(s)") - context.extend_tools(self.source_id, tools) - - async def _route_tools(self, user_text: str, tools: Any) -> list[str]: - """Ask the model which toolbox tools to surface for this turn.""" - menu = "\n".join(f"- {get_toolbox_tool_name(t)} - {get_toolbox_tool_type(t)}" for t in tools) - prompt = ( - f"User message:\n{user_text}\n\n" - f"Available tools:\n{menu}\n\n" - "Return 
the names of tools that should be surfaced for this turn." - ) - response = await self._client.get_response( - messages=[Message("user", [prompt])], - options={ - "instructions": ROUTER_INSTRUCTIONS, - "response_format": ToolSelection, - }, - ) - selection: ToolSelection = response.value # type: ignore - return selection.tool_names - - -async def main() -> None: - client = FoundryChatClient( - project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], - model=os.environ["FOUNDRY_MODEL"], - credential=AzureCliCredential(), - ) - - # Comment out if the toolbox already exists in your Foundry project. - create_sample_toolbox(TOOLBOX_NAME) - - toolbox_provider = DynamicToolboxProvider( - client=client, - toolbox_name=TOOLBOX_NAME, - toolbox_version=TOOLBOX_VERSION, - ) - - async with Agent( - client=client, - instructions=( - "You are a helpful assistant. Use the tools available to you on each " - "turn to answer the user. If no tools are relevant, reply directly." - ), - context_providers=[toolbox_provider], - ) as agent: - session = agent.create_session() - - for query in QUERIES: - print(f"\nUser: {query}") - result = await agent.run(query, session=session) - print(f"Assistant: {result}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/foundry/README.md b/python/samples/02-agents/providers/foundry/README.md index 120c4d9a1c..598b849620 100644 --- a/python/samples/02-agents/providers/foundry/README.md +++ b/python/samples/02-agents/providers/foundry/README.md @@ -26,8 +26,7 @@ This folder contains Azure AI Foundry and Foundry Local samples for Agent Framew | [`foundry_chat_client_with_hosted_mcp.py`](foundry_chat_client_with_hosted_mcp.py) | Foundry Chat Client with hosted MCP | | [`foundry_chat_client_with_local_mcp.py`](foundry_chat_client_with_local_mcp.py) | Foundry Chat Client with local MCP | | [`foundry_chat_client_with_session.py`](foundry_chat_client_with_session.py) | Foundry Chat Client with session 
management | -| [`foundry_chat_client_with_toolbox.py`](foundry_chat_client_with_toolbox.py) | Foundry Chat Client with Foundry toolbox loading and multi-toolbox composition | -| [`foundry_chat_client_with_toolbox_mcp.py`](foundry_chat_client_with_toolbox_mcp.py) | Foundry Chat Client connected to a toolbox via its MCP endpoint using `MCPStreamableHTTPTool` | +| [`foundry_chat_client_with_toolbox.py`](foundry_chat_client_with_toolbox.py) | Foundry Chat Client connected to a toolbox via its MCP endpoint using `MCPStreamableHTTPTool` | ## FoundryLocalClient Samples diff --git a/python/samples/02-agents/providers/foundry/foundry_chat_client_with_toolbox.py b/python/samples/02-agents/providers/foundry/foundry_chat_client_with_toolbox.py index 2d85c12a4b..1fbfe20a9a 100644 --- a/python/samples/02-agents/providers/foundry/foundry_chat_client_with_toolbox.py +++ b/python/samples/02-agents/providers/foundry/foundry_chat_client_with_toolbox.py @@ -2,52 +2,48 @@ import asyncio import os +from collections.abc import Callable +from typing import Any -from agent_framework import Agent -from agent_framework.foundry import FoundryChatClient, select_toolbox_tools -from azure.identity import AzureCliCredential +from agent_framework import Agent, MCPStreamableHTTPTool +from agent_framework.foundry import FoundryChatClient +from azure.core.credentials import TokenCredential +from azure.identity import AzureCliCredential, DefaultAzureCredential, get_bearer_token_provider from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() """ -Foundry Chat Client with Toolbox Example +Foundry Toolbox via MAF ``MCPStreamableHTTPTool`` -This sample demonstrates loading a named, versioned Foundry toolbox into an -Agent via ``FoundryChatClient.get_toolbox()``. A toolbox is a server-side -bundle of tool configurations (code interpreter, file search, MCP, web search, -etc.) configured in the Foundry portal or via the raw SDK. 
+Instead of fetching the toolbox and fanning out individual tool specs, point +MAF's ``MCPStreamableHTTPTool`` at the toolbox's MCP endpoint. The agent +discovers and calls the toolbox's tools over MCP at runtime. Prerequisites: -- A Microsoft Foundry project -- A toolbox already configured in that project (set TOOLBOX_NAME below) +- A Microsoft Foundry project with a toolbox configured - FOUNDRY_PROJECT_ENDPOINT and FOUNDRY_MODEL environment variables set +- FOUNDRY_TOOLBOX_ENDPOINT: the toolbox's MCP endpoint URL, e.g. + ``https://.services.ai.azure.com/api/projects//toolsets//mcp?api-version=v1`` +- Azure CLI authentication (``az login``) """ -# Replace with your own Foundry toolbox name and version. +# Must match the ```` segment of FOUNDRY_TOOLBOX_ENDPOINT. TOOLBOX_NAME = "research_toolbox" -TOOLBOX_VERSION = "1" -# Used only by combine_toolboxes() — swap in a second toolbox you own. -SECOND_TOOLBOX_NAME = "analysis_toolbox" -SECOND_TOOLBOX_VERSION = "1" - -# Replace with any question that exercises the tools configured in your toolbox. -QUERY = "Introduce yourself and briefly describe the tools you can use to help me." def create_sample_toolbox(name: str) -> str: """Create (or replace) a toolbox version in the Foundry project. Toolboxes are normally configured in the Foundry portal or a deployment - script, not the application itself. This helper exists so the samples can + script, not the application itself. This helper exists so the sample can be run end-to-end without first setting a toolbox up by hand — delete any - existing toolbox under ``name``, then create a fresh version containing an - MCP tool, a web search tool, and a code interpreter tool. Returns the - created version identifier. + existing toolbox under ``name``, then create a fresh version containing a + single MCP tool. Returns the created version identifier. 
""" from azure.ai.projects import AIProjectClient - from azure.ai.projects.models import CodeInterpreterTool, MCPTool, Tool, WebSearchTool + from azure.ai.projects.models import MCPTool, Tool from azure.core.exceptions import ResourceNotFoundError with ( @@ -68,9 +64,6 @@ def create_sample_toolbox(name: str) -> str: ) ] - tools.append(WebSearchTool(name="web_search")) - tools.append(CodeInterpreterTool(name="code_interpreter")) - created = project_client.beta.toolboxes.create_version( name=name, description="Toolbox version with MCP require_approval set to 'never'.", @@ -80,99 +73,46 @@ def create_sample_toolbox(name: str) -> str: return created.version -async def main() -> None: - """Example showing how to use a single Foundry toolbox with FoundryChatClient.""" - print("=== Foundry Chat Client with Toolbox Example ===") - - # For authentication, run `az login` in your terminal or replace - # AzureCliCredential with your preferred authentication option. - client = FoundryChatClient( - credential=AzureCliCredential(), - project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], - model=os.environ["FOUNDRY_MODEL"], - ) - - # Comment out if the toolbox already exists in your Foundry project. - create_sample_toolbox(TOOLBOX_NAME) - - # Omit ``version`` to resolve the toolbox's current default version at runtime. - toolbox = await client.get_toolbox(TOOLBOX_NAME) - print(f"Loaded toolbox {toolbox.name}@{toolbox.version} ({len(toolbox.tools)} tool(s))") - - agent = Agent( - client=client, - instructions="You are a research assistant. 
Use the available tools to answer questions.", - tools=toolbox, - ) +def make_toolbox_header_provider(credential: TokenCredential) -> Callable[[dict[str, Any]], dict[str, str]]: + """Build a header_provider that injects a fresh Azure AI bearer token on every MCP request.""" + get_token = get_bearer_token_provider(credential, "https://ai.azure.com/.default") - print(f"User: {QUERY}") - result = await agent.run(QUERY) - print(f"Result: {result}\n") + def provide(_kwargs: dict[str, Any]) -> dict[str, str]: + return { + "Authorization": f"Bearer {get_token()}", + } + return provide -async def combine_toolboxes() -> None: - """Alternative flow: combine the tools from multiple Foundry toolboxes.""" - client = FoundryChatClient( - credential=AzureCliCredential(), - project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], - model=os.environ["FOUNDRY_MODEL"], - ) - # Comment out if the toolboxes already exist in your Foundry project. - create_sample_toolbox(TOOLBOX_NAME) - create_sample_toolbox(SECOND_TOOLBOX_NAME) - - toolbox_a = await client.get_toolbox(TOOLBOX_NAME, version=TOOLBOX_VERSION) - toolbox_b = await client.get_toolbox(SECOND_TOOLBOX_NAME, version=SECOND_TOOLBOX_VERSION) - print( - "Loaded toolboxes: " - f"{toolbox_a.name}@{toolbox_a.version} ({len(toolbox_a.tools)} tool(s)), " - f"{toolbox_b.name}@{toolbox_b.version} ({len(toolbox_b.tools)} tool(s))" - ) - - agent = Agent( - client=client, - instructions="You are a research assistant. 
Use all available tools to answer questions.", - tools=[toolbox_a, toolbox_b], - ) - - print(f"User: {QUERY}") - result = await agent.run(QUERY) - print(f"Combined-toolbox result: {result}\n") - - -async def select_tools_from_toolbox() -> None: - """Alternative flow: keep only a subset of toolbox tools before agent creation.""" - client = FoundryChatClient( - credential=AzureCliCredential(), - project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], - model=os.environ["FOUNDRY_MODEL"], - ) +async def main() -> None: + credential = DefaultAzureCredential() # Comment out if the toolbox already exists in your Foundry project. create_sample_toolbox(TOOLBOX_NAME) - toolbox = await client.get_toolbox(TOOLBOX_NAME, version=TOOLBOX_VERSION) - print(f"Loaded toolbox {toolbox.name}@{toolbox.version} ({len(toolbox.tools)} tool(s))") - - selected_tools = select_toolbox_tools( - toolbox, - include_types=["code_interpreter", "mcp"], - ) - print(f"Selected {len(selected_tools)} toolbox tools for the agent") - - agent = Agent( - client=client, - instructions="You are a research assistant. Use only the selected toolbox tools.", - tools=selected_tools, + toolbox_tool = MCPStreamableHTTPTool( + name="foundry_toolbox", + description="Tools exposed by the configured Foundry toolbox", + url=os.environ["FOUNDRY_TOOLBOX_ENDPOINT"], + header_provider=make_toolbox_header_provider(credential), + load_prompts=False, ) - print(f"User: {QUERY}") - result = await agent.run(QUERY) - print(f"Selected-toolbox result: {result}\n") + async with Agent( + client=FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=credential, + ), + instructions="You are a helpful assistant. Use the available toolbox tools to answer the user.", + tools=toolbox_tool, + ) as agent: + query = "What tools do you have access to?" 
+ print(f"User: {query}") + result = await agent.run(query) + print(f"Assistant: {result}") if __name__ == "__main__": asyncio.run(main()) - # asyncio.run(combine_toolboxes()) - # asyncio.run(select_tools_from_toolbox()) diff --git a/python/samples/02-agents/providers/foundry/foundry_chat_client_with_toolbox_mcp.py b/python/samples/02-agents/providers/foundry/foundry_chat_client_with_toolbox_mcp.py deleted file mode 100644 index 1fbfe20a9a..0000000000 --- a/python/samples/02-agents/providers/foundry/foundry_chat_client_with_toolbox_mcp.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os -from collections.abc import Callable -from typing import Any - -from agent_framework import Agent, MCPStreamableHTTPTool -from agent_framework.foundry import FoundryChatClient -from azure.core.credentials import TokenCredential -from azure.identity import AzureCliCredential, DefaultAzureCredential, get_bearer_token_provider -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Foundry Toolbox via MAF ``MCPStreamableHTTPTool`` - -Instead of fetching the toolbox and fanning out individual tool specs, point -MAF's ``MCPStreamableHTTPTool`` at the toolbox's MCP endpoint. The agent -discovers and calls the toolbox's tools over MCP at runtime. - -Prerequisites: -- A Microsoft Foundry project with a toolbox configured -- FOUNDRY_PROJECT_ENDPOINT and FOUNDRY_MODEL environment variables set -- FOUNDRY_TOOLBOX_ENDPOINT: the toolbox's MCP endpoint URL, e.g. - ``https://.services.ai.azure.com/api/projects//toolsets//mcp?api-version=v1`` -- Azure CLI authentication (``az login``) -""" - -# Must match the ```` segment of FOUNDRY_TOOLBOX_ENDPOINT. -TOOLBOX_NAME = "research_toolbox" - - -def create_sample_toolbox(name: str) -> str: - """Create (or replace) a toolbox version in the Foundry project. 
- - Toolboxes are normally configured in the Foundry portal or a deployment - script, not the application itself. This helper exists so the sample can - be run end-to-end without first setting a toolbox up by hand — delete any - existing toolbox under ``name``, then create a fresh version containing a - single MCP tool. Returns the created version identifier. - """ - from azure.ai.projects import AIProjectClient - from azure.ai.projects.models import MCPTool, Tool - from azure.core.exceptions import ResourceNotFoundError - - with ( - AzureCliCredential() as credential, - AIProjectClient(credential=credential, endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"]) as project_client, - ): - try: - project_client.beta.toolboxes.delete(name) - print(f"Toolbox `{name}` deleted") - except ResourceNotFoundError: - pass - - tools: list[Tool] = [ - MCPTool( - server_label="api_specs", - server_url="https://gitmcp.io/Azure/azure-rest-api-specs", - require_approval="never", - ) - ] - - created = project_client.beta.toolboxes.create_version( - name=name, - description="Toolbox version with MCP require_approval set to 'never'.", - tools=tools, - ) - print(f"Created toolbox {created.name}@{created.version} ({len(created.tools)} tool(s))") - return created.version - - -def make_toolbox_header_provider(credential: TokenCredential) -> Callable[[dict[str, Any]], dict[str, str]]: - """Build a header_provider that injects a fresh Azure AI bearer token on every MCP request.""" - get_token = get_bearer_token_provider(credential, "https://ai.azure.com/.default") - - def provide(_kwargs: dict[str, Any]) -> dict[str, str]: - return { - "Authorization": f"Bearer {get_token()}", - } - - return provide - - -async def main() -> None: - credential = DefaultAzureCredential() - - # Comment out if the toolbox already exists in your Foundry project. 
- create_sample_toolbox(TOOLBOX_NAME) - - toolbox_tool = MCPStreamableHTTPTool( - name="foundry_toolbox", - description="Tools exposed by the configured Foundry toolbox", - url=os.environ["FOUNDRY_TOOLBOX_ENDPOINT"], - header_provider=make_toolbox_header_provider(credential), - load_prompts=False, - ) - - async with Agent( - client=FoundryChatClient( - project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], - model=os.environ["FOUNDRY_MODEL"], - credential=credential, - ), - instructions="You are a helpful assistant. Use the available toolbox tools to answer the user.", - tools=toolbox_tool, - ) as agent: - query = "What tools do you have access to?" - print(f"User: {query}") - result = await agent.run(query) - print(f"Assistant: {result}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/security/email_security_example.py b/python/samples/02-agents/security/email_security_example.py index b8cd0a36d1..97cd20c158 100644 --- a/python/samples/02-agents/security/email_security_example.py +++ b/python/samples/02-agents/security/email_security_example.py @@ -284,7 +284,9 @@ async def run_scenarios(agent, config): # attempt to call send_email, so the policy enforcer would never trigger. 
session = agent.create_session() - response = await agent.run("Please fetch my recent emails and give me a brief summary of each one.", session=session) + response = await agent.run( + "Please fetch my recent emails and give me a brief summary of each one.", session=session + ) print(f"\n📋 Agent Response:\n{'-' * 40}") print(response.text) diff --git a/python/samples/04-hosting/container/hyperlight_codeact/call_server.py b/python/samples/04-hosting/container/hyperlight_codeact/call_server.py index e57da7c086..90ea113b1f 100644 --- a/python/samples/04-hosting/container/hyperlight_codeact/call_server.py +++ b/python/samples/04-hosting/container/hyperlight_codeact/call_server.py @@ -20,8 +20,7 @@ # https://.services.ai.azure.com/api/projects//agents/ ENDPOINT = os.environ.get( "FOUNDRY_AGENT_ENDPOINT", - "https://.services.ai.azure.com" - "/api/projects//agents/", + "https://.services.ai.azure.com/api/projects//agents/", ) SCOPE = "https://ai.azure.com/.default" PROMPT = ( diff --git a/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/.env.example b/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/.env.example index 91ae96ac46..81eb25faff 100644 --- a/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/.env.example +++ b/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/.env.example @@ -1,3 +1,3 @@ FOUNDRY_PROJECT_ENDPOINT="..." AZURE_AI_MODEL_DEPLOYMENT_NAME="..." -TOOLBOX_NAME="..." \ No newline at end of file +FOUNDRY_TOOLBOX_ENDPOINT="..." 
\ No newline at end of file diff --git a/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/README.md b/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/README.md index 1d57a051fd..bd261061c6 100644 --- a/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/README.md +++ b/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/README.md @@ -14,7 +14,7 @@ You can also create a Foundry Toolbox in the Foundry portal. Read more about it ### Model Integration -The agent uses `FoundryChatClient` from the Agent Framework to create an OpenAI-compatible Responses client. It loads a named Foundry Toolbox via `client.get_toolbox(name)` — the toolbox is a server-side bundle of tool configurations (e.g., `code_interpreter`, `web_search`) defined in the Foundry portal or by `azd provision`. Omitting `version` resolves the toolbox's current default version at runtime. +The agent uses `FoundryChatClient` from the Agent Framework to create an OpenAI-compatible Responses client. It connects to the toolbox's MCP endpoint via `MCPStreamableHTTPTool`, which discovers and invokes the toolbox's tools over MCP at runtime. The endpoint URL is provided through the `FOUNDRY_TOOLBOX_ENDPOINT` environment variable. See [main.py](main.py) for the full implementation. @@ -26,18 +26,29 @@ The agent is hosted using the [Agent Framework](https://github.com/microsoft/age Follow the instructions in the [Running the Agent Host Locally](../../README.md#running-the-agent-host-locally) section of the README in the parent directory to run the agent host. -An extra environment variable `TOOLBOX_NAME` must be set to the name of the Foundry Toolbox that the agent should load at runtime. This allows the agent host to dynamically retrieve the correct toolbox from Foundry when it starts. Run the following: +An extra environment variable must be set to point to the toolbox MCP endpoint. 
You can provide it in one of two ways: + +**Option A – Set `FOUNDRY_TOOLBOX_ENDPOINT` directly** (recommended for local development): ```bash -export TOOLBOX_NAME="" +export FOUNDRY_TOOLBOX_ENDPOINT="https://.services.ai.azure.com/api/projects//toolsets//mcp?api-version=v1" ``` Or in PowerShell: ```powershell -$env:TOOLBOX_NAME="" +$env:FOUNDRY_TOOLBOX_ENDPOINT="https://.services.ai.azure.com/api/projects//toolsets//mcp?api-version=v1" +``` + +**Option B – Set `TOOLBOX_NAME`** (used automatically by the Foundry hosting scaffolding after `azd provision`): + +The agent derives the endpoint at runtime as: +``` +{FOUNDRY_PROJECT_ENDPOINT}/toolsets/{TOOLBOX_NAME}/mcp?api-version=v1 ``` +When deployed via `azd provision`, the scaffolding injects `TOOLBOX_NAME=agent-tools` and `FOUNDRY_PROJECT_ENDPOINT` automatically from the provisioned resources declared in [`agent.manifest.yaml`](agent.manifest.yaml). + ## Interacting with the agent > Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. Please refer to the [parent README](../../README.md) for more details. Use this README for sample queries you can send to the agent. 
diff --git a/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/main.py b/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/main.py index 6b82811c66..c836ae0ec1 100644 --- a/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/main.py +++ b/python/samples/04-hosting/foundry-hosted-agents/responses/04_foundry_toolbox/main.py @@ -2,40 +2,76 @@ import asyncio import os +from collections.abc import Callable +from typing import Any -from agent_framework import Agent +from agent_framework import Agent, MCPStreamableHTTPTool from agent_framework.foundry import FoundryChatClient from agent_framework_foundry_hosting import ResponsesHostServer -from azure.identity import DefaultAzureCredential +from azure.core.credentials import TokenCredential +from azure.identity import DefaultAzureCredential, get_bearer_token_provider from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() +def _resolve_toolbox_endpoint() -> str: + """Resolve the toolbox MCP endpoint URL. + + Prefers the explicit ``FOUNDRY_TOOLBOX_ENDPOINT`` env var; falls back to + constructing the URL from ``FOUNDRY_PROJECT_ENDPOINT`` and ``TOOLBOX_NAME`` + (the variables injected by the Foundry hosting scaffolding after ``azd provision``). 
+ """ + if (endpoint := os.environ.get("FOUNDRY_TOOLBOX_ENDPOINT")) is not None: + if not endpoint: + raise ValueError("FOUNDRY_TOOLBOX_ENDPOINT is set but empty") + return endpoint + project_endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"].rstrip("/") + toolbox_name = os.environ["TOOLBOX_NAME"] + return f"{project_endpoint}/toolsets/{toolbox_name}/mcp?api-version=v1" + + +def make_toolbox_header_provider(credential: TokenCredential) -> Callable[[dict[str, Any]], dict[str, str]]: + """Build a header_provider that injects a fresh Azure AI bearer token on every MCP request.""" + get_token = get_bearer_token_provider(credential, "https://ai.azure.com/.default") + + def provide(_kwargs: dict[str, Any]) -> dict[str, str]: + return { + "Authorization": f"Bearer {get_token()}", + } + + return provide + + async def main(): + credential = DefaultAzureCredential() + client = FoundryChatClient( project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - credential=DefaultAzureCredential(), + credential=credential, ) - # Load the named toolbox from the Foundry project. Omitting `version` - # resolves the toolbox's current default version at runtime. - toolbox = await client.get_toolbox(os.environ["TOOLBOX_NAME"]) + toolbox_tool = MCPStreamableHTTPTool( + name="foundry_toolbox", + description="Tools exposed by the configured Foundry toolbox", + url=_resolve_toolbox_endpoint(), + header_provider=make_toolbox_header_provider(credential), + load_prompts=False, + ) - agent = Agent( + async with Agent( client=client, instructions="You are a friendly assistant. Keep your answers brief.", - tools=toolbox, + tools=toolbox_tool, # History will be managed by the hosting infrastructure, thus there # is no need to store history by the service. 
Learn more at: # https://developers.openai.com/api/reference/resources/responses/methods/create default_options={"store": False}, - ) - - server = ResponsesHostServer(agent) - await server.run_async() + ) as agent: + server = ResponsesHostServer(agent) + await server.run_async() if __name__ == "__main__": diff --git a/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/.env.example b/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/.env.example index 91ae96ac46..81eb25faff 100644 --- a/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/.env.example +++ b/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/.env.example @@ -1,3 +1,3 @@ FOUNDRY_PROJECT_ENDPOINT="..." AZURE_AI_MODEL_DEPLOYMENT_NAME="..." -TOOLBOX_NAME="..." \ No newline at end of file +FOUNDRY_TOOLBOX_ENDPOINT="..." \ No newline at end of file diff --git a/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/README.md b/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/README.md index 9e10f2f9c1..d68ddc16c2 100644 --- a/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/README.md +++ b/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/README.md @@ -29,18 +29,29 @@ This agent uses four tools: Follow the instructions in the [Running the Agent Host Locally](../../README.md#running-the-agent-host-locally) section of the README in the parent directory to run the agent host. -An extra environment variable `TOOLBOX_NAME` must be set to the name of the Foundry Toolbox that the agent should load at runtime. This allows the agent host to dynamically retrieve the correct toolbox from Foundry when it starts. Run the following: +An extra environment variable must be set to point to the toolbox MCP endpoint. 
You can provide it in one of two ways: + +**Option A – Set `FOUNDRY_TOOLBOX_ENDPOINT` directly** (recommended for local development): ```bash -export TOOLBOX_NAME="<toolbox-name>" +export FOUNDRY_TOOLBOX_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>/toolsets/<toolbox-name>/mcp?api-version=v1" ``` Or in PowerShell: ```powershell -$env:TOOLBOX_NAME="<toolbox-name>" +$env:FOUNDRY_TOOLBOX_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>/toolsets/<toolbox-name>/mcp?api-version=v1" +``` + +**Option B – Set `TOOLBOX_NAME`** (used automatically by the Foundry hosting scaffolding after `azd provision`): + +The agent derives the endpoint at runtime as: +``` +{FOUNDRY_PROJECT_ENDPOINT}/toolsets/{TOOLBOX_NAME}/mcp?api-version=v1 ``` +When deployed via `azd provision`, the scaffolding injects `TOOLBOX_NAME=agent-tools` and `FOUNDRY_PROJECT_ENDPOINT` automatically from the provisioned resources declared in [`agent.manifest.yaml`](agent.manifest.yaml). + ## Interacting with the agent > Depending on how you run the agent host, you can invoke the agent using `curl` (`Invoke-WebRequest` in PowerShell) or `azd`. Please refer to the [parent README](../../README.md) for more details. Use this README for sample queries you can send to the agent.
diff --git a/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/agent.yaml b/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/agent.yaml index 7b3b1f01d1..5a62d90ff7 100644 --- a/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/agent.yaml +++ b/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/agent.yaml @@ -9,4 +9,6 @@ resources: memory: '0.5Gi' environment_variables: - name: AZURE_AI_MODEL_DEPLOYMENT_NAME - value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} \ No newline at end of file + value: ${AZURE_AI_MODEL_DEPLOYMENT_NAME} + - name: TOOLBOX_NAME + value: "agent-tools" \ No newline at end of file diff --git a/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/main.py b/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/main.py index 2bd40055e9..a324ab16a1 100644 --- a/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/main.py +++ b/python/samples/04-hosting/foundry-hosted-agents/responses/06_files/main.py @@ -2,18 +2,48 @@ import asyncio import os +from collections.abc import Callable +from typing import Any -from agent_framework import Agent, tool +from agent_framework import Agent, MCPStreamableHTTPTool, tool from agent_framework.foundry import FoundryChatClient -from agent_framework_foundry import select_toolbox_tools from agent_framework_foundry_hosting import ResponsesHostServer -from azure.identity import DefaultAzureCredential +from azure.core.credentials import TokenCredential +from azure.identity import DefaultAzureCredential, get_bearer_token_provider from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() +def _resolve_toolbox_endpoint() -> str: + """Resolve the toolbox MCP endpoint URL. 
+ + Prefers the explicit ``FOUNDRY_TOOLBOX_ENDPOINT`` env var; falls back to + constructing the URL from ``FOUNDRY_PROJECT_ENDPOINT`` and ``TOOLBOX_NAME`` + (the variables injected by the Foundry hosting scaffolding after ``azd provision``). + """ + if (endpoint := os.environ.get("FOUNDRY_TOOLBOX_ENDPOINT")) is not None: + if not endpoint: + raise ValueError("FOUNDRY_TOOLBOX_ENDPOINT is set but empty") + return endpoint + project_endpoint = os.environ["FOUNDRY_PROJECT_ENDPOINT"].rstrip("/") + toolbox_name = os.environ["TOOLBOX_NAME"] + return f"{project_endpoint}/toolsets/{toolbox_name}/mcp?api-version=v1" + + +def make_toolbox_header_provider(credential: TokenCredential) -> Callable[[dict[str, Any]], dict[str, str]]: + """Build a header_provider that injects a fresh Azure AI bearer token on every MCP request.""" + get_token = get_bearer_token_provider(credential, "https://ai.azure.com/.default") + + def provide(_kwargs: dict[str, Any]) -> dict[str, str]: + return { + "Authorization": f"Bearer {get_token()}", + } + + return provide + + @tool(description="Get the current working directory.", approval_mode="never_require") def get_cwd() -> str: """Get the current working directory.""" @@ -43,40 +73,43 @@ def read_file(file_path: str) -> str: async def main(): + credential = DefaultAzureCredential() + client = FoundryChatClient( project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - credential=DefaultAzureCredential(), + credential=credential, ) - # Load the named toolbox from the Foundry project. Omitting `version` - # resolves the toolbox's current default version at runtime. - toolbox = await client.get_toolbox(os.environ["TOOLBOX_NAME"]) + # Connect to the toolbox MCP endpoint and expose only the code_interpreter tool. 
# The toolbox deployed has two tools: (see agent.manifest.yaml) # - `code_interpreter` # - `web_search` - # We only need the `code_interpreter` tool for this sample - selected_tools = select_toolbox_tools( - toolbox, - include_names=["code_interpreter"], + # We only need the `code_interpreter` tool for this sample. + toolbox_tool = MCPStreamableHTTPTool( + name="foundry_toolbox", + description="Tools exposed by the configured Foundry toolbox", + url=_resolve_toolbox_endpoint(), + header_provider=make_toolbox_header_provider(credential), + load_prompts=False, + allowed_tools=["code_interpreter"], ) - agent = Agent( + async with Agent( client=client, instructions=( "You are a friendly assistant. Keep your answers brief. " "Make sure all mathematical calculations are performed using the code interpreter " "instead of mental arithmetic." ), - tools=[get_cwd, list_files, read_file] + selected_tools, + tools=[get_cwd, list_files, read_file, toolbox_tool], # History will be managed by the hosting infrastructure, thus there # is no need to store history by the service. Learn more at: # https://developers.openai.com/api/reference/resources/responses/methods/create default_options={"store": False}, - ) - - server = ResponsesHostServer(agent) - await server.run_async() + ) as agent: + server = ResponsesHostServer(agent) + await server.run_async() if __name__ == "__main__": diff --git a/python/tests/samples/hosting/test_toolbox_endpoint.py b/python/tests/samples/hosting/test_toolbox_endpoint.py new file mode 100644 index 0000000000..b43b889315 --- /dev/null +++ b/python/tests/samples/hosting/test_toolbox_endpoint.py @@ -0,0 +1,102 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unit tests for _resolve_toolbox_endpoint() in the foundry-hosted-agents response samples. + +Covers both 04_foundry_toolbox/main.py and 06_files/main.py which share the same +implementation of _resolve_toolbox_endpoint(). 
+""" + +import importlib +import importlib.util +import sys +from pathlib import Path +from unittest.mock import MagicMock + +import pytest + +# --------------------------------------------------------------------------- +# Stub out packages unavailable in the unit-test environment so that importing +# the sample modules does not fail. +# --------------------------------------------------------------------------- +_MISSING_MODULES = ( + "agent_framework_foundry_hosting", + "azure.ai.agentserver", + "azure.ai.agentserver.responses", +) +for _mod_name in _MISSING_MODULES: + sys.modules.setdefault(_mod_name, MagicMock()) + +# --------------------------------------------------------------------------- +# Load the two sample modules by file path to avoid needing them on sys.path. +# --------------------------------------------------------------------------- +_RESPONSES_DIR = ( + Path(__file__).parent.parent.parent.parent + / "samples" + / "04-hosting" + / "foundry-hosted-agents" + / "responses" +) + + +def _load_sample(subdir: str, module_alias: str): + spec = importlib.util.spec_from_file_location(module_alias, _RESPONSES_DIR / subdir / "main.py") + mod = importlib.util.module_from_spec(spec) # type: ignore[arg-type] + spec.loader.exec_module(mod) # type: ignore[union-attr] + return mod + + +_toolbox_mod = _load_sample("04_foundry_toolbox", "foundry_toolbox_main") +_files_mod = _load_sample("06_files", "files_main") + + +# --------------------------------------------------------------------------- +# Parameterise over both modules so the same test cases run for each. 
+# --------------------------------------------------------------------------- +@pytest.fixture(params=["04_foundry_toolbox", "06_files"]) +def resolve_endpoint(request): + """Return _resolve_toolbox_endpoint from the requested sample module.""" + mod = _toolbox_mod if request.param == "04_foundry_toolbox" else _files_mod + return mod._resolve_toolbox_endpoint + + +class TestResolveToolboxEndpoint: + def test_explicit_endpoint_returned_as_is(self, resolve_endpoint, monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("FOUNDRY_TOOLBOX_ENDPOINT", "https://example.com/mcp") + monkeypatch.delenv("FOUNDRY_PROJECT_ENDPOINT", raising=False) + monkeypatch.delenv("TOOLBOX_NAME", raising=False) + + assert resolve_endpoint() == "https://example.com/mcp" + + def test_empty_string_raises_value_error(self, resolve_endpoint, monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("FOUNDRY_TOOLBOX_ENDPOINT", "") + + with pytest.raises(ValueError, match="FOUNDRY_TOOLBOX_ENDPOINT is set but empty"): + resolve_endpoint() + + def test_fallback_constructs_url_from_project_vars(self, resolve_endpoint, monkeypatch: pytest.MonkeyPatch): + monkeypatch.delenv("FOUNDRY_TOOLBOX_ENDPOINT", raising=False) + monkeypatch.setenv("FOUNDRY_PROJECT_ENDPOINT", "https://project.azure.com/") + monkeypatch.setenv("TOOLBOX_NAME", "my-toolbox") + + result = resolve_endpoint() + + assert result == "https://project.azure.com/toolsets/my-toolbox/mcp?api-version=v1" + + def test_fallback_strips_trailing_slash_from_project_endpoint( + self, resolve_endpoint, monkeypatch: pytest.MonkeyPatch + ): + monkeypatch.delenv("FOUNDRY_TOOLBOX_ENDPOINT", raising=False) + monkeypatch.setenv("FOUNDRY_PROJECT_ENDPOINT", "https://project.azure.com///") + monkeypatch.setenv("TOOLBOX_NAME", "my-toolbox") + + result = resolve_endpoint() + + assert result == "https://project.azure.com/toolsets/my-toolbox/mcp?api-version=v1" + + def test_neither_variable_group_set_raises_key_error(self, resolve_endpoint, monkeypatch: 
pytest.MonkeyPatch): + monkeypatch.delenv("FOUNDRY_TOOLBOX_ENDPOINT", raising=False) + monkeypatch.delenv("FOUNDRY_PROJECT_ENDPOINT", raising=False) + monkeypatch.delenv("TOOLBOX_NAME", raising=False) + + with pytest.raises(KeyError): + resolve_endpoint()