Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions python/AGENTS.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,13 @@ When making changes to a package, check if the following need updates:
- The package's `AGENTS.md` file (adding/removing/renaming public APIs, architecture changes, import path changes)
- The agent skills in `.github/skills/` if conventions, commands, or workflows change

## Pull Request Description Guidance

When preparing a PR description:
- Follow the repository PR template at `.github/pull_request_template.md` and keep its structure/headings.
- Describe the net change relative to `main` (this is implied; do not call it out explicitly as "vs main").
- Do not add ad-hoc validation sections (for example, "Validation" or "Tests run"); CI/CD and the template checklist cover validation status.

## Quick Reference

Run `uv run poe` from the `python/` directory to see available commands. See [DEV_SETUP.md](DEV_SETUP.md) for detailed usage.
Expand Down
31 changes: 16 additions & 15 deletions python/packages/azure-ai/agent_framework_azure_ai/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,8 @@
from azure.ai.projects.aio import AIProjectClient
from azure.ai.projects.models import (
ApproximateLocation,
CodeInterpreterContainerAuto,
AutoCodeInterpreterToolParam,
CodeInterpreterTool,
FoundryFeaturesOptInKeys,
ImageGenTool,
MCPTool,
PromptAgentDefinition,
Expand All @@ -66,7 +65,6 @@
else:
from typing_extensions import Self, TypedDict # type: ignore # pragma: no cover


logger = logging.getLogger("agent_framework.azure")


Expand All @@ -79,9 +77,6 @@ class AzureAIProjectAgentOptions(OpenAIResponsesOptions, total=False):
reasoning: Reasoning # type: ignore[misc]
"""Configuration for enabling reasoning capabilities (requires azure.ai.projects.models.Reasoning)."""

foundry_features: FoundryFeaturesOptInKeys | str
"""Optional Foundry preview feature opt-in for agent version creation."""


AzureAIClientOptionsT = TypeVar(
"AzureAIClientOptionsT",
Expand Down Expand Up @@ -123,6 +118,7 @@ def __init__(
model_deployment_name: str | None = None,
credential: AzureCredentialTypes | None = None,
use_latest_version: bool | None = None,
allow_preview: bool | None = None,
env_file_path: str | None = None,
env_file_encoding: str | None = None,
**kwargs: Any,
Expand All @@ -148,6 +144,7 @@ def __init__(
AsyncTokenCredential, or a callable token provider.
use_latest_version: Boolean flag that indicates whether to use latest agent version
if it exists in the service.
allow_preview: Enables preview opt-in on internally-created ``AIProjectClient``.
env_file_path: Path to environment file for loading settings.
env_file_encoding: Encoding of the environment file.
kwargs: Additional keyword arguments passed to the parent class.
Expand Down Expand Up @@ -208,11 +205,14 @@ class MyOptions(ChatOptions, total=False):
# Use provided credential
if not credential:
raise ValueError("Azure credential is required when project_client is not provided.")
project_client = AIProjectClient(
endpoint=resolved_endpoint,
credential=credential, # type: ignore[arg-type]
user_agent=AGENT_FRAMEWORK_USER_AGENT,
)
project_client_kwargs: dict[str, Any] = {
"endpoint": resolved_endpoint,
"credential": credential, # type: ignore[arg-type]
"user_agent": AGENT_FRAMEWORK_USER_AGENT,
}
if allow_preview is not None:
project_client_kwargs["allow_preview"] = allow_preview
project_client = AIProjectClient(**project_client_kwargs)
should_close_client = True

# Initialize parent
Expand Down Expand Up @@ -413,8 +413,6 @@ async def _get_agent_reference_or_create(
"definition": PromptAgentDefinition(**args),
"description": self.agent_description,
}
if foundry_features := run_options.get("foundry_features"):
create_version_kwargs["foundry_features"] = foundry_features

created_agent = await self.project_client.agents.create_version(**create_version_kwargs)

Expand Down Expand Up @@ -513,7 +511,7 @@ def _remove_agent_level_run_options(
"temperature": ("temperature",),
"top_p": ("top_p",),
"reasoning": ("reasoning",),
"foundry_features": ("foundry_features",),
"allow_preview": ("allow_preview",),
}

for run_keys in agent_level_option_to_run_keys.values():
Expand Down Expand Up @@ -939,7 +937,7 @@ def get_code_interpreter_tool( # type: ignore[override]
if file_ids is None and isinstance(container, dict):
file_ids = container.get("file_ids")
resolved = resolve_file_ids(file_ids)
tool_container = CodeInterpreterContainerAuto(file_ids=resolved)
tool_container = AutoCodeInterpreterToolParam(file_ids=resolved)
return CodeInterpreterTool(container=tool_container, **kwargs)

@staticmethod
Expand Down Expand Up @@ -1244,6 +1242,7 @@ def __init__(
model_deployment_name: str | None = None,
credential: AzureCredentialTypes | None = None,
use_latest_version: bool | None = None,
allow_preview: bool | None = None,
middleware: Sequence[ChatAndFunctionMiddlewareTypes] | None = None,
function_invocation_configuration: FunctionInvocationConfiguration | None = None,
env_file_path: str | None = None,
Expand All @@ -1268,6 +1267,7 @@ def __init__(
or AsyncTokenCredential.
use_latest_version: Boolean flag that indicates whether to use latest agent version
if it exists in the service.
allow_preview: Enables preview opt-in on internally-created ``AIProjectClient``.
middleware: Optional sequence of chat middlewares to include.
function_invocation_configuration: Optional function invocation configuration.
env_file_path: Path to environment file for loading settings.
Expand Down Expand Up @@ -1318,6 +1318,7 @@ class MyOptions(ChatOptions, total=False):
model_deployment_name=model_deployment_name,
credential=credential,
use_latest_version=use_latest_version,
allow_preview=allow_preview,
middleware=middleware,
function_invocation_configuration=function_invocation_configuration,
env_file_path=env_file_path,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
from agent_framework._settings import load_settings
from agent_framework.azure._entra_id_authentication import AzureCredentialTypes
from azure.ai.projects.aio import AIProjectClient
from openai.types.responses import ResponseInputItemParam

from ._shared import AzureAISettings

Expand Down Expand Up @@ -58,6 +59,7 @@ def __init__(
project_client: AIProjectClient | None = None,
project_endpoint: str | None = None,
credential: AzureCredentialTypes | None = None,
allow_preview: bool | None = None,
memory_store_name: str,
scope: str | None = None,
context_prompt: str | None = None,
Expand All @@ -74,6 +76,7 @@ def __init__(
credential: Azure credential for authentication. Accepts a TokenCredential,
AsyncTokenCredential, or a callable token provider.
Required when project_client is not provided.
allow_preview: Enables preview opt-in on internally-created ``AIProjectClient``.
memory_store_name: The name of the memory store to use.
scope: The namespace that logically groups and isolates memories (e.g., user ID).
If None, `session_id` will be used.
Expand All @@ -100,11 +103,14 @@ def __init__(
)
if not credential:
raise ValueError("Azure credential is required when project_client is not provided.")
project_client = AIProjectClient(
endpoint=resolved_endpoint,
credential=credential, # type: ignore[arg-type]
user_agent=AGENT_FRAMEWORK_USER_AGENT,
)
project_client_kwargs: dict[str, Any] = {
"endpoint": resolved_endpoint,
"credential": credential, # type: ignore[arg-type]
"user_agent": AGENT_FRAMEWORK_USER_AGENT,
}
if allow_preview is not None:
project_client_kwargs["allow_preview"] = allow_preview
project_client = AIProjectClient(**project_client_kwargs)

if not memory_store_name:
raise ValueError("memory_store_name is required")
Expand Down Expand Up @@ -169,8 +175,8 @@ async def before_run(
return

# Convert input messages to memory search item format
items = [
{"type": "text", "text": msg.text}
items: list[ResponseInputItemParam] = [
{"type": "message", "role": "user", "content": msg.text}
for msg in context.input_messages
if msg and msg.text and msg.text.strip()
]
Expand Down Expand Up @@ -224,7 +230,7 @@ async def after_run(
messages_to_store.extend(context.response.messages)

# Filter and convert messages to memory update item format
items: list[dict[str, str]] = []
items: list[ResponseInputItemParam] = []
for message in messages_to_store:
if message.role in {"user", "assistant", "system"} and message.text and message.text.strip():
if message.role == "user":
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ def __init__(
project_endpoint: str | None = None,
model: str | None = None,
credential: AzureCredentialTypes | None = None,
allow_preview: bool | None = None,
env_file_path: str | None = None,
env_file_encoding: str | None = None,
) -> None:
Expand All @@ -117,6 +118,7 @@ def __init__(
credential: Azure credential for authentication. Accepts a TokenCredential,
AsyncTokenCredential, or a callable token provider.
Required when project_client is not provided.
allow_preview: Enables preview opt-in on internally-created ``AIProjectClient``.
env_file_path: Path to environment file for loading settings.
env_file_encoding: Encoding of the environment file.

Expand Down Expand Up @@ -146,11 +148,14 @@ def __init__(
if not credential:
raise ValueError("Azure credential is required when project_client is not provided.")

project_client = AIProjectClient(
endpoint=resolved_endpoint,
credential=credential, # type: ignore[arg-type]
user_agent=AGENT_FRAMEWORK_USER_AGENT,
)
project_client_kwargs: dict[str, Any] = {
"endpoint": resolved_endpoint,
"credential": credential, # type: ignore[arg-type]
"user_agent": AGENT_FRAMEWORK_USER_AGENT,
}
if allow_preview is not None:
project_client_kwargs["allow_preview"] = allow_preview
project_client = AIProjectClient(**project_client_kwargs)
self._should_close_client = True

self._project_client = project_client
Expand Down Expand Up @@ -199,7 +204,6 @@ async def create_agent(
response_format = opts.get("response_format")
rai_config = opts.get("rai_config")
reasoning = opts.get("reasoning")
foundry_features = opts.get("foundry_features")

args: dict[str, Any] = {"model": resolved_model}

Expand Down Expand Up @@ -246,8 +250,6 @@ async def create_agent(
"definition": PromptAgentDefinition(**args),
"description": description,
}
if foundry_features:
create_version_kwargs["foundry_features"] = foundry_features

created_agent = await self._project_client.agents.create_version(**create_version_kwargs)

Expand Down
14 changes: 5 additions & 9 deletions python/packages/azure-ai/agent_framework_azure_ai/_shared.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@
from azure.ai.projects.models import (
CodeInterpreterTool,
MCPTool,
TextResponseFormatConfigurationResponseFormatJsonObject,
TextResponseFormatConfigurationResponseFormatText,
TextResponseFormatJsonObject,
TextResponseFormatJsonSchema,
TextResponseFormatText,
Tool,
WebSearchPreviewTool,
)
Expand Down Expand Up @@ -479,11 +479,7 @@ def _prepare_mcp_tool_dict_for_azure_ai(tool_dict: dict[str, Any]) -> MCPTool:

def create_text_format_config(
response_format: type[BaseModel] | Mapping[str, Any],
) -> (
TextResponseFormatJsonSchema
| TextResponseFormatConfigurationResponseFormatJsonObject
| TextResponseFormatConfigurationResponseFormatText
):
) -> TextResponseFormatJsonSchema | TextResponseFormatJsonObject | TextResponseFormatText:
"""Convert response_format into Azure text format configuration."""
if isinstance(response_format, type) and issubclass(response_format, BaseModel):
schema = response_format.model_json_schema()
Expand Down Expand Up @@ -513,9 +509,9 @@ def create_text_format_config(
config_kwargs["description"] = format_config["description"]
return TextResponseFormatJsonSchema(**config_kwargs)
if format_type == "json_object":
return TextResponseFormatConfigurationResponseFormatJsonObject()
return TextResponseFormatJsonObject()
if format_type == "text":
return TextResponseFormatConfigurationResponseFormatText()
return TextResponseFormatText()

raise IntegrationInvalidRequestException("response_format must be a Pydantic model or mapping.")

Expand Down
4 changes: 2 additions & 2 deletions python/packages/azure-ai/tests/test_azure_ai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@
from azure.ai.projects.aio import AIProjectClient
from azure.ai.projects.models import (
ApproximateLocation,
CodeInterpreterContainerAuto,
AutoCodeInterpreterToolParam,
CodeInterpreterTool,
FileSearchTool,
ImageGenTool,
Expand Down Expand Up @@ -1296,7 +1296,7 @@ def test_from_azure_ai_tools_mcp() -> None:

def test_from_azure_ai_tools_code_interpreter() -> None:
"""Test from_azure_ai_tools with Code Interpreter tool."""
ci_tool = CodeInterpreterTool(container=CodeInterpreterContainerAuto(file_ids=["file-1"]))
ci_tool = CodeInterpreterTool(container=AutoCodeInterpreterToolParam(file_ids=["file-1"]))
parsed_tools = from_azure_ai_tools([ci_tool])
assert len(parsed_tools) == 1
assert parsed_tools[0]["type"] == "code_interpreter"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,13 +86,15 @@ def test_init_with_project_endpoint_and_credential(
provider = FoundryMemoryProvider(
project_endpoint="https://test.project.endpoint",
credential=mock_credential, # type: ignore[arg-type]
allow_preview=True,
memory_store_name="test_store",
scope="user_123",
)
assert provider.project_client is mock_project_client
mock_ai_project_client.assert_called_once_with(
endpoint="https://test.project.endpoint",
credential=mock_credential,
allow_preview=True,
user_agent=AGENT_FRAMEWORK_USER_AGENT,
)

Expand Down
4 changes: 1 addition & 3 deletions python/packages/core/agent_framework/_skills.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,9 +112,7 @@ def __init__(
self._accepts_kwargs: bool = False
if function is not None:
sig = inspect.signature(function)
self._accepts_kwargs = any(
p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
)
self._accepts_kwargs = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values())


class Skill:
Expand Down
32 changes: 13 additions & 19 deletions python/packages/core/agent_framework/azure/_responses_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ def __init__(
async_client: AsyncOpenAI | None = None,
project_client: Any | None = None,
project_endpoint: str | None = None,
allow_preview: bool | None = None,
env_file_path: str | None = None,
env_file_encoding: str | None = None,
instruction_role: str | None = None,
Expand Down Expand Up @@ -120,6 +121,7 @@ def __init__(
project_endpoint: The Azure AI Foundry project endpoint URL.
When provided with ``credential``, an ``AIProjectClient`` will be created
and used to obtain the OpenAI client. Requires the ``azure-ai-projects`` package.
allow_preview: Enables preview opt-in on internally-created ``AIProjectClient``.
env_file_path: Use the environment settings file as a fallback to using env vars.
env_file_encoding: The encoding of the environment settings file, defaults to 'utf-8'.
instruction_role: The role to use for 'instruction' messages, for example, summarization
Expand Down Expand Up @@ -189,6 +191,7 @@ class MyOptions(AzureOpenAIResponsesOptions, total=False):
project_client=project_client,
project_endpoint=project_endpoint,
credential=credential,
allow_preview=allow_preview,
)

azure_openai_settings = load_settings(
Expand Down Expand Up @@ -246,33 +249,24 @@ def _create_client_from_project(
project_client: AIProjectClient | None,
project_endpoint: str | None,
credential: AzureCredentialTypes | AzureTokenProvider | None,
allow_preview: bool | None = None,
) -> AsyncOpenAI:
"""Create an AsyncOpenAI client from an Azure AI Foundry project.

Args:
project_client: An existing AIProjectClient to use.
project_endpoint: The Azure AI Foundry project endpoint URL.
credential: Azure credential for authentication.

Returns:
An AsyncAzureOpenAI client obtained from the project client.

Raises:
ValueError: If required parameters are missing or
the azure-ai-projects package is not installed.
"""
"""Create an AsyncOpenAI client from an Azure AI Foundry project."""
if project_client is not None:
return project_client.get_openai_client()

if not project_endpoint:
raise ValueError("Azure AI project endpoint is required when project_client is not provided.")
if not credential:
raise ValueError("Azure credential is required when using project_endpoint without a project_client.")
project_client = AIProjectClient(
endpoint=project_endpoint,
credential=credential, # type: ignore[arg-type]
user_agent=AGENT_FRAMEWORK_USER_AGENT,
)
project_client_kwargs: dict[str, Any] = {
"endpoint": project_endpoint,
"credential": credential, # type: ignore[arg-type]
"user_agent": AGENT_FRAMEWORK_USER_AGENT,
}
if allow_preview is not None:
project_client_kwargs["allow_preview"] = allow_preview
project_client = AIProjectClient(**project_client_kwargs)
return project_client.get_openai_client()

@override
Expand Down
Loading