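"""Travel-planner agent demo evaluated with the Azure AI Evaluation SDK.

Runs a tool-calling travel agent against a sample trip-planning request, then
scores the resulting conversation with four agent evaluators: intent
resolution, response completeness, task adherence, and tool-call accuracy.
"""
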
import asyncio
import json
import logging
import os
from typing import Annotated
from agent_framework import Agent, tool
from agent_framework.openai import OpenAIChatClient
from azure.ai.evaluation import (
AzureOpenAIModelConfiguration,
IntentResolutionEvaluator,
OpenAIModelConfiguration,
ResponseCompletenessEvaluator,
TaskAdherenceEvaluator,
ToolCallAccuracyEvaluator,
)
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from dotenv import load_dotenv
from pydantic import Field
from rich import print
from rich.logging import RichHandler
from rich.panel import Panel
from rich.table import Table

handler = RichHandler(show_path=False, rich_tracebacks=True, show_level=False)
logging.basicConfig(level=logging.WARNING, handlers=[handler], force=True, format="%(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

load_dotenv(override=True)
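
# Select the chat client and evaluator model config based on API_HOST:
#   "azure"  -> Azure OpenAI via Entra ID (needs AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT)
#   "github" -> GitHub Models (needs GITHUB_TOKEN; GITHUB_MODEL is optional)
#   other    -> OpenAI directly (needs OPENAI_API_KEY; OPENAI_MODEL is optional)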
API_HOST = os.getenv("API_HOST", "github")
async_credential = None
if API_HOST == "azure":
async_credential = DefaultAzureCredential()
token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
client = OpenAIChatClient(
base_url=f"{os.environ['AZURE_OPENAI_ENDPOINT']}/openai/v1/",
api_key=token_provider,
model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
)
eval_model_config = AzureOpenAIModelConfiguration(
type="azure_openai",
azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
azure_deployment=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
)
elif API_HOST == "github":
client = OpenAIChatClient(
base_url="https://models.github.ai/inference",
api_key=os.environ["GITHUB_TOKEN"],
model_id=os.getenv("GITHUB_MODEL", "openai/gpt-4.1-mini"),
)
eval_model_config = OpenAIModelConfiguration(
type="openai",
base_url="https://models.github.ai/inference",
api_key=os.environ["GITHUB_TOKEN"],
model="openai/gpt-4.1-mini",
)
else:
client = OpenAIChatClient(
api_key=os.environ["OPENAI_API_KEY"], model_id=os.environ.get("OPENAI_MODEL", "gpt-4.1-mini")
)
eval_model_config = OpenAIModelConfiguration(
type="openai",
api_key=os.environ["OPENAI_API_KEY"],
model=os.environ.get("OPENAI_MODEL", "gpt-4.1-mini"),
)
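

# Demo tools for the travel agent. Each returns canned data so the sample runs
# without real weather, flight, or hotel services.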
@tool
def get_weather(
city: Annotated[str, Field(description="The city to get the weather forecast for.")],
date_range: Annotated[str, Field(description="Date range in format 'YYYY-MM-DD to YYYY-MM-DD'.")],
) -> dict:
"""Returns a weather forecast for a city over a date range, including temperature and conditions."""
logger.info(f"Getting weather for {city} ({date_range})")
return {
"city": city,
"date_range": date_range,
"forecast": [
{"date": "Day 1", "high_f": 65, "low_f": 52, "conditions": "Partly cloudy"},
{"date": "Day 2", "high_f": 70, "low_f": 55, "conditions": "Sunny"},
{"date": "Day 3", "high_f": 62, "low_f": 50, "conditions": "Light rain"},
],
    }


@tool
def search_flights(
origin: Annotated[str, Field(description="Departure city or airport code.")],
destination: Annotated[str, Field(description="Arrival city or airport code.")],
departure_date: Annotated[str, Field(description="Departure date in YYYY-MM-DD format.")],
return_date: Annotated[str, Field(description="Return date in YYYY-MM-DD format.")],
) -> list[dict]:
"""Searches for round-trip flights and returns options with prices."""
logger.info(f"Searching flights {origin} -> {destination} ({departure_date} to {return_date})")
return [
{"airline": "SkyAir", "price_usd": 850, "duration": "14h 20m", "stops": 1},
{"airline": "OceanWings", "price_usd": 720, "duration": "16h 45m", "stops": 2},
{"airline": "DirectJet", "price_usd": 1100, "duration": "12h 30m", "stops": 0},
    ]


@tool
def search_hotels(
city: Annotated[str, Field(description="The city to search hotels in.")],
checkin: Annotated[str, Field(description="Check-in date in YYYY-MM-DD format.")],
checkout: Annotated[str, Field(description="Check-out date in YYYY-MM-DD format.")],
max_price_per_night: Annotated[int, Field(description="Maximum price per night in USD.")],
) -> list[dict]:
"""Searches for hotels within a nightly budget and returns options with ratings."""
logger.info(f"Searching hotels in {city} ({checkin} to {checkout}, max ${max_price_per_night}/night)")
return [
{"name": "Budget Inn Tokyo", "price_per_night_usd": 80, "rating": 3.8, "neighborhood": "Asakusa"},
{"name": "Sakura Hotel", "price_per_night_usd": 120, "rating": 4.2, "neighborhood": "Shinjuku"},
{"name": "Tokyo Garden Suites", "price_per_night_usd": 200, "rating": 4.6, "neighborhood": "Ginza"},
    ]


@tool
def get_activities(
city: Annotated[str, Field(description="The city to find activities in.")],
interests: Annotated[list[str], Field(description="List of interests, e.g. ['hiking', 'museums'].")],
) -> list[dict]:
"""Returns activity suggestions for a city based on user interests."""
logger.info(f"Getting activities in {city} for interests: {interests}")
activities = []
if "hiking" in [i.lower() for i in interests]:
activities.extend(
[
{"name": "Mt. Takao Day Hike", "cost_usd": 15, "duration": "4-5 hours"},
{"name": "Kamakura Trail Walk", "cost_usd": 25, "duration": "3 hours"},
]
)
if "museums" in [i.lower() for i in interests]:
activities.extend(
[
{"name": "Tokyo National Museum", "cost_usd": 10, "duration": "2-3 hours"},
{"name": "teamLab Borderless", "cost_usd": 30, "duration": "2 hours"},
]
)
if not activities:
activities = [{"name": "City walking tour", "cost_usd": 0, "duration": "3 hours"}]
    return activities


@tool
def estimate_budget(
total_budget: Annotated[int, Field(description="Total trip budget in USD.")],
num_days: Annotated[int, Field(description="Number of days for the trip.")],
) -> dict:
"""Provides a recommended budget breakdown for flights, hotels, activities, and food."""
logger.info(f"Estimating budget: ${total_budget} for {num_days} days")
flight_pct = 0.40
hotel_pct = 0.30
activities_pct = 0.15
food_pct = 0.15
return {
"total_budget_usd": total_budget,
"flights_usd": int(total_budget * flight_pct),
"hotels_usd": int(total_budget * hotel_pct),
"hotels_per_night_usd": int(total_budget * hotel_pct / num_days),
"activities_usd": int(total_budget * activities_pct),
"food_usd": int(total_budget * food_pct),
"food_per_day_usd": int(total_budget * food_pct / num_days),
}
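

# The evaluators expect plain JSON-schema function specs, not framework tool objects.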
tools = [get_weather, search_flights, search_hotels, get_activities, estimate_budget]
tool_definitions = [t.to_json_schema_spec()["function"] for t in tools]

AGENT_INSTRUCTIONS = (
"You are a travel planning assistant. Help users plan trips by checking weather, "
"finding flights and hotels within budget, and suggesting activities based on their interests. "
"Always provide a complete itinerary with costs for each component and ensure the total stays "
"within the user's budget. Include weather information to help with packing."
)
agent = Agent(
client=client,
instructions=AGENT_INSTRUCTIONS,
tools=tools,
)


def convert_to_evaluator_messages(messages) -> list[dict]:
"""Convert agent framework ChatMessages to the Azure AI Evaluation message schema.
Remaps content types: function_call -> tool_call, function_result -> tool_result.
See: https://learn.microsoft.com/azure/ai-foundry/how-to/develop/agent-evaluate-sdk#agent-message-schema
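
    Example output items (illustrative; the values here are hypothetical):
        {"role": "assistant", "content": [{"type": "tool_call", "tool_call_id": "call_1",
         "name": "get_weather", "arguments": {"city": "Tokyo", ...}}]}
        {"role": "tool", "tool_call_id": "call_1",
         "content": [{"type": "tool_result", "tool_result": {...}}]}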
"""
evaluator_messages = []
for msg in messages:
role = str(msg.role.value) if hasattr(msg.role, "value") else str(msg.role)
content_items = []
for c in msg.contents:
if c.type == "function_call":
content_items.append(
{
"type": "tool_call",
"tool_call_id": c.call_id,
"name": c.name,
"arguments": json.loads(c.arguments) if isinstance(c.arguments, str) else c.arguments,
}
)
elif c.type == "function_result":
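                # A result tied to a call_id must be emitted as its own role="tool"
                # message; flush any content buffered for the current role first so
                # message ordering is preserved.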
if c.call_id:
if content_items:
evaluator_messages.append({"role": role, "content": content_items})
content_items = []
evaluator_messages.append(
{
"role": "tool",
"tool_call_id": c.call_id,
"content": [
{
"type": "tool_result",
"tool_result": c.result,
}
],
}
)
continue
content_items.append(
{
"type": "tool_result",
"tool_result": c.result,
}
)
elif c.type == "text" and c.text:
content_items.append({"type": "text", "text": c.text})
if content_items:
evaluator_messages.append({"role": role, "content": content_items})
    return evaluator_messages


def display_evaluation_results(results: dict[str, dict]) -> None:
"""Display evaluation results in a formatted table using rich."""
table = Table(title="Agent Evaluation Results", show_lines=True)
table.add_column("Evaluator", style="cyan", width=28)
table.add_column("Score", style="bold", justify="center", width=8)
table.add_column("Result", justify="center", width=8)
table.add_column("Reason", style="dim", width=70)
for evaluator_name, result in results.items():
score = str(result.get("score", "N/A"))
pass_fail = result.get("result", "N/A")
reason = result.get("reason", "N/A")
if pass_fail == "pass":
result_str = "[green]pass[/green]"
elif pass_fail == "fail":
result_str = "[red]fail[/red]"
else:
result_str = str(pass_fail)
table.add_row(evaluator_name, score, result_str, reason)
print()
    print(table)


async def main():
query = "Plan a 3-day trip from New York (JFK) to Tokyo, departing March 15 and returning March 18, 2026. My budget is $2000 total. I like hiking and museums. Please search for flights, hotels under $150/night, check the weather, and suggest activities."
logger.info("Running travel planner agent...")
response = await agent.run(query)
print(Panel(response.text, title="Agent Response", border_style="blue"))
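
    # Build the evaluator inputs: the original query (system + user message) and
    # the agent's full message trace converted to the evaluation schema.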
eval_query = [
{"role": "system", "content": AGENT_INSTRUCTIONS},
{"role": "user", "content": [{"type": "text", "text": query}]},
]
eval_response = convert_to_evaluator_messages(response.messages)
ground_truth = (
"A complete 3-day Tokyo trip itinerary from New York including: round-trip flight options with prices, "
"hotel recommendations within nightly budget, hiking activities (e.g. Mt. Takao), museum visits "
"(e.g. Tokyo National Museum, teamLab Borderless), weather forecast for the travel dates, "
"a full cost breakdown showing total under $2000, and packing suggestions based on weather."
)
logger.info("Running agent evaluators...")
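
    # All four evaluators share one model config; each returns its score under
    # "<key>" plus "<key>_result" (pass/fail) and "<key>_reason".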
evaluator_kwargs = {"model_config": eval_model_config, "is_reasoning_model": True}
result_keys = {
"IntentResolution": "intent_resolution",
"ResponseCompleteness": "response_completeness",
"TaskAdherence": "task_adherence",
"ToolCallAccuracy": "tool_call_accuracy",
}
intent_evaluator = IntentResolutionEvaluator(**evaluator_kwargs)
completeness_evaluator = ResponseCompletenessEvaluator(**evaluator_kwargs)
adherence_evaluator = TaskAdherenceEvaluator(**evaluator_kwargs)
tool_accuracy_evaluator = ToolCallAccuracyEvaluator(**evaluator_kwargs)
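
    # ResponseCompleteness compares the agent's final text against a ground truth;
    # the other three evaluators inspect the full query/response/tool-call trace.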
intent_result = intent_evaluator(query=eval_query, response=eval_response, tool_definitions=tool_definitions)
completeness_result = completeness_evaluator(response=response.text, ground_truth=ground_truth)
adherence_result = adherence_evaluator(
query=eval_query, response=eval_response, tool_definitions=tool_definitions
)
tool_accuracy_result = tool_accuracy_evaluator(
query=eval_query, response=eval_response, tool_definitions=tool_definitions
)
evaluation_results = {}
for name, result in [
("IntentResolution", intent_result),
("ResponseCompleteness", completeness_result),
("TaskAdherence", adherence_result),
("ToolCallAccuracy", tool_accuracy_result),
]:
key = result_keys[name]
evaluation_results[name] = {
"score": result.get(key, "N/A"),
"result": result.get(f"{key}_result", "N/A"),
"reason": result.get(f"{key}_reason", result.get("error_message", "N/A")),
}
display_evaluation_results(evaluation_results)
if async_credential:
        await async_credential.close()


if __name__ == "__main__":
asyncio.run(main())