"""Azure Container Apps agent with py-code-mode.
This example shows:
- Same agent pattern as the autogen example
- Deployed to Azure Container Apps with GPT-4o via Azure OpenAI
- Uses Redis for both skills and artifacts when REDIS_URL is set
- CLI tools (curl, jq) and MCP tools (fetch, time)
- Multi-tool skill (analyze_repo.py)
Run locally (with Azure OpenAI):
cd examples/azure-container-apps
AZURE_OPENAI_ENDPOINT=https://your-openai.openai.azure.com uv run python agent.py
Run with Redis backend:
# First, provision skills to Redis (one-time or deploy-time)
python -m py_code_mode.store bootstrap \
--source ../shared/skills \
--target redis://localhost:6379 \
--prefix agent-skills
# Then run the agent
REDIS_URL=redis://localhost:6379 uv run python agent.py
Deploy to Azure:
See deploy/README.md
"""

import asyncio
import os
from pathlib import Path

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from dotenv import load_dotenv

from py_code_mode import FileStorage, RedisStorage, Session
from py_code_mode.integrations.autogen import create_run_code_tool

# Load .env file for local development
load_dotenv()

# Directory paths
HERE = Path(__file__).parent
SHARED = HERE.parent / "shared"
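# SHARED resolves to examples/shared, the sibling tree holding the skills,
# tools, and artifacts this example loads in file mode (the same tree the
# bootstrap command in the module docstring sources from).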


def get_model_client():
    """Get Azure OpenAI model client."""
    from autogen_core.models import ModelFamily, ModelInfo
    from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
    from azure.identity import DefaultAzureCredential, get_bearer_token_provider

    # Use managed identity for Azure OpenAI auth
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(),
        "https://cognitiveservices.azure.com/.default",
    )
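    # DefaultAzureCredential walks a chain of credential sources (managed
    # identity inside Container Apps, environment variables, Azure CLI login
    # locally), so the same code authenticates in both environments.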

    deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT", "gpt-4o")

    # Model info for models not yet in autogen's registry
    model_info_map = {
        "gpt-41": ModelInfo(
            vision=True,
            function_calling=True,
            json_output=True,
            family=ModelFamily.GPT_4O,
            context_window=128000,
        ),
    }
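    # For deployments autogen already knows (e.g. gpt-4o), .get() returns None
    # below and the client falls back to its built-in model registry.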
    return AzureOpenAIChatCompletionClient(
        azure_deployment=deployment,
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_ad_token_provider=token_provider,
        model=deployment,
        api_version="2024-08-01-preview",
        model_info=model_info_map.get(deployment),
    )


def create_storage():
    """Create storage backend.

    When REDIS_URL is set:
    - Tools, skills, and artifacts are loaded from Redis

    Without REDIS_URL:
    - Tools, skills, and artifacts are loaded from the shared/ directory
    """
    redis_url = os.environ.get("REDIS_URL")
    if redis_url:
        # Redis mode: everything from Redis (provisioned separately)
        print(f"Using Redis backend: {redis_url}")
        return RedisStorage(url=redis_url, prefix="agent")
    else:
        # File mode: load directly from shared/
        print("Using file-based backend (set REDIS_URL for Redis mode)")
        return FileStorage(base_path=SHARED)


async def main():
    # Create storage and session
    storage = create_storage()
    async with Session(storage=storage) as session:
        # Create the run_code tool for AutoGen
        run_code = create_run_code_tool(session=session)
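        # run_code is the only AutoGen tool the agent gets: the model submits
        # Python source that executes inside the session, where the tools,
        # skills, and artifacts namespaces described in the prompt below live.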
system_prompt = """You are a helpful assistant that writes Python code to accomplish tasks.
You have access to `tools` and `skills` namespaces in your code environment.
WORKFLOW:
1. For any nontrivial task, FIRST search skills: skills.search("relevant keywords")
2. If a skill exists, use it: skills.invoke("name", arg=value)
3. If no skill matches, search tools: tools.search("keywords")
4. Script tools together: tools.name(arg=value)
DISCOVERY:
- skills.search("query") / skills.list() - find prebaked solutions
- tools.search("query") / tools.list() - find individual tools
Skills are reusable recipes that combine tools. Prefer them over scripting from scratch.
ARTIFACTS (persistent storage):
- artifacts.save("name", data, description="...") - Save data for later
- artifacts.load("name") - Load previously saved data
- artifacts.list() - List saved artifacts
Always wrap your code in ```python blocks."""

        # Get appropriate model client for environment
        model = get_model_client()

        # Create agent
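        # reflect_on_tool_use=True has the model turn raw tool output into a
        # final natural-language reply; max_tool_iterations caps how many
        # run_code rounds a single request may trigger.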
        agent = AssistantAgent(
            name="assistant",
            model_client=model,
            tools=[run_code],
            system_message=system_prompt,
            reflect_on_tool_use=True,
            max_tool_iterations=5,
        )

        # Check for command line argument
        import sys

        if len(sys.argv) > 1:
            query = " ".join(sys.argv[1:])
            await Console(agent.run_stream(task=query))
            return

        # Interactive loop
        print("Assistant ready. Type your request (or 'quit' to exit).\n")
        while True:
            try:
                user_input = input("You: ").strip()
            except (EOFError, KeyboardInterrupt):
                break
            if not user_input:
                continue
            if user_input.lower() in ("quit", "exit", "q"):
                break

            # Run agent
            result = await agent.run(task=user_input)

            # Print response
            print(f"\nAssistant: {result.messages[-1].content}\n")


if __name__ == "__main__":
    asyncio.run(main())