-
Notifications
You must be signed in to change notification settings - Fork 105
Expand file tree
/
Copy pathworkflow_agents.py
More file actions
94 lines (77 loc) · 3.09 KB
/
workflow_agents.py
File metadata and controls
94 lines (77 loc) · 3.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
"""Writer → Reviewer workflow using AI agents as executors.

Demonstrates: Agent as a WorkflowBuilder executor, direct edges,
and collecting output with workflow.run() + get_outputs().

This example uses the same WorkflowBuilder + add_edge pattern as
workflow_rag_ingest.py, but with AI agents instead of Python functions.

Run:
    uv run examples/workflow_agents.py
    uv run examples/workflow_agents.py --devui   (opens DevUI at http://localhost:8092)
"""
import asyncio
import os
import sys

from agent_framework import Agent, WorkflowBuilder
from agent_framework.openai import OpenAIChatClient
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from dotenv import load_dotenv

load_dotenv(override=True)

# Which backend to talk to; defaults to GitHub Models when unset.
API_HOST = os.getenv("API_HOST", "github")

# Build the chat client for the selected host. Only the Azure path opens an
# async credential, which main() is responsible for closing.
async_credential = None
if API_HOST == "azure":
    # Entra ID auth: a bearer-token provider is handed to the client in place
    # of a static API key.
    async_credential = DefaultAzureCredential()
    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
    client = OpenAIChatClient(
        base_url=f"{os.environ['AZURE_OPENAI_ENDPOINT']}/openai/v1/",
        api_key=token_provider,
        model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
    )
elif API_HOST == "github":
    client = OpenAIChatClient(
        base_url="https://models.github.ai/inference",
        api_key=os.environ["GITHUB_TOKEN"],
        model_id=os.getenv("GITHUB_MODEL", "openai/gpt-4.1-mini"),
    )
else:
    # Plain OpenAI: endpoint comes from the SDK default.
    client = OpenAIChatClient(
        api_key=os.environ["OPENAI_API_KEY"], model_id=os.environ.get("OPENAI_MODEL", "gpt-4o-mini")
    )
# The AI agents below are passed straight to WorkflowBuilder as executors —
# the same role the Python Executor subclasses play in workflow_rag_ingest.py.
writer = Agent(
    client=client,
    name="Writer",
    instructions=(
        "You are a concise content writer. "
        "Write a clear, engaging short article (2-3 paragraphs) based on the user's topic. "
        "Focus on accuracy and readability."
    ),
)

reviewer = Agent(
    client=client,
    name="Reviewer",
    instructions=(
        "You are a thoughtful content reviewer. "
        "Read the writer's draft and provide specific, constructive feedback. "
        "Comment on clarity, accuracy, and structure. Keep your review concise."
    ),
)

# Wire the graph Writer → Reviewer. The add_edge pattern is identical whether
# the nodes are AI agents or plain Python executors.
_builder = WorkflowBuilder(start_executor=writer)
_builder = _builder.add_edge(writer, reviewer)
workflow = _builder.build()
async def main():
    """Run the Writer → Reviewer workflow once and print each collected output.

    Side effects: prints the prompt and every workflow output to stdout, then
    closes the module-level Azure credential (when one was created) so its
    underlying aiohttp session is released.
    """
    prompt = 'Write a 2-sentence LinkedIn post: "Why your AI pilot looks good but fails in production."'
    print(f"Prompt: {prompt}\n")

    # Fix: close the credential even if workflow.run raises — the original
    # skipped close() on error, leaking the credential's HTTP session.
    try:
        events = await workflow.run(prompt)
        for output in events.get_outputs():
            print("===== Output =====")
            print(output)
    finally:
        if async_credential:
            await async_credential.close()
if __name__ == "__main__":
    run_devui = "--devui" in sys.argv
    if run_devui:
        # Imported lazily: DevUI dependencies are only needed for this mode.
        from agent_framework.devui import serve

        serve(entities=[workflow], port=8092, auto_open=True)
    else:
        asyncio.run(main())