Skip to content

Commit 8f007bc

Browse files
scale-ballenclaude
and committed
fix: use built SDK wheel for tutorial tests and fix tutorial agent bugs
- run_agent_test.sh: pass built wheel to pytest so tests use local SDK instead of PyPI version (fixes send_message NDJSON pydantic errors) - agents.py: rewrite send_message (sync+async) to use streaming internally and handle NDJSON responses from FastACP server - agent_rpc_response.py: make SendMessageStreamResponse.result Optional to handle null result in streaming done events - hooks.py: fix execute_activity_method -> execute_activity for function-based Temporal activities (fixes tool_request not appearing) - Tutorial handlers: add missing return after no-API-key sorry messages to prevent fall-through to LLM calls (010_multiturn, 020_streaming, 030_tracing, 040_other_sdks, 010_agent_chat, 050_agent_chat_guardrails) - 010_agent_chat/workflow.py: fix gpt-5 -> gpt-4o, remove invalid reasoning params (only valid for o-series models) - 080_human_in_the_loop/workflow.py: guard span.output access against None - test_agent.py (010_multiturn): add sleep for async state init race Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent 0bcda2f commit 8f007bc

File tree

12 files changed

+160
-75
lines changed

12 files changed

+160
-75
lines changed

examples/tutorials/10_async/00_base/010_multiturn/project/acp.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,7 @@ async def handle_event_send(params: SendEventParams):
9494
content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this by either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
9595
),
9696
)
97+
return
9798

9899
#########################################################
99100
# 5. (👋) Retrieve the task state.

examples/tutorials/10_async/00_base/010_multiturn/tests/test_agent.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,7 @@ async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str):
145145
task = task_response.result
146146
assert task is not None
147147

148+
await asyncio.sleep(1) # wait for state to be initialized
148149
# Check initial state
149150
states = await client.states.list(agent_id=agent_id, task_id=task.id)
150151
assert len(states) == 1

examples/tutorials/10_async/00_base/020_streaming/project/acp.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ async def handle_event_send(params: SendEventParams):
7676
content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this by either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
7777
),
7878
)
79+
return
7980

8081
#########################################################
8182
# 5. Retrieve the task state.

examples/tutorials/10_async/00_base/030_tracing/project/acp.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,7 @@ async def handle_event_send(params: SendEventParams):
117117
),
118118
parent_span_id=span.id if span else None,
119119
)
120+
return
120121

121122
#########################################################
122123
# 7. Call an LLM to respond to the user's message

examples/tutorials/10_async/00_base/040_other_sdks/project/acp.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ async def handle_event_send(params: SendEventParams):
111111
),
112112
parent_span_id=span.id if span else None,
113113
)
114+
return
114115

115116
#########################################################
116117
# (👋) Call an LLM to respond to the user's message using custom streaming

examples/tutorials/10_async/10_temporal/010_agent_chat/project/workflow.py

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,7 @@ async def on_task_event_send(self, params: SendEventParams) -> None:
216216
),
217217
parent_span_id=span.id if span else None,
218218
)
219+
return
219220

220221
# Call an LLM to respond to the user's message
221222
# When send_as_agent_task_message=True, returns a TaskMessage
@@ -234,13 +235,7 @@ async def on_task_event_send(self, params: SendEventParams) -> None:
234235
"to provide accurate and well-reasoned responses."
235236
),
236237
parent_span_id=span.id if span else None,
237-
model="gpt-5",
238-
model_settings=ModelSettings(
239-
# Include reasoning items in the response (IDs, summaries)
240-
# response_include=["reasoning.encrypted_content"],
241-
# Ask the model to include a short reasoning summary
242-
reasoning=Reasoning(effort="medium", summary="detailed"),
243-
),
238+
model="gpt-4o",
244239
# tools=[CALCULATOR_TOOL],
245240
)
246241
if self._state:

examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/workflow.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -408,6 +408,7 @@ async def on_task_event_send(self, params: SendEventParams) -> None:
408408
),
409409
parent_span_id=span.id if span else None,
410410
)
411+
return
411412

412413
# Call an LLM to respond to the user's message
413414
# When send_as_agent_task_message=True, returns a TaskMessage

examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/workflow.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -204,7 +204,8 @@ async def on_task_event_send(self, params: SendEventParams) -> None:
204204
self._state.input_list.append(msg)
205205

206206
# Set span output for tracing - include full state
207-
span.output = self._state.model_dump()
207+
if span:
208+
span.output = self._state.model_dump()
208209

209210
@workflow.run
210211
async def on_task_create(self, params: CreateTaskParams) -> str:

examples/tutorials/run_agent_test.sh

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -259,6 +259,19 @@ run_test() {
259259
cd "$tutorial_path" || return 1
260260

261261

262+
# Determine pytest command - use built wheel if available (same wheel used to start agent)
263+
local pytest_cmd="uv run pytest"
264+
if [ "$BUILD_CLI" = true ]; then
265+
local wheel_file=$(ls /home/runner/work/*/*/dist/agentex_sdk-*.whl 2>/dev/null | head -n1)
266+
if [[ -z "$wheel_file" ]]; then
267+
# Fallback for local development
268+
wheel_file=$(ls "${SCRIPT_DIR}/../../dist/agentex_sdk-*.whl" 2>/dev/null | head -n1)
269+
fi
270+
if [[ -n "$wheel_file" ]]; then
271+
pytest_cmd="uv run --with $wheel_file pytest"
272+
fi
273+
fi
274+
262275
# Run the tests with retry mechanism
263276
local max_retries=5
264277
local retry_count=0
@@ -270,7 +283,7 @@ run_test() {
270283
fi
271284

272285
# Stream pytest output directly in real-time
273-
uv run pytest tests/test_agent.py -v -s
286+
$pytest_cmd tests/test_agent.py -v -s
274287
exit_code=$?
275288

276289
if [ $exit_code -eq 0 ]; then

src/agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ async def on_tool_start(self, context: RunContextWrapper, agent: Agent, tool: To
131131
logger.warning(f"Failed to parse tool arguments: {tool_context.tool_arguments}")
132132
tool_arguments = {}
133133

134-
await workflow.execute_activity_method(
134+
await workflow.execute_activity(
135135
stream_lifecycle_content,
136136
args=[
137137
self.task_id,
@@ -167,7 +167,7 @@ async def on_tool_end(
167167
else f"call_{id(tool)}"
168168
)
169169

170-
await workflow.execute_activity_method(
170+
await workflow.execute_activity(
171171
stream_lifecycle_content,
172172
args=[
173173
self.task_id,
@@ -195,7 +195,7 @@ async def on_handoff(
195195
from_agent: The agent transferring control
196196
to_agent: The agent receiving control
197197
"""
198-
await workflow.execute_activity_method(
198+
await workflow.execute_activity(
199199
stream_lifecycle_content,
200200
args=[
201201
self.task_id,

0 commit comments

Comments (0)