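"""Tests for the Run domain object: equality, formatted output, run URLs, and completion fetching."""
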
from datetime import datetime, timezone
from unittest.mock import Mock, patch

import pytest
from pydantic import BaseModel

from workflowai.core.domain.completion import Completion, CompletionUsage, Message
from workflowai.core.domain.run import (
    Run,
    _AgentBase,  # pyright: ignore [reportPrivateUsage]
)
from workflowai.core.domain.tool_call import ToolCallRequest
from workflowai.core.domain.version import Version
from workflowai.core.domain.version_properties import VersionProperties


class _TestOutput(BaseModel):
    message: str


class _TestOutputWithDatetime(BaseModel):
    timestamp: datetime
    message: str


@pytest.fixture
def mock_agent() -> Mock:
    mock = Mock(spec=_AgentBase)
    return mock


@pytest.fixture
def run1(mock_agent: Mock) -> Run[_TestOutput]:
    run = Run[_TestOutput](
        id="run-id",
        agent_id="agent-id",
        schema_id=1,
        output=_TestOutput(message="test output"),
        duration_seconds=1.0,
        cost_usd=0.1,
        version=Version(properties=VersionProperties()),
        metadata={"test": "data"},
        tool_calls=[],
        tool_call_requests=[],
    )
    run._agent = mock_agent  # pyright: ignore [reportPrivateUsage]
    return run


@pytest.fixture
def run2(run1: Run[_TestOutput]) -> Run[_TestOutput]:
    return run1.model_copy(deep=True)


class TestRunEquality:
    def test_identical(self, run1: Run[_TestOutput], run2: Run[_TestOutput]):
        assert run1 == run2

    def test_different_output(self, run1: Run[_TestOutput], run2: Run[_TestOutput]):
        run2.output.message = "different output"
        assert run1 != run2

    def test_different_agents(self, run1: Run[_TestOutput], run2: Run[_TestOutput]):
        run2._agent = Mock()  # pyright: ignore [reportPrivateUsage]
        assert run1._agent != run2._agent, "sanity check"  # pyright: ignore [reportPrivateUsage]
        assert run1 == run2


class TestRunFormatOutput:
    @pytest.fixture(autouse=True)
    def mock_app_url(self):
        with patch("workflowai.env.WORKFLOWAI_APP_URL", "https://workflowai.hello") as mock:
            yield mock

    # Test that format_output correctly formats:
    # 1. The output as a JSON object
    # 2. The cost with $ prefix and correct precision
    # 3. The latency with 2 decimal places and 's' suffix
    # 4. The run URL
    def test_format_output_full(self):
        run = Run[_TestOutput](
            id="run-id",
            agent_id="agent-id",
            schema_id=1,
            output=_TestOutput(message="hello"),
            duration_seconds=1.23,
            cost_usd=0.001,
        )

        expected = """\nOutput:
==================================================
{
  "message": "hello"
}
==================================================
Cost: $ 0.00100
Latency: 1.23s
URL: https://workflowai.hello/_/agents/agent-id/runs/run-id"""

        assert run.format_output() == expected

    def test_format_output_very_low_cost(self):
        run = Run[_TestOutput](
            id="run-id",
            agent_id="agent-id",
            schema_id=1,
            output=_TestOutput(message="hello"),
            duration_seconds=1.23,
            cost_usd=4.97625e-05,
        )

        expected = """\nOutput:
==================================================
{
  "message": "hello"
}
==================================================
Cost: $ 0.00005
Latency: 1.23s
URL: https://workflowai.hello/_/agents/agent-id/runs/run-id"""

        assert run.format_output() == expected

    # Test that format_output works correctly when cost and latency are not provided:
    # 1. The output is still formatted as a JSON object
    # 2. No cost or latency lines are included in the output
    # 3. The run URL is still included
    def test_format_output_no_cost_latency(self):
        run = Run[_TestOutput](
            id="run-id",
            agent_id="agent-id",
            schema_id=1,
            output=_TestOutput(message="hello"),
        )

        expected = """\nOutput:
==================================================
{
  "message": "hello"
}
==================================================
URL: https://workflowai.hello/_/agents/agent-id/runs/run-id"""

        assert run.format_output() == expected

    def test_format_output_tool_call_requests(self):
        run = Run[_TestOutput](
            id="run-id",
            agent_id="agent-id",
            schema_id=1,
            output=_TestOutput.model_construct(),
            tool_call_requests=[
                ToolCallRequest(
                    id="tool-call-id",
                    name="tool-call-name",
                    input={"key": "value"},
                ),
            ],
        )

        assert (
            run.format_output()
            == """\nTool Call Requests:
==================================================
[
  {
    "id": "tool-call-id",
    "name": "tool-call-name",
    "input": {
      "key": "value"
    }
  }
]
==================================================
URL: https://workflowai.hello/_/agents/agent-id/runs/run-id"""
        )

    def test_format_output_with_datetime(self):
        """Test that datetimes in the output model are correctly serialized to ISO strings."""
        test_dt = datetime(2024, 1, 1, 12, 30, 0, tzinfo=timezone.utc)
        run = Run[_TestOutputWithDatetime](
            id="run-dt-id",
            agent_id="agent-dt-id",
            schema_id=2,
            output=_TestOutputWithDatetime(timestamp=test_dt, message="datetime test"),
            duration_seconds=0.5,
            cost_usd=0.0001,
        )

        expected_json_part = '{\n  "timestamp": "2024-01-01T12:30:00Z",\n  "message": "datetime test"\n}'
        expected = f"""\nOutput:
==================================================
{expected_json_part}
==================================================
Cost: $ 0.00010
Latency: 0.50s
URL: https://workflowai.hello/_/agents/agent-dt-id/runs/run-dt-id"""

        assert run.format_output() == expected


class TestRunURL:
    # The @patch decorator from unittest.mock temporarily replaces the value of an attribute
    # during the execution of the decorated test function. The original value is restored
    # after the test completes.
    # To check what happens in different environment configurations, see env_test.py

    # Here we patch WORKFLOWAI_APP_URL to test the direct app URL case
    @patch("workflowai.env.WORKFLOWAI_APP_URL", "https://workflowai.hello")
    def test_run_url(self, run1: Run[_TestOutput]):
        # The patched value is only active during this test method
        assert run1.run_url == "https://workflowai.hello/_/agents/agent-id/runs/run-id"


class TestFetchCompletions:
    """Tests for the fetch_completions method of the Run class."""

    # Test that the underlying agent is called with the proper run id
    async def test_fetch_completions_success(self, run1: Run[_TestOutput], mock_agent: Mock):
        mock_agent.fetch_completions.return_value = [
            Completion(
                messages=[
                    Message(role="system", content="You are a helpful assistant"),
                    Message(role="user", content="Hello"),
                    Message(role="assistant", content="Hi there!"),
                ],
                response="Hi there!",
                usage=CompletionUsage(
                    completion_token_count=3,
                    completion_cost_usd=0.001,
                    reasoning_token_count=10,
                    prompt_token_count=20,
                    prompt_token_count_cached=0,
                    prompt_cost_usd=0.002,
                    prompt_audio_token_count=0,
                    prompt_audio_duration_seconds=0,
                    prompt_image_count=0,
                    model_context_window_size=32000,
                ),
            ),
        ]

        # Call fetch_completions
        completions = await run1.fetch_completions()

        # Verify the API was called correctly
        mock_agent.fetch_completions.assert_called_once_with("run-id")

        # Verify the response
        assert len(completions) == 1
        completion = completions[0]
        assert len(completion.messages) == 3
        assert completion.messages[0].role == "system"
        assert completion.messages[0].content == "You are a helpful assistant"
        assert completion.response == "Hi there!"
        assert completion.usage.completion_token_count == 3
        assert completion.usage.completion_cost_usd == 0.001

    # Test that fetch_completions fails appropriately when the agent is not set:
    # 1. This is a common error case that occurs when a Run object is created without an agent
    # 2. The method should fail fast with a clear error message before attempting any API calls
    # 3. This protects users from confusing errors that would occur if we tried to use the API client
    async def test_fetch_completions_no_agent(self, run1: Run[_TestOutput]):
        run1._agent = None  # pyright: ignore [reportPrivateUsage]

        with pytest.raises(ValueError, match="Agent is not set"):
            await run1.fetch_completions()

    # Test that fetch_completions fails appropriately when the run ID is not set:
    # 1. The run ID is required to construct the API endpoint URL
    # 2. Without it, we can't make a valid API request
    # 3. This validates that we fail fast with a clear error message
    # 4. This should never happen in practice (as Run objects always have an ID),
    #    but we test it for completeness and to ensure robust error handling
    async def test_fetch_completions_no_id(self, run1: Run[_TestOutput]):
        mock_agent = Mock()
        run1._agent = mock_agent  # pyright: ignore [reportPrivateUsage]
        run1.id = ""  # Empty ID

        with pytest.raises(ValueError, match="Run id is not set"):
            await run1.fetch_completions()
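
# Note: the async test methods above carry no explicit asyncio markers, so they rely on the
# project's pytest configuration enabling an asyncio-capable plugin (e.g. pytest-asyncio or
# anyio in auto mode). That configuration lives outside this file and is an assumption here.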