-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_reflection_agent.py
More file actions
78 lines (63 loc) · 2.66 KB
/
test_reflection_agent.py
File metadata and controls
78 lines (63 loc) · 2.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
from reflection_pattern_agent.reflection_agent import ReflectionAgent
# Fake objects mirroring the shape of a real OpenAI chat-completion response.
class MockChoice:
    """Stand-in for one entry of the ``choices`` list on a completion."""

    def __init__(self, content):
        # Mirrors the real SDK path the agent reads: choice.message.content.
        self.message = MockMessage(content)
class MockMessage:
    """Stand-in for the ``message`` object on a completion choice."""

    def __init__(self, content):
        # Only the ``content`` attribute is read by the code under test.
        self.content = content
class MockCompletion:
    """Fake top-level OpenAI response exposing ``.choices[0].message.content``."""

    def __init__(self, content):
        # Real responses carry a list of choices; a single one suffices here.
        self.choices = [MockChoice(content)]
# Test the generate method
def test_generate_without_reflection(mocker):
    """
    Tests the generate method in its simplest form, without any reflection history.
    We "mock" the OpenAI API call to avoid making a real, slow, and expensive call.
    """
    # Arrange: Set up the test conditions
    mock_api_response = "This is a mock response from OpenAI."
    # Tell pytest-mock to replace the 'create' method of the chat completions
    # with a function that returns our fake response object. Keep a handle on
    # the patch so we can verify the agent actually went through the API.
    mock_create = mocker.patch(
        "openai.resources.chat.completions.Completions.create",
        return_value=MockCompletion(mock_api_response),
    )
    agent = ReflectionAgent()
    test_prompt = "Test prompt"
    # Act: Call the method we are testing
    result = agent.generate(prompt=test_prompt)
    # Assert: the agent returns the mocked message content unchanged...
    assert result == mock_api_response
    # ...and invoked the (mocked) API exactly once — without this check the
    # test would still pass if generate() short-circuited the API entirely.
    mock_create.assert_called_once()
# Test the reflect method
def test_reflect_parses_critique_correctly(mocker):
    """
    Tests if the reflect method correctly calls the API and parses
    the bulleted list response into a Python list of strings.
    """
    # Arrange: Set up the test conditions
    # This is our fake critique from the "mocked" OpenAI API.
    mock_api_critique = """
- The tweet is a bit too long.
- It could use a more engaging emoji.
- The call to action is weak.
"""
    # Mock the API call to return our fake critique, keeping a handle on the
    # patch so we can verify the call actually happened.
    mock_create = mocker.patch(
        "openai.resources.chat.completions.Completions.create",
        return_value=MockCompletion(mock_api_critique),  # The same Mock classes from before
    )
    agent = ReflectionAgent()
    test_prompt = "Original prompt"
    test_output = "Generated output to be critiqued"
    # Act: Call the method we are testing
    reflections = agent.reflect(prompt=test_prompt, generated_output=test_output)
    # Assert: Check if our parsing logic worked as expected
    expected_reflections = [
        "- The tweet is a bit too long.",
        "- It could use a more engaging emoji.",
        "- The call to action is weak.",
    ]
    assert reflections == expected_reflections
    assert isinstance(reflections, list)  # Ensure the type is correct
    assert len(reflections) == 3  # Be specific about the expected outcome
    # The critique must have come from the (mocked) API, exactly once.
    mock_create.assert_called_once()