Skip to content

Commit 8450c2a

Browse files
committed
Fix format issues
1 parent 9deb481 commit 8450c2a

6 files changed

Lines changed: 48 additions & 48 deletions

File tree

.flake8

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,3 @@
11
[flake8]
22
max-line-length = 100
3+
ignore = E712,W503

devchat/assistant.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ def _check_limit(self):
3333
def make_prompt(self, request: str,
3434
instruct_contents: Optional[List[str]], context_contents: Optional[List[str]],
3535
functions: Optional[List[dict]],
36-
parent: Optional[str] = None, references: Optional[List[str]] = None,
36+
parent: Optional[str] = None, references: Optional[List[str]] = None,
3737
function_name: Optional[str] = None):
3838
"""
3939
Make a prompt for the chat API.

devchat/openai/openai_message.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,6 @@ def function_call_to_json(self):
5656
pass
5757
return '\n```command\n' + json.dumps(function_call_copy) + '\n```\n'
5858

59-
6059
def stream_from_dict(self, message_data: dict) -> str:
6160
"""Append to the message from a dictionary returned from a streaming chat API."""
6261
delta = message_data.get('content', '')

devchat/openai/openai_prompt.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ def prepend_history(self, prompt: 'OpenAIPrompt', token_limit: int = math.inf) -
140140
def set_request(self, content: str, function_name: Optional[str] = None) -> int:
141141
if not content.strip():
142142
raise ValueError("The request cannot be empty.")
143-
message = OpenAIMessage(content, role = ('user' if not function_name else 'function'),
143+
message = OpenAIMessage(content, role=('user' if not function_name else 'function'),
144144
name=function_name)
145145
self._new_messages['request'] = message
146146
self._request_tokens += message_tokens(message.to_dict(), self.model)
@@ -165,7 +165,7 @@ def set_response(self, response_str: str):
165165
if index >= len(self.response):
166166
self.response.extend([None] * (index - len(self.response) + 1))
167167
self.response[index] = OpenAIMessage(**choice['message'],
168-
finish_reason = choice['finish_reason'])
168+
finish_reason=choice['finish_reason'])
169169
self.set_hash()
170170

171171
def append_response(self, delta_str: str) -> str:

devchat/prompt.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -218,8 +218,8 @@ def shortlog(self) -> List[dict]:
218218
"date": self._timestamp,
219219
"context": [msg.to_dict() for msg in self.new_context],
220220
"request": self.request.content,
221-
"response": (message.content if message.content else "") + \
222-
message.function_call_to_json(),
221+
"response": ((message.content if message.content else "")
222+
+ message.function_call_to_json()),
223223
"hash": self.hash,
224224
"parent": self.parent
225225
}

tests/test_cli_prompt.py

Lines changed: 42 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -83,33 +83,35 @@ def fixture_temp_files(tmpdir):
8383
context.write("It is summer.")
8484
return str(instruct0), str(instruct1), str(instruct2), str(context)
8585

86+
8687
@pytest.fixture(name="functions_file")
8788
def fixture_functions_file(tmpdir):
8889
functions_file = tmpdir.join('functions.json')
8990
functions_file.write("""
90-
[
91-
{
92-
"name": "get_current_weather",
93-
"description": "Get the current weather in a given location",
94-
"parameters": {
95-
"type": "object",
96-
"properties": {
97-
"location": {
98-
"type": "string",
99-
"description": "The city and state, e.g. San Francisco, CA"
100-
},
101-
"unit": {
102-
"type": "string",
103-
"enum": ["celsius", "fahrenheit"]
104-
}
105-
},
106-
"required": ["location"]
107-
}
108-
}
109-
]
110-
""")
91+
[
92+
{
93+
"name": "get_current_weather",
94+
"description": "Get the current weather in a given location",
95+
"parameters": {
96+
"type": "object",
97+
"properties": {
98+
"location": {
99+
"type": "string",
100+
"description": "The city and state, e.g. San Francisco, CA"
101+
},
102+
"unit": {
103+
"type": "string",
104+
"enum": ["celsius", "fahrenheit"]
105+
}
106+
},
107+
"required": ["location"]
108+
}
109+
}
110+
]
111+
""")
111112
return str(functions_file)
112113

114+
113115
def test_prompt_with_instruct(git_repo, temp_files): # pylint: disable=W0613
114116
result = runner.invoke(main, ['prompt', '-m', 'gpt-4',
115117
'-i', temp_files[0], '-i', temp_files[1],
@@ -129,19 +131,18 @@ def test_prompt_with_instruct_and_context(git_repo, temp_files): # pylint: disa
129131

130132
def test_prompt_with_functions(git_repo, functions_file): # pylint: disable=W0613
131133
# call with -f option
132-
result = runner.invoke(main, ['prompt', '-m', 'gpt-4',
133-
'-f', functions_file,
134-
"What is the weather like in Boston?"])
134+
result = runner.invoke(main, ['prompt', '-m', 'gpt-4', '-f', functions_file,
135+
"What is the weather like in Boston?"])
135136

136137
core_content = _get_core_content(result.output)
137138
assert result.exit_code == 0
138139
assert core_content.find("finish_reason: function_call") >= 0
139140
assert core_content.find('"name": "get_current_weather"') >= 0
140141
assert core_content.find('command') > 0
141142

142-
# compare with no -f options
143+
# compare with no -f options
143144
result = runner.invoke(main, ['prompt', '-m', 'gpt-4',
144-
"What is the weather like in Boston?"])
145+
'What is the weather like in Boston?'])
145146

146147
core_content = _get_core_content(result.output)
147148
assert result.exit_code == 0
@@ -151,9 +152,8 @@ def test_prompt_with_functions(git_repo, functions_file): # pylint: disable=W06
151152

152153
def test_prompt_log_with_functions(git_repo, functions_file): # pylint: disable=W0613
153154
# call with -f option
154-
result = runner.invoke(main, ['prompt', '-m', 'gpt-4',
155-
'-f', functions_file,
156-
"What is the weather like in Boston?"])
155+
result = runner.invoke(main, ['prompt', '-m', 'gpt-4', '-f', functions_file,
156+
'What is the weather like in Boston?'])
157157

158158
prompt_hash = _get_prompt_hash(result.output)
159159
result = runner.invoke(main, ['log', '-t', prompt_hash])
@@ -166,17 +166,17 @@ def test_prompt_log_with_functions(git_repo, functions_file): # pylint: disable
166166

167167

168168
def test_prompt_log_compatibility():
169-
# import test!!
170-
# Historical Record Compatibility Test
171-
# create git repo folder
172-
# install old devchat
173-
# run prompt, create old version records
174-
# run topic -l, expect topic list
175-
# uninstall old devchat
176-
# install new devchat
177-
# run topic -l, expect topic list
178-
# run prompt -f ./.chat/functions.json "list files in project", expect function call return
179-
# run topic -l, expect function call in topic list
169+
# import test!!
170+
# Historical Record Compatibility Test
171+
# create git repo folder
172+
# install old devchat
173+
# run prompt, create old version records
174+
# run topic -l, expect topic list
175+
# uninstall old devchat
176+
# install new devchat
177+
# run topic -l, expect topic list
178+
# run prompt -f ./.chat/functions.json "list files in project", expect function call return
179+
# run topic -l, expect function call in topic list
180180
assert True
181181

182182

@@ -185,7 +185,7 @@ def test_prompt_with_function_replay(git_repo, functions_file): # pylint: disab
185185
result = runner.invoke(main, ['prompt', '-m', 'gpt-4',
186186
'-f', functions_file,
187187
'-n', 'get_current_weather',
188-
'{"temperature": "22", "unit": "celsius", "description": "Sunny"}'])
188+
'{"temperature": "22", "unit": "celsius", "weather": "Sunny"}'])
189189

190190
core_content = _get_core_content(result.output)
191191
assert result.exit_code == 0
@@ -195,7 +195,7 @@ def test_prompt_with_function_replay(git_repo, functions_file): # pylint: disab
195195
prompt_hash = _get_prompt_hash(result.output)
196196
result = runner.invoke(main, ['prompt', '-m', 'gpt-4',
197197
'-p', prompt_hash,
198-
'what is the GPT function name?'])
198+
'what is the GPT function name?'])
199199

200200
core_content = _get_core_content(result.output)
201201
assert result.exit_code == 0

0 commit comments

Comments
 (0)