-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path crown_ai.py
More file actions
30 lines (26 loc) · 1.33 KB
/
crown_ai.py
File metadata and controls
30 lines (26 loc) · 1.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import config
import logging
import config
import openai
def advance_conversation(Conversation, Max_Completion_Tokens=4096, Temperature=0.25, Top_p=0.9):
    '''Return the Conversation advanced by one new assistant message from the AI model.

    Parameters:
        Conversation: project conversation object exposing get_token_length(),
            get_messages(), and add_message(). Returned UNCHANGED when the
            request would exceed the model's token window.
        Max_Completion_Tokens: cap on tokens the model may generate in its reply.
        Temperature: sampling temperature forwarded to the API.
        Top_p: nucleus-sampling parameter forwarded to the API.

    Returns:
        The result of Conversation.add_message(...) with the assistant reply
        appended, or the original Conversation if the token budget is exceeded.
    '''
    openai.api_key = config.OPENAI_API_KEY
    # Budget check: prompt tokens plus the full completion allowance must fit
    # inside the model's context window, otherwise the API call would fail.
    expected_length = Conversation.get_token_length() + Max_Completion_Tokens
    if expected_length > config.OPENAI_CHAT_MODEL_MAX_TOKENS:
        # Refuse the call rather than let the API error out; the caller gets
        # the conversation back unmodified.  Lazy %-style args so the message
        # is only formatted when ERROR-level logging is enabled.
        logging.error(
            "AI - Conversation + prompt = (%s) tokens is over AI model limit (%s).",
            expected_length, config.OPENAI_CHAT_MODEL_MAX_TOKENS)
        return Conversation

    ai_response = openai.chat.completions.create(
        model=config.OPENAI_CHAT_MODEL,
        max_tokens=Max_Completion_Tokens,
        temperature=Temperature,
        top_p=Top_p,
        messages=Conversation.get_messages(),
        # NOTE(review): these stop sequences look domain-specific (translation
        # workflow?) — confirm they are intentional.
        stop=["translation", "Translation", "was"]
    )
    logging.debug(
        'AI - Conversation advance finish reason - %s.',
        ai_response.choices[0].finish_reason)
    logging.debug(
        'AI - Conversation advance used P-%s C-%s T-%s tokens.',
        ai_response.usage.prompt_tokens,
        ai_response.usage.completion_tokens,
        ai_response.usage.total_tokens)
    return Conversation.add_message({"role": "assistant", "content": ai_response.choices[0].message.content})