move json_validated_response to standalone function

pull/13/head
Felix Boehme 2023-04-14 16:29:45 -04:00
parent 923f7057e3
commit 2497fb816b
1 changed file with 52 additions and 39 deletions


@@ -35,11 +35,11 @@ def run_script(script_name, script_args):
return result.decode("utf-8"), 0
def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
def json_validated_response(model, messages):
"""
This function is needed because the API can return a non-json response.
This will run recursively until a valid json response is returned.
todo: might want to stop after a certain number of retries
"""
response = openai.ChatCompletion.create(
model=model,
@@ -72,10 +72,12 @@ def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
return json_validated_response(model, messages)
except Exception as e:
cprint(f"Unknown error: {e}", "red")
cprint(f"\n\GPT RESPONSE:\n\n{content}\n\n", "yellow")
cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
raise e
return json_response
def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
with open(file_path, "r") as f:
file_lines = f.readlines()
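
The docstring above leaves a todo about stopping after a certain number of retries. As a rough sketch only, not part of this commit, the standalone function could bound the recursion with a hypothetical max_retries parameter, keeping the pre-1.0 openai.ChatCompletion interface the surrounding code already uses:

import json

import openai
from termcolor import cprint


def json_validated_response(model, messages, max_retries=5):
    # Ask the model for a reply and try to parse it as JSON; on failure,
    # tell the model what went wrong and recurse with one fewer retry.
    # The max_retries cap and the retry wording are illustrative only.
    response = openai.ChatCompletion.create(model=model, messages=messages)
    content = response.choices[0].message.content.strip()
    try:
        return json.loads(content)
    except json.decoder.JSONDecodeError:
        if max_retries <= 0:
            raise ValueError("GPT never returned valid JSON")
        cprint("Response was not valid JSON, asking GPT to retry...", "yellow")
        messages.append(
            {
                "role": "user",
                "content": "Your last reply was not valid JSON. "
                "Respond ONLY with valid JSON this time.",
            }
        )
        return json_validated_response(model, messages, max_retries - 1)
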
@@ -106,6 +108,16 @@ def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
"content": prompt,
},
]
messages = [
{
"role": "system",
"content": SYSTEM_PROMPT,
},
{
"role": "user",
"content": prompt,
},
]
return json_validated_response(model, messages)
@@ -188,6 +200,7 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL):
file_path=script_name,
args=script_args,
error_message=output,
model=model,
)
apply_changes(script_name, json_response)
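
The last hunk forwards the model argument from main so a non-default model also reaches the standalone function. A condensed, hypothetical driver loop in the same spirit (the fix_until_clean name is an illustration; only run_script, send_error_to_gpt, apply_changes, and DEFAULT_MODEL come from the file itself):

def fix_until_clean(script_name, *script_args, model=DEFAULT_MODEL):
    # Keep running the script; whenever it fails, send the error to GPT
    # and apply the suggested edits before trying again.
    while True:
        output, returncode = run_script(script_name, script_args)
        if returncode == 0:
            return  # script ran cleanly, nothing left to fix
        json_response = send_error_to_gpt(
            file_path=script_name,
            args=script_args,
            error_message=output,
            model=model,  # forwarded so json_validated_response uses it too
        )
        apply_changes(script_name, json_response)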