Mirror of https://github.com/biobootloader/wolverine

Commit 2497fb816b: move json_validated_response to standalone function
Parent: 923f7057e3

Changed file: wolverine.py (91 lines changed)
--- a/wolverine.py
+++ b/wolverine.py
@@ -35,47 +35,49 @@ def run_script(script_name, script_args):
     return result.decode("utf-8"), 0
 
 
-def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
-    def json_validated_response(model, messages):
-        """
-        This function is needed because the API can return a non-json response.
-        This will run recursively until a valid json response is returned.
-        """
-        response = openai.ChatCompletion.create(
-            model=model,
-            messages=messages,
-            temperature=0.5,
-        )
-        messages.append(response.choices[0].message)
-        content = response.choices[0].message.content
-        # see if json can be parsed
-        try:
-            json_start_index = content.index(
-                "["
-            )  # find the starting position of the JSON data
-            json_data = content[
-                json_start_index:
-            ]  # extract the JSON data from the response string
-            json_response = json.loads(json_data)
-        except (json.decoder.JSONDecodeError, ValueError) as e:
-            cprint(f"{e}. Re-running the query.", "red")
-            # debug
-            cprint(f"\n\GPT RESPONSE:\n\n{content}\n\n", "yellow")
-            # append a user message that says the json is invalid
-            messages.append(
-                {
-                    "role": "user",
-                    "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
-                }
-            )
-            # rerun the api call
-            return json_validated_response(model, messages)
-        except Exception as e:
-            cprint(f"Unknown error: {e}", "red")
-            cprint(f"\n\GPT RESPONSE:\n\n{content}\n\n", "yellow")
-            raise e
-        return json_response
+def json_validated_response(model, messages):
+    """
+    This function is needed because the API can return a non-json response.
+    This will run recursively until a valid json response is returned.
+    todo: might want to stop after a certain number of retries
+    """
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=0.5,
+    )
+    messages.append(response.choices[0].message)
+    content = response.choices[0].message.content
+    # see if json can be parsed
+    try:
+        json_start_index = content.index(
+            "["
+        )  # find the starting position of the JSON data
+        json_data = content[
+            json_start_index:
+        ]  # extract the JSON data from the response string
+        json_response = json.loads(json_data)
+    except (json.decoder.JSONDecodeError, ValueError) as e:
+        cprint(f"{e}. Re-running the query.", "red")
+        # debug
+        cprint(f"\n\GPT RESPONSE:\n\n{content}\n\n", "yellow")
+        # append a user message that says the json is invalid
+        messages.append(
+            {
+                "role": "user",
+                "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
+            }
+        )
+        # rerun the api call
+        return json_validated_response(model, messages)
+    except Exception as e:
+        cprint(f"Unknown error: {e}", "red")
+        cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+        raise e
+    return json_response
+
 
+def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
     with open(file_path, "r") as f:
         file_lines = f.readlines()
 
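The docstring's "todo: might want to stop after a certain number of retries" is left open by this commit. Below is a minimal sketch of one way to cap the recursion, assuming the same json and openai (pre-1.0 ChatCompletion API) usage shown in the diff; the function name, the max_retries parameter, and the RuntimeError are illustrative and not part of the repository.

import json

import openai


# Illustrative variant, not part of this commit: same retry-on-invalid-JSON idea,
# but the recursion gives up after max_retries attempts instead of looping forever.
def json_validated_response_with_limit(model, messages, max_retries=3):
    if max_retries <= 0:
        raise RuntimeError("model never returned parseable JSON")
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0.5,
    )
    messages.append(response.choices[0].message)
    content = response.choices[0].message.content
    try:
        json_start_index = content.index("[")  # JSON payload starts at the first "["
        return json.loads(content[json_start_index:])
    except (json.decoder.JSONDecodeError, ValueError):
        messages.append(
            {
                "role": "user",
                "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
            }
        )
        # one attempt used up; retry with a smaller budget
        return json_validated_response_with_limit(model, messages, max_retries - 1)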
@@ -106,6 +108,16 @@ def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
             "content": prompt,
         },
     ]
+    messages = [
+        {
+            "role": "system",
+            "content": SYSTEM_PROMPT,
+        },
+        {
+            "role": "user",
+            "content": prompt,
+        },
+    ]
 
     return json_validated_response(model, messages)
 
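Not part of the commit, but for orientation: now that json_validated_response is standalone, it can be called with any messages list, not just the one send_error_to_gpt builds. A usage sketch, assuming the names json_validated_response and DEFAULT_MODEL are in scope (for example inside wolverine.py itself); the prompt text is made up for illustration.

# Usage sketch (illustrative only): ask for a JSON array and get it back parsed.
messages = [
    {
        "role": "user",
        "content": "Reply with a pure JSON array of the first three prime numbers.",
    },
]
primes = json_validated_response(DEFAULT_MODEL, messages)
print(primes)  # e.g. [2, 3, 5] once a parseable array comes back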
@@ -188,6 +200,7 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL):
                 file_path=script_name,
                 args=script_args,
                 error_message=output,
+                model=model,
             )
 
             apply_changes(script_name, json_response)