kopia lustrzana https://github.com/biobootloader/wolverine
Add env variable VALIDATE_JSON_RETRY
Add env variable VALIDATE_JSON_RETRY to configure the number of retries when validating a JSON object. Add typing and comments.
rodzic
2f5a026ff9
commit
9f75fc9123
88
wolverine.py
88
wolverine.py
|
@ -6,6 +6,7 @@ import shutil
|
|||
import subprocess
|
||||
import sys
|
||||
import openai
|
||||
from typing import List, Dict
|
||||
from termcolor import cprint
|
||||
from dotenv import load_dotenv
|
||||
|
||||
|
@ -14,14 +15,18 @@ from dotenv import load_dotenv
|
|||
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

# Default model is GPT-4; override with the DEFAULT_MODEL env variable.
DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")

# Number of retries for json_validated_response; -1 (the default) means
# retry indefinitely until a valid JSON response is returned.
VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))

# Read the system prompt. The path is resolved relative to this file so the
# script works regardless of the caller's current working directory.
with open(os.path.join(os.path.dirname(__file__), "prompt.txt"), 'r') as f:
    SYSTEM_PROMPT = f.read()
|
||||
|
||||
|
||||
def run_script(script_name, script_args):
|
||||
def run_script(script_name: str, script_args: List) -> str:
|
||||
script_args = [str(arg) for arg in script_args]
|
||||
"""
|
||||
If script_name.endswith(".py") then run with python
|
||||
|
@ -40,49 +45,54 @@ def run_script(script_name, script_args):
|
|||
return result.decode("utf-8"), 0
|
||||
|
||||
|
||||
def json_validated_response(model: str, messages: List[Dict], nb_retry: int = 0) -> Dict:
    """
    Query the chat API and parse the reply as JSON, retrying on bad output.

    This function is needed because the API can return a non-JSON response.
    On a parse failure it appends a corrective user message and recurses,
    up to VALIDATE_JSON_RETRY attempts; if VALIDATE_JSON_RETRY is -1 it
    retries indefinitely until a valid JSON response is returned.

    Args:
        model: name of the chat model to query.
        messages: conversation history; mutated in place (the assistant
            reply and any corrective user messages are appended).
        nb_retry: number of retries already performed (internal counter).

    Returns:
        The parsed JSON payload (expected to be a JSON array), or an empty
        dict if no valid JSON was obtained within the retry budget.

    Raises:
        Exception: re-raises any unexpected error from the API call or
            response handling.
    """
    json_response = {}
    if VALIDATE_JSON_RETRY == -1 or nb_retry < VALIDATE_JSON_RETRY:
        response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=0.5,
        )
        messages.append(response.choices[0].message)
        content = response.choices[0].message.content
        # See if the content can be parsed as JSON.
        try:
            # The reply may contain prose before the payload; the payload is
            # expected to be a JSON array, so locate its opening bracket.
            json_start_index = content.index(
                "["
            )  # find the starting position of the JSON data
            json_data = content[
                json_start_index:
            ]  # extract the JSON data from the response string
            json_response = json.loads(json_data)
        except (json.decoder.JSONDecodeError, ValueError) as e:
            cprint(f"{e}. Re-running the query.", "red")
            # debug
            cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
            # Append a user message that says the JSON is invalid so the
            # model can correct itself on the next attempt.
            messages.append(
                {
                    "role": "user",
                    "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
                }
            )
            # Increment the retry counter and rerun the API call.
            nb_retry += 1
            return json_validated_response(model, messages, nb_retry)
        except Exception as e:
            cprint(f"Unknown error: {e}", "red")
            cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
            # Bare raise preserves the original traceback.
            raise
    # Not valid after VALIDATE_JSON_RETRY retries: return the empty object.
    return json_response
|
||||
|
||||
|
||||
def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
|
||||
def send_error_to_gpt(file_path: str, args: List, error_message: str, model: str = DEFAULT_MODEL) -> Dict:
|
||||
with open(file_path, "r") as f:
|
||||
file_lines = f.readlines()
|
||||
|
||||
|
@ -117,7 +127,7 @@ def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
|
|||
return json_validated_response(model, messages)
|
||||
|
||||
|
||||
def apply_changes(file_path, changes: list, confirm=False):
|
||||
def apply_changes(file_path: str, changes: List, confirm: bool = False):
|
||||
"""
|
||||
Pass changes as loaded json (list of dicts)
|
||||
"""
|
||||
|
|
Ładowanie…
Reference in New Issue