use Fire for args, add flag to use 3.5-turbo

pull/8/head
BioBootloader 2023-04-08 12:38:14 -07:00
rodzic 4ff6bfae22
commit 552600d671
4 zmienionych plików z 32 dodań i 17 usunięć

2
.flake8 100644
Wyświetl plik

@ -0,0 +1,2 @@
[flake8]
max-line-length = 120

Wyświetl plik

@ -18,13 +18,19 @@ Add your openAI api key to `openai_key.txt` - _warning!_ by default this uses GP
## Example Usage
To run with gpt-4 (the default, tested option):
python wolverine.py buggy_script.py "subtract" 20 3
You can also run with other models, but be warned they may not adhere to the edit format as well:
python wolverine.py --model=gpt-3.5-turbo buggy_script.py "subtract" 20 3
## Future Plans
This is just a quick prototype I threw together in a few hours. There are many possible extensions and contributions are welcome:
- add flags to customize usage, such as using GPT3.5-turbo instead or asking for user confirmation before running changed code
- add flags to customize usage, such as asking for user confirmation before running changed code
- further iterations on the edit format that GPT responds in. Currently it struggles a bit with indentation, but I'm sure that can be improved
- a suite of example buggy files that we can test prompts on to ensure reliability and measure improvement
- multiple files / codebases: send GPT everything that appears in the stacktrace

Wyświetl plik

@ -5,10 +5,14 @@ attrs==22.2.0
certifi==2022.12.7
charset-normalizer==3.1.0
fire==0.5.0
flake8==6.0.0
frozenlist==1.3.3
idna==3.4
mccabe==0.7.0
multidict==6.0.4
openai==0.27.2
pycodestyle==2.10.0
pyflakes==3.0.1
requests==2.28.2
six==1.16.0
termcolor==2.2.0

Wyświetl plik

@ -1,4 +1,5 @@
import difflib
import fire
import json
import os
import shutil
@ -13,17 +14,18 @@ with open("openai_key.txt") as f:
openai.api_key = f.read().strip()
def run_script(script_name, script_args):
    """Execute *script_name* as a subprocess and capture its output.

    Parameters:
        script_name: path to the Python script to run.
        script_args: iterable of arguments forwarded to the script; each
            argument is coerced to str so callers may pass ints etc.

    Returns:
        A ``(output, returncode)`` tuple: the combined stdout/stderr text
        and the process exit code (0 on success).
    """
    script_args = [str(arg) for arg in script_args]
    try:
        result = subprocess.check_output(
            [sys.executable, script_name, *script_args], stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError as e:
        # On a non-zero exit we still return the captured output so the
        # caller can feed the traceback to the model for a fix attempt.
        return e.output.decode("utf-8"), e.returncode
    return result.decode("utf-8"), 0
def send_error_to_gpt4(file_path, args, error_message):
def send_error_to_gpt4(file_path, args, error_message, model):
with open(file_path, "r") as f:
file_lines = f.readlines()
@ -51,8 +53,7 @@ def send_error_to_gpt4(file_path, args, error_message):
# print(prompt)
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
model="gpt-4",
model=model,
messages=[
{
"role": "user",
@ -113,16 +114,13 @@ def apply_changes(file_path, changes_json):
print(line, end="")
def main():
if len(sys.argv) < 3:
print("Usage: wolverine.py <script_name> <arg1> <arg2> ... [--revert]")
sys.exit(1)
script_name = sys.argv[1]
args = sys.argv[2:]
def main(script_name, *script_args, revert=False, model="gpt-4"):
# if len(sys.argv) < 3:
# print("Usage: python wolverine.py <script_name> <arg1> <arg2> ... [--revert]")
# sys.exit(1)
# Revert changes if requested
if "--revert" in args:
if revert:
backup_file = script_name + ".bak"
if os.path.exists(backup_file):
shutil.copy(backup_file, script_name)
@ -136,7 +134,7 @@ def main():
shutil.copy(script_name, script_name + ".bak")
while True:
output, returncode = run_script(script_name, *args)
output, returncode = run_script(script_name, script_args)
if returncode == 0:
cprint("Script ran successfully.", "blue")
@ -146,10 +144,15 @@ def main():
cprint("Script crashed. Trying to fix...", "blue")
print("Output:", output)
json_response = send_error_to_gpt4(script_name, args, output)
json_response = send_error_to_gpt4(
file_path=script_name,
args=script_args,
error_message=output,
model=model,
)
apply_changes(script_name, json_response)
cprint("Changes applied. Rerunning...", "blue")
if __name__ == "__main__":
main()
fire.Fire(main)