Itezaz ul Hassan 2023-04-16 02:45:40 +05:00 zatwierdzone przez GitHub
commit b8b456373d
Nie znaleziono w bazie danych klucza dla tego podpisu
ID klucza GPG: 4AEE18F83AFDEB23
4 zmienionych plików z 59 dodań i 6 usunięć

12
.gitignore vendored
Wyświetl plik

@ -1,5 +1,13 @@
# Virtual environments
venv
.venv
.env
env/
# Byte-compiled / optimized / DLL files
__pycache__/
# IDE files
.idea/
.vscode/
# Miscellaneous
.DS_Store

Wyświetl plik

@ -27,7 +27,19 @@ To run with gpt-4 (the default, tested option):
You can also run with other models, but be warned they may not adhere to the edit format as well:
python wolverine.py --model=gpt-3.5-turbo buggy_script.py "subtract" 20 3
python wolverine.py --model=gpt-3.5-turbo -f buggy_script.py "subtract" 20 3
## Flags and their usage
- To run with specific model, pass the `--model` or `-m` flag with model name
- To pass the buggy script name, pass the `-f` or `--file` flag with the script name
- To automatically re-run the script with every change until it succeeds, pass the `-y` or `--yes` flag
- To revert the script to its original state, pass the `-r` or `--revert` flag
## Sample full command
python wolverine.py --model=gpt-3.5-turbo -f buggy_script.py -y "subtract" 20 3
If you want to use GPT-3.5 by default instead of GPT-4 uncomment the default model line in `.env`:

16
args.py 100644
Wyświetl plik

@ -0,0 +1,16 @@
import argparse

# Command-line interface for wolverine.py (consumed via `from args import parser`).
parser = argparse.ArgumentParser(
    description='Give your python scripts regenerative healing abilities!'
)
# On/off flag: auto-apply every change GPT proposes without prompting.
parser.add_argument('-y', '--yes', help='Run Every Change made by GPT',
    required=False, action='store_true'
)
parser.add_argument('-f', '--file', help='Path to buggy file', required=True)
parser.add_argument('-m', '--model', help='Model Name', required=False,
    default='gpt-4'
)
# BUG FIX: the original used `default=False` with no action, so `-r` consumed
# the next command-line token as its value (and any non-empty string was
# truthy). main() treats revert as a boolean (`if revert:`), so it must be a
# plain store-true flag; store_true also implies default=False.
parser.add_argument('-r', '--revert', help='Revert changes from backup file',
    required=False, action='store_true'
)
# Remaining positionals are forwarded verbatim to the buggy script.
parser.add_argument('args', nargs='+', help='Arguments to pass to script')

Wyświetl plik

@ -5,10 +5,13 @@ import os
import shutil
import subprocess
import sys
import openai
from termcolor import cprint
from dotenv import load_dotenv
from args import parser
# Set up the OpenAI API
load_dotenv()
@ -187,8 +190,13 @@ def apply_changes(file_path, changes: list, confirm=False):
print("Changes applied.")
def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):
def main():
args = parser.parse_args()
script_name = args.file
script_args = args.args
revert = args.revert
model = args.model if args.model else DEFAULT_MODEL
run_until_success = args.yes
if revert:
backup_file = script_name + ".bak"
if os.path.exists(backup_file):
@ -201,9 +209,18 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=F
# Make a backup of the original script
shutil.copy(script_name, script_name + ".bak")
run_first_time = False
while True:
if run_first_time and not run_until_success:
cprint("Do you want to run the script again? [y/n]", "blue")
user_input = input()
while user_input.lower() != "y" and user_input.lower() != "n":
cprint("Incorrect entry. Please try again.", "red")
if user_input.lower() == "n":
break
output, returncode = run_script(script_name, script_args)
run_first_time = True
if returncode == 0:
cprint("Script ran successfully.", "blue")
@ -225,4 +242,4 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=F
if __name__ == "__main__":
fire.Fire(main)
main()