diff --git a/README.md b/README.md
index 6d29bd7..66c9f0b 100644
--- a/README.md
+++ b/README.md
@@ -23,11 +23,11 @@ _warning!_ By default wolverine uses GPT-4 and may make many repeated calls to t
 
 To run with gpt-4 (the default, tested option):
 
-    python wolverine.py buggy_script.py "subtract" 20 3
+    python -m wolverine examples/buggy_script.py "subtract" 20 3
 
 You can also run with other models, but be warned they may not adhere to the edit format as well:
 
-    python wolverine.py --model=gpt-3.5-turbo buggy_script.py "subtract" 20 3
+    python -m wolverine --model=gpt-3.5-turbo examples/buggy_script.py "subtract" 20 3
 
 If you want to use GPT-3.5 by default instead of GPT-4 uncomment the default model line in `.env`:
 
@@ -35,7 +35,7 @@ If you want to use GPT-3.5 by default instead of GPT-4 uncomment the default mod
 
 You can also use flag `--confirm=True` which will ask you `yes or no` before making changes to the file. If flag is not used then it will apply the changes to the file
 
-    python wolverine.py buggy_script.py "subtract" 20 3 --confirm=True
+    python -m wolverine examples/buggy_script.py "subtract" 20 3 --confirm=True
 
 ## Future Plans
diff --git a/buggy_script.js b/examples/buggy_script.js
similarity index 100%
rename from buggy_script.js
rename to examples/buggy_script.js
diff --git a/buggy_script.py b/examples/buggy_script.py
similarity index 82%
rename from buggy_script.py
rename to examples/buggy_script.py
index 3eeb6bb..d85445d 100644
--- a/buggy_script.py
+++ b/examples/buggy_script.py
@@ -1,5 +1,9 @@
 import sys
 import fire
+"""
+Run With: `wolverine examples/buggy_script.py "subtract" 20 3`
+Purpose: Show self-regenerating fixing of subtraction operator
+"""
 
 def add_numbers(a, b):
     return a + b
diff --git a/examples/buggy_script_2.py b/examples/buggy_script_2.py
new file mode 100644
index 0000000..0cf9128
--- /dev/null
+++ b/examples/buggy_script_2.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+import fire
+
+"""
+Run With: with `python wolverine.py examples/buggy_script_2.py`
+Purpose: Fix singleton code bug in Python
+"""
+
+class SingletonClass(object):
+    def __new__(cls):
+        cls.instance = super(SingletonClass, cls).__new__(cls)
+        return cls.instance
+
+def check_singleton_works():
+    """
+    check that singleton pattern is working
+    """
+    singleton = SingletonClass()
+    new_singleton = SingletonClass()
+    singleton.a = 1
+    new_singleton.a = 2
+    should_be_4 = (singleton.a + new_singleton.a)
+    assert should_be_4 == 4
+
+if __name__=="__main__":
+    fire.Fire(check_singleton_works)
+
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..b4500ed
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,2 @@
+-r requirements.txt
+pytest==7.3.1
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_wolverine.py b/tests/test_wolverine.py
new file mode 100644
index 0000000..1f82d76
--- /dev/null
+++ b/tests/test_wolverine.py
@@ -0,0 +1,56 @@
+import os
+import json
+import pytest
+import tempfile
+from wolverine import apply_changes, json_validated_response
+
+
+@pytest.fixture(scope='function')
+def temp_file():
+    # Create a temporary file
+    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
+        f.write("first line\nsecond line\nthird line")
+        file_path = f.name
+    yield file_path
+    # Clean up the temporary file
+    os.remove(file_path)
+
+
+def test_apply_changes_replace(temp_file):
+    # Make a "replace" change to the second line
+    changes = [
+        {"operation": "Replace", "line": 2, "content": "new second line"}
+    ]
+    apply_changes(temp_file, changes)
+
+    # Check that the file was updated correctly
+    with open(temp_file) as f:
+        content = f.read()
+        assert content == "first line\nnew second line\nthird line"
+
+
+def test_apply_changes_delete(temp_file):
+    # Make a "delete" change to the third line
+    changes = [
+        {"operation": "Delete", "line": 3, "content": ""},
+    ]
+    apply_changes(temp_file, changes)
+
+    # Check that the file was updated correctly
+    with open(temp_file) as f:
+        content = f.read()
+        assert content == "first line\nsecond line\n"
+
+
+def test_apply_changes_insert(temp_file):
+    # Make an "insert" change after the second line
+    changes = [
+        {"operation": "InsertAfter", "line": 2, "content": "inserted line"},
+    ]
+    apply_changes(temp_file, changes)
+
+    # Check that the file was updated correctly
+    with open(temp_file) as f:
+        content = f.read()
+        assert content == 'first line\nsecond line\ninserted line\nthird line'
+
diff --git a/wolverine/__init__.py b/wolverine/__init__.py
new file mode 100644
index 0000000..9d157c1
--- /dev/null
+++ b/wolverine/__init__.py
@@ -0,0 +1 @@
+from .wolverine import apply_changes, json_validated_response
diff --git a/wolverine/__main__.py b/wolverine/__main__.py
new file mode 100644
index 0000000..d800804
--- /dev/null
+++ b/wolverine/__main__.py
@@ -0,0 +1,6 @@
+import fire
+
+from .wolverine import main
+
+if __name__ == "__main__":
+    fire.Fire(main)
diff --git a/wolverine.py b/wolverine/wolverine.py
similarity index 89%
rename from wolverine.py
rename to wolverine/wolverine.py
index 37b958b..a0ced6d 100644
--- a/wolverine.py
+++ b/wolverine/wolverine.py
@@ -5,7 +5,6 @@ import shutil
 import subprocess
 import sys
 
-import fire
 import openai
 from dotenv import load_dotenv
 from termcolor import cprint
@@ -73,8 +72,10 @@ def json_validated_response(model, messages):
         messages.append(
             {
                 "role": "user",
-                "content": """Your response could not be parsed by json.loads.
-                    Please restate your last message as pure JSON.""",
+                "content": (
+                    "Your response could not be parsed by json.loads. "
+                    "Please restate your last message as pure JSON."
+                ),
             }
         )
         # rerun the api call
@@ -150,9 +151,13 @@ def apply_changes(file_path, changes: list, confirm=False):
         elif operation == "InsertAfter":
             file_lines.insert(line, content + "\n")
 
-    # Ask for user confirmation before writing changes
-    print("\nChanges to be made:")
+    # Print explanations
+    cprint("Explanations:", "blue")
+    for explanation in explanations:
+        cprint(f"- {explanation}", "blue")
 
+    # Display changes diff
+    print("\nChanges to be made:")
     diff = difflib.unified_diff(original_file_lines, file_lines, lineterm="")
     for line in diff:
         if line.startswith("+"):
@@ -162,8 +167,8 @@ def apply_changes(file_path, changes: list, confirm=False):
         else:
             print(line, end="")
 
-    # Checking if user used confirm flag
     if confirm:
+        # check if user wants to apply changes or exit
         confirmation = input("Do you want to apply these changes? (y/n): ")
         if confirmation.lower() != "y":
             print("Changes not applied")
@@ -171,27 +176,20 @@
 
     with open(file_path, "w") as f:
         f.writelines(file_lines)
-
-    # Print explanations
-    cprint("Explanations:", "blue")
-    for explanation in explanations:
-        cprint(f"- {explanation}", "blue")
-
-    # Show the diff
-    print("\nChanges:")
-    diff = difflib.unified_diff(
-        original_file_lines, file_lines, lineterm="")
-    for line in diff:
-        if line.startswith("+"):
-            cprint(line, "green", end="")
-        elif line.startswith("-"):
-            cprint(line, "red", end="")
-        else:
-            print(line, end="")
-
     print("Changes applied.")
 
 
+def check_model_availability(model):
+    available_models = [x['id'] for x in openai.Model.list()["data"]]
+    if model not in available_models:
+        print(
+            f"Model {model} is not available. Perhaps try running with "
+            "`--model=gpt-3.5-turbo` instead? You can also configure a "
+            "default model in the .env"
+        )
+        exit()
+
+
 def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):
     if revert:
         backup_file = script_name + ".bak"
@@ -203,6 +201,9 @@
             print(f"No backup file found for {script_name}")
             sys.exit(1)
 
+    # check if model is available
+    check_model_availability(model)
+
     # Make a backup of the original script
     shutil.copy(script_name, script_name + ".bak")
 
@@ -213,10 +214,10 @@
             cprint("Script ran successfully.", "blue")
             print("Output:", output)
             break
+
         else:
             cprint("Script crashed. Trying to fix...", "blue")
             print("Output:", output)
-
             json_response = send_error_to_gpt(
                 file_path=script_name,
                 args=script_args,
@@ -226,7 +227,3 @@
 
             apply_changes(script_name, json_response, confirm=confirm)
             cprint("Changes applied. Rerunning...", "blue")
-
-
-if __name__ == "__main__":
-    fire.Fire(main)
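
Aside: the new tests above pin down the change format that `apply_changes` consumes. The snippet below is a minimal, non-authoritative sketch of driving it directly from Python, assuming the `wolverine` package introduced in this patch is importable and its requirements are installed; the throwaway temp file is purely illustrative.

    import tempfile

    from wolverine import apply_changes

    # Create a small throwaway file to edit (mirrors the tests/test_wolverine.py fixture).
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
        f.write("first line\nsecond line\nthird line")
        path = f.name

    # Each change is a dict with an "operation" ("Replace", "Delete" or "InsertAfter"),
    # a 1-based "line" number, and the "content" to write or insert.
    changes = [{"operation": "Replace", "line": 2, "content": "new second line"}]

    # confirm=False applies the edit immediately; confirm=True prints the pending
    # diff and asks "Do you want to apply these changes? (y/n)" before writing.
    apply_changes(path, changes, confirm=False)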