Mirror of https://github.com/biobootloader/wolverine
Compare commits
11 Commits
a1fb03ea81
...
3b91ee62d7
Autor | SHA1 | Data |
---|---|---|
biobootloader | 3b91ee62d7 | |
BioBootloader | b8a7e04e61 | |
BioBootloader | fb6466a810 | |
biobootloader | b11dfc79a4 | |
BioBootloader | 12d78109cf | |
Serj | 507272c4d6 | |
Serj | 4c8e7f7964 | |
Serj | c50b51e949 | |
Serj | cbc0909145 | |
ksfi | 23e53dbb87 | |
ksfi | 6bc85a41e2 |
|
@ -23,11 +23,11 @@ _warning!_ By default wolverine uses GPT-4 and may make many repeated calls to t
|
|||
|
||||
To run with gpt-4 (the default, tested option):
|
||||
|
||||
python wolverine.py examples/buggy_script.py "subtract" 20 3
|
||||
python -m wolverine examples/buggy_script.py "subtract" 20 3
|
||||
|
||||
You can also run with other models, but be warned they may not adhere to the edit format as well:
|
||||
|
||||
python wolverine.py --model=gpt-3.5-turbo buggy_script.py "subtract" 20 3
|
||||
python -m wolverine --model=gpt-3.5-turbo examples/buggy_script.py "subtract" 20 3
|
||||
|
||||
If you want to use GPT-3.5 by default instead of GPT-4 uncomment the default model line in `.env`:
|
||||
|
||||
|
@ -35,7 +35,7 @@ If you want to use GPT-3.5 by default instead of GPT-4 uncomment the default mod
|
|||
|
||||
You can also use the flag `--confirm=True`, which will ask you yes or no before making changes to the file. If the flag is not used, the changes are applied to the file automatically.
|
||||
|
||||
python wolverine.py buggy_script.py "subtract" 20 3 --confirm=True
|
||||
python -m wolverine examples/buggy_script.py "subtract" 20 3 --confirm=True
|
||||
|
||||
## Future Plans
|
||||
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
-r requirements.txt
|
||||
pytest==7.3.1
|
|
@ -0,0 +1,56 @@
|
|||
import os
|
||||
import json
|
||||
import pytest
|
||||
import tempfile
|
||||
from wolverine import apply_changes, json_validated_response
|
||||
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def temp_file():
|
||||
# Create a temporary file
|
||||
with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
|
||||
f.write("first line\nsecond line\nthird line")
|
||||
file_path = f.name
|
||||
yield file_path
|
||||
# Clean up the temporary file
|
||||
os.remove(file_path)
|
||||
|
||||
|
||||
def test_apply_changes_replace(temp_file):
|
||||
# Make a "replace" change to the second line
|
||||
changes = [
|
||||
{"operation": "Replace", "line": 2, "content": "new second line"}
|
||||
]
|
||||
apply_changes(temp_file, changes)
|
||||
|
||||
# Check that the file was updated correctly
|
||||
with open(temp_file) as f:
|
||||
content = f.read()
|
||||
assert content == "first line\nnew second line\nthird line"
|
||||
|
||||
|
||||
def test_apply_changes_delete(temp_file):
|
||||
# Make a "delete" change to the third line
|
||||
changes = [
|
||||
{"operation": "Delete", "line": 3, "content": ""},
|
||||
]
|
||||
apply_changes(temp_file, changes)
|
||||
|
||||
# Check that the file was updated correctly
|
||||
with open(temp_file) as f:
|
||||
content = f.read()
|
||||
assert content == "first line\nsecond line\n"
|
||||
|
||||
|
||||
def test_apply_changes_insert(temp_file):
|
||||
# Make an "insert" change after the second line
|
||||
changes = [
|
||||
{"operation": "InsertAfter", "line": 2, "content": "inserted line"},
|
||||
]
|
||||
apply_changes(temp_file, changes)
|
||||
|
||||
# Check that the file was updated correctly
|
||||
with open(temp_file) as f:
|
||||
content = f.read()
|
||||
assert content == 'first line\nsecond line\ninserted line\nthird line'
|
||||
|
|
@ -0,0 +1 @@
|
|||
from .wolverine import apply_changes, json_validated_response
|
|
@ -0,0 +1,6 @@
|
|||
import fire
|
||||
|
||||
from .wolverine import main
|
||||
|
||||
if __name__ == "__main__":
|
||||
fire.Fire(main)
|
|
@ -1,5 +1,4 @@
|
|||
import difflib
|
||||
import fire
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
|
@ -177,6 +176,17 @@ def apply_changes(file_path, changes: list, confirm=False):
|
|||
print("Changes applied.")
|
||||
|
||||
|
||||
def check_model_availability(model):
|
||||
available_models = [x['id'] for x in openai.Model.list()["data"]]
|
||||
if model not in available_models:
|
||||
print(
|
||||
f"Model {model} is not available. Perhaps try running with "
|
||||
"`--model=gpt-3.5-turbo` instead? You can also configure a "
|
||||
"default model in the .env"
|
||||
)
|
||||
exit()
|
||||
|
||||
|
||||
def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):
|
||||
if revert:
|
||||
backup_file = script_name + ".bak"
|
||||
|
@ -188,6 +198,9 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=F
|
|||
print(f"No backup file found for {script_name}")
|
||||
sys.exit(1)
|
||||
|
||||
# check if model is available
|
||||
check_model_availability(model)
|
||||
|
||||
# Make a backup of the original script
|
||||
shutil.copy(script_name, script_name + ".bak")
|
||||
|
||||
|
@ -198,10 +211,10 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=F
|
|||
cprint("Script ran successfully.", "blue")
|
||||
print("Output:", output)
|
||||
break
|
||||
|
||||
else:
|
||||
cprint("Script crashed. Trying to fix...", "blue")
|
||||
print("Output:", output)
|
||||
|
||||
json_response = send_error_to_gpt(
|
||||
file_path=script_name,
|
||||
args=script_args,
|
||||
|
@ -211,7 +224,3 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=F
|
|||
|
||||
apply_changes(script_name, json_response, confirm=confirm)
|
||||
cprint("Changes applied. Rerunning...", "blue")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
fire.Fire(main)
|
Loading…
Reference in New Issue