Porównaj commity

...

11 Commity

Autor SHA1 Wiadomość Data
biobootloader 3b91ee62d7
Merge pull request #34 from biobootloader/improve-structure-add-tests
Improve project structure and add tests
2023-04-24 11:35:10 -07:00
BioBootloader b8a7e04e61 improve project structure 2023-04-24 09:21:14 -07:00
BioBootloader fb6466a810 Merge branch 'main' into add_unit_tests 2023-04-23 18:11:05 -07:00
biobootloader b11dfc79a4
Merge pull request #15 from ksfi/main
switch to gpt3 if gpt4 not available
2023-04-23 15:22:09 -07:00
BioBootloader 12d78109cf change to inform model unavailable, fix conflicts 2023-04-23 15:18:02 -07:00
Serj 507272c4d6 Fix up test logic 2023-04-17 22:23:46 +03:00
Serj 4c8e7f7964 Added tests for apply changes method 2023-04-17 22:13:48 +03:00
Serj c50b51e949 Create dev requirements instead 2023-04-17 22:11:29 +03:00
Serj cbc0909145 added pytest to requirements 2023-04-17 22:07:05 +03:00
ksfi 23e53dbb87
let model in default 2023-04-14 01:55:29 +02:00
ksfi 6bc85a41e2
switch to gpt3 if gpt4 not available 2023-04-14 00:07:57 +02:00
8 zmienionych plików z 83 dodań i 9 usunięć

Wyświetl plik

@ -23,11 +23,11 @@ _warning!_ By default wolverine uses GPT-4 and may make many repeated calls to t
To run with gpt-4 (the default, tested option):
python wolverine.py examples/buggy_script.py "subtract" 20 3
python -m wolverine examples/buggy_script.py "subtract" 20 3
You can also run with other models, but be warned they may not adhere to the edit format as well:
python wolverine.py --model=gpt-3.5-turbo buggy_script.py "subtract" 20 3
python -m wolverine --model=gpt-3.5-turbo examples/buggy_script.py "subtract" 20 3
If you want to use GPT-3.5 by default instead of GPT-4 uncomment the default model line in `.env`:
@ -35,7 +35,7 @@ If you want to use GPT-3.5 by default instead of GPT-4 uncomment the default mod
You can also use flag `--confirm=True` which will ask you `yes or no` before making changes to the file. If flag is not used then it will apply the changes to the file
python wolverine.py buggy_script.py "subtract" 20 3 --confirm=True
python -m wolverine examples/buggy_script.py "subtract" 20 3 --confirm=True
## Future Plans

Wyświetl plik

@ -0,0 +1,2 @@
-r requirements.txt
pytest==7.3.1

Wyświetl plik

Wyświetl plik

@ -0,0 +1,56 @@
import os
import json
import pytest
import tempfile
from wolverine import apply_changes, json_validated_response
@pytest.fixture(scope='function')
def temp_file():
    """Yield the path of a throwaway file pre-filled with three lines.

    Created with delete=False so the path stays valid after the handle
    is closed; the file is removed explicitly once the test finishes.
    """
    handle = tempfile.NamedTemporaryFile(mode="w", delete=False)
    try:
        handle.write("first line\nsecond line\nthird line")
    finally:
        handle.close()
    yield handle.name
    # Teardown: drop the temporary file created above.
    os.remove(handle.name)
def test_apply_changes_replace(temp_file):
    """A Replace operation should overwrite line 2 in place."""
    edits = [{"operation": "Replace", "line": 2, "content": "new second line"}]
    apply_changes(temp_file, edits)
    # The replacement text must appear where the second line was.
    with open(temp_file) as handle:
        assert handle.read() == "first line\nnew second line\nthird line"
def test_apply_changes_delete(temp_file):
    """A Delete operation should remove line 3 entirely."""
    edits = [{"operation": "Delete", "line": 3, "content": ""}]
    apply_changes(temp_file, edits)
    # Only the first two lines survive, with a trailing newline.
    with open(temp_file) as handle:
        assert handle.read() == "first line\nsecond line\n"
def test_apply_changes_insert(temp_file):
    """An InsertAfter operation should add a new line after line 2."""
    edits = [{"operation": "InsertAfter", "line": 2, "content": "inserted line"}]
    apply_changes(temp_file, edits)
    # The inserted line sits between the original second and third lines.
    with open(temp_file) as handle:
        assert handle.read() == "first line\nsecond line\ninserted line\nthird line"

Wyświetl plik

@ -0,0 +1 @@
from .wolverine import apply_changes, json_validated_response

Wyświetl plik

@ -0,0 +1,6 @@
import fire
from .wolverine import main

# Entry point for `python -m wolverine`: delegate command-line parsing
# to fire, which maps argv onto main()'s parameters.
if __name__ == "__main__":
    fire.Fire(main)

Wyświetl plik

@ -1,5 +1,4 @@
import difflib
import fire
import json
import os
import shutil
@ -177,6 +176,17 @@ def apply_changes(file_path, changes: list, confirm=False):
print("Changes applied.")
def check_model_availability(model):
    """Abort the program if *model* is not available to the API key.

    Queries openai.Model.list() for the ids of accessible models and,
    when the requested model is missing, prints a hint about fallback
    options and terminates the process with a failure status.
    """
    available_models = [x['id'] for x in openai.Model.list()["data"]]
    if model not in available_models:
        print(
            f"Model {model} is not available. Perhaps try running with "
            "`--model=gpt-3.5-turbo` instead? You can also configure a "
            "default model in the .env"
        )
        # sys.exit(1) instead of the interactive-only exit() builtin:
        # exit() is injected by the site module (not guaranteed) and
        # returns status 0, wrongly signalling success; this also matches
        # the sys.exit(1) error path used by main() in this file.
        sys.exit(1)
def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=False):
if revert:
backup_file = script_name + ".bak"
@ -188,6 +198,9 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=F
print(f"No backup file found for {script_name}")
sys.exit(1)
# check if model is available
check_model_availability(model)
# Make a backup of the original script
shutil.copy(script_name, script_name + ".bak")
@ -198,10 +211,10 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=F
cprint("Script ran successfully.", "blue")
print("Output:", output)
break
else:
cprint("Script crashed. Trying to fix...", "blue")
print("Output:", output)
json_response = send_error_to_gpt(
file_path=script_name,
args=script_args,
@ -211,7 +224,3 @@ def main(script_name, *script_args, revert=False, model=DEFAULT_MODEL, confirm=F
apply_changes(script_name, json_response, confirm=confirm)
cprint("Changes applied. Rerunning...", "blue")
if __name__ == "__main__":
fire.Fire(main)