diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 6deafc2..0000000
--- a/.flake8
+++ /dev/null
@@ -1,2 +0,0 @@
-[flake8]
-max-line-length = 120
diff --git a/README.md b/README.md
index 68a3b40..b9478de 100644
--- a/README.md
+++ b/README.md
@@ -37,12 +37,13 @@ You can also use flag `--confirm=True` which will ask you `yes or no` before mak
 
 python -m wolverine examples/buggy_script.py "subtract" 20 3 --confirm=True
 
-## :label: Environement variables
-| env name | description | default value |
-| -------------------------------| ----------------| -------------------|
-| OPENAI_API_KEY | OpenAI API key | None |
-| DEFAULT_MODEL | GPT model to use | "gpt-4" |
-| VALIDATE_JSON_RETRY | Number of retries when requesting OpenAI API (-1 means unlimites) | -1 |
+## Environment variables
+
+| env name            | description                                                        | default value |
+| ------------------- | ------------------------------------------------------------------ | ------------- |
+| OPENAI_API_KEY      | OpenAI API key                                                      | None          |
+| DEFAULT_MODEL       | GPT model to use                                                    | "gpt-4"       |
+| VALIDATE_JSON_RETRY | Number of retries when requesting OpenAI API (-1 means unlimited)   | -1            |
 
 ## Future Plans
 
diff --git a/tests/test_wolverine.py b/tests/test_wolverine.py
index 84ecbe4..24b35a8 100644
--- a/tests/test_wolverine.py
+++ b/tests/test_wolverine.py
@@ -1,6 +1,5 @@
 import os
 import pytest
-import tempfile
 
 from wolverine import apply_changes, json_validated_response
 from .conftest import (
@@ -73,7 +72,7 @@ def test_json_validated_response(mocker, chat_completion_response, nb_retry, fai
                 nb_retry=nb_retry
             )
         # Check that the exception is raised after nb_retry time
-        assert err.value == f"No valid json response found after 3 tries. Exiting."
+        assert err.value == "No valid json response found after 3 tries. Exiting."
     else:
         json_response = json_validated_response("gpt-4", [
             {
diff --git a/wolverine/wolverine.py b/wolverine/wolverine.py
index 664734d..6c75fad 100644
--- a/wolverine/wolverine.py
+++ b/wolverine/wolverine.py
@@ -10,7 +10,6 @@ import openai
 from typing import List, Dict
 from termcolor import cprint
 from dotenv import load_dotenv
-from termcolor import cprint
 
 # Set up the OpenAI API
 load_dotenv()
@@ -23,7 +22,7 @@ DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
 VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))
 
 # Read the system prompt
-with open(os.path.join(os.path.dirname(__file__), "..", "prompt.txt"), 'r') as f:
+with open(os.path.join(os.path.dirname(__file__), "..", "prompt.txt"), "r") as f:
     SYSTEM_PROMPT = f.read()
 
 
@@ -40,20 +39,20 @@ def run_script(script_name: str, script_args: List) -> str:
     )
 
     try:
-        result = subprocess.check_output(
-            subprocess_args,
-            stderr=subprocess.STDOUT
-        )
+        result = subprocess.check_output(subprocess_args, stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as error:
         return error.output.decode("utf-8"), error.returncode
     return result.decode("utf-8"), 0
 
 
-def json_validated_response(model: str, messages: List[Dict], nb_retry: int = VALIDATE_JSON_RETRY) -> Dict:
+def json_validated_response(
+    model: str, messages: List[Dict], nb_retry: int = VALIDATE_JSON_RETRY
+) -> Dict:
     """
     This function is needed because the API can return a non-json response.
     This will run recursively VALIDATE_JSON_RETRY times.
-    If VALIDATE_JSON_RETRY is -1, it will run recursively until a valid json response is returned.
+    If VALIDATE_JSON_RETRY is -1, it will run recursively until a valid json
+    response is returned.
""" json_response = {} if nb_retry != 0: @@ -82,21 +81,28 @@ def json_validated_response(model: str, messages: List[Dict], nb_retry: int = VA messages.append( { "role": "user", - "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.", + "content": ( + "Your response could not be parsed by json.loads. " + "Please restate your last message as pure JSON." + ), } ) # dec nb_retry - nb_retry-=1 + nb_retry -= 1 # rerun the api call return json_validated_response(model, messages, nb_retry) except Exception as e: cprint(f"Unknown error: {e}", "red") cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow") raise e - raise Exception(f"No valid json response found after {VALIDATE_JSON_RETRY} tries. Exiting.") + raise Exception( + f"No valid json response found after {VALIDATE_JSON_RETRY} tries. Exiting." + ) -def send_error_to_gpt(file_path: str, args: List, error_message: str, model: str = DEFAULT_MODEL) -> Dict: +def send_error_to_gpt( + file_path: str, args: List, error_message: str, model: str = DEFAULT_MODEL +) -> Dict: with open(file_path, "r") as f: file_lines = f.readlines() @@ -189,7 +195,7 @@ def apply_changes(file_path: str, changes: List, confirm: bool = False): def check_model_availability(model): - available_models = [x['id'] for x in openai.Model.list()["data"]] + available_models = [x["id"] for x in openai.Model.list()["data"]] if model not in available_models: print( f"Model {model} is not available. Perhaps try running with "