From 9f75fc91230434e3881906fd1ea7816a986a64c8 Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 18 Apr 2023 11:42:09 +0200
Subject: [PATCH 1/9] Add env variable VALIDATE_JSON_RETRY

Add env variable VALIDATE_JSON_RETRY to configure the number of retries when
validating the json object.
Add typing and comments.
---
 wolverine.py | 88 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 49 insertions(+), 39 deletions(-)

diff --git a/wolverine.py b/wolverine.py
index 8f729fe..e9d7c40 100644
--- a/wolverine.py
+++ b/wolverine.py
@@ -6,6 +6,7 @@ import shutil
 import subprocess
 import sys
 
 import openai
+from typing import List, Dict
 from termcolor import cprint
 from dotenv import load_dotenv
@@ -14,14 +15,18 @@ from dotenv import load_dotenv
 load_dotenv()
 
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
+# Default model is GPT-4
 DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
+# Nb retries for json_validated_response, default to -1, infinite
+VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))
 
-with open("prompt.txt") as f:
+# Read the system prompt
+with open(os.path.join(os.path.dirname(__file__), "prompt.txt"), 'r') as f:
     SYSTEM_PROMPT = f.read()
 
 
-def run_script(script_name, script_args):
+def run_script(script_name: str, script_args: List) -> str:
     script_args = [str(arg) for arg in script_args]
     """
     If script_name.endswith(".py") then run with python
@@ -40,49 +45,54 @@ def run_script(script_name, script_args):
     return result.decode("utf-8"), 0
 
 
-def json_validated_response(model, messages):
+def json_validated_response(model: str, messages: List[Dict], nb_retry: int = 0) -> Dict:
     """
     This function is needed because the API can return a non-json response.
-    This will run recursively until a valid json response is returned.
-    todo: might want to stop after a certain number of retries
+    This will run recursively VALIDATE_JSON_RETRY times.
+    If VALIDATE_JSON_RETRY is -1, it will run recursively until a valid json response is returned.
     """
-    response = openai.ChatCompletion.create(
-        model=model,
-        messages=messages,
-        temperature=0.5,
-    )
-    messages.append(response.choices[0].message)
-    content = response.choices[0].message.content
-    # see if json can be parsed
-    try:
-        json_start_index = content.index(
-            "["
-        )  # find the starting position of the JSON data
-        json_data = content[
-            json_start_index:
-        ]  # extract the JSON data from the response string
-        json_response = json.loads(json_data)
-    except (json.decoder.JSONDecodeError, ValueError) as e:
-        cprint(f"{e}. Re-running the query.", "red")
-        # debug
-        cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
-        # append a user message that says the json is invalid
-        messages.append(
-            {
-                "role": "user",
-                "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
-            }
+    json_response = {}
+    if VALIDATE_JSON_RETRY == -1 or nb_retry < VALIDATE_JSON_RETRY:
+        response = openai.ChatCompletion.create(
+            model=model,
+            messages=messages,
+            temperature=0.5,
+        )
+        messages.append(response.choices[0].message)
+        content = response.choices[0].message.content
+        # see if json can be parsed
+        try:
+            json_start_index = content.index(
+                "["
+            )  # find the starting position of the JSON data
+            json_data = content[
+                json_start_index:
+            ]  # extract the JSON data from the response string
+            json_response = json.loads(json_data)
+        except (json.decoder.JSONDecodeError, ValueError) as e:
+            cprint(f"{e}. Re-running the query.", "red")
+            # debug
+            cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+            # append a user message that says the json is invalid
+            messages.append(
+                {
+                    "role": "user",
+                    "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
+                }
+            )
+            # inc nb_retry
+            nb_retry+=1
+            # rerun the api call
+            return json_validated_response(model, messages, nb_retry)
+        except Exception as e:
+            cprint(f"Unknown error: {e}", "red")
+            cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+            raise e
+    # If not valid after VALIDATE_JSON_RETRY retries, return an empty object / or raise an exception and exit
     return json_response
 
 
-def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
+def send_error_to_gpt(file_path: str, args: List, error_message: str, model: str = DEFAULT_MODEL) -> Dict:
     with open(file_path, "r") as f:
         file_lines = f.readlines()
@@ -117,7 +127,7 @@ def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
     return json_validated_response(model, messages)
 
 
-def apply_changes(file_path, changes: list, confirm=False):
+def apply_changes(file_path: str, changes: List, confirm: bool = False):
     """
     Pass changes as loaded json (list of dicts)
     """

From 3f333a4df7387f5f8594d9d61eeaeda74d2f95d4 Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 18 Apr 2023 11:47:46 +0200
Subject: [PATCH 2/9] Remove handle prompt path

---
 wolverine.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wolverine.py b/wolverine.py
index e9d7c40..ce5cb1a 100644
--- a/wolverine.py
+++ b/wolverine.py
@@ -22,7 +22,7 @@ DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
 VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))
 
 # Read the system prompt
-with open(os.path.join(os.path.dirname(__file__), "prompt.txt"), 'r') as f:
+with open("prompt.txt") as f:
     SYSTEM_PROMPT = f.read()

From 5c91c4ccba205f9aaa5918cf5e3b6d7d18e076bb Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 18 Apr 2023 13:48:13 +0200
Subject: [PATCH 3/9] Decrement instead of increment, nb_retry default to
 VALIDATE_JSON_RETRY

---
 wolverine.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/wolverine.py b/wolverine.py
index ce5cb1a..d0f324b 100644
--- a/wolverine.py
+++ b/wolverine.py
@@ -45,14 +45,14 @@ def run_script(script_name: str, script_args: List) -> str:
     return result.decode("utf-8"), 0
 
 
-def json_validated_response(model: str, messages: List[Dict], nb_retry: int = 0) -> Dict:
+def json_validated_response(model: str, messages: List[Dict], nb_retry: int = VALIDATE_JSON_RETRY) -> Dict:
     """
     This function is needed because the API can return a non-json response.
     This will run recursively VALIDATE_JSON_RETRY times.
     If VALIDATE_JSON_RETRY is -1, it will run recursively until a valid json response is returned.
     """
     json_response = {}
-    if VALIDATE_JSON_RETRY == -1 or nb_retry < VALIDATE_JSON_RETRY:
+    if nb_retry != 0:
         response = openai.ChatCompletion.create(
             model=model,
             messages=messages,
@@ -80,8 +80,8 @@ def json_validated_response(model: str, messages: List[Dict], nb_retry: int = 0)
                     "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
                 }
             )
-            # inc nb_retry
-            nb_retry+=1
+            # dec nb_retry
+            nb_retry-=1
             # rerun the api call
             return json_validated_response(model, messages, nb_retry)
         except Exception as e:

From 5eba2d9e1b47a1e18660d8ce482d3e67af256118 Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 18 Apr 2023 11:42:09 +0200
Subject: [PATCH 4/9] Add env variable VALIDATE_JSON_RETRY

Add env variable VALIDATE_JSON_RETRY to configure the number of retries when
validating the json object.
Add typing and comments.
---
 wolverine/wolverine.py | 91 +++++++++++++++++++++++++++++-------------------
 1 file changed, 49 insertions(+), 42 deletions(-)

diff --git a/wolverine/wolverine.py b/wolverine/wolverine.py
index 527f0f2..e4b1688 100644
--- a/wolverine/wolverine.py
+++ b/wolverine/wolverine.py
@@ -5,6 +5,7 @@ import shutil
 import subprocess
 import sys
 
 import openai
+from typing import List, Dict
 from termcolor import cprint
 from dotenv import load_dotenv
@@ -13,14 +14,18 @@ from dotenv import load_dotenv
 load_dotenv()
 
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
+# Default model is GPT-4
 DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
+# Nb retries for json_validated_response, default to -1, infinite
+VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))
 
-with open("prompt.txt") as f:
+# Read the system prompt
+with open(os.path.join(os.path.dirname(__file__), "prompt.txt"), 'r') as f:
     SYSTEM_PROMPT = f.read()
 
 
-def run_script(script_name, script_args):
+def run_script(script_name: str, script_args: List) -> str:
     script_args = [str(arg) for arg in script_args]
     """
     If script_name.endswith(".py") then run with python
@@ -39,52 +44,54 @@ def run_script(script_name, script_args):
     return result.decode("utf-8"), 0
 
 
-def json_validated_response(model, messages):
+def json_validated_response(model: str, messages: List[Dict], nb_retry: int = 0) -> Dict:
     """
     This function is needed because the API can return a non-json response.
-    This will run recursively until a valid json response is returned.
-    todo: might want to stop after a certain number of retries
+    This will run recursively VALIDATE_JSON_RETRY times.
+    If VALIDATE_JSON_RETRY is -1, it will run recursively until a valid json response is returned.
     """
-    response = openai.ChatCompletion.create(
-        model=model,
-        messages=messages,
-        temperature=0.5,
-    )
-    messages.append(response.choices[0].message)
-    content = response.choices[0].message.content
-    # see if json can be parsed
-    try:
-        json_start_index = content.index(
-            "["
-        )  # find the starting position of the JSON data
-        json_data = content[
-            json_start_index:
-        ]  # extract the JSON data from the response string
-        json_response = json.loads(json_data)
-    except (json.decoder.JSONDecodeError, ValueError) as e:
-        cprint(f"{e}. Re-running the query.", "red")
-        # debug
-        cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
-        # append a user message that says the json is invalid
-        messages.append(
-            {
-                "role": "user",
-                "content": (
-                    "Your response could not be parsed by json.loads. "
-                    "Please restate your last message as pure JSON."
-                ),
-            }
+    json_response = {}
+    if VALIDATE_JSON_RETRY == -1 or nb_retry < VALIDATE_JSON_RETRY:
+        response = openai.ChatCompletion.create(
+            model=model,
+            messages=messages,
+            temperature=0.5,
+        )
+        messages.append(response.choices[0].message)
+        content = response.choices[0].message.content
+        # see if json can be parsed
+        try:
+            json_start_index = content.index(
+                "["
+            )  # find the starting position of the JSON data
+            json_data = content[
+                json_start_index:
+            ]  # extract the JSON data from the response string
+            json_response = json.loads(json_data)
+        except (json.decoder.JSONDecodeError, ValueError) as e:
+            cprint(f"{e}. Re-running the query.", "red")
+            # debug
+            cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+            # append a user message that says the json is invalid
+            messages.append(
+                {
+                    "role": "user",
+                    "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
+                }
+            )
+            # inc nb_retry
+            nb_retry+=1
+            # rerun the api call
+            return json_validated_response(model, messages, nb_retry)
+        except Exception as e:
+            cprint(f"Unknown error: {e}", "red")
+            cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
+            raise e
+    # If not valid after VALIDATE_JSON_RETRY retries, return an empty object / or raise an exception and exit
     return json_response
 
 
-def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
+def send_error_to_gpt(file_path: str, args: List, error_message: str, model: str = DEFAULT_MODEL) -> Dict:
     with open(file_path, "r") as f:
         file_lines = f.readlines()
@@ -119,7 +126,7 @@ def send_error_to_gpt(file_path, args, error_message, model=DEFAULT_MODEL):
     return json_validated_response(model, messages)
 
 
-def apply_changes(file_path, changes: list, confirm=False):
+def apply_changes(file_path: str, changes: List, confirm: bool = False):
     """
     Pass changes as loaded json (list of dicts)
     """

From 7447a151c07f9ef63e60f55421c5e090a7be779a Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 18 Apr 2023 11:47:46 +0200
Subject: [PATCH 5/9] Remove handle prompt path

---
 wolverine/wolverine.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wolverine/wolverine.py b/wolverine/wolverine.py
index e4b1688..43f782b 100644
--- a/wolverine/wolverine.py
+++ b/wolverine/wolverine.py
@@ -21,7 +21,7 @@ DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
 VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))
 
 # Read the system prompt
-with open(os.path.join(os.path.dirname(__file__), "prompt.txt"), 'r') as f:
+with open("prompt.txt") as f:
     SYSTEM_PROMPT = f.read()

From 0730ad8c9b178a2038afb347e0be9b9eafe6dda0 Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 18 Apr 2023 13:48:13 +0200
Subject: [PATCH 6/9] Decrement instead of increment, nb_retry default to
 VALIDATE_JSON_RETRY

---
 wolverine/wolverine.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/wolverine/wolverine.py b/wolverine/wolverine.py
index 43f782b..8e21c22 100644
--- a/wolverine/wolverine.py
+++ b/wolverine/wolverine.py
@@ -44,14 +44,14 @@ def run_script(script_name: str, script_args: List) -> str:
     return result.decode("utf-8"), 0
 
 
-def json_validated_response(model: str, messages: List[Dict], nb_retry: int = 0) -> Dict:
+def json_validated_response(model: str, messages: List[Dict], nb_retry: int = VALIDATE_JSON_RETRY) -> Dict:
     """
     This function is needed because the API can return a non-json response.
     This will run recursively VALIDATE_JSON_RETRY times.
     If VALIDATE_JSON_RETRY is -1, it will run recursively until a valid json response is returned.
     """
     json_response = {}
-    if VALIDATE_JSON_RETRY == -1 or nb_retry < VALIDATE_JSON_RETRY:
+    if nb_retry != 0:
         response = openai.ChatCompletion.create(
             model=model,
             messages=messages,
             temperature=0.5,
@@ -79,8 +79,8 @@ def json_validated_response(model: str, messages: List[Dict], nb_retry: int = 0)
                     "content": "Your response could not be parsed by json.loads. Please restate your last message as pure JSON.",
                 }
             )
-            # inc nb_retry
-            nb_retry+=1
+            # dec nb_retry
+            nb_retry-=1
             # rerun the api call
             return json_validated_response(model, messages, nb_retry)
         except Exception as e:

From 489d593156652ac3d6475176744ce2b820af3d23 Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 25 Apr 2023 22:38:09 +0200
Subject: [PATCH 7/9] Rebase, add test, add last line in prompt.txt to be sure
 to have only a json object in response.

---
 prompt.txt                        |  2 ++
 tests/conftest.py                 | 31 +++++++++++++++++
 tests/test_files/cc_resp.txt      |  8 +++++
 tests/test_files/cc_resp_fail.txt |  7 ++++
 tests/test_wolverine.py           | 57 ++++++++++++++++++++++++-------
 wolverine/wolverine.py            |  4 +--
 6 files changed, 95 insertions(+), 14 deletions(-)
 create mode 100644 tests/conftest.py
 create mode 100644 tests/test_files/cc_resp.txt
 create mode 100644 tests/test_files/cc_resp_fail.txt

diff --git a/prompt.txt b/prompt.txt
index ec0582a..8b4af94 100644
--- a/prompt.txt
+++ b/prompt.txt
@@ -14,3 +14,5 @@ example response:
   {"operation": "Replace", "line": 18, "content": "    x += 1"},
   {"operation": "Delete", "line": 20, "content": ""}
 ]
+
+From now, your response must be only the json object, no talking, no comments.

diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..1c93f2d
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,31 @@
+"""
+Conftest
+"""
+import os
+import pytest
+import tempfile
+
+
+TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
+
+
+@pytest.fixture(scope='function')
+def temp_file():
+    # Create a temporary file
+    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
+        f.write("first line\nsecond line\nthird line")
+        file_path = f.name
+    yield file_path
+    # Clean up the temporary file
+    os.remove(file_path)
+
+
+def mock_open_ai_response_object(mocker, content: str):
+    """
+    Mocks the response object from the openai api.
+    """
+    mock_generator_object = mocker.MagicMock()
+    mock_message_object = mocker.MagicMock()
+    mock_message_object.configure_mock(**{"message.content": content})
+    mock_generator_object.configure_mock(**{"choices": [mock_message_object]})
+    return mock_generator_object
\ No newline at end of file

diff --git a/tests/test_files/cc_resp.txt b/tests/test_files/cc_resp.txt
new file mode 100644
index 0000000..83e3155
--- /dev/null
+++ b/tests/test_files/cc_resp.txt
@@ -0,0 +1,8 @@
+Explanation: The function `subtract_numbers` is never defined in the script, causing a `NameError` when it is called in the `calculate` function.
+
+[
+    {"explanation": "The 'subtract_numbers' function is never defined in the script."},
+    {"operation": "InsertAfter", "line": 12, "content": "\n# Define subtract_numbers function\ndef subtract_numbers(a, b):\n    return a - b\n"},
+    {"operation": "Replace", "line": 18, "content": "    if operation == \"add\":\n        result = add_numbers(num1, num2)\n    elif operation == \"subtract\":\n        result = subtract_numbers(num1, num2)\n    elif operation == \"multiply\":\n        result = multiply_numbers(num1, num2)\n    elif operation == \"divide\":\n        result = divide_numbers(num1, num2)\n    else:\n        print(\"Invalid operation\")\n"},
+    {"operation": "Replace", "line": 30, "content": "    return result\n"}
+]
\ No newline at end of file

diff --git a/tests/test_files/cc_resp_fail.txt b/tests/test_files/cc_resp_fail.txt
new file mode 100644
index 0000000..452a594
--- /dev/null
+++ b/tests/test_files/cc_resp_fail.txt
@@ -0,0 +1,7 @@
+Explanation: The function `subtract_numbers` is never defined in the script, causing a `NameError` when it is called in the `calculate` function.
+
+[
+    {"explanation": "The 'subtract_numbers' function is never defined in the script."},
+    {"operation": "InsertAfter", "line": 12, "content": "\n# Define subtract_numbers function\ndef subtract_numbers(a, b):\n    return a - b\n"},
+    {"operation": "Replace", "line": 18, "content": "    if operation == \"add\":\n        result = add_numbers(num1, num2)\n    elif operation == \"subtract\":\n        result = subtract_numbers(num1, num2)\n    elif operation == \"multiply\":\n        result = multiply_numbers(num1, num2)\n    elif operation == \"divide\":\n        result = divide_numbers(num1, num2)\n    else:\n        print(\"Invalid operation\")\n"},
+    {"operation": "Replace", "line": 30, "content": "    return result\n"}

diff --git a/tests/test_wolverine.py b/tests/test_wolverine.py
index 1f82d76..e2d4415 100644
--- a/tests/test_wolverine.py
+++ b/tests/test_wolverine.py
@@ -1,19 +1,15 @@
 import os
-import json
 import pytest
-import tempfile
 
-from wolverine import apply_changes, json_validated_response
+from wolverine import (
+    apply_changes,
+    json_validated_response,
+)
+from .conftest import (
+    mock_open_ai_response_object,
+    TEST_FILES_DIR
+)
 
-@pytest.fixture(scope='function')
-def temp_file():
-    # Create a temporary file
-    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
-        f.write("first line\nsecond line\nthird line")
-        file_path = f.name
-    yield file_path
-    # Clean up the temporary file
-    os.remove(file_path)
 
 
 def test_apply_changes_replace(temp_file):
@@ -54,3 +50,40 @@ def test_apply_changes_insert(temp_file):
         content = f.read()
 
     assert content == 'first line\nsecond line\ninserted line\nthird line'
+
+@pytest.mark.parametrize("chat_completion_response, nb_retry, fail", [
+    (os.path.join(TEST_FILES_DIR, "cc_resp.txt"), 3, False),
+    (os.path.join(TEST_FILES_DIR, "cc_resp_fail.txt"), 3, True),
+    (os.path.join(TEST_FILES_DIR, "cc_resp_fail.txt"), 10, True),
+])
+def test_json_validated_response(mocker, chat_completion_response, nb_retry, fail):
+    # Open the test file
+    with open(chat_completion_response, 'r') as file:
+        response = file.read()
+    # Mock the openAi chat completion API call
+    mocker.patch(
+        "openai.ChatCompletion.create",
+        return_value=mock_open_ai_response_object(mocker=mocker, content=response))
+    # ChatCompletion returned an invalid response
+    if fail:
+        with pytest.raises(Exception) as err:
+            json_response = json_validated_response("gpt-4", [
+                {
+                    "role": "user",
+                    "content": "prompt"
+                }
+            ],
+            nb_retry=nb_retry
+            )
+        # Check that the exception is raised after nb_retry times
+        assert err.value == f"No valid json response found after 3 tries. Exiting."
+    else:
+        json_response = json_validated_response("gpt-4", [
+            {
+                "role": "user",
+                "content": "prompt"
+            }
+        ],
+        nb_retry=nb_retry
+        )
+        assert json_response
\ No newline at end of file

diff --git a/wolverine/wolverine.py b/wolverine/wolverine.py
index 8e21c22..30f3960 100644
--- a/wolverine/wolverine.py
+++ b/wolverine/wolverine.py
@@ -68,6 +68,7 @@ def json_validated_response(model: str, messages: List[Dict], nb_retry: int = VA
                 json_start_index:
             ]  # extract the JSON data from the response string
             json_response = json.loads(json_data)
+            return json_response
         except (json.decoder.JSONDecodeError, ValueError) as e:
             cprint(f"{e}. Re-running the query.", "red")
             # debug
@@ -87,8 +88,7 @@ def json_validated_response(model: str, messages: List[Dict], nb_retry: int = VA
             cprint(f"Unknown error: {e}", "red")
             cprint(f"\nGPT RESPONSE:\n\n{content}\n\n", "yellow")
             raise e
-    # If not valid after VALIDATE_JSON_RETRY retries, return an empty object / or raise an exception and exit
-    return json_response
+    raise Exception(f"No valid json response found after {VALIDATE_JSON_RETRY} tries. Exiting.")

From bcd1db9f7bc3b0edcd6a3b1503f42c07c1dad03a Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 18 Apr 2023 11:42:09 +0200
Subject: [PATCH 8/9] Add env variable VALIDATE_JSON_RETRY

Add env variable VALIDATE_JSON_RETRY to configure the number of retries when
validating the json object.
Add typing and comments.
---
 wolverine/wolverine.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wolverine/wolverine.py b/wolverine/wolverine.py
index 30f3960..43a564a 100644
--- a/wolverine/wolverine.py
+++ b/wolverine/wolverine.py
@@ -21,7 +21,7 @@ DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4")
 VALIDATE_JSON_RETRY = int(os.getenv("VALIDATE_JSON_RETRY", -1))
 
 # Read the system prompt
-with open("prompt.txt") as f:
+with open(os.path.join(os.path.dirname(__file__), "..", "prompt.txt"), 'r') as f:
     SYSTEM_PROMPT = f.read()

From 3cb64c623a430073af2ea4f4050579389b573448 Mon Sep 17 00:00:00 2001
From: nervousapps
Date: Tue, 25 Apr 2023 23:15:08 +0200
Subject: [PATCH 9/9] Add env variables to README

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 66c9f0b..68a3b40 100644
--- a/README.md
+++ b/README.md
@@ -37,6 +37,13 @@ You can also use flag `--confirm=True` which will ask you `yes or no` before mak
 
     python -m wolverine examples/buggy_script.py "subtract" 20 3 --confirm=True
 
+## :label: Environment variables
+| env name                       | description     | default value      |
+| -------------------------------| ----------------| -------------------|
+| OPENAI_API_KEY                 | OpenAI API key  | None               |
+| DEFAULT_MODEL                  | GPT model to use | "gpt-4"           |
+| VALIDATE_JSON_RETRY            | Number of retries when requesting OpenAI API (-1 means unlimited) | -1 |
+
 ## Future Plans
 This is just a quick prototype I threw together in a few hours. There are many
 possible extensions and contributions are welcome: