Added OpenAI's detector and all the test run reports along with a ROC diagram

Signed-off-by: Jacob Torrey <jacob@thinkst.com>
pull/6/head
Jacob Torrey 2023-05-23 21:01:36 -06:00
parent 8af95c8296
commit 5fe84f9aed
8 changed files with 1450 additions and 44 deletions

BIN  ai_detect_roc.png 100644

Binary file not shown. (New image, 58 KiB.)

340  lzma-report.xml 100644

File diff suppressed because one or more lines are too long

547  openai-report.xml 100644

File diff suppressed because one or more lines are too long

View file

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
-import os, requests
+import os, requests, re
 from typing import Optional, Dict, Tuple
 
 MODEL_NAME = 'model-detect-v2'
@@ -28,19 +28,50 @@ def make_req(text : str) -> Optional[Dict]:
         'logprobs': 5
     }
     res = requests.post(API_URL, headers=headers, json=data)
-    return res.json().get('choices', [None])[0]
+    #print(str(res.status_code) + ' ' + res.text)
+    res = res.json().get('choices', [None])[0]
+    if res is None:
+        return None
+    if res.get('text') == '"':
+        return ('AI', abs(res.get('logprobs').get('token_logprobs')[0]))
+    elif res.get('text') == '!':
+        return ('Human', abs(res.get('logprobs').get('token_logprobs')[0]))
+    return None #res.get('text')
 
-def run_on_file(fn : str) -> Optional[Tuple[str, float]]:
+def run_on_file_chunked(fn : str, chunk_size : int = 4096) -> Optional[Tuple[str, float]]:
     with open(fn, 'r') as fp:
         contents = fp.read()
-    res = make_req(contents)
-    if res is None:
-        print("Unable to classify!")
-        return None
-    else:
-        #print(res)
-        if res.get('text') == '"':
-            return ('AI', abs(res.get('logprobs').get('token_logprobs')[0]))
-        elif res.get('text') == '!':
-            return ('Human', abs(res.get('logprobs').get('token_logprobs')[0]))
-        return None #res.get('text')
+    return run_on_text_chunked(contents)
+
+def run_on_text_chunked(contents : str, chunk_size : int = 4096) -> Optional[Tuple[str, float]]:
+    # Remove extra spaces and duplicate newlines.
+    contents = re.sub(' +', ' ', contents)
+    contents = re.sub('\t', '', contents)
+    contents = re.sub('\n+', '\n', contents)
+    contents = re.sub('\n ', '\n', contents)
+
+    start = 0
+    end = 0
+    chunks = []
+    while start + chunk_size < len(contents) and end != -1:
+        end = contents.rfind(' ', start, start + chunk_size + 1)
+        chunks.append(contents[start:end])
+        start = end + 1
+    chunks.append(contents[start:])
+    scores = []
+    for c in chunks:
+        scores.append(make_req(c))
+    ssum : float = 0.0
+    for s in scores:
+        if s is None:
+            continue
+        if s[0] == 'AI':
+            ssum -= s[1]
+        else:
+            ssum += s[1]
+    sa : float = ssum / len(scores)
+    if sa < 0:
+        return ('AI', abs(sa))
+    else:
+        return ('Human', abs(sa))
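Note on the chunker added above: each boundary snaps back to the last space within a chunk_size window, so no word is split across API calls, and the per-chunk votes are then summed with 'AI' scores negative and 'Human' scores positive, letting the sign of the mean decide the final label. A minimal standalone sketch of just the splitting loop, under a hypothetical helper name that is not part of this commit:

def split_chunks(contents: str, chunk_size: int = 4096) -> list:
    # Mirrors the loop in run_on_text_chunked: each boundary snaps back
    # to the last space inside the current chunk_size window.
    start, end, chunks = 0, 0, []
    while start + chunk_size < len(contents) and end != -1:
        end = contents.rfind(' ', start, start + chunk_size + 1)
        chunks.append(contents[start:end])
        start = end + 1
    chunks.append(contents[start:])
    return chunks

print(split_chunks('one two three four five six seven', chunk_size=10))
# -> ['one two', 'three four', 'five six', 'seven']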

View file

@@ -9,7 +9,7 @@ from itertools import chain
 from math import sqrt
 from junitparser import JUnitXml
 
-MODELS = ['lzma', 'roberta', 'gptzero']
+MODELS = ['lzma', 'roberta', 'gptzero', 'openai']
 
 plt.figure()
@@ -53,8 +53,9 @@ for model in MODELS:
     # Plot the ROC curve
     plt.plot(fpr, tpr, lw=2, label=model.capitalize() + ': ROC curve (AUC = %0.2f)' % roc_auc)
-    plt.scatter(fpr[ix], tpr[ix], marker='o', color='black', label='Best @ threshold = %0.2f' % thresholds[ix])
-    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label="Random classifier")
+    plt.scatter(fpr[ix], tpr[ix], marker='o', color='black')#, label=model.capitalize() + ': Best @ threshold = %0.2f' % thresholds[ix])
+
+plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label="Random classifier")
 
 plt.xlim([0.0, 1.0])
 plt.ylim([0.0, 1.05])
 plt.xlabel('False Positive Rate')
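The scores feeding these ROC curves presumably come from the per-test "score" properties recorded into the junit reports by the test changes below. A hedged sketch of pulling those scores out of a report, using the standard library rather than junitparser, with a hypothetical helper name:

import xml.etree.ElementTree as ET

def scores_from_report(path: str) -> list:
    # Collect every per-test "score" property from a pytest junit XML report.
    scores = []
    for case in ET.parse(path).getroot().iter('testcase'):
        for prop in case.iter('property'):
            if prop.get('name') == 'score':
                scores.append(float(prop.get('value')))
    return scores

# e.g. scores_from_report('openai-report.xml')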

394  roberta-report.xml 100644

File diff suppressed because one or more lines are too long

View file

@@ -1,26 +1,110 @@
 #!/usr/bin/env python3
-import pytest, os
+import pytest, os, jsonlines
 from warnings import warn
-from openai_detect import run_on_file
+from openai_detect import run_on_file_chunked, run_on_text_chunked
+
+MIN_LEN = 1000
+NUM_JSONL_SAMPLES = 50
 
 AI_SAMPLE_DIR = 'samples/llm-generated/'
 HUMAN_SAMPLE_DIR = 'samples/human-generated/'
 
-ai_files = os.listdir(AI_SAMPLE_DIR)
-ai_files = filter(lambda f: os.path.getsize(AI_SAMPLE_DIR + f) >= 1000, ai_files)
-human_files = os.listdir(HUMAN_SAMPLE_DIR)
-human_files = filter(lambda f: os.path.getsize(HUMAN_SAMPLE_DIR + f) >= 1000, human_files)
+ai_files = [f for f in os.listdir(AI_SAMPLE_DIR) if os.path.getsize(AI_SAMPLE_DIR + f) >= MIN_LEN]
+human_files = [f for f in os.listdir(HUMAN_SAMPLE_DIR) if os.path.getsize(HUMAN_SAMPLE_DIR + f) >= MIN_LEN]
 
-def test_training_file():
-    assert run_on_file('ai-generated.txt')[0] == 'AI', 'The training corpus should always be detected as AI-generated... since it is'
+CONFIDENCE_THRESHOLD : float = 0.00 # What confidence to treat as error vs warning
+
+def test_training_file(record_property):
+    (classification, score) = run_on_file_chunked('ai-generated.txt')
+    record_property("score", str(score))
+    assert classification == 'AI', 'The training corpus should always be detected as AI-generated... since it is (score: ' + str(round(score, 8)) + ')'
 
 @pytest.mark.parametrize('f', human_files)
-def test_human_samples(f):
-    (classification, score) = run_on_file(HUMAN_SAMPLE_DIR + f)
-    assert classification == 'Human', f + ' is a human-generated file, misclassified as AI-generated with confidence ' + str(round(score, 8))
+def test_human_samples(f, record_property):
+    res = run_on_file_chunked(HUMAN_SAMPLE_DIR + f)
+    if res is None:
+        pytest.skip('Unable to classify')
+    (classification, score) = res
+    record_property("score", str(score))
+    if score > CONFIDENCE_THRESHOLD:
+        assert classification == 'Human', f + ' is a human-generated file, misclassified as AI-generated with confidence ' + str(round(score, 8))
+    else:
+        if classification != 'Human':
+            warn("Misclassified " + f + " with score of: " + str(round(score, 8)))
+        else:
+            warn("Unable to confidently classify: " + f)
 
 @pytest.mark.parametrize('f', ai_files)
-def test_llm_sample(f):
-    (classification, score) = run_on_file(AI_SAMPLE_DIR + f)
-    assert classification == 'AI', f + ' is an LLM-generated file, misclassified as human-generated with confidence ' + str(round(score, 8))
+def test_llm_sample(f, record_property):
+    res = run_on_file_chunked(AI_SAMPLE_DIR + f)
+    if res is None:
+        pytest.skip('Unable to classify')
+    (classification, score) = res
+    record_property("score", str(score))
+    if score > CONFIDENCE_THRESHOLD:
+        assert classification == 'AI', f + ' is an LLM-generated file, misclassified as human-generated with confidence ' + str(round(score, 8))
+    else:
+        if classification != 'AI':
+            warn("Misclassified " + f + " with score of: " + str(round(score, 8)))
+        else:
+            warn("Unable to confidently classify: " + f)
+
+HUMAN_JSONL_FILE = 'samples/webtext.test.jsonl'
+human_samples = []
+with jsonlines.open(HUMAN_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('text')) >= MIN_LEN:
+            human_samples.append(obj)
+
+@pytest.mark.parametrize('i', human_samples[0:NUM_JSONL_SAMPLES])
+def test_human_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('text', ''))
+    record_property("score", str(score))
+    assert classification == 'Human', HUMAN_JSONL_FILE + ':' + str(i.get('id')) + ' (len: ' + str(i.get('length', -1)) + ') is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))
+
+AI_JSONL_FILE = 'samples/xl-1542M.test.jsonl'
+ai_samples = []
+with jsonlines.open(AI_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('text')) >= MIN_LEN:
+            ai_samples.append(obj)
+
+@pytest.mark.parametrize('i', ai_samples[0:NUM_JSONL_SAMPLES])
+def test_llm_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('text', ''))
+    record_property("score", str(score))
+    assert classification == 'AI', AI_JSONL_FILE + ':' + str(i.get('id')) + ' (text: ' + i.get('text', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
+
+GPT3_JSONL_FILE = 'samples/GPT-3-175b_samples.jsonl'
+gpt3_samples = []
+with jsonlines.open(GPT3_JSONL_FILE) as reader:
+    for o in reader:
+        for l in o.split('<|endoftext|>'):
+            if len(l) >= MIN_LEN:
+                gpt3_samples.append(l)
+
+@pytest.mark.parametrize('i', gpt3_samples[0:NUM_JSONL_SAMPLES])
+def test_gpt3_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i)
+    record_property("score", str(score))
+    assert classification == 'AI', GPT3_JSONL_FILE + ' is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
+
+NEWS_JSONL_FILE = 'samples/news.jsonl'
+news_samples = []
+with jsonlines.open(NEWS_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('human')) >= MIN_LEN:
+            news_samples.append(obj)
+
+@pytest.mark.parametrize('i', news_samples[0:NUM_JSONL_SAMPLES])
+def test_humannews_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('human', ''))
+    record_property("score", str(score))
+    assert classification == 'Human', NEWS_JSONL_FILE + ' is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))
+
+@pytest.mark.parametrize('i', news_samples[0:NUM_JSONL_SAMPLES])
+def test_chatgptnews_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('chatgpt', ''))
+    record_property("score", str(score))
+    assert classification == 'AI', NEWS_JSONL_FILE + ' is an AI-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
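The openai-report.xml added in this commit was presumably produced by running this suite with pytest's junit output enabled, along the lines of the command below; the test module's filename is not shown in this view, so it is assumed here:

pytest openai_detect_test.py --junitxml=openai-report.xml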

View file

@@ -8,19 +8,22 @@ AI_SAMPLE_DIR = 'samples/llm-generated/'
 HUMAN_SAMPLE_DIR = 'samples/human-generated/'
 
 MIN_LEN = 150
+NUM_JSONL_SAMPLES = 50
 
 ai_files = os.listdir(AI_SAMPLE_DIR)
 human_files = os.listdir(HUMAN_SAMPLE_DIR)
 
 CONFIDENCE_THRESHOLD : float = 0.00 # What confidence to treat as error vs warning
 
-def test_training_file():
+def test_training_file(record_property):
     (classification, score) = run_on_file_chunked('ai-generated.txt')
+    record_property("score", str(score))
     assert classification == 'AI', 'The training corpus should always be detected as AI-generated... since it is (score: ' + str(round(score, 8)) + ')'
 
 @pytest.mark.parametrize('f', human_files)
-def test_human_samples(f):
+def test_human_samples(f, record_property):
     (classification, score) = run_on_file_chunked(HUMAN_SAMPLE_DIR + f)
+    record_property("score", str(score))
     if score > CONFIDENCE_THRESHOLD:
         assert classification == 'Human', f + ' is a human-generated file, misclassified as AI-generated with confidence ' + str(round(score, 8))
     else:
@@ -30,8 +33,9 @@ def test_human_samples(f):
         warn("Unable to confidently classify: " + f)
 
 @pytest.mark.parametrize('f', ai_files)
-def test_llm_sample(f):
+def test_llm_sample(f, record_property):
     (classification, score) = run_on_file_chunked(AI_SAMPLE_DIR + f)
+    record_property("score", str(score))
     if score > CONFIDENCE_THRESHOLD:
         assert classification == 'AI', f + ' is an LLM-generated file, misclassified as human-generated with confidence ' + str(round(score, 8))
     else:
@@ -46,9 +50,10 @@ with jsonlines.open(HUMAN_JSONL_FILE) as reader:
     for obj in reader:
         human_samples.append(obj)
 
-@pytest.mark.parametrize('i', human_samples[0:250])
-def test_human_jsonl(i):
+@pytest.mark.parametrize('i', human_samples[0:NUM_JSONL_SAMPLES])
+def test_human_jsonl(i, record_property):
     (classification, score) = run_on_text_chunked(i.get('text', ''))
+    record_property("score", str(score))
     assert classification == 'Human', HUMAN_JSONL_FILE + ':' + str(i.get('id')) + ' (len: ' + str(i.get('length', -1)) + ') is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))
 
 AI_JSONL_FILE = 'samples/xl-1542M.test.jsonl'
@@ -57,9 +62,10 @@ with jsonlines.open(AI_JSONL_FILE) as reader:
     for obj in reader:
         ai_samples.append(obj)
 
-@pytest.mark.parametrize('i', ai_samples[0:250])
-def test_llm_jsonl(i):
+@pytest.mark.parametrize('i', ai_samples[0:NUM_JSONL_SAMPLES])
+def test_llm_jsonl(i, record_property):
     (classification, score) = run_on_text_chunked(i.get('text', ''))
+    record_property("score", str(score))
     assert classification == 'AI', AI_JSONL_FILE + ':' + str(i.get('id')) + ' (text: ' + i.get('text', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
 
 GPT3_JSONL_FILE = 'samples/GPT-3-175b_samples.jsonl'
@@ -70,9 +76,10 @@ with jsonlines.open(GPT3_JSONL_FILE) as reader:
         if len(l) >= MIN_LEN:
             gpt3_samples.append(l)
 
-@pytest.mark.parametrize('i', gpt3_samples)
-def test_gpt3_jsonl(i):
+@pytest.mark.parametrize('i', gpt3_samples[0:NUM_JSONL_SAMPLES])
+def test_gpt3_jsonl(i, record_property):
     (classification, score) = run_on_text_chunked(i)
+    record_property("score", str(score))
     assert classification == 'AI', GPT3_JSONL_FILE + ' is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
 
 NEWS_JSONL_FILE = 'samples/news.jsonl'
@@ -81,12 +88,14 @@ with jsonlines.open(NEWS_JSONL_FILE) as reader:
     for obj in reader:
         news_samples.append(obj)
 
-@pytest.mark.parametrize('i', news_samples[0:250])
-def test_humannews_jsonl(i):
+@pytest.mark.parametrize('i', news_samples[0:NUM_JSONL_SAMPLES])
+def test_humannews_jsonl(i, record_property):
     (classification, score) = run_on_text_chunked(i.get('human', ''))
+    record_property("score", str(score))
     assert classification == 'Human', NEWS_JSONL_FILE + ' is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))
 
-@pytest.mark.parametrize('i', news_samples[0:250])
-def test_chatgptnews_jsonl(i):
+@pytest.mark.parametrize('i', news_samples[0:NUM_JSONL_SAMPLES])
+def test_chatgptnews_jsonl(i, record_property):
     (classification, score) = run_on_text_chunked(i.get('chatgpt', ''))
+    record_property("score", str(score))
     assert classification == 'AI', NEWS_JSONL_FILE + ' is an AI-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
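For context on the record_property("score", ...) calls added throughout: pytest nests each recorded property under its test case in the junit report, roughly as in the illustrative snippet below (classname and values are placeholders, not taken from the checked-in reports). This is the structure a report reader would look for:

<testcase classname="example_module" name="test_human_jsonl[i0]" time="1.0">
  <properties>
    <property name="score" value="0.00421371"/>
  </properties>
</testcase>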