Mirror of https://github.com/thinkst/zippy
Added GPTZero API for testing and comparison
Signed-off-by: Jacob Torrey <jacob@thinkst.com>
pull/6/head
parent 949aa2482a
commit 2474b058e6
gptzero_detect.py
@@ -0,0 +1,78 @@
#!/usr/bin/env python3

import requests, re, os
from typing import Optional, Dict, Tuple

API_KEY = os.getenv('GPTZERO_APIKEY')
API_URL = 'https://api.gptzero.me/v2/predict/text'

def make_req(text : str) -> Optional[Dict]:
    '''
    POST a single document to the GPTZero API and return the first entry of the
    'documents' list from the JSON response, or None if the request fails.
    '''
    headers = {
        'X-Api-Key': API_KEY
    }
    data = {
        'document': text,
    }
    res = requests.post(API_URL, headers=headers, json=data)
    if res.status_code != 200:
        print(res.text)
        return None
    return res.json().get('documents', [None])[0]

def classify_text(s : str) -> Optional[Tuple[str, float]]:
    '''
    Classify a single string via the GPTZero API, returning an ('AI' | 'Human', confidence)
    tuple, or None if the request failed.
    '''
    res = make_req(s)
    if res is None:
        print("Unable to classify!")
        return None
    else:
        #print(res)
        if res.get('average_generated_prob') > 0.5:
            return ('AI', res.get('completely_generated_prob'))
        else:
            return ('Human', 1 - res.get('completely_generated_prob'))

def run_on_file_chunked(filename : str, chunk_size : int = 1025) -> Optional[Tuple[str, float]]:
    '''
    Given a filename (and an optional chunk size) returns the score for the contents of that file.
    This function splits the file into chunks of at most chunk_size characters, scores each chunk
    separately, and returns the average. This prevents a very large input from overwhelming the model.
    '''
    with open(filename, 'r') as fp:
        contents = fp.read()
    return run_on_text_chunked(contents, chunk_size)

def run_on_text_chunked(contents : str, chunk_size : int = 1025) -> Optional[Tuple[str, float]]:
    '''
    Given a text (and an optional chunk size) returns the score for the contents of that string.
    This function splits the string into chunks of at most chunk_size characters, scores each chunk
    separately, and returns the average. This prevents a very large input from overwhelming the model.
    '''
    # Remove extra spaces and duplicate newlines.
    contents = re.sub(' +', ' ', contents)
    contents = re.sub('\t', '', contents)
    contents = re.sub('\n+', '\n', contents)
    contents = re.sub('\n ', '\n', contents)

    # Split on the last space before each chunk_size boundary so words stay intact.
    start = 0
    end = 0
    chunks = []
    while start + chunk_size < len(contents) and end != -1:
        end = contents.rfind(' ', start, start + chunk_size + 1)
        chunks.append(contents[start:end])
        start = end + 1
    chunks.append(contents[start:])
    scores = []
    for c in chunks:
        scores.append(classify_text(c))
    # Drop chunks the API could not classify; bail out if nothing was scored.
    scores = [s for s in scores if s is not None]
    if len(scores) == 0:
        return None
    # Average the per-chunk confidences, counting 'AI' as negative and 'Human' as positive.
    ssum : float = 0.0
    for s in scores:
        if s[0] == 'AI':
            ssum -= s[1]
        else:
            ssum += s[1]
    sa : float = ssum / len(scores)
    if sa < 0:
        return ('AI', abs(sa))
    else:
        return ('Human', abs(sa))
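
A minimal usage sketch for the module above, assuming the GPTZERO_APIKEY environment variable is exported; 'some_essay.txt' is a placeholder path, not a file from this commit:

# Sketch: score one file and one raw string with the helpers defined above.
from gptzero_detect import classify_text, run_on_file_chunked

print(run_on_file_chunked('some_essay.txt'))   # e.g. ('Human', 0.93), or None on API errors
print(classify_text('A short passage to score in a single API call.'))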
@@ -0,0 +1,93 @@
#!/usr/bin/env python3

import pytest, os, jsonlines
from warnings import warn
from gptzero_detect import run_on_file_chunked, run_on_text_chunked

AI_SAMPLE_DIR = 'samples/llm-generated/'
HUMAN_SAMPLE_DIR = 'samples/human-generated/'

MIN_LEN = 150
NUM_JSONL_SAMPLES = 50

ai_files = os.listdir(AI_SAMPLE_DIR)
human_files = os.listdir(HUMAN_SAMPLE_DIR)

CONFIDENCE_THRESHOLD : float = 0.00  # What confidence to treat as error vs warning

def test_training_file():
    (classification, score) = run_on_file_chunked('ai-generated.txt')
    assert classification == 'AI', 'The training corpus should always be detected as AI-generated... since it is (score: ' + str(round(score, 8)) + ')'

@pytest.mark.parametrize('f', human_files)
def test_human_samples(f):
    (classification, score) = run_on_file_chunked(HUMAN_SAMPLE_DIR + f)
    if score > CONFIDENCE_THRESHOLD:
        assert classification == 'Human', f + ' is a human-generated file, misclassified as AI-generated with confidence ' + str(round(score, 8))
    else:
        if classification != 'Human':
            warn("Misclassified " + f + " with score of: " + str(round(score, 8)))
        else:
            warn("Unable to confidently classify: " + f)

@pytest.mark.parametrize('f', ai_files)
def test_llm_sample(f):
    (classification, score) = run_on_file_chunked(AI_SAMPLE_DIR + f)
    if score > CONFIDENCE_THRESHOLD:
        assert classification == 'AI', f + ' is an LLM-generated file, misclassified as human-generated with confidence ' + str(round(score, 8))
    else:
        if classification != 'AI':
            warn("Misclassified " + f + " with score of: " + str(round(score, 8)))
        else:
            warn("Unable to confidently classify: " + f)

HUMAN_JSONL_FILE = 'samples/webtext.test.jsonl'
human_samples = []
with jsonlines.open(HUMAN_JSONL_FILE) as reader:
    for obj in reader:
        human_samples.append(obj)

@pytest.mark.parametrize('i', human_samples[0:NUM_JSONL_SAMPLES])
def test_human_jsonl(i):
    (classification, score) = run_on_text_chunked(i.get('text', ''))
    assert classification == 'Human', HUMAN_JSONL_FILE + ':' + str(i.get('id')) + ' (len: ' + str(i.get('length', -1)) + ') is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))

AI_JSONL_FILE = 'samples/xl-1542M.test.jsonl'
ai_samples = []
with jsonlines.open(AI_JSONL_FILE) as reader:
    for obj in reader:
        ai_samples.append(obj)

@pytest.mark.parametrize('i', ai_samples[0:NUM_JSONL_SAMPLES])
def test_llm_jsonl(i):
    (classification, score) = run_on_text_chunked(i.get('text', ''))
    assert classification == 'AI', AI_JSONL_FILE + ':' + str(i.get('id')) + ' (text: ' + i.get('text', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))

GPT3_JSONL_FILE = 'samples/GPT-3-175b_samples.jsonl'
gpt3_samples = []
with jsonlines.open(GPT3_JSONL_FILE) as reader:
    for o in reader:
        for l in o.split('<|endoftext|>'):
            if len(l) >= MIN_LEN:
                gpt3_samples.append(l)

@pytest.mark.parametrize('i', gpt3_samples[0:NUM_JSONL_SAMPLES])
def test_gpt3_jsonl(i):
    (classification, score) = run_on_text_chunked(i)
    assert classification == 'AI', GPT3_JSONL_FILE + ' is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))

NEWS_JSONL_FILE = 'samples/news.jsonl'
news_samples = []
with jsonlines.open(NEWS_JSONL_FILE) as reader:
    for obj in reader:
        news_samples.append(obj)

@pytest.mark.parametrize('i', news_samples[0:NUM_JSONL_SAMPLES])
def test_humannews_jsonl(i):
    (classification, score) = run_on_text_chunked(i.get('human', ''))
    assert classification == 'Human', NEWS_JSONL_FILE + ' is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))

@pytest.mark.parametrize('i', news_samples[0:NUM_JSONL_SAMPLES])
def test_chatgptnews_jsonl(i):
    (classification, score) = run_on_text_chunked(i.get('chatgpt', ''))
    assert classification == 'AI', NEWS_JSONL_FILE + ' is an AI-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
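
The suite above is driven by pytest and needs both GPTZERO_APIKEY and the samples/ corpora referenced in the constants; NUM_JSONL_SAMPLES caps how many JSONL entries (and thus API calls) each parametrized test consumes. A minimal way to drive it programmatically, assuming the test file is saved as gptzero_test.py (the file name is an assumption):

# Sketch: run the GPTZero comparison tests from Python; equivalent to `pytest -v gptzero_test.py`.
import pytest
pytest.main(['-v', 'gptzero_test.py'])  # 'gptzero_test.py' is an assumed file name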