Added CHEAT to roberta and lzma tests

Signed-off-by: Jacob Torrey <jacob@thinkst.com>
pull/6/head
Jacob Torrey 2023-05-31 11:07:35 -06:00
parent ddff7c10ec
commit 894a09c332
2 changed files with 92 additions and 14 deletions

View file

@@ -7,8 +7,8 @@ from lzma_detect import run_on_file_chunked, run_on_text_chunked, PRELUDE_STR, L
 AI_SAMPLE_DIR = 'samples/llm-generated/'
 HUMAN_SAMPLE_DIR = 'samples/human-generated/'
-MIN_LEN = 1000
-NUM_JSONL_SAMPLES = 250
+MIN_LEN = 50
+NUM_JSONL_SAMPLES = 500
 ai_files = os.listdir(AI_SAMPLE_DIR)
 human_files = os.listdir(HUMAN_SAMPLE_DIR)
@@ -60,18 +60,18 @@ def test_human_jsonl(i, record_property):
     record_property("score", str(score))
     assert classification == 'Human', HUMAN_JSONL_FILE + ':' + str(i.get('id')) + ' (len: ' + str(i.get('length', -1)) + ') is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))
-# AI_JSONL_FILE = 'samples/xl-1542M.test.jsonl'
-# ai_samples = []
-# with jsonlines.open(AI_JSONL_FILE) as reader:
-#     for obj in reader:
-#         if obj.get('length', 0) >= MIN_LEN:
-#             ai_samples.append(obj)
+AI_JSONL_FILE = 'samples/xl-1542M.test.jsonl'
+ai_samples = []
+with jsonlines.open(AI_JSONL_FILE) as reader:
+    for obj in reader:
+        if obj.get('length', 0) >= MIN_LEN:
+            ai_samples.append(obj)
-# @pytest.mark.parametrize('i', ai_samples[0:NUM_JSONL_SAMPLES])
-# def test_gpt2_jsonl(i, record_property):
-#     (classification, score) = run_on_text_chunked(i.get('text', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
-#     record_property("score", str(score))
-#     assert classification == 'AI', AI_JSONL_FILE + ':' + str(i.get('id')) + ' (text: ' + i.get('text', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
+@pytest.mark.parametrize('i', ai_samples[0:NUM_JSONL_SAMPLES])
+def test_gpt2_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('text', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
+    record_property("score", str(score))
+    assert classification == 'AI', AI_JSONL_FILE + ':' + str(i.get('id')) + ' (text: ' + i.get('text', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
 GPT3_JSONL_FILE = 'samples/GPT-3-175b_samples.jsonl'
 gpt3_samples = []
@@ -104,3 +104,42 @@ def test_chatgptnews_jsonl(i, record_property):
     (classification, score) = run_on_text_chunked(i.get('chatgpt', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
     record_property("score", str(score))
     assert classification == 'AI', NEWS_JSONL_FILE + ' is an AI-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
+CHEAT_HUMAN_JSONL_FILE = 'samples/ieee-init.jsonl'
+ch_samples = []
+with jsonlines.open(CHEAT_HUMAN_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('abstract', '')) >= MIN_LEN:
+            ch_samples.append(obj)
+@pytest.mark.parametrize('i', ch_samples[0:NUM_JSONL_SAMPLES])
+def test_cheat_human_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('abstract', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
+    record_property("score", str(score))
+    assert classification == 'Human', CHEAT_HUMAN_JSONL_FILE + ':' + str(i.get('id')) + ' [' + str(len(i.get('abstract', ''))) + '] (title: ' + i.get('title', "").replace('\n', ' ')[:15] + ') is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))
+CHEAT_GEN_JSONL_FILE = 'samples/ieee-chatgpt-generation.jsonl'
+cg_samples = []
+with jsonlines.open(CHEAT_GEN_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('abstract', '')) >= MIN_LEN:
+            cg_samples.append(obj)
+@pytest.mark.parametrize('i', cg_samples[0:NUM_JSONL_SAMPLES])
+def test_cheat_generation_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('abstract', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
+    record_property("score", str(score))
+    assert classification == 'AI', CHEAT_GEN_JSONL_FILE + ':' + str(i.get('id')) + ' (title: ' + i.get('title', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
+CHEAT_POLISH_JSONL_FILE = 'samples/ieee-chatgpt-polish.jsonl'
+cp_samples = []
+with jsonlines.open(CHEAT_POLISH_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('abstract', '')) >= MIN_LEN:
+            cp_samples.append(obj)
+@pytest.mark.parametrize('i', cp_samples[0:NUM_JSONL_SAMPLES])
+def test_cheat_polish_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('abstract', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
+    record_property("score", str(score))
+    assert classification == 'AI', CHEAT_POLISH_JSONL_FILE + ':' + str(i.get('id')) + ' (title: ' + i.get('title', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
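Note: the three CHEAT loaders added above repeat the same read-and-filter pattern. A minimal sketch of a shared helper that could replace them is below; the name load_cheat_samples is hypothetical and not part of this commit.

# Hypothetical helper, not in this commit: collapses the three identical
# CHEAT JSONL loaders into one function (load_cheat_samples is an invented name).
import jsonlines

def load_cheat_samples(path, min_len):
    """Return records whose 'abstract' field is at least min_len characters long."""
    samples = []
    with jsonlines.open(path) as reader:
        for obj in reader:
            if len(obj.get('abstract', '')) >= min_len:
                samples.append(obj)
    return samples

# ch_samples = load_cheat_samples('samples/ieee-init.jsonl', MIN_LEN)
# cg_samples = load_cheat_samples('samples/ieee-chatgpt-generation.jsonl', MIN_LEN)
# cp_samples = load_cheat_samples('samples/ieee-chatgpt-polish.jsonl', MIN_LEN)

Since all three CHEAT splits key the classification text on 'abstract', a single loader would keep the MIN_LEN filtering consistent across the human, generation, and polish test groups.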

View file

@@ -8,7 +8,7 @@ AI_SAMPLE_DIR = 'samples/llm-generated/'
 HUMAN_SAMPLE_DIR = 'samples/human-generated/'
 MIN_LEN = 150
-NUM_JSONL_SAMPLES = 50
+NUM_JSONL_SAMPLES = 500
 ai_files = os.listdir(AI_SAMPLE_DIR)
 human_files = os.listdir(HUMAN_SAMPLE_DIR)
@@ -99,3 +99,42 @@ def test_chatgptnews_jsonl(i, record_property):
     (classification, score) = run_on_text_chunked(i.get('chatgpt', ''))
     record_property("score", str(score))
     assert classification == 'AI', NEWS_JSONL_FILE + ' is an AI-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
+CHEAT_HUMAN_JSONL_FILE = 'samples/ieee-init.jsonl'
+ch_samples = []
+with jsonlines.open(CHEAT_HUMAN_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('abstract', '')) >= MIN_LEN:
+            ch_samples.append(obj)
+@pytest.mark.parametrize('i', ch_samples[0:NUM_JSONL_SAMPLES])
+def test_cheat_human_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('abstract', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
+    record_property("score", str(score))
+    assert classification == 'Human', CHEAT_HUMAN_JSONL_FILE + ':' + str(i.get('id')) + ' [' + str(len(i.get('abstract', ''))) + '] (title: ' + i.get('title', "").replace('\n', ' ')[:15] + ') is a human-generated sample, misclassified as AI-generated with confidence ' + str(round(score, 8))
+CHEAT_GEN_JSONL_FILE = 'samples/ieee-chatgpt-generation.jsonl'
+cg_samples = []
+with jsonlines.open(CHEAT_GEN_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('abstract', '')) >= MIN_LEN:
+            cg_samples.append(obj)
+@pytest.mark.parametrize('i', cg_samples[0:NUM_JSONL_SAMPLES])
+def test_cheat_generation_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('abstract', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
+    record_property("score", str(score))
+    assert classification == 'AI', CHEAT_GEN_JSONL_FILE + ':' + str(i.get('id')) + ' (title: ' + i.get('title', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
+CHEAT_POLISH_JSONL_FILE = 'samples/ieee-chatgpt-polish.jsonl'
+cp_samples = []
+with jsonlines.open(CHEAT_POLISH_JSONL_FILE) as reader:
+    for obj in reader:
+        if len(obj.get('abstract', '')) >= MIN_LEN:
+            cp_samples.append(obj)
+@pytest.mark.parametrize('i', cp_samples[0:NUM_JSONL_SAMPLES])
+def test_cheat_polish_jsonl(i, record_property):
+    (classification, score) = run_on_text_chunked(i.get('abstract', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
+    record_property("score", str(score))
+    assert classification == 'AI', CHEAT_POLISH_JSONL_FILE + ':' + str(i.get('id')) + ' (title: ' + i.get('title', "").replace('\n', ' ')[:50] + ') is an LLM-generated sample, misclassified as human-generated with confidence ' + str(round(score, 8))
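For debugging a single failing CHEAT sample outside pytest, a standalone sketch like the following could be used. Assumptions: run_on_text_chunked is imported from the detector under test (lzma_detect is the import visible in the first file of this diff; the roberta variant's import is not shown), the sample file exists under samples/, and the FUZZINESS / PRELUDE_RATIO values below are placeholders for the constants defined in the test module, which this diff does not show.

# Standalone sketch, not part of this commit.
import jsonlines
from lzma_detect import run_on_text_chunked

FUZZINESS = 3        # placeholder value, not the test module's actual constant
PRELUDE_RATIO = 1.0  # placeholder value, not the test module's actual constant

with jsonlines.open('samples/ieee-chatgpt-generation.jsonl') as reader:
    sample = next(iter(reader))  # first abstract in the CHEAT generation split

classification, score = run_on_text_chunked(
    sample.get('abstract', ''),
    fuzziness=FUZZINESS,
    prelude_ratio=PRELUDE_RATIO,
)
# test_cheat_generation_jsonl above expects 'AI' for this split
print(classification, round(score, 8))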