#!/usr/bin/env python3
"""Pytest suite for the LZMA-compression-based LLM text detector.

Runs the detector over curated sample files and over GPT-2
output-dataset JSONL records, asserting the expected 'AI' / 'Human'
classification.
"""
import os
from warnings import warn

import jsonlines
import pytest

from lzma_detect import (
    PRELUDE_STR,
    LzmaLlmDetector,
    run_on_file_chunked,
    run_on_text_chunked,
)

# Hand-collected samples, one text file per sample.
AI_SAMPLE_DIR = 'samples/llm-generated/'
HUMAN_SAMPLE_DIR = 'samples/human-generated/'
ai_files = os.listdir(AI_SAMPLE_DIR)
human_files = os.listdir(HUMAN_SAMPLE_DIR)

FUZZINESS = 3
CONFIDENCE_THRESHOLD: float = 0.00  # What confidence to treat as error vs warning

# Compute the prelude compression ratio once at import time; every test passes
# it back in rather than re-compressing the (constant) prelude on each call.
PRELUDE_RATIO = LzmaLlmDetector(prelude_str=PRELUDE_STR).prelude_ratio
def test_training_file():
    """The detector's own training corpus must always classify as 'AI'."""
    # run_on_file_chunked returns a (classification, score) pair; only the
    # label matters here.
    assert run_on_file_chunked('ai-generated.txt')[0] == 'AI', 'The training corpus should always be detected as AI-generated... since it is'
@pytest.mark.parametrize('f', human_files)
def test_human_samples(f):
    """Each curated human-written sample should classify as 'Human'.

    Results at or below CONFIDENCE_THRESHOLD only emit a warning instead of
    failing, so borderline samples do not break the suite.
    """
    (classification, score) = run_on_file_chunked(HUMAN_SAMPLE_DIR + f, fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
    if score > CONFIDENCE_THRESHOLD:
        assert classification == 'Human', f + ' is a human-generated file, misclassified as AI-generated with confidence ' + str(round(score, 8))
    elif classification != 'Human':
        # Wrong label, but below the confidence bar: warn rather than fail.
        warn("Misclassified " + f + " with score of: " + str(round(score, 8)))
    else:
        warn("Unable to confidently classify: " + f)
@pytest.mark.parametrize('f', ai_files)
def test_llm_sample(f):
    """Each curated LLM-generated sample should classify as 'AI'.

    Results at or below CONFIDENCE_THRESHOLD only emit a warning instead of
    failing, so borderline samples do not break the suite.
    """
    (classification, score) = run_on_file_chunked(AI_SAMPLE_DIR + f, fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
    if score > CONFIDENCE_THRESHOLD:
        assert classification == 'AI', f + ' is an LLM-generated file, misclassified as human-generated with confidence ' + str(round(score, 8))
    elif classification != 'AI':
        # Wrong label, but below the confidence bar: warn rather than fail.
        warn("Misclassified " + f + " with score of: " + str(round(score, 8)))
    else:
        warn("Unable to confidently classify: " + f)
# GPT-2 output-dataset regression sets: only keep records long enough
# (length >= MIN_LEN) for compression ratios to be meaningful.
MIN_LEN = 150

HUMAN_JSONL_FILE = 'samples/webtext.test.jsonl'
with jsonlines.open(HUMAN_JSONL_FILE) as reader:
    human_samples = [obj for obj in reader if obj.get('length', 0) >= MIN_LEN]
@pytest.mark.parametrize('i', human_samples[0:250])
def test_human_jsonl(i):
    """The first 250 qualifying webtext records must classify as 'Human'."""
    (classification, score) = run_on_text_chunked(i.get('text', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
    assert classification == 'Human', f"{HUMAN_JSONL_FILE}:{i.get('id')} (len: {i.get('length', -1)}) is a human-generated sample, misclassified as AI-generated with confidence {round(score, 8)}"
# GPT-2 xl-1542M generations, filtered by the same minimum-length rule as
# the human webtext set.
AI_JSONL_FILE = 'samples/xl-1542M.test.jsonl'
with jsonlines.open(AI_JSONL_FILE) as reader:
    ai_samples = [obj for obj in reader if obj.get('length', 0) >= MIN_LEN]
@pytest.mark.parametrize('i', ai_samples[0:250])
def test_llm_jsonl(i):
    """The first 250 qualifying xl-1542M records must classify as 'AI'."""
    (classification, score) = run_on_text_chunked(i.get('text', ''), fuzziness=FUZZINESS, prelude_ratio=PRELUDE_RATIO)
    # Include a 50-char, newline-stripped excerpt in the failure message so
    # the offending sample is identifiable even without a record id.
    excerpt = i.get('text', "").replace('\n', '')[:50]
    assert classification == 'AI', f"{AI_JSONL_FILE}:{i.get('id')} (text: {excerpt}) is an LLM-generated sample, misclassified as human-generated with confidence {round(score, 8)}"