2024-07-25 12:39:01 +00:00
|
|
|
#!/usr/bin/env python3
|
2025-03-21 08:50:32 +00:00
|
|
|
import psutil
|
2024-07-11 13:03:42 +00:00
|
|
|
import time
|
|
|
|
from threading import Thread
|
2021-02-16 20:36:41 +00:00
|
|
|
|
|
|
|
import pytest
|
2021-08-16 13:24:37 +00:00
|
|
|
from changedetectionio import changedetection_app
|
|
|
|
from changedetectionio import store
|
2021-02-21 13:26:19 +00:00
|
|
|
import os
|
2024-01-16 08:48:16 +00:00
|
|
|
import sys
|
|
|
|
from loguru import logger
|
2021-02-16 20:36:41 +00:00
|
|
|
|
|
|
|
# https://github.com/pallets/flask/blob/1.1.2/examples/tutorial/tests/test_auth.py
|
|
|
|
# Much better boilerplate than the docs
|
|
|
|
# https://www.python-boilerplate.com/py3+flask+pytest/
|
|
|
|
|
2021-02-21 12:41:00 +00:00
|
|
|
global app
|
2021-02-16 20:36:41 +00:00
|
|
|
|
2024-01-16 08:48:16 +00:00
|
|
|
# https://loguru.readthedocs.io/en/latest/resources/migration.html#replacing-caplog-fixture-from-pytest-library
|
|
|
|
# Show loguru logs only if CICD pytest fails.
|
|
|
|
from loguru import logger
|
|
|
|
@pytest.fixture
def reportlog(pytestconfig):
    """Route loguru output into pytest's captured-log report sections.

    pytest's built-in caplog only sees stdlib ``logging``; attaching the
    logging-plugin's report handler as a loguru sink makes loguru messages
    show up when a test fails (and only then, under CICD).
    """
    plugin = pytestconfig.pluginmanager.getplugin("logging-plugin")
    sink_id = logger.add(plugin.report_handler, format="{message}")
    yield
    # Detach the sink again so sessions/tests don't accumulate handlers.
    logger.remove(sink_id)
|
2021-07-21 02:49:32 +00:00
|
|
|
|
2024-07-11 13:03:42 +00:00
|
|
|
|
|
|
|
def track_memory(memory_usage, ):
    """Poll this process's resident set size until told to stop.

    Runs in a background thread (see ``measure_memory_usage``).  Updates
    ``memory_usage["peak"]`` (bytes, per psutil's ``rss``) in place and
    exits once ``memory_usage["stop"]`` becomes truthy.
    """
    proc = psutil.Process(os.getpid())
    while not memory_usage["stop"]:
        rss_now = proc.memory_info().rss
        if rss_now > memory_usage["peak"]:
            memory_usage["peak"] = rss_now
        time.sleep(0.01)  # Polling interval; adjust the sleep time as needed
|
|
|
|
|
|
|
|
@pytest.fixture(scope='function')
def measure_memory_usage(request):
    """Record the peak RSS of the process while a test runs.

    Spawns a background thread running ``track_memory`` that samples
    ``psutil``'s ``rss`` (bytes) until the test finishes, then logs the
    peak — and appends it to ``test-memory.log`` — tagged with the test's
    file and name.
    """
    memory_usage = {"peak": 0, "stop": False}
    tracker_thread = Thread(target=track_memory, args=(memory_usage,))
    tracker_thread.start()

    yield

    memory_usage["stop"] = True
    tracker_thread.join()

    # BUGFIX: "peak" is psutil rss in *bytes*, so MB needs two divisions by
    # 1024.  The previous single /1024 (a leftover from a resource.getrusage
    # ru_maxrss implementation, which reports kilobytes) produced KiB while
    # labelling it MB.
    max_memory_used = memory_usage["peak"] / (1024 * 1024)  # bytes -> MB
    s = f"Peak memory used by the test {request.node.fspath} - '{request.node.name}': {max_memory_used:.2f} MB"
    logger.debug(s)

    with open("test-memory.log", 'a') as f:
        f.write(f"{s}\n")

    # Assert that the memory usage is less than 200MB
    # assert max_memory_used < 150, f"Memory usage exceeded 200MB: {max_memory_used:.2f} MB"
|
2024-07-11 13:03:42 +00:00
|
|
|
|
|
|
|
|
2021-07-21 02:49:32 +00:00
|
|
|
def cleanup(datastore_path):
    """Remove test output files (``*.txt``, ``*.json``, ``*.pdf``) from the
    datastore directory.

    Any path containing ``proxies.json`` is skipped — that file is usually
    mounted by the docker container during test time.
    """
    import glob

    for pattern in ("*.txt", "*.json", "*.pdf"):
        for path in glob.glob(os.path.join(datastore_path, pattern)):
            if 'proxies.json' in path:
                # Usually mounted by docker container during test time
                continue
            if os.path.isfile(path):
                os.unlink(path)
|
2021-07-21 02:49:32 +00:00
|
|
|
|
2021-02-16 20:36:41 +00:00
|
|
|
@pytest.fixture(scope='session')
def app(request):
    """Create application for the tests."""
    # Shared on-disk datastore used by every test in the session.
    datastore_path = "./test-datastore"

    # So they don't delay in fetching
    os.environ["MINIMUM_SECONDS_RECHECK_TIME"] = "0"
    try:
        os.mkdir(datastore_path)
    except FileExistsError:
        # Directory left over from a previous run — reuse it.
        pass

    # Clear stale output files from any earlier session before starting.
    cleanup(datastore_path)

    app_config = {'datastore_path': datastore_path, 'disable_checkver' : True}
    cleanup(app_config['datastore_path'])

    # Capture everything; the filters below split it between stdout/stderr.
    logger_level = 'TRACE'

    # Drop loguru's default handler before installing our own pair.
    logger.remove()
    # Levels routed to stdout; everything else goes to stderr.
    log_level_for_stdout = { 'DEBUG', 'SUCCESS' }
    logger.configure(handlers=[
        {"sink": sys.stdout, "level": logger_level,
         "filter" : lambda record: record['level'].name in log_level_for_stdout},
        {"sink": sys.stderr, "level": logger_level,
         "filter": lambda record: record['level'].name not in log_level_for_stdout},
        ])

    # Fresh datastore per session; include_default_watches=False keeps the
    # watch list empty so tests start from a known state.
    datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], include_default_watches=False)
    app = changedetection_app(app_config, datastore)

    # Disable CSRF while running tests
    app.config['WTF_CSRF_ENABLED'] = False
    app.config['STOP_THREADS'] = True

    def teardown():
        # Stop the datastore's background save thread, signal the app's
        # exit event, and remove test output files.
        datastore.stop_thread = True
        app.config.exit.set()
        cleanup(app_config['datastore_path'])

    # Runs once when the session-scoped fixture is finalized.
    request.addfinalizer(teardown)
    yield app
|