Mirror of https://github.com/dgtlmoon/changedetection.io
Tidy up async worker names and cleanups when in test mode
parent 6c3e88e261
commit 03e751b57f
@@ -25,6 +25,11 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore):
         app: Flask application instance
         datastore: Application datastore
     """
+    # Set a descriptive name for this task
+    task = asyncio.current_task()
+    if task:
+        task.set_name(f"async-worker-{worker_id}")
+
     logger.info(f"Starting async worker {worker_id}")
 
     while not app.config.exit.is_set():
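
Outside the diff itself, the effect of naming worker tasks is easiest to see in a small standalone sketch (illustrative only, not project code): once each task carries a descriptive name, a dump of the loop's tasks identifies every worker at a glance. asyncio.create_task() also accepts a name= argument, but a task created indirectly through run_coroutine_threadsafe has no such hook, which is presumably why the name is set from inside the coroutine here.

import asyncio

async def worker(worker_id):
    # Name the current task so it is identifiable in task dumps and debug output
    task = asyncio.current_task()
    if task:
        task.set_name(f"async-worker-{worker_id}")
    await asyncio.sleep(0.1)

async def main():
    tasks = [asyncio.create_task(worker(i)) for i in range(3)]
    await asyncio.sleep(0)  # let the workers start and name themselves
    for t in asyncio.all_tasks():
        print(t.get_name())  # e.g. async-worker-0, async-worker-1, async-worker-2, Task-1
    await asyncio.gather(*tasks)

asyncio.run(main())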
@@ -387,6 +392,11 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore):
             if app.config.exit.is_set():
                 break
 
+    # Check if we're in pytest environment - if so, be more gentle with logging
+    import sys
+    in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ
+
-    logger.info(f"Worker {worker_id} shutting down")
+    if not in_pytest:
+        logger.info(f"Worker {worker_id} shutting down")
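
The pytest check that this and several later hunks repeat can be factored into a tiny helper, shown here as a sketch (the helper name is mine; the project inlines the expression): pytest is visible either through sys.modules once the test runner has been imported, or through the PYTEST_CURRENT_TEST variable it exports while a test is executing.

import os
import sys

def running_under_pytest() -> bool:
    # pytest appears in sys.modules as soon as the test runner is loaded,
    # and it sets PYTEST_CURRENT_TEST for the duration of each test.
    return "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ

# Usage mirroring the diff: keep shutdown logging quiet during test runs
if not running_under_pytest():
    print("Worker shutting down")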
@@ -139,6 +139,11 @@ class SignalHandler:
                 break
             time.sleep(0.5)
 
+        # Check if we're in pytest environment - if so, be more gentle with logging
+        import sys
+        in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ
+
-        logger.info("Queue update thread stopped (threading mode)")
+        if not in_pytest:
+            logger.info("Queue update thread stopped (threading mode)")
@@ -407,7 +407,12 @@ class ChangeDetectionStore:
             # This is a fairly basic strategy to deal with the case that the file is corrupted,
             # system was out of memory, out of RAM etc
             with open(self.json_store_path+".tmp", 'w') as json_file:
-                json.dump(data, json_file, indent=4)
+                # Use compact JSON in production for better performance
+                debug_mode = os.environ.get('CHANGEDETECTION_DEBUG', 'false').lower() == 'true'
+                if debug_mode:
+                    json.dump(data, json_file, indent=4)
+                else:
+                    json.dump(data, json_file, separators=(',', ':'))
             os.replace(self.json_store_path+".tmp", self.json_store_path)
         except Exception as e:
             logger.error(f"Error writing JSON!! (Main JSON file save was skipped) : {str(e)}")
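
This save path combines two independent ideas, sketched below in generic form (the function name and file path are illustrative): write to a temporary file and swap it into place with os.replace so a crash mid-write cannot corrupt the real file, and emit compact JSON unless a debug flag asks for indented output. For a large datastore, dropping indent=4 usually shrinks the file and speeds up the dump noticeably.

import json
import os

def save_json_atomically(path, data, pretty=False):
    # Write to a temporary file first so a crash mid-write never touches the real file
    tmp_path = path + ".tmp"
    with open(tmp_path, "w") as f:
        if pretty:
            json.dump(data, f, indent=4)               # readable, but larger and slower
        else:
            json.dump(data, f, separators=(",", ":"))  # compact: no spaces after , and :
    # os.replace atomically renames over the destination on the same filesystem
    os.replace(tmp_path, path)

save_json_atomically("settings.json", {"watches": {}, "version": 1})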
@@ -106,8 +106,33 @@ def app(request):
     app.config['STOP_THREADS'] = True
 
     def teardown():
+        # Stop all threads and services
         datastore.stop_thread = True
         app.config.exit.set()
+
+        # Shutdown workers gracefully before loguru cleanup
+        try:
+            from changedetectionio import worker_handler
+            worker_handler.shutdown_workers()
+        except Exception:
+            pass
+
+        # Stop socket server threads
+        try:
+            from changedetectionio.flask_app import socketio_server
+            if socketio_server and hasattr(socketio_server, 'shutdown'):
+                socketio_server.shutdown()
+        except Exception:
+            pass
+
+        # Give threads a moment to finish their shutdown
+        import time
+        time.sleep(0.1)
+
+        # Remove all loguru handlers to prevent "closed file" errors
+        logger.remove()
+
+        # Cleanup files
         cleanup(app_config['datastore_path'])
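
The ordering in this teardown matters more than any single call. A minimal sketch of the same idea with a generic background thread (not changedetection.io's workers): everything that might still log has to be stopped before loguru's handlers are removed, otherwise a late log call can hit a handler whose pytest-captured stream has already been closed.

import threading
import time

import pytest
from loguru import logger

@pytest.fixture(scope="session")
def background_service():
    stop = threading.Event()

    def run():
        while not stop.is_set():
            logger.debug("service heartbeat")
            time.sleep(0.05)

    thread = threading.Thread(target=run, daemon=True)
    thread.start()
    yield thread

    # Teardown order matters: first stop anything that may still log...
    stop.set()
    thread.join(timeout=1.0)
    # ...then drop loguru's handlers, so no thread writes to a handler whose
    # underlying (pytest-captured) stream is already closed.
    logger.remove()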
@@ -76,9 +76,16 @@ def start_async_workers(n_workers, update_q, notification_q, app, datastore):
     logger.info(f"Starting {n_workers} async workers")
     for i in range(n_workers):
         try:
-            task_future = asyncio.run_coroutine_threadsafe(
-                start_single_async_worker(i, update_q, notification_q, app, datastore), async_loop
-            )
+            # Use a factory function to create named worker coroutines
+            def create_named_worker(worker_id):
+                async def named_worker():
+                    task = asyncio.current_task()
+                    if task:
+                        task.set_name(f"async-worker-{worker_id}")
+                    return await start_single_async_worker(worker_id, update_q, notification_q, app, datastore)
+                return named_worker()
+
+            task_future = asyncio.run_coroutine_threadsafe(create_named_worker(i), async_loop)
             running_async_tasks.append(task_future)
         except RuntimeError as e:
             logger.error(f"Failed to start async worker {i}: {e}")
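
A standalone sketch of the factory pattern this hunk introduces (do_work stands in for start_single_async_worker): passing worker_id into create_named_worker binds the id per call, so every coroutine names its task correctly even though they are created in a loop. An inner async def that referenced the loop variable directly would late-bind and could see a later value by the time it actually runs.

import asyncio

async def do_work(worker_id):
    await asyncio.sleep(0)
    return worker_id

def create_named_worker(worker_id):
    # The factory call freezes worker_id for this coroutine; a closure defined
    # directly inside the for-loop would late-bind the loop variable instead.
    async def named_worker():
        task = asyncio.current_task()
        if task:
            task.set_name(f"async-worker-{worker_id}")
        return await do_work(worker_id)
    return named_worker()

async def main():
    results = await asyncio.gather(*(create_named_worker(i) for i in range(3)))
    print(results)  # [0, 1, 2] -- each worker kept its own id

asyncio.run(main())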
@ -89,22 +96,31 @@ async def start_single_async_worker(worker_id, update_q, notification_q, app, da
|
||||||
"""Start a single async worker with auto-restart capability"""
|
"""Start a single async worker with auto-restart capability"""
|
||||||
from changedetectionio.async_update_worker import async_update_worker
|
from changedetectionio.async_update_worker import async_update_worker
|
||||||
|
|
||||||
|
# Check if we're in pytest environment - if so, be more gentle with logging
|
||||||
|
import os
|
||||||
|
in_pytest = "pytest" in os.sys.modules or "PYTEST_CURRENT_TEST" in os.environ
|
||||||
|
|
||||||
while not app.config.exit.is_set():
|
while not app.config.exit.is_set():
|
||||||
try:
|
try:
|
||||||
|
if not in_pytest:
|
||||||
logger.info(f"Starting async worker {worker_id}")
|
logger.info(f"Starting async worker {worker_id}")
|
||||||
await async_update_worker(worker_id, update_q, notification_q, app, datastore)
|
await async_update_worker(worker_id, update_q, notification_q, app, datastore)
|
||||||
# If we reach here, worker exited cleanly
|
# If we reach here, worker exited cleanly
|
||||||
|
if not in_pytest:
|
||||||
logger.info(f"Async worker {worker_id} exited cleanly")
|
logger.info(f"Async worker {worker_id} exited cleanly")
|
||||||
break
|
break
|
||||||
except asyncio.CancelledError:
|
except asyncio.CancelledError:
|
||||||
# Task was cancelled (normal shutdown)
|
# Task was cancelled (normal shutdown)
|
||||||
|
if not in_pytest:
|
||||||
logger.info(f"Async worker {worker_id} cancelled")
|
logger.info(f"Async worker {worker_id} cancelled")
|
||||||
break
|
break
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Async worker {worker_id} crashed: {e}")
|
logger.error(f"Async worker {worker_id} crashed: {e}")
|
||||||
|
if not in_pytest:
|
||||||
logger.info(f"Restarting async worker {worker_id} in 5 seconds...")
|
logger.info(f"Restarting async worker {worker_id} in 5 seconds...")
|
||||||
await asyncio.sleep(5)
|
await asyncio.sleep(5)
|
||||||
|
|
||||||
|
if not in_pytest:
|
||||||
logger.info(f"Async worker {worker_id} shutdown complete")
|
logger.info(f"Async worker {worker_id} shutdown complete")
|
||||||
|
|
||||||
|
|
||||||
|
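
The context lines around these changes show the worker supervision loop: run the worker, stop on a clean return or on cancellation, otherwise wait briefly and restart it. A rough self-contained sketch of that shape (names and the short delay are illustrative):

import asyncio

async def supervise(worker_id, worker_factory, exit_event, retry_delay=0.1):
    # Same shape as the restart loop around the real worker: run it, break on a
    # clean exit or cancellation, otherwise pause briefly and start it again.
    while not exit_event.is_set():
        try:
            await worker_factory(worker_id)
            break                      # worker returned: clean exit
        except asyncio.CancelledError:
            break                      # task cancelled: normal shutdown
        except Exception as e:
            print(f"worker {worker_id} crashed: {e}; restarting in {retry_delay}s")
            await asyncio.sleep(retry_delay)

async def main():
    attempts = {"n": 0}

    async def flaky_worker(worker_id):
        # Stand-in worker body: fails on the first run, exits cleanly on the second
        attempts["n"] += 1
        if attempts["n"] == 1:
            raise RuntimeError("simulated crash")
        await asyncio.sleep(0.01)

    await supervise(0, flaky_worker, asyncio.Event())

asyncio.run(main())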
@@ -187,12 +203,17 @@ def shutdown_workers():
     """Shutdown all async workers fast and aggressively"""
     global async_loop, async_loop_thread, running_async_tasks
 
+    # Check if we're in pytest environment - if so, be more gentle with logging
+    import os
+    in_pytest = "pytest" in os.sys.modules or "PYTEST_CURRENT_TEST" in os.environ
+
-    logger.info("Fast shutdown of async workers initiated...")
+    if not in_pytest:
+        logger.info("Fast shutdown of async workers initiated...")
 
     # Cancel all async tasks immediately
     for task_future in running_async_tasks:
-        task_future.cancel()
-    running_async_tasks.clear()
+        if not task_future.done():
+            task_future.cancel()
 
     # Stop the async event loop immediately
     if async_loop and not async_loop.is_closed():
@@ -201,18 +222,23 @@ def shutdown_workers():
         except RuntimeError:
             # Loop might already be stopped
             pass
 
+    running_async_tasks.clear()
     async_loop = None
 
     # Give async thread minimal time to finish, then continue
     if async_loop_thread and async_loop_thread.is_alive():
         async_loop_thread.join(timeout=1.0)  # Only 1 second timeout
-        if async_loop_thread.is_alive():
+        if async_loop_thread.is_alive() and not in_pytest:
             logger.info("Async thread still running after timeout - continuing with shutdown")
     async_loop_thread = None
 
-    logger.info("Async workers fast shutdown complete")
+    if not in_pytest:
+        logger.info("Async workers fast shutdown complete")
 
 
 def adjust_async_worker_count(new_count, update_q=None, notification_q=None, app=None, datastore=None):
     """
     Dynamically adjust the number of async workers.
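
Taken together, the shutdown path these last two hunks quieten follows a common pattern for an asyncio loop running in a helper thread: cancel the futures returned by run_coroutine_threadsafe, ask the loop to stop from the calling thread, then give the loop thread a short, bounded join. A condensed standalone sketch of that sequence (not the project's code):

import asyncio
import threading
import time

def start_loop():
    loop = asyncio.new_event_loop()
    thread = threading.Thread(target=loop.run_forever, daemon=True)
    thread.start()
    return loop, thread

async def worker():
    try:
        while True:
            await asyncio.sleep(0.1)
    except asyncio.CancelledError:
        pass  # normal shutdown path

loop, thread = start_loop()
futures = [asyncio.run_coroutine_threadsafe(worker(), loop) for _ in range(3)]
time.sleep(0.2)  # let the workers run briefly

# Fast shutdown: cancel still-pending futures, stop the loop from this thread,
# then give the loop thread a bounded amount of time to wind down.
for fut in futures:
    if not fut.done():
        fut.cancel()
loop.call_soon_threadsafe(loop.stop)
thread.join(timeout=1.0)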