work on missing exceptions etc

socketio-tweaks
dgtlmoon 2025-05-30 17:35:41 +02:00
parent 40498a59b6
commit f7695f59d3
9 changed files with 629 additions and 13 deletions

View file

@@ -148,8 +148,132 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore):
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text})
process_changedetection_results = False
# [Include all other exception handlers from original worker...]
# (Abbreviated for brevity - same exception handling logic applies)
except FilterNotFoundInResponse as e:
if not datastore.data['watching'].get(uuid):
continue
err_text = "Warning, no filters were found, no change detection ran - Did the page change layout? update your Visual Filter if necessary."
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text})
# Filter wasn't found, but we should still update the visual selector so that the user has a chance to set it up again
if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot)
if e.xpath_data:
watch.save_xpath_data(data=e.xpath_data)
# Only when enabled, send the notification
if watch.get('filter_failure_notification_send', False):
c = watch.get('consecutive_filter_failures', 0)
c += 1
# Send a notification if we have reached the threshold
threshold = datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', 0)
logger.debug(f"Filter for {uuid} not found, consecutive_filter_failures: {c} of threshold {threshold}")
if threshold > 0 and c >= threshold:
if not watch.get('notification_muted'):
logger.debug(f"Sending filter failed notification for {uuid}")
await send_filter_failure_notification(uuid, notification_q, datastore)
c = 0
logger.debug(f"Reset filter failure count back to zero")
datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c})
else:
logger.trace(f"{uuid} - filter_failure_notification_send not enabled, skipping")
process_changedetection_results = False
except content_fetchers_exceptions.checksumFromPreviousCheckWasTheSame as e:
# Nothing to do here, don't continue to process.
process_changedetection_results = False
changed_detected = False
except content_fetchers_exceptions.BrowserConnectError as e:
datastore.update_watch(uuid=uuid,
update_obj={'last_error': e.msg})
process_changedetection_results = False
except content_fetchers_exceptions.BrowserFetchTimedOut as e:
datastore.update_watch(uuid=uuid,
update_obj={'last_error': e.msg})
process_changedetection_results = False
except content_fetchers_exceptions.BrowserStepsStepException as e:
if not datastore.data['watching'].get(uuid):
continue
error_step = e.step_n + 1
from playwright._impl._errors import TimeoutError, Error
# Generally enough info for TimeoutError (couldn't locate the element within the default timeout)
err_text = f"Browser step at position {error_step} could not run, check the watch, add a delay if necessary, view Browser Steps to see a screenshot at that step."
if e.original_e.name == "TimeoutError":
# Just the first line is enough, the rest is the stack trace
err_text += " Could not find the target."
else:
# Other Error, more info is good.
err_text += " " + str(e.original_e).splitlines()[0]
logger.debug(f"BrowserSteps exception at step {error_step} {str(e.original_e)}")
datastore.update_watch(uuid=uuid,
update_obj={'last_error': err_text,
'browser_steps_last_error_step': error_step})
if watch.get('filter_failure_notification_send', False):
c = watch.get('consecutive_filter_failures', 0)
c += 1
# Send a notification if we have reached the threshold
threshold = datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', 0)
logger.error(f"Step for {uuid} not found, consecutive_filter_failures: {c}")
if threshold > 0 and c >= threshold:
if not watch.get('notification_muted'):
await send_step_failure_notification(watch_uuid=uuid, step_n=e.step_n, notification_q=notification_q, datastore=datastore)
c = 0
datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c})
process_changedetection_results = False
except content_fetchers_exceptions.EmptyReply as e:
# Some kind of custom to-str handler in the exception handler that does this?
err_text = "EmptyReply - try increasing 'Wait seconds before extracting text', Status Code {}".format(e.status_code)
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
'last_check_status': e.status_code})
process_changedetection_results = False
except content_fetchers_exceptions.ScreenshotUnavailable as e:
err_text = "Screenshot unavailable, page did not render fully in the expected time or page was too long - try increasing 'Wait seconds before extracting text'"
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
'last_check_status': e.status_code})
process_changedetection_results = False
except content_fetchers_exceptions.JSActionExceptions as e:
err_text = "Error running JS Actions - Page request - "+e.message
if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot, as_error=True)
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
'last_check_status': e.status_code})
process_changedetection_results = False
except content_fetchers_exceptions.PageUnloadable as e:
err_text = "Page request from server didnt respond correctly"
if e.message:
err_text = "{} - {}".format(err_text, e.message)
if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot, as_error=True)
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
'last_check_status': e.status_code,
'has_ldjson_price_data': None})
process_changedetection_results = False
except content_fetchers_exceptions.BrowserStepsInUnsupportedFetcher as e:
err_text = "This watch has Browser Steps configured and so it cannot run with the 'Basic fast Plaintext/HTTP Client', either remove the Browser Steps or select a Chrome fetcher."
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text})
process_changedetection_results = False
logger.error(f"Exception (BrowserStepsInUnsupportedFetcher) reached processing watch UUID: {uuid}")
except Exception as e:
logger.error(f"Worker {worker_id} exception processing watch UUID: {uuid}")
@@ -296,4 +420,36 @@ async def send_content_changed_notification(watch_uuid, notification_q, datastore):
temp_worker.send_content_changed_notification(watch_uuid)
except Exception as e:
logger.error(f"Error sending notification for {watch_uuid}: {e}")
logger.error(f"Error sending notification for {watch_uuid}: {e}")
async def send_filter_failure_notification(watch_uuid, notification_q, datastore):
"""Helper function to send filter failure notifications"""
try:
# Import here to avoid circular imports
from changedetectionio.update_worker import update_worker
# Create temporary worker instance just for notification methods
temp_worker = update_worker(None, notification_q, None, datastore)
temp_worker.datastore = datastore
temp_worker.notification_q = notification_q
temp_worker.send_filter_failure_notification(watch_uuid)
except Exception as e:
logger.error(f"Error sending filter failure notification for {watch_uuid}: {e}")
async def send_step_failure_notification(watch_uuid, step_n, notification_q, datastore):
"""Helper function to send step failure notifications"""
try:
# Import here to avoid circular imports
from changedetectionio.update_worker import update_worker
# Create temporary worker instance just for notification methods
temp_worker = update_worker(None, notification_q, None, datastore)
temp_worker.datastore = datastore
temp_worker.notification_q = notification_q
temp_worker.send_step_failure_notification(watch_uuid, step_n)
except Exception as e:
logger.error(f"Error sending step failure notification for {watch_uuid}: {e}")

View file

@@ -234,7 +234,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
return make_response("Error fetching screenshot and element data - " + str(e), 401)
# SEND THIS BACK TO THE BROWSER
output = {
"screenshot": f"data:image/jpeg;base64,{base64.b64encode(screenshot).decode('ascii')}",
"xpath_data": xpath_data,

View file

@@ -137,7 +137,9 @@
</fieldset>
<div class="pure-control-group">
{{ render_field(form.requests.form.workers) }}
<span class="pure-form-message-inline">Number of concurrent workers to process watches. More workers = faster processing but higher memory usage.</span>
{% set worker_info = get_worker_status_info() %}
<span class="pure-form-message-inline">Number of concurrent workers to process watches. More workers = faster processing but higher memory usage.<br>
Currently running: <strong>{{ worker_info.count }}</strong> operational {{ worker_info.type }} workers{% if worker_info.active_workers > 0 %} ({{ worker_info.active_workers }} actively processing){% endif %}.</span>
</div>
<div class="pure-control-group inline-radio">
{{ render_field(form.requests.form.default_ua) }}

View file

@@ -51,6 +51,212 @@ class SignalPriorityQueue(queue.PriorityQueue):
except Exception as e:
logger.critical(f"Exception: {e}")
return item
def get_uuid_position(self, target_uuid):
"""
Find the position of a watch UUID in the priority queue.
Optimized for large queues - O(n) complexity instead of O(n log n).
Args:
target_uuid: The UUID to search for
Returns:
dict: Contains position info or None if not found
- position: 0-based position in queue (0 = next to be processed)
- total_items: total number of items in queue
- priority: the priority value of the found item
"""
with self.mutex:
queue_list = list(self.queue)
total_items = len(queue_list)
if total_items == 0:
return {
'position': None,
'total_items': 0,
'priority': None,
'found': False
}
# Find the target item and its priority first - O(n)
target_item = None
target_priority = None
for item in queue_list:
if (hasattr(item, 'item') and
isinstance(item.item, dict) and
item.item.get('uuid') == target_uuid):
target_item = item
target_priority = item.priority
break
if target_item is None:
return {
'position': None,
'total_items': total_items,
'priority': None,
'found': False
}
# Count how many items have higher priority (lower numbers) - O(n)
position = 0
for item in queue_list:
# Items with lower priority numbers are processed first
if item.priority < target_priority:
position += 1
elif item.priority == target_priority and item != target_item:
# For same priority, count items that come before this one
# (Note: this is approximate since heap order isn't guaranteed for equal priorities)
position += 1
return {
'position': position,
'total_items': total_items,
'priority': target_priority,
'found': True
}
def get_all_queued_uuids(self, limit=None, offset=0):
"""
Get UUIDs currently in the queue with their positions.
For large queues, use limit/offset for pagination.
Args:
limit: Maximum number of items to return (None = all)
offset: Number of items to skip (for pagination)
Returns:
dict: Contains items and metadata
- items: List of dicts with uuid, position, and priority
- total_items: Total number of items in queue
- returned_items: Number of items returned
- has_more: Whether there are more items after this page
"""
with self.mutex:
queue_list = list(self.queue)
total_items = len(queue_list)
if total_items == 0:
return {
'items': [],
'total_items': 0,
'returned_items': 0,
'has_more': False
}
# For very large queues, warn about performance
if total_items > 1000 and limit is None:
logger.warning(f"Getting all {total_items} queued items without limit - this may be slow")
# Sort only if we need exact positions (expensive for large queues)
if limit is not None and limit <= 100:
# For small requests, we can afford to sort
queue_items = sorted(queue_list)
end_idx = min(offset + limit, len(queue_items)) if limit else len(queue_items)
items_to_process = queue_items[offset:end_idx]
result = []
for position, item in enumerate(items_to_process, start=offset):
if (hasattr(item, 'item') and
isinstance(item.item, dict) and
'uuid' in item.item):
result.append({
'uuid': item.item['uuid'],
'position': position,
'priority': item.priority
})
return {
'items': result,
'total_items': total_items,
'returned_items': len(result),
'has_more': (offset + len(result)) < total_items
}
else:
# For large requests, return items with approximate positions
# This is much faster O(n) instead of O(n log n)
result = []
processed = 0
skipped = 0
for item in queue_list:
if (hasattr(item, 'item') and
isinstance(item.item, dict) and
'uuid' in item.item):
if skipped < offset:
skipped += 1
continue
if limit and processed >= limit:
break
# Approximate position based on priority comparison
approx_position = sum(1 for other in queue_list if other.priority < item.priority)
result.append({
'uuid': item.item['uuid'],
'position': approx_position, # Approximate
'priority': item.priority
})
processed += 1
return {
'items': result,
'total_items': total_items,
'returned_items': len(result),
'has_more': (offset + len(result)) < total_items,
'note': 'Positions are approximate for performance with large queues'
}
def get_queue_summary(self):
"""
Get a quick summary of queue state without expensive operations.
O(n) complexity - fast even for large queues.
Returns:
dict: Queue summary statistics
"""
with self.mutex:
queue_list = list(self.queue)
total_items = len(queue_list)
if total_items == 0:
return {
'total_items': 0,
'priority_breakdown': {},
'immediate_items': 0,
'clone_items': 0,
'scheduled_items': 0
}
# Count items by priority type - O(n)
immediate_items = 0 # priority 1
clone_items = 0 # priority 5
scheduled_items = 0 # priority > 100 (timestamps)
priority_counts = {}
for item in queue_list:
priority = item.priority
priority_counts[priority] = priority_counts.get(priority, 0) + 1
if priority == 1:
immediate_items += 1
elif priority == 5:
clone_items += 1
elif priority > 100:
scheduled_items += 1
return {
'total_items': total_items,
'priority_breakdown': priority_counts,
'immediate_items': immediate_items,
'clone_items': clone_items,
'scheduled_items': scheduled_items,
'min_priority': min(priority_counts.keys()) if priority_counts else None,
'max_priority': max(priority_counts.keys()) if priority_counts else None
}
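A small usage sketch for the three inspection methods above. It assumes SignalPriorityQueue() can be constructed with no extra arguments, and that queued items expose .priority plus an .item dict carrying 'uuid', as the hasattr/isinstance checks above expect; QueueItem below is a hypothetical stand-in for the project's real queued-item class.

from dataclasses import dataclass, field

@dataclass(order=True)
class QueueItem:                      # hypothetical stand-in
    priority: int
    item: dict = field(compare=False)

q = SignalPriorityQueue()
q.put(QueueItem(priority=1, item={'uuid': 'watch-a'}))              # immediate
q.put(QueueItem(priority=5, item={'uuid': 'watch-b'}))              # clone
q.put(QueueItem(priority=1_750_000_000, item={'uuid': 'watch-c'}))  # scheduled (timestamp)

print(q.get_uuid_position('watch-b'))    # position 1: one priority-1 item is ahead
print(q.get_queue_summary())             # 1 immediate, 1 clone, 1 scheduled
print(q.get_all_queued_uuids(limit=10))  # exact sorted positions for small limits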
class AsyncSignalPriorityQueue(asyncio.PriorityQueue):
@@ -107,3 +313,188 @@ class AsyncSignalPriorityQueue(asyncio.PriorityQueue):
Returns the internal queue for template access
"""
return self._queue if hasattr(self, '_queue') else []
def get_uuid_position(self, target_uuid):
"""
Find the position of a watch UUID in the async priority queue.
Optimized for large queues - O(n) complexity instead of O(n log n).
Args:
target_uuid: The UUID to search for
Returns:
dict: Contains position info or None if not found
- position: 0-based position in queue (0 = next to be processed)
- total_items: total number of items in queue
- priority: the priority value of the found item
"""
queue_list = list(self._queue)
total_items = len(queue_list)
if total_items == 0:
return {
'position': None,
'total_items': 0,
'priority': None,
'found': False
}
# Find the target item and its priority first - O(n)
target_item = None
target_priority = None
for item in queue_list:
if (hasattr(item, 'item') and
isinstance(item.item, dict) and
item.item.get('uuid') == target_uuid):
target_item = item
target_priority = item.priority
break
if target_item is None:
return {
'position': None,
'total_items': total_items,
'priority': None,
'found': False
}
# Count how many items have higher priority (lower numbers) - O(n)
position = 0
for item in queue_list:
if item.priority < target_priority:
position += 1
elif item.priority == target_priority and item != target_item:
position += 1
return {
'position': position,
'total_items': total_items,
'priority': target_priority,
'found': True
}
def get_all_queued_uuids(self, limit=None, offset=0):
"""
Get UUIDs currently in the async queue with their positions.
For large queues, use limit/offset for pagination.
Args:
limit: Maximum number of items to return (None = all)
offset: Number of items to skip (for pagination)
Returns:
dict: Contains items and metadata (same structure as sync version)
"""
queue_list = list(self._queue)
total_items = len(queue_list)
if total_items == 0:
return {
'items': [],
'total_items': 0,
'returned_items': 0,
'has_more': False
}
# Same logic as sync version but without mutex
if limit is not None and limit <= 100:
queue_items = sorted(queue_list)
end_idx = min(offset + limit, len(queue_items)) if limit else len(queue_items)
items_to_process = queue_items[offset:end_idx]
result = []
for position, item in enumerate(items_to_process, start=offset):
if (hasattr(item, 'item') and
isinstance(item.item, dict) and
'uuid' in item.item):
result.append({
'uuid': item.item['uuid'],
'position': position,
'priority': item.priority
})
return {
'items': result,
'total_items': total_items,
'returned_items': len(result),
'has_more': (offset + len(result)) < total_items
}
else:
# Fast approximate positions for large queues
result = []
processed = 0
skipped = 0
for item in queue_list:
if (hasattr(item, 'item') and
isinstance(item.item, dict) and
'uuid' in item.item):
if skipped < offset:
skipped += 1
continue
if limit and processed >= limit:
break
approx_position = sum(1 for other in queue_list if other.priority < item.priority)
result.append({
'uuid': item.item['uuid'],
'position': approx_position,
'priority': item.priority
})
processed += 1
return {
'items': result,
'total_items': total_items,
'returned_items': len(result),
'has_more': (offset + len(result)) < total_items,
'note': 'Positions are approximate for performance with large queues'
}
def get_queue_summary(self):
"""
Get a quick summary of async queue state.
O(n) complexity - fast even for large queues.
"""
queue_list = list(self._queue)
total_items = len(queue_list)
if total_items == 0:
return {
'total_items': 0,
'priority_breakdown': {},
'immediate_items': 0,
'clone_items': 0,
'scheduled_items': 0
}
immediate_items = 0
clone_items = 0
scheduled_items = 0
priority_counts = {}
for item in queue_list:
priority = item.priority
priority_counts[priority] = priority_counts.get(priority, 0) + 1
if priority == 1:
immediate_items += 1
elif priority == 5:
clone_items += 1
elif priority > 100:
scheduled_items += 1
return {
'total_items': total_items,
'priority_breakdown': priority_counts,
'immediate_items': immediate_items,
'clone_items': clone_items,
'scheduled_items': scheduled_items,
'min_priority': min(priority_counts.keys()) if priority_counts else None,
'max_priority': max(priority_counts.keys()) if priority_counts else None
}
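The async variants behave the same way; a minimal sketch follows, assuming AsyncSignalPriorityQueue() needs no extra constructor arguments. Because these helpers read self._queue without holding a lock, they are best called from the event loop's own thread.

import asyncio
from dataclasses import dataclass, field

@dataclass(order=True)
class QueueItem:                      # same hypothetical stand-in as the sync sketch
    priority: int
    item: dict = field(compare=False)

async def main():
    q = AsyncSignalPriorityQueue()
    await q.put(QueueItem(priority=1, item={'uuid': 'watch-a'}))
    await q.put(QueueItem(priority=5, item={'uuid': 'watch-b'}))
    print(q.get_uuid_position('watch-a'))        # {'position': 0, ...} - next up
    print(q.get_queue_summary()['total_items'])  # 2

asyncio.run(main())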

View file

@@ -148,6 +148,31 @@ def _jinja2_filter_format_number_locale(value: float) -> str:
def _watch_is_checking_now(watch_obj, format="%Y-%m-%d %H:%M:%S"):
return worker_handler.is_watch_running(watch_obj['uuid'])
@app.template_global('get_watch_queue_position')
def _get_watch_queue_position(watch_obj):
"""Get the position of a watch in the queue"""
uuid = watch_obj['uuid']
return update_q.get_uuid_position(uuid)
@app.template_global('get_current_worker_count')
def _get_current_worker_count():
"""Get the current number of operational workers"""
return worker_handler.get_worker_count()
@app.template_global('get_worker_status_info')
def _get_worker_status_info():
"""Get detailed worker status information for display"""
status = worker_handler.get_worker_status()
running_uuids = worker_handler.get_running_uuids()
return {
'count': status['worker_count'],
'type': status['worker_type'],
'active_workers': len(running_uuids),
'processing_watches': running_uuids,
'loop_running': status.get('async_loop_running', None)
}
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
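For reference, a hedged illustration of the dict shape that get_worker_status_info() hands to templates; the values below are hypothetical.

# Hypothetical example of the structure returned by get_worker_status_info().
example_worker_info = {
    'count': 10,                  # operational workers (status['worker_count'])
    'type': 'async',              # worker implementation (status['worker_type'])
    'active_workers': 2,          # workers currently processing a watch
    'processing_watches': ['uuid-a', 'uuid-b'],  # UUIDs being checked right now
    'loop_running': True,         # async loop health; may be None if unknown
}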
@@ -521,6 +546,51 @@ def changedetection_app(config=None, datastore_o=None):
"expected_workers": expected_workers
})
# Queue status endpoint
@app.route('/queue-status', methods=['GET'])
@login_optionally_required
def queue_status():
from flask import jsonify, request
# Get specific UUID position if requested
target_uuid = request.args.get('uuid')
if target_uuid:
position_info = update_q.get_uuid_position(target_uuid)
return jsonify({
"status": "success",
"uuid": target_uuid,
"queue_position": position_info
})
else:
# Get pagination parameters
limit = request.args.get('limit', type=int)
offset = request.args.get('offset', type=int, default=0)
summary_only = request.args.get('summary', default='false').lower() in ('1', 'true', 'yes')
if summary_only:
# Fast summary for large queues
summary = update_q.get_queue_summary()
return jsonify({
"status": "success",
"queue_summary": summary
})
else:
# Get queued items with pagination support
if limit is None:
# Default limit for large queues to prevent performance issues
queue_size = update_q.qsize()
if queue_size > 100:
limit = 50
logger.warning(f"Large queue ({queue_size} items) detected, limiting to {limit} items. Use ?limit=N for more.")
all_queued = update_q.get_all_queued_uuids(limit=limit, offset=offset)
return jsonify({
"status": "success",
"queue_size": update_q.qsize(),
"queued_data": all_queued
})
# Start the async workers during app initialization
# The count can be overridden by ENV, otherwise the default from settings is used
n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
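A hedged client-side sketch for the /queue-status endpoint above, using the requests library; the base URL is an assumption, and authenticated deployments will also need a session cookie.

import requests

BASE = 'http://localhost:5000'  # assumption - adjust to your deployment

# Fast summary, cheap even for large queues
print(requests.get(f'{BASE}/queue-status', params={'summary': 'true'}).json())

# Queue position of a single watch
print(requests.get(f'{BASE}/queue-status', params={'uuid': 'some-watch-uuid'}).json())

# Paginated listing of queued watches
print(requests.get(f'{BASE}/queue-status', params={'limit': 20, 'offset': 0}).json())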

View file

@@ -31,6 +31,7 @@
const socketio_url="{{ get_socketio_path() }}/socket.io";
const is_authenticated = {% if current_user.is_authenticated or not has_password %}true{% else %}false{% endif %};
</script>
<script src="https://unpkg.com/feather-icons"></script>
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
<script src="{{url_for('static_content', group='js', filename='csrf.js')}}" defer></script>
{% if socket_io_enabled %}

View file

@@ -79,7 +79,7 @@ def test_DNS_errors(client, live_server, measure_memory_usage):
wait_for_all_checks(client)
res = client.get(url_for("watchlist.index"))
found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data
found_name_resolution_error = b"No address found" in res.data or b"Name or service not known" in res.data
assert found_name_resolution_error
# Should always record that we tried
assert bytes("just now".encode('utf-8')) in res.data
@@ -108,7 +108,7 @@ def test_low_level_errors_clear_correctly(client, live_server, measure_memory_usage):
# We should see the DNS error
res = client.get(url_for("watchlist.index"))
found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data
found_name_resolution_error = b"No address found" in res.data or b"Name or service not known" in res.data
assert found_name_resolution_error
# Update with what should work
@@ -123,7 +123,7 @@ def test_low_level_errors_clear_correctly(client, live_server, measure_memory_usage):
# Now the error should be gone
wait_for_all_checks(client)
res = client.get(url_for("watchlist.index"))
found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data
found_name_resolution_error = b"No address found" in res.data or b"Name or service not known" in res.data
assert not found_name_resolution_error
res = client.get(url_for("ui.form_delete", uuid="all"), follow_redirects=True)

View file

@@ -60,7 +60,7 @@ def test_check_notification_error_handling(client, live_server, measure_memory_usage):
# The error should show in the notification logs
res = client.get(
url_for("settings.notification_logs"))
found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data
found_name_resolution_error = b"No address found" in res.data or b"Name or service not known" in res.data
assert found_name_resolution_error
# And the working one, which is after the 'broken' one should still have fired

View file

@@ -143,15 +143,12 @@ def wait_for_all_checks(client=None):
required_empty_duration = 0.2
logger = logging.getLogger()
- time.sleep(1.2)
+ empty_since = None
while attempt < max_attempts:
q_length = global_update_q.qsize()
# Check if any workers are still processing
- time.sleep(1.2)
- q_length = global_update_q.qsize()
running_uuids = worker_handler.get_running_uuids()
any_workers_busy = len(running_uuids) > 0