import logging
import traceback
from multiprocessing.dummy import Pool as ThreadPool
from threading import Lock

from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError
from apscheduler.schedulers.background import BackgroundScheduler
from django import db
from django.db.models import Q, Count

from app.models import Task, Project
from nodeodm import status_codes
from nodeodm.models import ProcessingNode
from app.background import background

logger = logging.getLogger('app.logger')
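
# Job defaults for the scheduler below: coalescing is disabled so missed
# runs are not merged into a single run, and up to 3 instances of the
# same job may run concurrently.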
scheduler = BackgroundScheduler({
    'apscheduler.job_defaults.coalesce': 'false',
    'apscheduler.job_defaults.max_instances': '3',
})
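

# Periodically refresh the cached information of every registered
# processing node (delegated to ProcessingNode.update_node_info).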
@background
def update_nodes_info():
    processing_nodes = ProcessingNode.objects.all()
    for processing_node in processing_nodes:
        processing_node.update_node_info()
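

# Guards the critical section in process_pending_tasks where pending
# tasks are selected and flagged with processing_lock.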
tasks_mutex = Lock()
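

# Find tasks that need work, lock them, then process each one in its own
# worker thread; the mutex ensures two overlapping scheduler runs can't
# pick up the same task.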
@background
def process_pending_tasks():
    tasks = []
    try:
        tasks_mutex.acquire()

        # All tasks that have a processing node assigned
        # but don't have a UUID
        # or tasks that have a pending action
        # and that are not locked (being processed by another thread)
        tasks = Task.objects.filter(Q(uuid='', last_error__isnull=True, processing_node__isnull=False) |
                                    Q(status__in=[status_codes.QUEUED, status_codes.RUNNING], processing_node__isnull=False) |
                                    Q(status=None, processing_node__isnull=False) |
                                    Q(pending_action__isnull=False)).exclude(Q(processing_lock=True))
        for task in tasks:
            logger.info("Acquiring lock: {}".format(task))
            task.processing_lock = True
            task.save()
    finally:
        tasks_mutex.release()
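
    # Process a single task and release its lock afterwards; runs on a
    # thread from the pool below.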
    def process(task):
        try:
            task.process()

            # Might have been deleted
            if task.pk is not None:
                task.processing_lock = False
                task.save()
        except Exception as e:
            logger.error("Uncaught error: {} {}".format(e, traceback.format_exc()))
        finally:
            # Close this thread's DB connections so they aren't leaked
            # when the pool thread goes away
            db.connections.close_all()

    if tasks.count() > 0:
        pool = ThreadPool(tasks.count())
        pool.map(process, tasks, chunksize=1)
        pool.close()
        pool.join()
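

# Complete deferred deletions: a project flagged with deleting=True is
# removed only once it has no tasks left.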
def cleanup_projects():
    # Delete all projects that are marked for deletion
    # and that have no tasks left
    total, count_dict = Project.objects.filter(deleting=True).annotate(
        tasks_count=Count('task')
    ).filter(tasks_count=0).delete()
    if total > 0 and 'app.Project' in count_dict:
        logger.info("Deleted {} projects".format(count_dict['app.Project']))
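

# Start the scheduler and register the periodic jobs. Calling this twice
# is harmless: the second call logs a warning instead of raising.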
def setup():
    logger.info("Starting background scheduler...")
    try:
        scheduler.start()
        scheduler.add_job(update_nodes_info, 'interval', seconds=30)
        scheduler.add_job(process_pending_tasks, 'interval', seconds=5)
        scheduler.add_job(cleanup_projects, 'interval', seconds=15)
    except SchedulerAlreadyRunningError:
        logger.warning("Scheduler already running (this is OK while testing)")
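

# Stop the scheduler; tolerates being called when it was never started.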
def teardown():
    logger.info("Stopping scheduler...")
    try:
        scheduler.shutdown()
        logger.info("Scheduler stopped")
    except SchedulerNotRunningError:
        logger.warning("Scheduler not running")
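

# A minimal usage sketch (an assumption, not part of this module): setup()
# and teardown() are meant to be invoked from the hosting Django app, e.g.
# from an AppConfig. The class and app names below are illustrative only.
#
#   from django.apps import AppConfig
#
#   class MainConfig(AppConfig):  # hypothetical
#       name = 'app'
#
#       def ready(self):
#           from app import scheduler  # this module
#           scheduler.setup()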