2016-11-09 23:19:40 +00:00
import logging
2016-11-11 17:55:56 +00:00
import traceback
2017-02-06 03:23:02 +00:00
from multiprocessing . dummy import Pool as ThreadPool
from threading import Lock
2016-11-11 17:55:56 +00:00
2016-10-24 18:14:35 +00:00
from apscheduler . schedulers import SchedulerAlreadyRunningError , SchedulerNotRunningError
2017-02-06 03:23:02 +00:00
from apscheduler . schedulers . background import BackgroundScheduler
2016-10-30 00:28:37 +00:00
from django import db
2017-02-06 03:23:02 +00:00
from django . db . models import Q , Count
2017-02-08 16:27:23 +00:00
from webodm import settings
2017-02-06 03:23:02 +00:00
from app . models import Task , Project
2016-11-02 22:32:24 +00:00
from nodeodm import status_codes
2017-02-06 03:23:02 +00:00
from nodeodm . models import ProcessingNode
from app . background import background
2016-10-24 18:14:35 +00:00
logger = logging . getLogger ( ' app.logger ' )
2016-11-09 23:08:41 +00:00
# Module-wide scheduler that fires the recurring background jobs below.
scheduler = BackgroundScheduler({
    # If several runs of the same job are overdue (e.g. after the process
    # was blocked), run a single merged instance instead of every missed one.
    'apscheduler.job_defaults.coalesce': 'true',
    # Allow at most 3 concurrently running instances of the same job.
    'apscheduler.job_defaults.max_instances': '3',
})
2016-10-24 18:14:35 +00:00
2016-10-29 23:25:21 +00:00
@background
def update_nodes_info():
    """Refresh the cached status/queue information of every registered processing node."""
    for node in ProcessingNode.objects.all():
        node.update_node_info()
2016-10-25 20:04:24 +00:00
# Serializes the query-and-lock step below so that two overlapping scheduler
# runs cannot claim (and then process) the same tasks concurrently.
tasks_mutex = Lock()

@background
def process_pending_tasks():
    """Find every task that needs attention, lock it, and process each one
    on its own worker thread.

    A task "needs attention" when it has no processing node but wants one
    auto-assigned, when it has a node and its status is unknown/queued/running
    (so it needs a status refresh), or when it has a pending user action.
    Tasks already locked by another thread are skipped.
    """
    tasks = []
    try:
        tasks_mutex.acquire()

        # All tasks that have a processing node assigned
        # Or that need one assigned (via auto)
        # or tasks that need a status update
        # or tasks that have a pending action
        # and that are not locked (being processed by another thread)
        tasks = Task.objects.filter(Q(processing_node__isnull=True, auto_processing_node=True) |
                                    Q(Q(status=None) | Q(status__in=[status_codes.QUEUED, status_codes.RUNNING]), processing_node__isnull=False) |
                                    Q(pending_action__isnull=False)).exclude(Q(processing_lock=True))

        # Claim the tasks before releasing the mutex so no other thread
        # picks them up while they are being processed.
        for task in tasks:
            task.processing_lock = True
            task.save()
    finally:
        tasks_mutex.release()

    def process(task):
        # Worker body run on a pool thread: process one task, then release
        # its lock no matter what happened.
        try:
            task.process()
        except Exception as e:
            logger.error("Uncaught error! This is potentially bad. Please report it to http://github.com/OpenDroneMap/WebODM/issues: {} {}".format(e, traceback.format_exc()))
            # Surface the failure during the test suite instead of hiding it.
            if settings.TESTING: raise e
        finally:
            # Might have been deleted
            if task.pk is not None:
                task.processing_lock = False
                task.save()

            # Each worker thread opens its own Django DB connections; close
            # them explicitly so they are not leaked when the thread exits.
            db.connections.close_all()

    if tasks.count() > 0:
        # One thread per claimed task; chunksize=1 keeps the work evenly spread.
        pool = ThreadPool(tasks.count())
        pool.map(process, tasks, chunksize=1)
        pool.close()
        pool.join()
2016-10-24 18:14:35 +00:00
2016-11-15 16:51:19 +00:00
def cleanup_projects():
    """Permanently remove projects that were flagged for deletion and are now empty."""
    # A project marked 'deleting' may only disappear once every one of its
    # tasks has been removed (tasks_count == 0).
    deletable = Project.objects.filter(deleting=True) \
                               .annotate(tasks_count=Count('task')) \
                               .filter(tasks_count=0)
    total, count_dict = deletable.delete()

    if total > 0 and 'app.Project' in count_dict:
        logger.info("Deleted {} projects".format(count_dict['app.Project']))
2016-10-24 18:14:35 +00:00
def setup():
    """Start the background scheduler and register the recurring jobs."""
    try:
        scheduler.start()

        # (job callable, run interval in seconds)
        recurring_jobs = (
            (update_nodes_info, 30),
            (process_pending_tasks, 5),
            (cleanup_projects, 60),
        )
        for job, interval in recurring_jobs:
            scheduler.add_job(job, 'interval', seconds=interval)
    except SchedulerAlreadyRunningError:
        logger.warning("Scheduler already running (this is OK while testing)")
2016-10-24 18:14:35 +00:00
def teardown():
    """Stop the background scheduler, tolerating the case where it never started."""
    logger.info("Stopping scheduler...")
    try:
        scheduler.shutdown()
    except SchedulerNotRunningError:
        logger.warning("Scheduler not running")
    else:
        logger.info("Scheduler stopped")