import time
import datetime
import os
import sys
import threading
import signal

from opendm import log
from pyodm import Node, exceptions
from pyodm.utils import AtomicCounter

try:
    import queue
except ImportError:
    import Queue as queue


class LocalRemoteExecutor:
    """
    A class for performing OpenSfM reconstructions and full ODM pipeline executions
    using a mix of local and remote processing. Tasks are executed locally one at a time
    and remotely until a node runs out of available slots for processing. This allows us
    to use the processing power of the current machine as well as to offload tasks to a
    network node.
    """
    def __init__(self, nodeUrl):
        self.node = Node.from_url(nodeUrl)
        self.project_paths = []  # Populated via set_projects()

        log.ODM_INFO("LRE: Initializing using cluster node %s:%s" % (self.node.host, self.node.port))
        try:
            odm_version = self.node.info().odm_version
            log.ODM_INFO("LRE: Node is online and running ODM version: %s" % odm_version)
        except exceptions.NodeConnectionError:
            log.ODM_WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.")
        except Exception as e:
            log.ODM_ERROR("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
            sys.exit(1)

    def set_projects(self, paths):
        self.project_paths = paths

    def run_reconstruction(self):
        if not self.project_paths:
            return

        # Shared variables across threads
        class nonloc:
            error = None
            local_is_processing = False
            semaphore = None

        # Number of tasks currently submitted to the node (incremented right
        # after a remote upload, decremented when a remote task finishes)
        node_task_limit = AtomicCounter(0)

        # Create queue
        q = queue.Queue()
        for pp in self.project_paths:
            log.ODM_DEBUG("LRE: Adding to queue %s" % pp)
            q.put(ReconstructionTask(pp))

        def handle_result(task, local, error=None):
            if error:
                if isinstance(error, NodeTaskLimitReachedException) and not nonloc.semaphore:
                    # The node has no free slots left: size a semaphore to the number of
                    # tasks it accepted and drain it, so the worker blocks before fetching
                    # new tasks until a slot frees up.
                    nonloc.semaphore = threading.Semaphore(node_task_limit.value)
                    log.ODM_DEBUG("LRE: Node task limit reached. Setting semaphore to %s" % node_task_limit.value)
                    for i in range(node_task_limit.value):
                        nonloc.semaphore.acquire()

                log.ODM_WARNING("LRE: %s failed with: %s" % (task, str(error)))

                if task.retries < task.max_retries:
                    # Put task back in queue with a linear backoff
                    task.retries += 1
                    task.wait_until = datetime.datetime.now() + datetime.timedelta(seconds=task.retries * task.retry_timeout)
                    log.ODM_DEBUG("LRE: Re-queueing %s (retries: %s)" % (task, task.retries))
                    q.put(task)
                else:
                    nonloc.error = error
            else:
                if not local:
                    node_task_limit.increment(-1)

                log.ODM_INFO("LRE: %s finished successfully" % task)

            if local:
                nonloc.local_is_processing = False

            if nonloc.semaphore: nonloc.semaphore.release()
            q.task_done()

        def worker():
            while True:
                # If we've found a limit on the maximum number of tasks
                # a node can process, we block until some tasks have completed
                if nonloc.semaphore: nonloc.semaphore.acquire()

                task = q.get()
                if task is None or nonloc.error is not None:
                    q.task_done()
                    if nonloc.semaphore: nonloc.semaphore.release()
                    break

                if not nonloc.local_is_processing:
                    # Process local
                    try:
                        nonloc.local_is_processing = True
                        task.process(True, handle_result)
                    except Exception as e:
                        handle_result(task, True, e)
                else:
                    # Process remote
                    try:
                        task.process(False, handle_result)
                        node_task_limit.increment()  # Called after upload, but before processing is started
                    except Exception as e:
                        handle_result(task, False, e)

        # Define thread
        t = threading.Thread(target=worker)

        def cleanup_remote_tasks():
            log.ODM_WARNING("LRE: Attempting to cleanup remote tasks")
            pass # TODO
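            # A possible approach (hedged sketch, not the implemented behavior): if the
            # remote tasks created by process_remote() were tracked in a list
            # (hypothetical registry, not present in this class), they could be
            # cancelled through pyodm, e.g.:
            #
            #   for remote_task in created_remote_tasks:
            #       try:
            #           remote_task.cancel()
            #       except exceptions.OdmError:
            #           pass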

        # Capture SIGTERM so that we can
        # attempt to cleanup if the process is terminated
        original_sigterm_handler = signal.getsignal(signal.SIGTERM)

        def sigterm_handler(signum, frame):
            log.ODM_WARNING("LRE: Caught SIGTERM")
            cleanup_remote_tasks()
            os._exit(1)

        signal.signal(signal.SIGTERM, sigterm_handler)

        # Start the worker thread
        t.start()

        # Block until all tasks are done (or CTRL+C)
        try:
            while q.unfinished_tasks > 0:
                time.sleep(0.5)
        except KeyboardInterrupt:
            log.ODM_WARNING("LRE: CTRL+C")
            cleanup_remote_tasks()
            os._exit(1)

        # Stop workers
        q.put(None)

        # Wait for the worker thread to exit
        t.join()

        # Restore the original SIGTERM handler
        signal.signal(signal.SIGTERM, original_sigterm_handler)

        if nonloc.error is not None:
            raise nonloc.error

    def run_toolchain(self):
        if not self.project_paths:
            return


class NodeTaskLimitReachedException(Exception):
    pass


class Task:
    def __init__(self, project_path, max_retries=10, retry_timeout=1):
        self.project_path = project_path
        self.wait_until = datetime.datetime.now()  # Don't run this task until a certain time
        self.max_retries = max_retries
        self.retries = 0
        self.retry_timeout = retry_timeout
        self.local = None

    def process(self, local, done):
        def handle_result(error=None):
            done(self, local, error)

        log.ODM_INFO("LRE: About to process %s %s" % (self, 'locally' if local else 'remotely'))

        if local:
            t = threading.Thread(target=self.process_local, args=(handle_result,))
            t.start()
        else:
            now = datetime.datetime.now()
            if self.wait_until > now:
                wait_for = (self.wait_until - now).seconds
                log.ODM_DEBUG("LRE: Waiting %s seconds before processing %s" % (wait_for, self))
                time.sleep(wait_for)

            # TODO: we could consider uploading multiple tasks
            # in parallel. But since we are using the same node
            # perhaps this wouldn't be a big speedup.
            self.process_remote(handle_result)  # Block until upload is complete

    def process_local(self, done):
        raise NotImplementedError()

    def process_remote(self, done):
        raise NotImplementedError()

    def __str__(self):
        return os.path.basename(self.project_path)


class ReconstructionTask(Task):
    def process_local(self, done):
        print("Process local: " + self.project_path)
        time.sleep(10)
        done()

    def process_remote(self, done):
        # Simulated remote processing used to exercise the executor logic
        def test():
            time.sleep(4)
            done()

        if self.project_path == '/datasets/brighton/opensfm/submodels/submodel_0001':
            done(Exception("TEST EXCEPTION!" + self.project_path))
        elif self.project_path == '/datasets/brighton/opensfm/submodels/submodel_0002':
            done(NodeTaskLimitReachedException("Limit reached"))
        elif self.project_path in ('/datasets/brighton/opensfm/submodels/submodel_0003',
                                   '/datasets/brighton/opensfm/submodels/submodel_0004'):
            threading.Thread(target=test).start()
        else:
            print("Process remote: " + self.project_path)
            done()
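

# A rough smoke test (hedged sketch): the node URL and project paths below are
# placeholders. ReconstructionTask above only simulates work, so this exercises
# the queueing and threading logic even when no NodeODM instance is reachable.
if __name__ == '__main__':
    lre = LocalRemoteExecutor('http://localhost:3000')
    lre.set_projects(['/path/to/submodels/submodel_%04d' % i for i in range(4)])
    lre.run_reconstruction()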