kopia lustrzana https://github.com/OpenDroneMap/ODM
commit
c20ac4ca33
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
|||
2.5.4
|
||||
2.5.5
|
||||
|
|
|
@ -189,7 +189,12 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
|
|||
|
||||
# Create virtual raster
|
||||
tiles_vrt_path = os.path.abspath(os.path.join(outdir, "tiles.vrt"))
|
||||
run('gdalbuildvrt "%s" "%s"' % (tiles_vrt_path, '" "'.join(map(lambda t: t['filename'], tiles))))
|
||||
tiles_file_list = os.path.abspath(os.path.join(outdir, "tiles_list.txt"))
|
||||
with open(tiles_file_list, 'w') as f:
|
||||
for t in tiles:
|
||||
f.write(t['filename'] + '\n')
|
||||
|
||||
run('gdalbuildvrt -input_file_list "%s" "%s" ' % (tiles_file_list, tiles_vrt_path))
|
||||
|
||||
merged_vrt_path = os.path.abspath(os.path.join(outdir, "merged.vrt"))
|
||||
geotiff_tmp_path = os.path.abspath(os.path.join(outdir, 'tiles.tmp.tif'))
|
||||
|
@ -266,7 +271,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
|
|||
else:
|
||||
os.replace(geotiff_tmp_path, io.related_file_path(output_path, postfix=".unfilled"))
|
||||
|
||||
for cleanup_file in [tiles_vrt_path, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
|
||||
for cleanup_file in [tiles_vrt_path, tiles_file_list, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
|
||||
if os.path.exists(cleanup_file): os.remove(cleanup_file)
|
||||
for t in tiles:
|
||||
if os.path.exists(t['filename']): os.remove(t['filename'])
|
||||
|
|
110
opendm/log.py
110
opendm/log.py
|
@ -1,4 +1,14 @@
|
|||
import sys
|
||||
import threading
|
||||
import os
|
||||
import json
|
||||
import datetime
|
||||
import dateutil.parser
|
||||
import shutil
|
||||
import multiprocessing
|
||||
|
||||
from opendm.loghelpers import double_quote, args_to_dict
|
||||
from vmem import virtual_memory
|
||||
|
||||
if sys.platform == 'win32':
|
||||
# No colors on Windows, sorry!
|
||||
|
@ -18,16 +28,100 @@ else:
|
|||
FAIL = '\033[91m'
|
||||
ENDC = '\033[0m'
|
||||
|
||||
# logging has too many quirks...
|
||||
lock = threading.Lock()
|
||||
|
||||
def odm_version():
    """Return the ODM version string read from the repository's VERSION file."""
    version_path = os.path.join(os.path.dirname(__file__), "..", "VERSION")
    with open(version_path) as version_file:
        first_line = version_file.readline()
    return first_line.strip()
|
||||
|
||||
def memory():
    """Return total and available system memory, both in megabytes."""
    stats = virtual_memory()
    # Convert from bytes to MB, rounded to the nearest integer
    return {
        'total': round(stats.total / 1024 / 1024),
        'available': round(stats.available / 1024 / 1024),
    }
|
||||
|
||||
class ODMLogger:
|
||||
def __init__(self):
    # When True, debug() messages are also printed to the console
    self.show_debug = False
    # In-memory JSON log structure; stays None until init_json_output() is called
    self.json = None
    # Primary path the JSON log is written to by close()
    self.json_output_file = None
    # Run start timestamp, used to compute total run time later
    self.start_time = datetime.datetime.now()
|
||||
|
||||
def log(self, startc, msg, level_name):
    # Render "[LEVEL]  message" padded so messages line up, wrapped in the
    # requested ANSI start color and the reset code.
    level = ("[" + level_name + "]").ljust(9)
    # NOTE(review): this block was reconstructed from a mangled diff view;
    # the unguarded print below appears to be the *removed* pre-lock version
    # of the lock-guarded print that follows. Confirm against the upstream
    # file — keeping both would print every message twice.
    print("%s%s %s%s" % (startc, level, msg, ENDC))
    sys.stdout.flush()
    with lock:
        print("%s%s %s%s" % (startc, level, msg, ENDC))
        sys.stdout.flush()
        # Mirror the message into the JSON log's current stage, if enabled
        if self.json is not None:
            self.json['stages'][-1]['messages'].append({
                'message': msg,
                'type': level_name.lower()
            })
|
||||
|
||||
def init_json_output(self, output_files, args):
    """Initialize the JSON run log and remember where to write it.

    output_files: list of destination paths; the first is the primary file,
    the rest receive copies on close().
    """
    self.json_output_files = output_files
    self.json_output_file = output_files[0]
    self.json = {
        'odmVersion': odm_version(),
        'memory': memory(),
        'cpus': multiprocessing.cpu_count(),
        'images': -1,  # filled in later via log_json_images()
        'options': args_to_dict(args),
        'startTime': self.start_time.isoformat(),
        'stages': [],
        'processes': [],
        'success': False,
    }
|
||||
|
||||
def log_json_stage_run(self, name, start_time):
    """Record the start of a pipeline stage in the JSON log (no-op when JSON logging is off)."""
    if self.json is None:
        return
    stage_entry = {
        'name': name,
        'startTime': start_time.isoformat(),
        'messages': [],
    }
    self.json['stages'].append(stage_entry)
|
||||
|
||||
def log_json_images(self, count):
    """Record the number of usable input images in the JSON log."""
    if self.json is None:
        return
    self.json['images'] = count
|
||||
|
||||
def log_json_stage_error(self, error, exit_code, stack_trace=""):
    """Record a fatal error (message, exit code, stack trace) and stamp end times."""
    if self.json is None:
        return
    self.json['error'] = {
        'code': exit_code,
        'message': error,
    }
    # One stripped entry per line of the traceback text
    self.json['stackTrace'] = [line.strip() for line in stack_trace.split("\n")]
    self._log_json_end_time()
|
||||
|
||||
def log_json_success(self):
    """Mark the JSON run log as successful and stamp the end time."""
    if self.json is None:
        return
    self.json['success'] = True
    self._log_json_end_time()
|
||||
|
||||
def log_json_process(self, cmd, exit_code, output=None):
    """Append a subprocess run record to the JSON log.

    :param cmd: command line string that was executed
    :param exit_code: process return code
    :param output: optional list of captured output lines; the key is
        omitted from the record when empty or None
    """
    if self.json is None:
        return
    # NOTE: 'output' previously used a mutable default argument ([]);
    # None avoids sharing one list object across all calls. Since the
    # original only ever read the default (never mutated it), behavior
    # for callers is unchanged.
    record = {
        'command': cmd,
        'exitCode': exit_code,
    }
    if output:
        record['output'] = output
    self.json['processes'].append(record)
|
||||
|
||||
def _log_json_end_time(self):
    """Stamp the end time and total duration on the run and on its last stage."""
    if self.json is not None:
        end_time = datetime.datetime.now()
        self.json['endTime'] = end_time.isoformat()
        # Whole-run wall-clock time in seconds, rounded to 2 decimals
        self.json['totalTime'] = round((end_time - self.start_time).total_seconds(), 2)

        if self.json['stages']:
            # Close out the most recently started stage as well; its start
            # time was stored as an ISO string, so parse it back first.
            last_stage = self.json['stages'][-1]
            last_stage['endTime'] = end_time.isoformat()
            start_time = dateutil.parser.isoparse(last_stage['startTime'])
            last_stage['totalTime'] = round((end_time - start_time).total_seconds(), 2)
|
||||
|
||||
def info(self, msg):
    """Log *msg* at INFO level using the default console color."""
    self.log(DEFAULT, msg, "INFO")
|
||||
|
||||
|
@ -44,6 +138,16 @@ class ODMLogger:
|
|||
if self.show_debug:
|
||||
self.log(OKGREEN, msg, "DEBUG")
|
||||
|
||||
def close(self):
    """Write the JSON run log to disk and copy it to any extra destinations.

    Best-effort: failures are reported to stdout, never raised.
    """
    if self.json is None or self.json_output_file is None:
        return
    try:
        with open(self.json_output_file, 'w') as out:
            out.write(json.dumps(self.json, indent=4))
        # Mirror the primary log file to the remaining destinations
        for extra_path in self.json_output_files[1:]:
            shutil.copy(self.json_output_file, extra_path)
    except Exception as e:
        print("Cannot write log.json: %s" % str(e))
|
||||
|
||||
logger = ODMLogger()
|
||||
|
||||
ODM_INFO = logger.info
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
from shlex import _find_unsafe
|
||||
|
||||
def double_quote(s):
    """Return a shell-escaped version of *s*, using double quotes."""
    if not s:
        return '""'
    if _find_unsafe(s) is None:
        # Nothing needs escaping; return the string unchanged
        return s

    # Escape embedded double quotes with a backslash, then wrap:
    # the string $"b is quoted as "$\"b"
    escaped = s.replace('"', '\\"')
    return '"{}"'.format(escaped)
|
||||
|
||||
def args_to_dict(args):
    """Return a sorted dict of the parsed CLI arguments, hiding secrets.

    Internal "*_is_set" bookkeeping keys are dropped, and the sm_cluster
    value is replaced with True so its token is never logged.
    """
    raw = vars(args)
    result = {}
    for key in sorted(raw):
        if key.endswith("_is_set"):
            continue  # skip internal bookkeeping flags
        value = raw[key]
        # Don't leak the cluster token — record only that it is set
        if key == 'sm_cluster' and value is not None:
            result[key] = True
        else:
            result[key] = value
    return result
|
|
@ -169,6 +169,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
|
|||
'--pointWeight {pointWeight} '
|
||||
'--samplesPerNode {samples} '
|
||||
'--threads {threads} '
|
||||
'--bType 2 '
|
||||
'--linearFit '
|
||||
'{verbose}'.format(**poissonReconArgs))
|
||||
|
||||
|
@ -182,7 +183,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
|
|||
|
||||
system.run('"{reconstructmesh}" -i "{infile}" '
|
||||
'-o "{outfile}" '
|
||||
'--remove-spikes 0 --remove-spurious 0 --smooth 0 '
|
||||
'--remove-spikes 0 --remove-spurious 20 --smooth 0 '
|
||||
'--target-face-num {max_faces} '.format(**cleanupArgs))
|
||||
|
||||
# Delete intermediate results
|
||||
|
|
|
@ -50,13 +50,12 @@ class OSFMContext:
|
|||
|
||||
# Check that a reconstruction file has been created
|
||||
if not self.reconstructed():
|
||||
log.ODM_ERROR("The program could not process this dataset using the current settings. "
|
||||
raise system.ExitException("The program could not process this dataset using the current settings. "
|
||||
"Check that the images have enough overlap, "
|
||||
"that there are enough recognizable features "
|
||||
"and that the images are in focus. "
|
||||
"You could also try to increase the --min-num-features parameter."
|
||||
"The program will now exit.")
|
||||
exit(1)
|
||||
|
||||
|
||||
def setup(self, args, images_path, reconstruction, append_config = [], rerun=False):
|
||||
|
|
|
@ -45,8 +45,7 @@ class LocalRemoteExecutor:
|
|||
log.ODM_WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.")
|
||||
self.node_online = False
|
||||
except Exception as e:
|
||||
log.ODM_ERROR("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
|
||||
exit(1)
|
||||
raise system.ExitException("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
|
||||
|
||||
def set_projects(self, paths):
|
||||
self.project_paths = paths
|
||||
|
|
|
@ -6,6 +6,8 @@ import sys
|
|||
import subprocess
|
||||
import string
|
||||
import signal
|
||||
import io
|
||||
from collections import deque
|
||||
|
||||
from opendm import context
|
||||
from opendm import log
|
||||
|
@ -15,6 +17,9 @@ class SubprocessException(Exception):
|
|||
super().__init__(msg)
|
||||
self.errorCode = errorCode
|
||||
|
||||
class ExitException(Exception):
    """Raised to abort processing with a user-facing error message."""
    pass
|
||||
|
||||
def get_ccd_widths():
|
||||
"""Return the CCD Width of the camera listed in the JSON defs file."""
|
||||
with open(context.ccd_widths_path) as f:
|
||||
|
@ -80,9 +85,20 @@ def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_path
|
|||
for k in env_vars:
|
||||
env[k] = str(env_vars[k])
|
||||
|
||||
p = subprocess.Popen(cmd, shell=True, env=env, start_new_session=True)
|
||||
p = subprocess.Popen(cmd, shell=True, env=env, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
running_subprocesses.append(p)
|
||||
lines = deque()
|
||||
for line in io.TextIOWrapper(p.stdout):
|
||||
print(line, end="")
|
||||
|
||||
lines.append(line.strip())
|
||||
if len(lines) == 11:
|
||||
lines.popleft()
|
||||
|
||||
retcode = p.wait()
|
||||
|
||||
log.logger.log_json_process(cmd, retcode, list(lines))
|
||||
|
||||
running_subprocesses.remove(p)
|
||||
if retcode < 0:
|
||||
raise SubprocessException("Child was terminated by signal {}".format(-retcode), -retcode)
|
||||
|
|
|
@ -314,8 +314,10 @@ class ODM_Stage:
|
|||
|
||||
def run(self, outputs = {}):
|
||||
start_time = system.now_raw()
|
||||
log.ODM_INFO('Running %s stage' % self.name)
|
||||
log.logger.log_json_stage_run(self.name, start_time)
|
||||
|
||||
log.ODM_INFO('Running %s stage' % self.name)
|
||||
|
||||
self.process(self.args, outputs)
|
||||
|
||||
# The tree variable should always be populated at this point
|
||||
|
|
|
@ -2,7 +2,7 @@ import os, shutil
|
|||
from opendm import log
|
||||
from opendm.photo import find_largest_photo_dim
|
||||
from osgeo import gdal
|
||||
from shlex import _find_unsafe
|
||||
from opendm.loghelpers import double_quote
|
||||
|
||||
def get_depthmap_resolution(args, photos):
|
||||
if 'depthmap_resolution_is_set' in args:
|
||||
|
@ -43,17 +43,6 @@ def get_raster_stats(geotiff):
|
|||
|
||||
return stats
|
||||
|
||||
def double_quote(s):
|
||||
"""Return a shell-escaped version of the string *s*."""
|
||||
if not s:
|
||||
return '""'
|
||||
if _find_unsafe(s) is None:
|
||||
return s
|
||||
|
||||
# use double quotes, and prefix double quotes with a \
|
||||
# the string $"b is then quoted as "$\"b"
|
||||
return '"' + s.replace('"', '\\\"') + '"'
|
||||
|
||||
def get_processing_results_paths():
|
||||
return [
|
||||
"odm_georeferencing",
|
||||
|
@ -67,6 +56,7 @@ def get_processing_results_paths():
|
|||
"orthophoto_tiles",
|
||||
"images.json",
|
||||
"cameras.json",
|
||||
"log.json",
|
||||
]
|
||||
|
||||
def copy_paths(paths, destination, rerun):
|
||||
|
|
15
run.py
15
run.py
|
@ -12,6 +12,7 @@ from opendm import system
|
|||
from opendm import io
|
||||
from opendm.progress import progressbc
|
||||
from opendm.utils import double_quote, get_processing_results_paths
|
||||
from opendm.loghelpers import args_to_dict
|
||||
|
||||
import os
|
||||
|
||||
|
@ -23,18 +24,10 @@ if __name__ == '__main__':
|
|||
log.ODM_INFO('Initializing ODM - %s' % system.now())
|
||||
|
||||
# Print args
|
||||
args_dict = vars(args)
|
||||
args_dict = args_to_dict(args)
|
||||
log.ODM_INFO('==============')
|
||||
for k in sorted(args_dict.keys()):
|
||||
# Skip _is_set keys
|
||||
if k.endswith("_is_set"):
|
||||
continue
|
||||
|
||||
# Don't leak token
|
||||
if k == 'sm_cluster' and args_dict[k] is not None:
|
||||
log.ODM_INFO('%s: True' % k)
|
||||
else:
|
||||
log.ODM_INFO('%s: %s' % (k, args_dict[k]))
|
||||
for k in args_dict.keys():
|
||||
log.ODM_INFO('%s: %s' % (k, args_dict[k]))
|
||||
log.ODM_INFO('==============')
|
||||
|
||||
progressbc.set_project_name(args.name)
|
||||
|
|
|
@ -91,8 +91,7 @@ class ODMLoadDatasetStage(types.ODM_Stage):
|
|||
images_database_file = os.path.join(tree.root_path, 'images.json')
|
||||
if not io.file_exists(images_database_file) or self.rerun():
|
||||
if not os.path.exists(images_dir):
|
||||
log.ODM_ERROR("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))
|
||||
exit(1)
|
||||
raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))
|
||||
|
||||
files, rejects = get_images(images_dir)
|
||||
if files:
|
||||
|
@ -130,13 +129,13 @@ class ODMLoadDatasetStage(types.ODM_Stage):
|
|||
# Save image database for faster restart
|
||||
save_images_database(photos, images_database_file)
|
||||
else:
|
||||
log.ODM_ERROR('Not enough supported images in %s' % images_dir)
|
||||
exit(1)
|
||||
raise system.ExitException('Not enough supported images in %s' % images_dir)
|
||||
else:
|
||||
# We have an images database, just load it
|
||||
photos = load_images_database(images_database_file)
|
||||
|
||||
log.ODM_INFO('Found %s usable images' % len(photos))
|
||||
log.logger.log_json_images(len(photos))
|
||||
|
||||
# Create reconstruction object
|
||||
reconstruction = types.ODM_Reconstruction(photos)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
import os, traceback
|
||||
import os, traceback, sys
|
||||
|
||||
from opendm import context
|
||||
from opendm import types
|
||||
|
@ -27,6 +27,12 @@ class ODMApp:
|
|||
if args.debug:
|
||||
log.logger.show_debug = True
|
||||
|
||||
json_log_paths = [os.path.join(args.project_path, "log.json")]
|
||||
if args.copy_to:
|
||||
json_log_paths.append(args.copy_to)
|
||||
|
||||
log.logger.init_json_output(json_log_paths, args)
|
||||
|
||||
dataset = ODMLoadDatasetStage('dataset', args, progress=5.0,
|
||||
verbose=args.verbose)
|
||||
split = ODMSplitStage('split', args, progress=75.0)
|
||||
|
@ -81,16 +87,19 @@ class ODMApp:
|
|||
def execute(self):
|
||||
try:
|
||||
self.first_stage.run()
|
||||
log.logger.log_json_success()
|
||||
return 0
|
||||
except system.SubprocessException as e:
|
||||
print("")
|
||||
print("===== Dumping Info for Geeks (developers need this to fix bugs) =====")
|
||||
print(str(e))
|
||||
traceback.print_exc()
|
||||
stack_trace = traceback.format_exc()
|
||||
print(stack_trace)
|
||||
print("===== Done, human-readable information to follow... =====")
|
||||
print("")
|
||||
|
||||
code = e.errorCode
|
||||
log.logger.log_json_stage_error(str(e), code, stack_trace)
|
||||
|
||||
if code == 139 or code == 134 or code == 1:
|
||||
# Segfault
|
||||
|
@ -107,3 +116,12 @@ class ODMApp:
|
|||
# TODO: more?
|
||||
|
||||
return code
|
||||
except system.ExitException as e:
|
||||
log.ODM_ERROR(str(e))
|
||||
log.logger.log_json_stage_error(str(e), 1, traceback.format_exc())
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
log.logger.log_json_stage_error(str(e), 1, traceback.format_exc())
|
||||
raise e
|
||||
finally:
|
||||
log.logger.close()
|
||||
|
|
|
@ -20,8 +20,7 @@ class ODMOpenMVSStage(types.ODM_Stage):
|
|||
octx = OSFMContext(tree.opensfm)
|
||||
|
||||
if not photos:
|
||||
log.ODM_ERROR('Not enough photos in photos array to start OpenMVS')
|
||||
exit(1)
|
||||
raise system.ExitException('Not enough photos in photos array to start OpenMVS')
|
||||
|
||||
# check if reconstruction was done before
|
||||
if not io.file_exists(tree.openmvs_model) or self.rerun():
|
||||
|
@ -96,8 +95,7 @@ class ODMOpenMVSStage(types.ODM_Stage):
|
|||
|
||||
scene_files = glob.glob(os.path.join(tree.openmvs, "scene_[0-9][0-9][0-9][0-9].mvs"))
|
||||
if len(scene_files) == 0:
|
||||
log.ODM_ERROR("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")
|
||||
exit(1)
|
||||
raise system.ExitException("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")
|
||||
|
||||
log.ODM_INFO("Fusing depthmaps for %s scenes" % len(scene_files))
|
||||
|
||||
|
@ -159,8 +157,7 @@ class ODMOpenMVSStage(types.ODM_Stage):
|
|||
]
|
||||
system.run('%s %s' % (context.omvs_densify_path, ' '.join(config)))
|
||||
else:
|
||||
log.ODM_WARNING("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
|
||||
exit(1)
|
||||
raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
|
||||
|
||||
# TODO: add support for image masks
|
||||
|
||||
|
|
|
@ -24,8 +24,7 @@ class ODMOpenSfMStage(types.ODM_Stage):
|
|||
photos = reconstruction.photos
|
||||
|
||||
if not photos:
|
||||
log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
|
||||
exit(1)
|
||||
raise system.ExitException('Not enough photos in photos array to start OpenSfM')
|
||||
|
||||
octx = OSFMContext(tree.opensfm)
|
||||
octx.setup(args, tree.dataset_raw, reconstruction=reconstruction, rerun=self.rerun())
|
||||
|
|
|
@ -243,8 +243,8 @@ class ODMMergeStage(types.ODM_Stage):
|
|||
|
||||
if outputs['large']:
|
||||
if not os.path.exists(tree.submodels_path):
|
||||
log.ODM_ERROR("We reached the merge stage, but %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problem before restarting?" % tree.submodels_path)
|
||||
exit(1)
|
||||
raise system.ExitException("We reached the merge stage, but %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problem before restarting?" % tree.submodels_path)
|
||||
|
||||
|
||||
# Merge point clouds
|
||||
if args.merge in ['all', 'pointcloud']:
|
||||
|
|
Ładowanie…
Reference in New Issue