Merge branch 'OpenDroneMap:master' into master

pull/1435/head
Stephen Mather 2021-07-11 13:20:52 -04:00 committed by GitHub
commit b9f0bb9fba
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 310 additions and 93 deletions

View file

@@ -13,6 +13,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 12
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx

View file

@@ -13,6 +13,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 12
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx

View file

@@ -17,6 +17,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 12
- name: Build
id: build
uses: diddlesnaps/snapcraft-multiarch-action@v1

View file

@@ -9,6 +9,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 12
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
@@ -29,6 +33,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 12
- name: Build
id: build
uses: diddlesnaps/snapcraft-multiarch-action@v1

View file

@@ -73,7 +73,7 @@ See http://docs.opendronemap.org for tutorials and more guides.
## Forum
We have a vibrant [community forum](https://community.opendronemap.org/). You can [search it](https://community.opendronemap.org/search?expanded=true) for issues you might be having with ODM and you can post questions there. We encourage users of ODM to partecipate in the forum and to engage with fellow drone mapping users.
We have a vibrant [community forum](https://community.opendronemap.org/). You can [search it](https://community.opendronemap.org/search?expanded=true) for issues you might be having with ODM and you can post questions there. We encourage users of ODM to participate in the forum and to engage with fellow drone mapping users.
## Windows Setup

View file

@@ -169,7 +169,7 @@ else()
endif()
externalproject_add(poissonrecon
GIT_REPOSITORY https://github.com/OpenDroneMap/PoissonRecon.git
GIT_TAG 250
GIT_TAG 257
PREFIX ${SB_BINARY_DIR}/PoissonRecon
SOURCE_DIR ${SB_SOURCE_DIR}/PoissonRecon
UPDATE_COMMAND ""

View file

@@ -20,7 +20,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS
GIT_TAG 250
GIT_TAG 256
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------

View file

@@ -19,7 +19,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
GIT_TAG 250
GIT_TAG 257
#--Update/Patch step----------
UPDATE_COMMAND git submodule update --init --recursive
#--Configure step-------------

View file

@@ -1 +1 @@
2.5.4
2.5.7

View file

@@ -97,8 +97,8 @@ installruntimedepsonly() {
installdepsfromsnapcraft runtime openmvs
}
install() {
installreqs() {
cd /code
## Set up library paths
@@ -123,6 +123,10 @@ install() {
if [ ! -z "$GPU_INSTALL" ]; then
pip install --ignore-installed -r requirements.gpu.txt
fi
}
install() {
installreqs
if [ ! -z "$PORTABLE_INSTALL" ]; then
echo "Replacing g++ and gcc with our scripts for portability..."
@@ -176,7 +180,7 @@ clean() {
usage() {
echo "Usage:"
echo "bash configure.sh <install|update|uninstall|help> [nproc]"
echo "bash configure.sh <install|update|uninstall|installreqs|help> [nproc]"
echo "Subcommands:"
echo " install"
echo " Installs all dependencies and modules for running OpenDroneMap"
@@ -186,6 +190,8 @@ usage() {
echo " Removes SuperBuild and build modules, then re-installs them. Note this does not update OpenDroneMap to the latest version. "
echo " uninstall"
echo " Removes SuperBuild and build modules. Does not uninstall dependencies"
echo " installreqs"
echo " Only installs the requirements (does not build SuperBuild)"
echo " clean"
echo " Cleans the SuperBuild directory by removing temporary files. "
echo " help"
@@ -193,7 +199,7 @@ usage() {
echo "[nproc] is an optional argument that can set the number of processes for the make -j tag. By default it uses $(nproc)"
}
if [[ $1 =~ ^(install|installruntimedepsonly|reinstall|uninstall|clean)$ ]]; then
if [[ $1 =~ ^(install|installruntimedepsonly|reinstall|uninstall|installreqs|clean)$ ]]; then
RUNPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
"$1"
else
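
Note: with this change the requirements step can be run on its own, without building the SuperBuild; a minimal invocation:

    bash configure.sh installreqs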

View file

@@ -5,7 +5,7 @@ from opendm.concurrency import get_max_memory
from opendm import io
from opendm import log
def convert_to_cogeo(src_path, blocksize=256, max_workers=1):
def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLATE"):
"""
Guarantee that the .tif passed as an argument is a Cloud Optimized GeoTIFF (cogeo)
The file is destructively converted into a cogeo.
@@ -30,6 +30,8 @@ def convert_to_cogeo(src_path, blocksize=256, max_workers=1):
'max_memory': get_max_memory(),
'src_path': src_path,
'tmpfile': tmpfile,
'compress': compression,
'predictor': '2' if compression in ['LZW', 'DEFLATE'] else '1',
}
try:
@@ -37,7 +39,8 @@ def convert_to_cogeo(src_path, blocksize=256, max_workers=1):
"-of COG "
"-co NUM_THREADS={threads} "
"-co BLOCKSIZE={blocksize} "
"-co COMPRESS=deflate "
"-co COMPRESS={compress} "
"-co PREDICTOR={predictor} "
"-co BIGTIFF=IF_SAFER "
"-co RESAMPLING=NEAREST "
"--config GDAL_CACHEMAX {max_memory}% "

View file

@@ -277,7 +277,7 @@ def config(argv=None, parser=None):
'Default: %(default)s'))
parser.add_argument('--mesh-octree-depth',
metavar='<positive integer>',
metavar='<integer: 1 <= x <= 14>',
action=StoreValue,
default=11,
type=int,
@@ -362,6 +362,13 @@
help='Reduce the memory usage needed for depthmap fusion by splitting large scenes into tiles. Turn this on if your machine doesn\'t have much RAM and/or you\'ve set --pc-quality to high or ultra. Experimental. '
'Default: %(default)s')
parser.add_argument('--pc-geometric',
action=StoreTrue,
nargs=0,
default=False,
help='Improve the accuracy of the point cloud by computing geometrically consistent depthmaps. This increases processing time, but can improve results in urban scenes. '
'Default: %(default)s')
parser.add_argument('--smrf-scalar',
metavar='<positive float>',
action=StoreValue,
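
The new flag takes no value (nargs=0); a hypothetical invocation, with project path and name purely illustrative:

    python run.py --project-path /datasets --pc-geometric myproject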

View file

@@ -189,7 +189,12 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
# Create virtual raster
tiles_vrt_path = os.path.abspath(os.path.join(outdir, "tiles.vrt"))
run('gdalbuildvrt "%s" "%s"' % (tiles_vrt_path, '" "'.join(map(lambda t: t['filename'], tiles))))
tiles_file_list = os.path.abspath(os.path.join(outdir, "tiles_list.txt"))
with open(tiles_file_list, 'w') as f:
for t in tiles:
f.write(t['filename'] + '\n')
run('gdalbuildvrt -input_file_list "%s" "%s" ' % (tiles_file_list, tiles_vrt_path))
merged_vrt_path = os.path.abspath(os.path.join(outdir, "merged.vrt"))
geotiff_tmp_path = os.path.abspath(os.path.join(outdir, 'tiles.tmp.tif'))
@@ -266,7 +271,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
else:
os.replace(geotiff_tmp_path, io.related_file_path(output_path, postfix=".unfilled"))
for cleanup_file in [tiles_vrt_path, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
for cleanup_file in [tiles_vrt_path, tiles_file_list, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
if os.path.exists(cleanup_file): os.remove(cleanup_file)
for t in tiles:
if os.path.exists(t['filename']): os.remove(t['filename'])
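
Writing the tile paths to a file and using gdalbuildvrt's -input_file_list sidesteps the OS command-line length limit when a DEM is assembled from thousands of tiles. A standalone sketch of the same pattern (paths are hypothetical):

    import glob
    import subprocess

    tiles = sorted(glob.glob("dem_tiles/*.tif"))  # hypothetical tile directory
    with open("tiles_list.txt", "w") as f:
        f.write("\n".join(tiles) + "\n")  # one dataset path per line
    subprocess.run(["gdalbuildvrt", "-input_file_list", "tiles_list.txt", "tiles.vrt"], check=True)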

View file

@@ -1,4 +1,14 @@
import sys
import threading
import os
import json
import datetime
import dateutil.parser
import shutil
import multiprocessing
from opendm.loghelpers import double_quote, args_to_dict
from vmem import virtual_memory
if sys.platform == 'win32':
# No colors on Windows, sorry!
@@ -18,16 +28,100 @@ else:
FAIL = '\033[91m'
ENDC = '\033[0m'
# logging has too many quirks...
lock = threading.Lock()
def odm_version():
with open(os.path.join(os.path.dirname(__file__), "..", "VERSION")) as f:
return f.read().split("\n")[0].strip()
def memory():
mem = virtual_memory()
return {
'total': round(mem.total / 1024 / 1024),
'available': round(mem.available / 1024 / 1024)
}
class ODMLogger:
def __init__(self):
self.show_debug = False
self.json = None
self.json_output_file = None
self.start_time = datetime.datetime.now()
def log(self, startc, msg, level_name):
level = ("[" + level_name + "]").ljust(9)
print("%s%s %s%s" % (startc, level, msg, ENDC))
sys.stdout.flush()
with lock:
print("%s%s %s%s" % (startc, level, msg, ENDC))
sys.stdout.flush()
if self.json is not None:
self.json['stages'][-1]['messages'].append({
'message': msg,
'type': level_name.lower()
})
def init_json_output(self, output_files, args):
self.json_output_files = output_files
self.json_output_file = output_files[0]
self.json = {}
self.json['odmVersion'] = odm_version()
self.json['memory'] = memory()
self.json['cpus'] = multiprocessing.cpu_count()
self.json['images'] = -1
self.json['options'] = args_to_dict(args)
self.json['startTime'] = self.start_time.isoformat()
self.json['stages'] = []
self.json['processes'] = []
self.json['success'] = False
def log_json_stage_run(self, name, start_time):
if self.json is not None:
self.json['stages'].append({
'name': name,
'startTime': start_time.isoformat(),
'messages': [],
})
def log_json_images(self, count):
if self.json is not None:
self.json['images'] = count
def log_json_stage_error(self, error, exit_code, stack_trace = ""):
if self.json is not None:
self.json['error'] = {
'code': exit_code,
'message': error
}
self.json['stackTrace'] = list(map(str.strip, stack_trace.split("\n")))
self._log_json_end_time()
def log_json_success(self):
if self.json is not None:
self.json['success'] = True
self._log_json_end_time()
def log_json_process(self, cmd, exit_code, output = []):
if self.json is not None:
d = {
'command': cmd,
'exitCode': exit_code,
}
if output:
d['output'] = output
self.json['processes'].append(d)
def _log_json_end_time(self):
if self.json is not None:
end_time = datetime.datetime.now()
self.json['endTime'] = end_time.isoformat()
self.json['totalTime'] = round((end_time - self.start_time).total_seconds(), 2)
if self.json['stages']:
last_stage = self.json['stages'][-1]
last_stage['endTime'] = end_time.isoformat()
start_time = dateutil.parser.isoparse(last_stage['startTime'])
last_stage['totalTime'] = round((end_time - start_time).total_seconds(), 2)
def info(self, msg):
self.log(DEFAULT, msg, "INFO")
@@ -44,6 +138,16 @@ class ODMLogger:
if self.show_debug:
self.log(OKGREEN, msg, "DEBUG")
def close(self):
if self.json is not None and self.json_output_file is not None:
try:
with open(self.json_output_file, 'w') as f:
f.write(json.dumps(self.json, indent=4))
for f in self.json_output_files[1:]:
shutil.copy(self.json_output_file, f)
except Exception as e:
print("Cannot write log.json: %s" % str(e))
logger = ODMLogger()
ODM_INFO = logger.info
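
For orientation, the log.json that close() writes ends up with roughly this shape (all values are illustrative, not real output):

    {
      "odmVersion": "2.5.7",
      "memory": {"total": 32768, "available": 20480},
      "cpus": 8,
      "images": 120,
      "options": {"mesh_octree_depth": 11},
      "startTime": "2021-07-11T13:20:52.000000",
      "stages": [{"name": "dataset", "startTime": "2021-07-11T13:20:53.000000", "messages": []}],
      "processes": [],
      "success": true,
      "endTime": "2021-07-11T13:40:12.000000",
      "totalTime": 1160.25
    }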

View file

@@ -0,0 +1,28 @@
from shlex import _find_unsafe
def double_quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return '""'
if _find_unsafe(s) is None:
return s
# use double quotes, and prefix double quotes with a \
# the string $"b is then quoted as "$\"b"
return '"' + s.replace('"', '\\\"') + '"'
def args_to_dict(args):
args_dict = vars(args)
result = {}
for k in sorted(args_dict.keys()):
# Skip _is_set keys
if k.endswith("_is_set"):
continue
# Don't leak token
if k == 'sm_cluster' and args_dict[k] is not None:
result[k] = True
else:
result[k] = args_dict[k]
return result
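
A quick sketch of how the extracted helper behaves:

    from opendm.loghelpers import double_quote

    double_quote("simple.tif")      # -> simple.tif (nothing to escape)
    double_quote("with space.tif")  # -> "with space.tif"
    double_quote('say "hi"')        # -> "say \"hi\""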

View file

@@ -4,6 +4,7 @@ from opendm.dem import commands
from opendm import system
from opendm import log
from opendm import context
from opendm import concurrency
from scipy import signal
import numpy as np
@@ -125,7 +126,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo
system.run('"{reconstructmesh}" -i "{infile}" '
'-o "{outfile}" '
'--remove-spikes 0 --remove-spurious 0 --smooth 0 '
'--remove-spikes 0 --remove-spurious 20 --smooth 0 '
'--target-face-num {max_faces} '.format(**cleanupArgs))
# Delete intermediate results
@@ -145,32 +146,55 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
# ext = .ply
outMeshDirty = os.path.join(mesh_path, "{}.dirty{}".format(basename, ext))
if os.path.isfile(outMeshDirty):
os.remove(outMeshDirty)
# Since PoissonRecon has some kind of a race condition on ppc64el, and this helps...
if platform.machine() == 'ppc64le':
log.ODM_WARNING("ppc64le platform detected, forcing single-threaded operation for PoissonRecon")
threads = 1
poissonReconArgs = {
'bin': context.poisson_recon_path,
'outfile': outMeshDirty,
'infile': inPointCloud,
'depth': depth,
'samples': samples,
'pointWeight': pointWeight,
'threads': threads,
'verbose': '--verbose' if verbose else ''
}
# Run PoissonRecon
system.run('"{bin}" --in "{infile}" '
'--out "{outfile}" '
'--depth {depth} '
'--pointWeight {pointWeight} '
'--samplesPerNode {samples} '
'--threads {threads} '
'--linearFit '
'{verbose}'.format(**poissonReconArgs))
while True:
poissonReconArgs = {
'bin': context.poisson_recon_path,
'outfile': outMeshDirty,
'infile': inPointCloud,
'depth': depth,
'samples': samples,
'pointWeight': pointWeight,
'threads': int(threads),
'memory': int(concurrency.get_max_memory_mb(4, 0.8) // 1024),
'verbose': '--verbose' if verbose else ''
}
# Run PoissonRecon
try:
system.run('"{bin}" --in "{infile}" '
'--out "{outfile}" '
'--depth {depth} '
'--pointWeight {pointWeight} '
'--samplesPerNode {samples} '
'--threads {threads} '
'--maxMemory {memory} '
'--bType 2 '
'--linearFit '
'{verbose}'.format(**poissonReconArgs))
except Exception as e:
log.ODM_WARNING(str(e))
if os.path.isfile(outMeshDirty):
break # Done!
else:
# PoissonRecon will sometimes fail due to race conditions
# on certain machines, especially on Windows
threads //= 2
if threads < 1:
break
else:
log.ODM_WARNING("PoissonRecon failed with %s threads, let's retry with %s..." % (threads, threads // 2))
# Cleanup and reduce vertex count if necessary
cleanupArgs = {
@@ -182,7 +206,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
system.run('"{reconstructmesh}" -i "{infile}" '
'-o "{outfile}" '
'--remove-spikes 0 --remove-spurious 0 --smooth 0 '
'--remove-spikes 0 --remove-spurious 20 --smooth 0 '
'--target-face-num {max_faces} '.format(**cleanupArgs))
# Delete intermediate results
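
The retry loop above is a common backoff pattern for flaky multithreaded tools: on failure, halve the thread count and try again until the run succeeds or drops below one thread. A minimal sketch of the pattern in isolation (run_tool is a hypothetical callable that raises on failure):

    def run_with_thread_backoff(run_tool, threads):
        # Halve the thread count after each failure; give up below one thread.
        while threads >= 1:
            try:
                run_tool(threads)
                return True
            except Exception:
                threads //= 2
        return False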

View file

@@ -86,7 +86,7 @@ def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_ti
generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency)
if args.cog:
convert_to_cogeo(orthophoto_file, max_workers=args.max_concurrency)
convert_to_cogeo(orthophoto_file, max_workers=args.max_concurrency, compression=args.orthophoto_compression)
def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance=20, only_max_coords_feature=False):
if not os.path.exists(input_raster):

View file

@@ -50,13 +50,12 @@ class OSFMContext:
# Check that a reconstruction file has been created
if not self.reconstructed():
log.ODM_ERROR("The program could not process this dataset using the current settings. "
raise system.ExitException("The program could not process this dataset using the current settings. "
"Check that the images have enough overlap, "
"that there are enough recognizable features "
"and that the images are in focus. "
"You could also try to increase the --min-num-features parameter."
"The program will now exit.")
exit(1)
def setup(self, args, images_path, reconstruction, append_config = [], rerun=False):
@@ -194,6 +193,7 @@
"optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params or args.cameras else 'yes'),
"undistorted_image_format: tif",
"bundle_outlier_filtering_type: AUTO",
"sift_peak_threshold: 0.066",
"align_orientation_prior: vertical",
"triangulation_type: ROBUST",
"retriangulation_ratio: 2",

View file

@@ -45,8 +45,7 @@ class LocalRemoteExecutor:
log.ODM_WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.")
self.node_online = False
except Exception as e:
log.ODM_ERROR("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
exit(1)
raise system.ExitException("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
def set_projects(self, paths):
self.project_paths = paths

View file

@@ -6,6 +6,8 @@ import sys
import subprocess
import string
import signal
import io
from collections import deque
from opendm import context
from opendm import log
@@ -15,6 +17,9 @@
super().__init__(msg)
self.errorCode = errorCode
class ExitException(Exception):
pass
def get_ccd_widths():
"""Return the CCD Width of the camera listed in the JSON defs file."""
with open(context.ccd_widths_path) as f:
@@ -80,9 +85,20 @@ def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_path
for k in env_vars:
env[k] = str(env_vars[k])
p = subprocess.Popen(cmd, shell=True, env=env, start_new_session=True)
p = subprocess.Popen(cmd, shell=True, env=env, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
running_subprocesses.append(p)
lines = deque()
for line in io.TextIOWrapper(p.stdout):
print(line, end="")
lines.append(line.strip())
if len(lines) == 11:
lines.popleft()
retcode = p.wait()
log.logger.log_json_process(cmd, retcode, list(lines))
running_subprocesses.remove(p)
if retcode < 0:
raise SubprocessException("Child was terminated by signal {}".format(-retcode), -retcode)
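
The deque keeps only a bounded tail of the process output for the JSON log (once it holds 11 lines the oldest is popped, so at most 10 survive). deque's maxlen argument gives the same bound more directly; a minimal sketch:

    from collections import deque

    tail = deque(maxlen=10)  # silently discards the oldest entry beyond 10
    for line in ("line %d" % i for i in range(100)):  # stand-in for process output
        tail.append(line)
    print(list(tail))  # only the last 10 lines remain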

View file

@@ -314,8 +314,10 @@ class ODM_Stage:
def run(self, outputs = {}):
start_time = system.now_raw()
log.ODM_INFO('Running %s stage' % self.name)
log.logger.log_json_stage_run(self.name, start_time)
log.ODM_INFO('Running %s stage' % self.name)
self.process(self.args, outputs)
# The tree variable should always be populated at this point

View file

@@ -2,7 +2,7 @@ import os, shutil
from opendm import log
from opendm.photo import find_largest_photo_dim
from osgeo import gdal
from shlex import _find_unsafe
from opendm.loghelpers import double_quote
def get_depthmap_resolution(args, photos):
if 'depthmap_resolution_is_set' in args:
@@ -43,17 +43,6 @@ def get_raster_stats(geotiff):
return stats
def double_quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return '""'
if _find_unsafe(s) is None:
return s
# use double quotes, and prefix double quotes with a \
# the string $"b is then quoted as "$\"b"
return '"' + s.replace('"', '\\\"') + '"'
def get_processing_results_paths():
return [
"odm_georeferencing",
@@ -67,6 +56,7 @@ def get_processing_results_paths():
"orthophoto_tiles",
"images.json",
"cameras.json",
"log.json",
]
def copy_paths(paths, destination, rerun):

View file

@@ -14,7 +14,7 @@ networkx==2.5
numpy==1.19.4
Pillow==8.0.1
vmem==1.0.1
pyodm==1.5.5
pyodm==1.5.6
pyproj==3.0.0.post1
Pysolar==0.9
pytz==2020.4
@@ -28,4 +28,4 @@ scikit-image==0.17.2
scipy==1.5.4
xmltodict==0.12.0
fpdf2==2.2.0rc2
Shapely==1.7.1
Shapely==1.7.1

run.py
View file

@@ -12,6 +12,7 @@ from opendm import system
from opendm import io
from opendm.progress import progressbc
from opendm.utils import double_quote, get_processing_results_paths
from opendm.loghelpers import args_to_dict
import os
@@ -23,18 +24,10 @@ if __name__ == '__main__':
log.ODM_INFO('Initializing ODM - %s' % system.now())
# Print args
args_dict = vars(args)
args_dict = args_to_dict(args)
log.ODM_INFO('==============')
for k in sorted(args_dict.keys()):
# Skip _is_set keys
if k.endswith("_is_set"):
continue
# Don't leak token
if k == 'sm_cluster' and args_dict[k] is not None:
log.ODM_INFO('%s: True' % k)
else:
log.ODM_INFO('%s: %s' % (k, args_dict[k]))
for k in args_dict.keys():
log.ODM_INFO('%s: %s' % (k, args_dict[k]))
log.ODM_INFO('==============')
progressbc.set_project_name(args.name)

View file

@@ -91,8 +91,7 @@ class ODMLoadDatasetStage(types.ODM_Stage):
images_database_file = os.path.join(tree.root_path, 'images.json')
if not io.file_exists(images_database_file) or self.rerun():
if not os.path.exists(images_dir):
log.ODM_ERROR("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))
exit(1)
raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))
files, rejects = get_images(images_dir)
if files:
@@ -130,13 +129,13 @@ class ODMLoadDatasetStage(types.ODM_Stage):
# Save image database for faster restart
save_images_database(photos, images_database_file)
else:
log.ODM_ERROR('Not enough supported images in %s' % images_dir)
exit(1)
raise system.ExitException('Not enough supported images in %s' % images_dir)
else:
# We have an images database, just load it
photos = load_images_database(images_database_file)
log.ODM_INFO('Found %s usable images' % len(photos))
log.logger.log_json_images(len(photos))
# Create reconstruction object
reconstruction = types.ODM_Reconstruction(photos)

View file

@@ -1,4 +1,4 @@
import os, traceback
import os, traceback, sys
from opendm import context
from opendm import types
@@ -27,6 +27,12 @@ class ODMApp:
if args.debug:
log.logger.show_debug = True
json_log_paths = [os.path.join(args.project_path, "log.json")]
if args.copy_to:
json_log_paths.append(args.copy_to)
log.logger.init_json_output(json_log_paths, args)
dataset = ODMLoadDatasetStage('dataset', args, progress=5.0,
verbose=args.verbose)
split = ODMSplitStage('split', args, progress=75.0)
@@ -36,7 +42,7 @@ class ODMApp:
filterpoints = ODMFilterPoints('odm_filterpoints', args, progress=52.0)
meshing = ODMeshingStage('odm_meshing', args, progress=60.0,
max_vertex=args.mesh_size,
oct_tree=args.mesh_octree_depth,
oct_tree=max(1, min(14, args.mesh_octree_depth)),
samples=1.0,
point_weight=4.0,
max_concurrency=args.max_concurrency,
@@ -81,16 +87,19 @@ class ODMApp:
def execute(self):
try:
self.first_stage.run()
log.logger.log_json_success()
return 0
except system.SubprocessException as e:
print("")
print("===== Dumping Info for Geeks (developers need this to fix bugs) =====")
print(str(e))
traceback.print_exc()
stack_trace = traceback.format_exc()
print(stack_trace)
print("===== Done, human-readable information to follow... =====")
print("")
code = e.errorCode
log.logger.log_json_stage_error(str(e), code, stack_trace)
if code == 139 or code == 134 or code == 1:
# Segfault
@@ -107,3 +116,12 @@ class ODMApp:
# TODO: more?
return code
except system.ExitException as e:
log.ODM_ERROR(str(e))
log.logger.log_json_stage_error(str(e), 1, traceback.format_exc())
sys.exit(1)
except Exception as e:
log.logger.log_json_stage_error(str(e), 1, traceback.format_exc())
raise e
finally:
log.logger.close()
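
The new ExitException path replaces scattered exit(1) calls so a failing stage still reaches the finally block and log.json gets finalized. A stripped-down sketch of the control flow (run_stages and close_log are hypothetical callables):

    import sys

    class ExitException(Exception):
        pass

    def execute(run_stages, close_log):
        try:
            run_stages()  # any stage may raise ExitException with a user-facing message
            return 0
        except ExitException as e:
            print("ERROR: %s" % e)
            sys.exit(1)
        finally:
            close_log()  # always runs, so log.json is written even on failure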

View file

@@ -31,5 +31,6 @@ class ODMFilterPoints(types.ODM_Stage):
log.ODM_WARNING('Found a valid point cloud file in: %s' %
tree.filtered_point_cloud)
if args.optimize_disk_space:
os.remove(inputPointCloud)
if args.optimize_disk_space and inputPointCloud:
if os.path.isfile(inputPointCloud):
os.remove(inputPointCloud)

View file

@@ -20,8 +20,7 @@ class ODMOpenMVSStage(types.ODM_Stage):
octx = OSFMContext(tree.opensfm)
if not photos:
log.ODM_ERROR('Not enough photos in photos array to start OpenMVS')
exit(1)
raise system.ExitException('Not enough photos in photos array to start OpenMVS')
# check if reconstruction was done before
if not io.file_exists(tree.openmvs_model) or self.rerun():
@@ -73,6 +72,9 @@
if args.pc_tile:
config.append("--fusion-mode 1")
if not args.pc_geometric:
config.append("--geometric-iters 0")
system.run('%s "%s" %s' % (context.omvs_densify_path,
openmvs_scene_file,
@@ -96,8 +98,7 @@
scene_files = glob.glob(os.path.join(tree.openmvs, "scene_[0-9][0-9][0-9][0-9].mvs"))
if len(scene_files) == 0:
log.ODM_ERROR("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")
exit(1)
raise system.ExitException("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")
log.ODM_INFO("Fusing depthmaps for %s scenes" % len(scene_files))
@@ -159,8 +160,7 @@
]
system.run('%s %s' % (context.omvs_densify_path, ' '.join(config)))
else:
log.ODM_WARNING("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
exit(1)
raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
# TODO: add support for image masks

View file

@@ -17,6 +17,8 @@ from opendm import thermal
from opendm import nvm
from opendm.photo import find_largest_photo
from opensfm.undistort import add_image_format_extension
class ODMOpenSfMStage(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
@@ -24,8 +26,7 @@ class ODMOpenSfMStage(types.ODM_Stage):
photos = reconstruction.photos
if not photos:
log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
exit(1)
raise system.ExitException('Not enough photos in photos array to start OpenSfM')
octx = OSFMContext(tree.opensfm)
octx.setup(args, tree.dataset_raw, reconstruction=reconstruction, rerun=self.rerun())
@@ -159,6 +160,7 @@
# We finally restore the original files later
added_shots_file = octx.path('added_shots_done.txt')
s2p, p2s = None, None
if not io.file_exists(added_shots_file) or self.rerun():
primary_band_name = multispectral.get_primary_band_name(reconstruction.multi_camera, args.primary_band)
@@ -214,12 +216,12 @@
# Primary band maps to itself
if band['name'] == primary_band_name:
img_map[fname + '.tif'] = fname + '.tif'
img_map[add_image_format_extension(fname, 'tif')] = add_image_format_extension(fname, 'tif')
else:
band_filename = next((p.filename for p in p2s[fname] if p.band_name == band['name']), None)
if band_filename is not None:
img_map[fname + '.tif'] = band_filename + '.tif'
img_map[add_image_format_extension(fname, 'tif')] = add_image_format_extension(band_filename, 'tif')
else:
log.ODM_WARNING("Cannot find %s band equivalent for %s" % (band, fname))

View file

@@ -243,8 +243,8 @@ class ODMMergeStage(types.ODM_Stage):
if outputs['large']:
if not os.path.exists(tree.submodels_path):
log.ODM_ERROR("We reached the merge stage, but %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problem before restarting?" % tree.submodels_path)
exit(1)
raise system.ExitException("We reached the merge stage, but %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problem before restarting?" % tree.submodels_path)
# Merge point clouds
if args.merge in ['all', 'pointcloud']:

View file

@@ -55,7 +55,7 @@ if [ "$1" = "--setup" ]; then
# Misc aliases
echo "alias pdal=/code/SuperBuild/install/bin/pdal" >> $HOME/.bashrc
echo "alias opensfm=/code/SuperBuild/src/opensfm/bin/opensfm" >> $HOME/.bashrc
echo "alias opensfm=/code/SuperBuild/install/bin/opensfm/bin/opensfm" >> $HOME/.bashrc
su -c bash $2