From 4134f72e8f552312ab00306732ff22f7f5c99888 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 21 Oct 2022 12:08:41 -0400 Subject: [PATCH 001/102] Always generate sky masks with fisheye cameras --- stages/dataset.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stages/dataset.py b/stages/dataset.py index f8354aaa..8d01703f 100644 --- a/stages/dataset.py +++ b/stages/dataset.py @@ -153,14 +153,14 @@ class ODMLoadDatasetStage(types.ODM_Stage): if args.sky_removal: # For each image that : # - Doesn't already have a mask, AND - # - Is not nadir (or if orientation info is missing), AND + # - Is not nadir (or if orientation info is missing, or if camera lens is fisheye), AND # - There are no spaces in the image filename (OpenSfM requirement) # Automatically generate a sky mask # Generate list of sky images sky_images = [] for p in photos: - if p.mask is None and (p.pitch is None or (abs(p.pitch) > 20)) and (not " " in p.filename): + if p.mask is None and (args.camera_lens in ['fisheye', 'spherical'] or p.pitch is None or (abs(p.pitch) > 20)) and (not " " in p.filename): sky_images.append({'file': os.path.join(images_dir, p.filename), 'p': p}) if len(sky_images) > 0: From 70d2b76c788a84f1e0ae3548d769c6bd03b000bb Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 21 Oct 2022 17:51:39 -0400 Subject: [PATCH 002/102] Revert OpenMVS to 291 --- SuperBuild/cmake/External-OpenMVS.cmake | 2 +- stages/openmvs.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/SuperBuild/cmake/External-OpenMVS.cmake b/SuperBuild/cmake/External-OpenMVS.cmake index bde655ba..7450471d 100644 --- a/SuperBuild/cmake/External-OpenMVS.cmake +++ b/SuperBuild/cmake/External-OpenMVS.cmake @@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS - GIT_TAG 292 + GIT_TAG 291 #--Update/Patch step---------- UPDATE_COMMAND "" #--Configure 
step------------- diff --git a/stages/openmvs.py b/stages/openmvs.py index f659ab6d..19a49f36 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -77,7 +77,8 @@ class ODMOpenMVSStage(types.ODM_Stage): gpu_config = [] use_gpu = has_gpu(args) if use_gpu: - gpu_config.append("--cuda-device -3") + # gpu_config.append("--cuda-device -3") + gpu_config.append("--cuda-device -1") else: gpu_config.append("--cuda-device -2") From c7e93e18dd66c8c9b4fb60560e9b4de00ab4c3de Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 7 Nov 2022 12:53:37 -0500 Subject: [PATCH 003/102] More memory resiliant openmvs --- opendm/gcp.py | 2 +- stages/openmvs.py | 26 ++++++++++++++++++++------ 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/opendm/gcp.py b/opendm/gcp.py index 184a49eb..df172b0b 100644 --- a/opendm/gcp.py +++ b/opendm/gcp.py @@ -29,7 +29,7 @@ class GCPFile: if line != "" and line[0] != "#": parts = line.split() if len(parts) >= 6: - self.entries.append(line) + self.entries.append(line) else: log.ODM_WARNING("Malformed GCP line: %s" % line) diff --git a/stages/openmvs.py b/stages/openmvs.py index 19a49f36..895da656 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -103,7 +103,6 @@ class ODMOpenMVSStage(types.ODM_Stage): system.run('"%s" "%s" %s' % (context.omvs_densify_path, openmvs_scene_file, ' '.join(config + gpu_config + extra_config))) - try: run_densify() except system.SubprocessException as e: @@ -113,6 +112,11 @@ class ODMOpenMVSStage(types.ODM_Stage): log.ODM_WARNING("OpenMVS failed with GPU, is your graphics card driver up to date? 
Falling back to CPU.") gpu_config = ["--cuda-device -2"] run_densify() + elif (e.errorCode == 137 or e.errorCode == 3221226505) and not args.pc_tile: + log.ODM_WARNING("OpenMVS ran out of memory, we're going to turn on tiling to see if we can process this.") + args.pc_tile = True + config.append("--fusion-mode 1") + run_densify() else: raise e @@ -198,6 +202,12 @@ class ODMOpenMVSStage(types.ODM_Stage): # Merge fast_merge_ply(scene_ply_files, tree.openmvs_model) else: + def skip_filtering(): + # Just rename + scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply') + log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model)) + os.rename(scene_dense_ply, tree.openmvs_model) + # Filter all at once if args.pc_filter > 0: if os.path.exists(scene_dense): @@ -206,14 +216,18 @@ class ODMOpenMVSStage(types.ODM_Stage): '-i "%s"' % scene_dense, "-v 0" ] - system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config))) + try: + system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config))) + except system.SubprocessException as e: + if e.errorCode == 137 or e.errorCode == 3221226505: + log.ODM_WARNING("OpenMVS filtering ran out of memory, visibility checks will be skipped.") + skip_filtering() + else: + raise e else: raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. 
Exiting...") else: - # Just rename - scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply') - log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model)) - os.rename(scene_dense_ply, tree.openmvs_model) + skip_filtering() self.update_progress(95) From 96ecbba8cb7efaa72bdb1caa632701e90b9d51ce Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 7 Nov 2022 13:21:44 -0500 Subject: [PATCH 004/102] GCP warning checks --- opendm/gcp.py | 32 ++++++++++++++++++++++++++++++++ opendm/types.py | 2 ++ 2 files changed, 34 insertions(+) diff --git a/opendm/gcp.py b/opendm/gcp.py index df172b0b..31f19316 100644 --- a/opendm/gcp.py +++ b/opendm/gcp.py @@ -37,6 +37,35 @@ class GCPFile: for entry in self.entries: yield self.parse_entry(entry) + def check_entries(self): + coords = {} + gcps = {} + errors = 0 + + for entry in self.iter_entries(): + k = entry.coords_key() + coords[k] = coords.get(k, 0) + 1 + if k not in gcps: + gcps[k] = [] + gcps[k].append(entry) + + for k in coords: + if coords[k] < 3: + description = "insufficient" if coords[k] < 2 else "not ideal" + for entry in gcps[k]: + log.ODM_WARNING(str(entry)) + log.ODM_WARNING("The number of images where the GCP %s has been tagged are %s" % (k, description)) + log.ODM_WARNING("You should tag at least %s more images" % (3 - coords[k])) + log.ODM_WARNING("=====================================") + errors += 1 + if len(coords) < 3: + log.ODM_WARNING("Low number of GCPs detected (%s). For best results use at least 5." 
% (3 - len(coords))) + log.ODM_WARNING("=====================================") + errors += 1 + + if errors > 0: + log.ODM_WARNING("Some issues detected with GCPs (but we're going to process this anyway)") + def parse_entry(self, entry): if entry: parts = entry.split() @@ -204,6 +233,9 @@ class GCPEntry: self.py = py self.filename = filename self.extras = extras + + def coords_key(self): + return "{} {} {}".format(self.x, self.y, self.z) def __str__(self): return "{} {} {} {} {} {} {}".format(self.x, self.y, self.z, diff --git a/opendm/types.py b/opendm/types.py index f568abfa..97b95c59 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -107,6 +107,8 @@ class ODM_Reconstruction(object): if gcp.exists(): if gcp.entries_count() == 0: raise RuntimeError("This GCP file does not have any entries. Are the entries entered in the proper format?") + + gcp.check_entries() # Convert GCP file to a UTM projection since the rest of the pipeline # does not handle other SRS well. From edd7809eb666170e5e087a7247759c73b43b247a Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 7 Nov 2022 16:37:32 -0500 Subject: [PATCH 005/102] Update opensfm --- SuperBuild/cmake/External-OpenSfM.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SuperBuild/cmake/External-OpenSfM.cmake b/SuperBuild/cmake/External-OpenSfM.cmake index 1aa00434..d1801495 100644 --- a/SuperBuild/cmake/External-OpenSfM.cmake +++ b/SuperBuild/cmake/External-OpenSfM.cmake @@ -25,7 +25,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/ - GIT_TAG 291 + GIT_TAG 292 #--Update/Patch step---------- UPDATE_COMMAND git submodule update --init --recursive #--Configure step------------- From ed8bc94288d6a5115117ba62aa898fc03728401b Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 10 Nov 2022 14:25:26 -0500 Subject: [PATCH 006/102] Remove --pc-geometric, skip visibility filtering --- VERSION 
| 2 +- opendm/config.py | 16 ---------------- opendm/osfm.py | 35 +++++++++++++++------------------- stages/openmvs.py | 48 +++++------------------------------------------ 4 files changed, 21 insertions(+), 80 deletions(-) diff --git a/VERSION b/VERSION index 5d9ade10..4a36342f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.9.2 +3.0.0 diff --git a/opendm/config.py b/opendm/config.py index f7234b59..64884676 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -83,15 +83,6 @@ def config(argv=None, parser=None): nargs='?', help='Name of dataset (i.e subfolder name within project folder). Default: %(default)s') - parser.add_argument('--resize-to', - metavar='', - action=StoreValue, - default=2048, - type=int, - help='Legacy option (use --feature-quality instead). Resizes images by the largest side for feature extraction purposes only. ' - 'Set to -1 to disable. This does not affect the final orthophoto ' - 'resolution quality and will not resize the original images. Default: %(default)s') - parser.add_argument('--end-with', '-e', metavar='', action=StoreValue, @@ -414,13 +405,6 @@ def config(argv=None, parser=None): help='Reduce the memory usage needed for depthmap fusion by splitting large scenes into tiles. Turn this on if your machine doesn\'t have much RAM and/or you\'ve set --pc-quality to high or ultra. Experimental. ' 'Default: %(default)s') - parser.add_argument('--pc-geometric', - action=StoreTrue, - nargs=0, - default=False, - help='Improve the accuracy of the point cloud by computing geometrically consistent depthmaps. This increases processing time, but can improve results in urban scenes. 
' - 'Default: %(default)s') - parser.add_argument('--smrf-scalar', metavar='', action=StoreValue, diff --git a/opendm/osfm.py b/opendm/osfm.py index a17c6736..1fd816cc 100644 --- a/opendm/osfm.py +++ b/opendm/osfm.py @@ -202,27 +202,22 @@ class OSFMContext: # Compute feature_process_size feature_process_size = 2048 # default - if ('resize_to_is_set' in args) and args.resize_to > 0: - # Legacy - log.ODM_WARNING("Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead.") - feature_process_size = int(args.resize_to) + feature_quality_scale = { + 'ultra': 1, + 'high': 0.5, + 'medium': 0.25, + 'low': 0.125, + 'lowest': 0.0675, + } + + max_dim = find_largest_photo_dim(photos) + + if max_dim > 0: + log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim)) + feature_process_size = int(max_dim * feature_quality_scale[args.feature_quality]) + log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size) else: - feature_quality_scale = { - 'ultra': 1, - 'high': 0.5, - 'medium': 0.25, - 'low': 0.125, - 'lowest': 0.0675, - } - - max_dim = find_largest_photo_dim(photos) - - if max_dim > 0: - log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim)) - feature_process_size = int(max_dim * feature_quality_scale[args.feature_quality]) - log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size) - else: - log.ODM_WARNING("Cannot compute max image dimensions, going with defaults") + log.ODM_WARNING("Cannot compute max image dimensions, going with defaults") # create config file for OpenSfM if args.matcher_neighbors > 0: diff --git a/stages/openmvs.py b/stages/openmvs.py index 895da656..5e980526 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -87,17 +87,13 @@ class ODMOpenMVSStage(types.ODM_Stage): extra_config = [] - if not args.pc_geometric: - extra_config.append("--geometric-iters 0") - masks_dir = os.path.join(tree.opensfm, "undistorted", "masks") masks = 
os.path.exists(masks_dir) and len(os.listdir(masks_dir)) > 0 if masks: extra_config.append("--ignore-mask-label 0") - sharp = args.pc_geometric with open(densify_ini_file, 'w+') as f: - f.write("Optimize = %s\n" % (7 if sharp else 3)) + f.write("Optimize = 7\n") def run_densify(): system.run('"%s" "%s" %s' % (context.omvs_densify_path, @@ -171,18 +167,8 @@ class ODMOpenMVSStage(types.ODM_Stage): '-v 0', ] - try: - system.run('"%s" "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config + gpu_config + extra_config))) - - # Filter - if args.pc_filter > 0: - system.run('"%s" "%s" --filter-point-cloud -1 -v 0 %s' % (context.omvs_densify_path, scene_dense_mvs, ' '.join(gpu_config))) - else: - # Just rename - log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply)) - os.rename(scene_ply_unfiltered, scene_ply) - except: - log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf) + # Do not filter + os.rename(scene_ply_unfiltered, scene_ply) if not io.file_exists(scene_ply): scene_ply_files.pop() @@ -202,32 +188,8 @@ class ODMOpenMVSStage(types.ODM_Stage): # Merge fast_merge_ply(scene_ply_files, tree.openmvs_model) else: - def skip_filtering(): - # Just rename - scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply') - log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model)) - os.rename(scene_dense_ply, tree.openmvs_model) - - # Filter all at once - if args.pc_filter > 0: - if os.path.exists(scene_dense): - config = [ - "--filter-point-cloud -1", - '-i "%s"' % scene_dense, - "-v 0" - ] - try: - system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config))) - except system.SubprocessException as e: - if e.errorCode == 137 or e.errorCode == 3221226505: - log.ODM_WARNING("OpenMVS filtering ran out of memory, visibility checks will be skipped.") - skip_filtering() - else: - raise e - else: - raise system.ExitException("Cannot find scene_dense.mvs, dense 
reconstruction probably failed. Exiting...") - else: - skip_filtering() + scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply') + os.rename(scene_dense_ply, tree.openmvs_model) self.update_progress(95) From 5ff55a5ff839eca44407e2b1af50df9a0fd17a07 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 14 Nov 2022 11:07:51 -0500 Subject: [PATCH 007/102] Remove --verbose --- contrib/pc2dem/pc2dem.py | 1 - opendm/config.py | 8 -------- opendm/dem/commands.py | 8 ++++---- opendm/dem/pdal.py | 26 ++++---------------------- opendm/mesh.py | 32 +++++++++++++------------------- opendm/point_cloud.py | 5 ++--- stages/odm_app.py | 12 ++++-------- stages/odm_dem.py | 4 +--- stages/odm_filterpoints.py | 1 - stages/odm_meshing.py | 5 +---- stages/odm_orthophoto.py | 6 ++---- 11 files changed, 31 insertions(+), 77 deletions(-) diff --git a/contrib/pc2dem/pc2dem.py b/contrib/pc2dem/pc2dem.py index 0441565b..3e2811c1 100755 --- a/contrib/pc2dem/pc2dem.py +++ b/contrib/pc2dem/pc2dem.py @@ -51,7 +51,6 @@ commands.create_dem(args.point_cloud, outdir=outdir, resolution=args.resolution, decimation=1, - verbose=True, max_workers=multiprocessing.cpu_count(), keep_unfilled_copy=False ) \ No newline at end of file diff --git a/opendm/config.py b/opendm/config.py index 64884676..89eb00e3 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -654,14 +654,6 @@ def config(argv=None, parser=None): default=False, help='Create Cloud-Optimized GeoTIFFs instead of normal GeoTIFFs. Default: %(default)s') - - parser.add_argument('--verbose', '-v', - action=StoreTrue, - nargs=0, - default=False, - help='Print additional messages to the console. 
' - 'Default: %(default)s') - parser.add_argument('--copy-to', metavar='', action=StoreValue, diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py index 6f127428..4e79edfa 100755 --- a/opendm/dem/commands.py +++ b/opendm/dem/commands.py @@ -35,11 +35,11 @@ except ModuleNotFoundError: except: pass -def classify(lasFile, scalar, slope, threshold, window, verbose=False): +def classify(lasFile, scalar, slope, threshold, window): start = datetime.now() try: - pdal.run_pdaltranslate_smrf(lasFile, lasFile, scalar, slope, threshold, window, verbose) + pdal.run_pdaltranslate_smrf(lasFile, lasFile, scalar, slope, threshold, window) except: log.ODM_WARNING("Error creating classified file %s" % lasFile) @@ -90,7 +90,7 @@ error = None def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True, outdir='', resolution=0.1, max_workers=1, max_tile_size=4096, - verbose=False, decimation=None, keep_unfilled_copy=False, + decimation=None, keep_unfilled_copy=False, apply_smoothing=True): """ Create DEM from multiple radii, and optionally gapfill """ @@ -187,7 +187,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'] d = pdal.json_add_decimation_filter(d, decimation) pdal.json_add_readers(d, [input_point_cloud]) - pdal.run_pipeline(d, verbose=verbose) + pdal.run_pipeline(d) parallel_map(process_tile, tiles, max_workers) diff --git a/opendm/dem/pdal.py b/opendm/dem/pdal.py index fb3edf31..8e4e5f6f 100644 --- a/opendm/dem/pdal.py +++ b/opendm/dem/pdal.py @@ -133,22 +133,13 @@ def json_add_readers(json, filenames): return json -def json_print(json): - """ Pretty print JSON """ - log.ODM_DEBUG(jsonlib.dumps(json, indent=4, separators=(',', ': '))) - - """ Run PDAL commands """ -def run_pipeline(json, verbose=False): +def run_pipeline(json): """ Run PDAL Pipeline with provided JSON """ - if verbose: - json_print(json) # write to temp file f, jsonfile = tempfile.mkstemp(suffix='.json') - if verbose: - 
log.ODM_INFO('Pipeline file: %s' % jsonfile) os.write(f, jsonlib.dumps(json).encode('utf8')) os.close(f) @@ -157,14 +148,11 @@ def run_pipeline(json, verbose=False): 'pipeline', '-i %s' % double_quote(jsonfile) ] - if verbose or sys.platform == 'win32': - system.run(' '.join(cmd)) - else: - system.run(' '.join(cmd) + ' > /dev/null 2>&1') + system.run(' '.join(cmd)) os.remove(jsonfile) -def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window, verbose=False): +def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window): """ Run PDAL translate """ cmd = [ 'pdal', @@ -178,12 +166,9 @@ def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window, verbose= '--filters.smrf.window=%s' % window, ] - if verbose: - log.ODM_INFO(' '.join(cmd)) - system.run(' '.join(cmd)) -def merge_point_clouds(input_files, output_file, verbose=False): +def merge_point_clouds(input_files, output_file): if len(input_files) == 0: log.ODM_WARNING("Cannot merge point clouds, no point clouds to merge.") return @@ -194,8 +179,5 @@ def merge_point_clouds(input_files, output_file, verbose=False): ' '.join(map(double_quote, input_files + [output_file])), ] - if verbose: - log.ODM_INFO(' '.join(cmd)) - system.run(' '.join(cmd)) diff --git a/opendm/mesh.py b/opendm/mesh.py index f0c524a6..fd7f21bc 100644 --- a/opendm/mesh.py +++ b/opendm/mesh.py @@ -8,7 +8,7 @@ from opendm import concurrency from scipy import signal import numpy as np -def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, verbose=False, available_cores=None, method='gridded', smooth_dsm=True): +def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, available_cores=None, method='gridded', smooth_dsm=True): # Create DSM from point cloud # Create temporary directory @@ -33,20 +33,18 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, 
gapfill=True, outdir=tmp_directory, resolution=dsm_resolution, - verbose=verbose, max_workers=available_cores, apply_smoothing=smooth_dsm ) if method == 'gridded': - mesh = dem_to_mesh_gridded(os.path.join(tmp_directory, 'mesh_dsm.tif'), outMesh, maxVertexCount, verbose, maxConcurrency=max(1, available_cores)) + mesh = dem_to_mesh_gridded(os.path.join(tmp_directory, 'mesh_dsm.tif'), outMesh, maxVertexCount, maxConcurrency=max(1, available_cores)) elif method == 'poisson': - dsm_points = dem_to_points(os.path.join(tmp_directory, 'mesh_dsm.tif'), os.path.join(tmp_directory, 'dsm_points.ply'), verbose) + dsm_points = dem_to_points(os.path.join(tmp_directory, 'mesh_dsm.tif'), os.path.join(tmp_directory, 'dsm_points.ply')) mesh = screened_poisson_reconstruction(dsm_points, outMesh, depth=depth, samples=samples, maxVertexCount=maxVertexCount, - threads=max(1, available_cores - 1), # poissonrecon can get stuck on some machines if --threads == all cores - verbose=verbose) + threads=max(1, available_cores - 1)), # poissonrecon can get stuck on some machines if --threads == all cores else: raise 'Not a valid method: ' + method @@ -57,14 +55,13 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, return mesh -def dem_to_points(inGeotiff, outPointCloud, verbose=False): +def dem_to_points(inGeotiff, outPointCloud): log.ODM_INFO('Sampling points from DSM: %s' % inGeotiff) kwargs = { 'bin': context.dem2points_path, 'outfile': outPointCloud, - 'infile': inGeotiff, - 'verbose': '-verbose' if verbose else '' + 'infile': inGeotiff } system.run('"{bin}" -inputFile "{infile}" ' @@ -72,12 +69,12 @@ def dem_to_points(inGeotiff, outPointCloud, verbose=False): '-skirtHeightThreshold 1.5 ' '-skirtIncrements 0.2 ' '-skirtHeightCap 100 ' - ' {verbose} '.format(**kwargs)) + '-verbose '.format(**kwargs)) return outPointCloud -def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxConcurrency=1): +def dem_to_mesh_gridded(inGeotiff, outMesh, 
maxVertexCount, maxConcurrency=1): log.ODM_INFO('Creating mesh from DSM: %s' % inGeotiff) mesh_path, mesh_filename = os.path.split(outMesh) @@ -99,8 +96,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo 'outfile': outMeshDirty, 'infile': inGeotiff, 'maxVertexCount': maxVertexCount, - 'maxConcurrency': maxConcurrency, - 'verbose': '-verbose' if verbose else '' + 'maxConcurrency': maxConcurrency } system.run('"{bin}" -inputFile "{infile}" ' '-outputFile "{outfile}" ' @@ -108,7 +104,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo '-maxVertexCount {maxVertexCount} ' '-maxConcurrency {maxConcurrency} ' '-edgeSwapThreshold 0.15 ' - ' {verbose} '.format(**kwargs)) + '-verbose '.format(**kwargs)) break except Exception as e: maxConcurrency = math.floor(maxConcurrency / 2) @@ -138,7 +134,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo return outMesh -def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = 1, maxVertexCount=100000, pointWeight=4, threads=context.num_cores, verbose=False): +def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = 1, maxVertexCount=100000, pointWeight=4, threads=context.num_cores): mesh_path, mesh_filename = os.path.split(outMesh) # mesh_path = path/to @@ -165,8 +161,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = 'depth': depth, 'samples': samples, 'pointWeight': pointWeight, - 'threads': int(threads), - 'verbose': '--verbose' if verbose else '' + 'threads': int(threads) } # Run PoissonRecon @@ -178,8 +173,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = '--samplesPerNode {samples} ' '--threads {threads} ' '--bType 2 ' - '--linearFit ' - '{verbose}'.format(**poissonReconArgs)) + '--linearFit '.format(**poissonReconArgs)) except Exception as e: log.ODM_WARNING(str(e)) diff --git a/opendm/point_cloud.py 
b/opendm/point_cloud.py index 481ef33a..8d763594 100644 --- a/opendm/point_cloud.py +++ b/opendm/point_cloud.py @@ -71,7 +71,7 @@ def split(input_point_cloud, outdir, filename_template, capacity, dims=None): return [os.path.join(outdir, f) for f in os.listdir(outdir)] -def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, boundary=None, verbose=False, max_concurrency=1): +def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, boundary=None, max_concurrency=1): """ Filters a point cloud """ @@ -82,8 +82,7 @@ def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank= args = [ '--input "%s"' % input_point_cloud, '--output "%s"' % output_point_cloud, - '--concurrency %s' % max_concurrency, - '--verbose' if verbose else '', + '--concurrency %s' % max_concurrency ] if sample_radius > 0: diff --git a/stages/odm_app.py b/stages/odm_app.py index e05db6f2..b82228e8 100644 --- a/stages/odm_app.py +++ b/stages/odm_app.py @@ -35,8 +35,7 @@ class ODMApp: log.logger.init_json_output(json_log_paths, args) - dataset = ODMLoadDatasetStage('dataset', args, progress=5.0, - verbose=args.verbose) + dataset = ODMLoadDatasetStage('dataset', args, progress=5.0) split = ODMSplitStage('split', args, progress=75.0) merge = ODMMergeStage('merge', args, progress=100.0) opensfm = ODMOpenSfMStage('opensfm', args, progress=25.0) @@ -47,15 +46,12 @@ class ODMApp: oct_tree=max(1, min(14, args.mesh_octree_depth)), samples=1.0, point_weight=4.0, - max_concurrency=args.max_concurrency, - verbose=args.verbose) + max_concurrency=args.max_concurrency) texturing = ODMMvsTexStage('mvs_texturing', args, progress=70.0) georeferencing = ODMGeoreferencingStage('odm_georeferencing', args, progress=80.0, - gcp_file=args.gcp, - verbose=args.verbose) + gcp_file=args.gcp) dem = ODMDEMStage('odm_dem', args, progress=90.0, - max_concurrency=args.max_concurrency, - verbose=args.verbose) + 
max_concurrency=args.max_concurrency) orthophoto = ODMOrthoPhotoStage('odm_orthophoto', args, progress=98.0) report = ODMReport('odm_report', args, progress=99.0) postprocess = ODMPostProcess('odm_postprocess', args, progress=100.0) diff --git a/stages/odm_dem.py b/stages/odm_dem.py index c98eff79..f9464c0c 100755 --- a/stages/odm_dem.py +++ b/stages/odm_dem.py @@ -58,8 +58,7 @@ class ODMDEMStage(types.ODM_Stage): args.smrf_scalar, args.smrf_slope, args.smrf_threshold, - args.smrf_window, - verbose=args.verbose + args.smrf_window ) with open(pc_classify_marker, 'w') as f: @@ -103,7 +102,6 @@ class ODMDEMStage(types.ODM_Stage): outdir=odm_dem_root, resolution=resolution / 100.0, decimation=args.dem_decimation, - verbose=args.verbose, max_workers=args.max_concurrency, keep_unfilled_copy=args.dem_euclidean_map ) diff --git a/stages/odm_filterpoints.py b/stages/odm_filterpoints.py index cda5a219..b988aa90 100644 --- a/stages/odm_filterpoints.py +++ b/stages/odm_filterpoints.py @@ -53,7 +53,6 @@ class ODMFilterPoints(types.ODM_Stage): standard_deviation=args.pc_filter, sample_radius=args.pc_sample, boundary=boundary_offset(outputs.get('boundary'), reconstruction.get_proj_offset()), - verbose=args.verbose, max_concurrency=args.max_concurrency) # Quick check diff --git a/stages/odm_meshing.py b/stages/odm_meshing.py index b711bc94..72231edf 100644 --- a/stages/odm_meshing.py +++ b/stages/odm_meshing.py @@ -27,9 +27,7 @@ class ODMeshingStage(types.ODM_Stage): samples=self.params.get('samples'), maxVertexCount=self.params.get('max_vertex'), pointWeight=self.params.get('point_weight'), - threads=max(1, self.params.get('max_concurrency') - 1), # poissonrecon can get stuck on some machines if --threads == all cores - verbose=self.params.get('verbose')) - + threads=max(1, self.params.get('max_concurrency') - 1)), # poissonrecon can get stuck on some machines if --threads == all cores else: log.ODM_WARNING('Found a valid ODM Mesh file in: %s' % tree.odm_mesh) @@ -68,7 +66,6 @@ 
class ODMeshingStage(types.ODM_Stage): depth=self.params.get('oct_tree'), maxVertexCount=self.params.get('max_vertex'), samples=self.params.get('samples'), - verbose=self.params.get('verbose'), available_cores=args.max_concurrency, method='poisson' if args.fast_orthophoto else 'gridded', smooth_dsm=True) diff --git a/stages/odm_orthophoto.py b/stages/odm_orthophoto.py index a15e9728..39e2bf60 100644 --- a/stages/odm_orthophoto.py +++ b/stages/odm_orthophoto.py @@ -18,7 +18,6 @@ class ODMOrthoPhotoStage(types.ODM_Stage): def process(self, args, outputs): tree = outputs['tree'] reconstruction = outputs['reconstruction'] - verbose = '-verbose' if args.verbose else '' # define paths and create working directories system.mkdir_p(tree.odm_orthophoto) @@ -42,8 +41,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage): 'corners': tree.odm_orthophoto_corners, 'res': resolution, 'bands': '', - 'depth_idx': '', - 'verbose': verbose + 'depth_idx': '' } models = [] @@ -85,7 +83,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage): # run odm_orthophoto system.run('"{odm_ortho_bin}" -inputFiles {models} ' - '-logFile "{log}" -outputFile "{ortho}" -resolution {res} {verbose} ' + '-logFile "{log}" -outputFile "{ortho}" -resolution {res} -verbose ' '-outputCornerFile "{corners}" {bands} {depth_idx}'.format(**kwargs)) # Create georeferenced GeoTiff From 13105722ad0d39949a320d559036c51651540924 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 14 Nov 2022 16:30:20 +0000 Subject: [PATCH 008/102] Removed several flags --- opendm/config.py | 55 +---------------------------- opendm/dem/commands.py | 1 - opendm/log.py | 6 ---- opendm/orthophoto.py | 2 +- opendm/photo.py | 4 --- opendm/thermal_tools/dji_unpack.py | 2 +- opendm/types.py | 4 ++- opendm/utils.py | 56 ++++++++++++++---------------- stages/dataset.py | 14 ++++---- stages/mvstex.py | 6 ++-- stages/odm_app.py | 3 -- 11 files changed, 43 insertions(+), 110 deletions(-) diff --git a/opendm/config.py b/opendm/config.py index 
89eb00e3..2d5ceadf 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -203,15 +203,6 @@ def config(argv=None, parser=None): 'processes. Peak memory requirement is ~1GB per ' 'thread and 2 megapixel image resolution. Default: %(default)s')) - parser.add_argument('--depthmap-resolution', - metavar='', - action=StoreValue, - type=float, - default=640, - help=('Controls the density of the point cloud by setting the resolution of the depthmap images. Higher values take longer to compute ' - 'but produce denser point clouds. Overrides the value calculated by --pc-quality.' - 'Default: %(default)s')) - parser.add_argument('--use-hybrid-bundle-adjustment', action=StoreTrue, nargs=0, @@ -437,20 +428,6 @@ def config(argv=None, parser=None): help='Simple Morphological Filter window radius parameter (meters). ' 'Default: %(default)s') - parser.add_argument('--texturing-data-term', - metavar='', - action=StoreValue, - default='gmi', - choices=['gmi', 'area'], - help=('When texturing the 3D mesh, for each triangle, choose to prioritize images with sharp features (gmi) or those that cover the largest area (area). Default: %(default)s')) - - parser.add_argument('--texturing-outlier-removal-type', - metavar='', - action=StoreValue, - default='gauss_clamping', - choices=['none', 'gauss_clamping', 'gauss_damping'], - help=('Type of photometric outlier removal method. Can be one of: %(choices)s. Default: %(default)s')) - parser.add_argument('--texturing-skip-global-seam-leveling', action=StoreTrue, nargs=0, @@ -470,15 +447,6 @@ def config(argv=None, parser=None): help=('Keep faces in the mesh that are not seen in any camera. ' 'Default: %(default)s')) - parser.add_argument('--texturing-tone-mapping', - metavar='', - action=StoreValue, - choices=['none', 'gamma'], - default='none', - help='Turn on gamma tone mapping or none for no tone ' - 'mapping. Can be one of %(choices)s. 
' - 'Default: %(default)s ') - parser.add_argument('--gcp', metavar='', action=StoreValue, @@ -496,7 +464,7 @@ def config(argv=None, parser=None): action=StoreValue, default=None, help=('Path to the image geolocation file containing the camera center coordinates used for georeferencing. ' - 'Note that omega/phi/kappa are currently not supported (you can set them to 0). ' + 'If you don''t have values for omega/phi/kappa you can set them to 0. ' 'The file needs to ' 'use the following format: \n' 'EPSG: or <+proj definition>\n' @@ -570,13 +538,6 @@ def config(argv=None, parser=None): help=('Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also. ' 'Default: %(default)s')) - parser.add_argument('--orthophoto-no-tiled', - action=StoreTrue, - nargs=0, - default=False, - help='Set this parameter if you want a striped GeoTIFF. ' - 'Default: %(default)s') - parser.add_argument('--orthophoto-png', action=StoreTrue, nargs=0, @@ -590,7 +551,6 @@ def config(argv=None, parser=None): default=False, help='Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. ' 'Default: %(default)s') - parser.add_argument('--orthophoto-compression', metavar='', @@ -659,19 +619,6 @@ def config(argv=None, parser=None): action=StoreValue, help='Copy output results to this folder after processing.') - parser.add_argument('--time', - action=StoreTrue, - nargs=0, - default=False, - help='Generates a benchmark file with runtime info. ' - 'Default: %(default)s') - - parser.add_argument('--debug', - action=StoreTrue, - nargs=0, - default=False, - help='Print debug messages. 
Default: %(default)s') - parser.add_argument('--version', action='version', version='ODM {0}'.format(__version__), diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py index 4e79edfa..56b80b05 100755 --- a/opendm/dem/commands.py +++ b/opendm/dem/commands.py @@ -380,5 +380,4 @@ def window_filter_2d(arr, nodata, window, kernel_size, filter): win_arr = filter(win_arr) win_arr[nodata_locs] = nodata win_arr = win_arr[window[0] - expanded_window[0] : window[2] - expanded_window[0], window[1] - expanded_window[1] : window[3] - expanded_window[1]] - log.ODM_DEBUG("Filtered window: %s" % str(window)) return win_arr diff --git a/opendm/log.py b/opendm/log.py index 01b3b118..1ff6cfca 100644 --- a/opendm/log.py +++ b/opendm/log.py @@ -43,7 +43,6 @@ def memory(): class ODMLogger: def __init__(self): - self.show_debug = False self.json = None self.json_output_file = None self.start_time = datetime.datetime.now() @@ -134,10 +133,6 @@ class ODMLogger: def exception(self, msg): self.log(FAIL, msg, "EXCEPTION") - def debug(self, msg): - if self.show_debug: - self.log(OKGREEN, msg, "DEBUG") - def close(self): if self.json is not None and self.json_output_file is not None: try: @@ -154,4 +149,3 @@ ODM_INFO = logger.info ODM_WARNING = logger.warning ODM_ERROR = logger.error ODM_EXCEPTION = logger.exception -ODM_DEBUG = logger.debug diff --git a/opendm/orthophoto.py b/opendm/orthophoto.py index 9b25e6d1..465a8aad 100644 --- a/opendm/orthophoto.py +++ b/opendm/orthophoto.py @@ -18,7 +18,7 @@ from osgeo import gdal def get_orthophoto_vars(args): return { - 'TILED': 'NO' if args.orthophoto_no_tiled else 'YES', + 'TILED': 'YES', 'COMPRESS': args.orthophoto_compression, 'PREDICTOR': '2' if args.orthophoto_compression in ['LZW', 'DEFLATE'] else '1', 'BIGTIFF': 'IF_SAFER', diff --git a/opendm/photo.py b/opendm/photo.py index 733acd29..c4bc5a38 100644 --- a/opendm/photo.py +++ b/opendm/photo.py @@ -166,10 +166,6 @@ class ODM_Photo: # parse values from metadata 
self.parse_exif_values(path_file) - # print log message - log.ODM_DEBUG('Loaded {}'.format(self)) - - def __str__(self): return '{} | camera: {} {} | dimensions: {} x {} | lat: {} | lon: {} | alt: {} | band: {} ({})'.format( self.filename, self.camera_make, self.camera_model, self.width, self.height, diff --git a/opendm/thermal_tools/dji_unpack.py b/opendm/thermal_tools/dji_unpack.py index 4794ab41..eb706a4c 100644 --- a/opendm/thermal_tools/dji_unpack.py +++ b/opendm/thermal_tools/dji_unpack.py @@ -41,7 +41,7 @@ def extract_temperatures_dji(photo, image, dataset_tree): except ValueError as e: log.ODM_ERROR("Error during extracting temperature values for file %s : %s" % photo.filename, e) else: - log.ODM_DEBUG("Only DJI M2EA currently supported, please wait for new updates") + log.ODM_WARNING("Only DJI M2EA currently supported, please wait for new updates") return image # Extract raw sensor values from generated image into numpy array raw_sensor_np = np.array(img) diff --git a/opendm/types.py b/opendm/types.py index 97b95c59..af04a334 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -364,8 +364,10 @@ class ODM_Stage: if outputs.get('tree') is None: raise Exception("Assert violation: tree variable is missing from outputs dictionary.") - if self.args.time: + try: system.benchmark(start_time, outputs['tree'].benchmarking, self.name) + except Exception as e: + log.ODM_WARNING("Cannot write benchmark file: %s" % str(e)) log.ODM_INFO('Finished %s stage' % self.name) self.update_progress_end() diff --git a/opendm/utils.py b/opendm/utils.py index 2be031ad..0729eb7c 100644 --- a/opendm/utils.py +++ b/opendm/utils.py @@ -5,37 +5,33 @@ from osgeo import gdal from opendm.loghelpers import double_quote def get_depthmap_resolution(args, photos): - if 'depthmap_resolution_is_set' in args: - # Override pc-quality - return int(args.depthmap_resolution) + max_dims = find_largest_photo_dims(photos) + min_dim = 320 # Never go lower than this + + if max_dims is not None: + w, h = 
max_dims + max_dim = max(w, h) + + megapixels = (w * h) / 1e6 + multiplier = 1 + + if megapixels < 6: + multiplier = 2 + elif megapixels > 42: + multiplier = 0.5 + + pc_quality_scale = { + 'ultra': 0.5, + 'high': 0.25, + 'medium': 0.125, + 'low': 0.0675, + 'lowest': 0.03375 + } + + return max(min_dim, int(max_dim * pc_quality_scale[args.pc_quality] * multiplier)) else: - max_dims = find_largest_photo_dims(photos) - min_dim = 320 # Never go lower than this - - if max_dims is not None: - w, h = max_dims - max_dim = max(w, h) - - megapixels = (w * h) / 1e6 - multiplier = 1 - - if megapixels < 6: - multiplier = 2 - elif megapixels > 42: - multiplier = 0.5 - - pc_quality_scale = { - 'ultra': 0.5, - 'high': 0.25, - 'medium': 0.125, - 'low': 0.0675, - 'lowest': 0.03375 - } - - return max(min_dim, int(max_dim * pc_quality_scale[args.pc_quality] * multiplier)) - else: - log.ODM_WARNING("Cannot compute max image dimensions, going with default depthmap_resolution of 640") - return 640 # Sensible default + log.ODM_WARNING("Cannot compute max image dimensions, going with default depthmap_resolution of 640") + return 640 # Sensible default def get_raster_stats(geotiff): stats = [] diff --git a/stages/dataset.py b/stages/dataset.py index 8d01703f..a167b845 100644 --- a/stages/dataset.py +++ b/stages/dataset.py @@ -49,12 +49,15 @@ class ODMLoadDatasetStage(types.ODM_Stage): tree = types.ODM_Tree(args.project_path, args.gcp, args.geo) outputs['tree'] = tree - if args.time and io.file_exists(tree.benchmarking): + if io.file_exists(tree.benchmarking): # Delete the previously made file - os.remove(tree.benchmarking) - with open(tree.benchmarking, 'a') as b: - b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores)) - + try: + os.remove(tree.benchmarking) + with open(tree.benchmarking, 'a') as b: + b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores)) + except Exception as e: + 
log.ODM_WARNING("Cannot write benchmark file: %s" % str(e)) + # check if the image filename is supported def valid_image_filename(filename): (pathfn, ext) = os.path.splitext(filename) @@ -62,7 +65,6 @@ class ODMLoadDatasetStage(types.ODM_Stage): # Get supported images from dir def get_images(in_dir): - log.ODM_DEBUG(in_dir) entries = os.listdir(in_dir) valid, rejects = [], [] for f in entries: diff --git a/stages/mvstex.py b/stages/mvstex.py index 082b13de..da8fd433 100644 --- a/stages/mvstex.py +++ b/stages/mvstex.py @@ -93,12 +93,12 @@ class ODMMvsTexStage(types.ODM_Stage): 'bin': context.mvstex_path, 'out_dir': os.path.join(r['out_dir'], "odm_textured_model_geo"), 'model': r['model'], - 'dataTerm': args.texturing_data_term, - 'outlierRemovalType': args.texturing_outlier_removal_type, + 'dataTerm': 'gmi', + 'outlierRemovalType': 'gauss_clamping', 'skipGlobalSeamLeveling': skipGlobalSeamLeveling, 'skipLocalSeamLeveling': skipLocalSeamLeveling, 'keepUnseenFaces': keepUnseenFaces, - 'toneMapping': args.texturing_tone_mapping, + 'toneMapping': 'none', 'nadirMode': nadir, 'maxTextureSize': '--max_texture_size=%s' % max_texture_size, 'nvm_file': r['nvm_file'], diff --git a/stages/odm_app.py b/stages/odm_app.py index b82228e8..79a09c09 100644 --- a/stages/odm_app.py +++ b/stages/odm_app.py @@ -26,9 +26,6 @@ class ODMApp: """ Initializes the application and defines the ODM application pipeline stages """ - if args.debug: - log.logger.show_debug = True - json_log_paths = [os.path.join(args.project_path, "log.json")] if args.copy_to: json_log_paths.append(args.copy_to) From 9e9037b232e8e730da43cdf6c48c46327f27b48a Mon Sep 17 00:00:00 2001 From: Stephen Mather <1174901+smathermather@users.noreply.github.com> Date: Mon, 14 Nov 2022 23:21:36 -0500 Subject: [PATCH 009/102] Create mask-duplicator.py From https://gitlab.com/appf-anu/asdc-code/-/snippets/2457263 --- contrib/mask-duplicator/mask-duplicator.py | 62 ++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 
100644 contrib/mask-duplicator/mask-duplicator.py diff --git a/contrib/mask-duplicator/mask-duplicator.py b/contrib/mask-duplicator/mask-duplicator.py new file mode 100644 index 00000000..5580c994 --- /dev/null +++ b/contrib/mask-duplicator/mask-duplicator.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +import sys +import os + +import PIL + +from PIL import Image, ExifTags + +import shutil + +from tqdm import tqdm + +import argparse +parser = argparse.ArgumentParser() + +# Usage: +# python exif_renamer.py + +parser.add_argument("file_dir", help="input folder of images") +parser.add_argument("output_dir", help="output folder to copy images to") +parser.add_argument("mask_file", help="filename or path to Mask file to be duplicated for all images") +parser.add_argument("-f", "--force", help="don't ask for confirmation", action="store_true") + +args = parser.parse_args() + +file_dir = args.file_dir +mask_file_path = args.mask_file +output_dir = args.output_dir + +file_count = len(os.listdir(file_dir)) + +if args.force is False: + print("Input dir: " + str(file_dir)) + print("Output folder: " + str(output_dir) + " (" + str(file_count) + " files)") + confirmation = input("Confirm processing [Y/N]: ") + if confirmation.lower() in ["y"]: + pass + else: + sys.exit() + +os.makedirs(output_dir, exist_ok=True) + +no_exif_n = 0 + +# Uses tqdm() for the progress bar, if not needed swap with +# for filename in os.listdir(file_dir): +for filename in tqdm(os.listdir(file_dir)): + old_path = mask_file_path + #print(mask_file_path) + file_name, file_ext = os.path.splitext(filename) + + try: + img = Image.open(old_path) + except PIL.UnidentifiedImageError as img_err: + # if it tries importing a file it can't read as an image + # can be commented out if you just wanna skip errors + sys.stderr.write(str(img_err) + "\n") + continue + new_path = os.path.join(output_dir, file_name + "_mask" + file_ext) + #print(new_path) # debugging + shutil.copy(old_path, new_path) +print("Done!") From 
40973c386aab86b72c3971f1a9fdefd0e7fc78bb Mon Sep 17 00:00:00 2001 From: Stephen Mather <1174901+smathermather@users.noreply.github.com> Date: Mon, 14 Nov 2022 23:22:37 -0500 Subject: [PATCH 010/102] LICENSE.txt applies to mask-duplicator.py Applies to mask-duplicator.py --- contrib/mask-duplicator/LICENSE.txt | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 contrib/mask-duplicator/LICENSE.txt diff --git a/contrib/mask-duplicator/LICENSE.txt b/contrib/mask-duplicator/LICENSE.txt new file mode 100644 index 00000000..dd381136 --- /dev/null +++ b/contrib/mask-duplicator/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 APPF-ANU + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
From 9c4d9add0ffd747af129e1e2b6ea7aefac3b9268 Mon Sep 17 00:00:00 2001 From: Stephen Mather <1174901+smathermather@users.noreply.github.com> Date: Mon, 14 Nov 2022 23:23:42 -0500 Subject: [PATCH 011/102] Create README.md --- contrib/mask-duplicator/README.md | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 contrib/mask-duplicator/README.md diff --git a/contrib/mask-duplicator/README.md b/contrib/mask-duplicator/README.md new file mode 100644 index 00000000..a704df19 --- /dev/null +++ b/contrib/mask-duplicator/README.md @@ -0,0 +1,9 @@ +# Semi-automated Masking for 360 images + +For usage with 360 images and Open Drone Map (ODM) to mask out the tripod/user/camera mount. 360 models in ODM can be made from 360 images, but unless you mask out the camera mount there will be repeated artifacts along the camera path. ODM supports image masking but requires a mask for each image. Since the 360 camera is generally on a fixed mount (bike helmet, moving tripod, etc), you can make one mask and then duplicate this for all images, but this is tedious to do by hand. + +This snippet takes the file path of a single image mask and duplicates it for all images in the dataset. After creating the masks, process the original images and the masks together in ODM you'll get a clean model with the camera mount artifacts eliminated. + +Before using this code snippet, open one of your 360 images in an image editor and mask out the helmet or tripod, etc at the bottom of your image. Save this image as a png and then use it as the mask image that will be duplicated for all images in the dataset. + +See https://docs.opendronemap.org/masks/ for more details on mask creation. 
From 1a876ca540a9e94def290a8f4d1d70ccfded4b42 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Nov 2022 12:59:40 -0500 Subject: [PATCH 012/102] Update dem2mesh --- SuperBuild/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SuperBuild/CMakeLists.txt b/SuperBuild/CMakeLists.txt index 862c42f5..836af1db 100644 --- a/SuperBuild/CMakeLists.txt +++ b/SuperBuild/CMakeLists.txt @@ -209,7 +209,7 @@ externalproject_add(poissonrecon externalproject_add(dem2mesh GIT_REPOSITORY https://github.com/OpenDroneMap/dem2mesh.git - GIT_TAG master + GIT_TAG 300 PREFIX ${SB_BINARY_DIR}/dem2mesh SOURCE_DIR ${SB_SOURCE_DIR}/dem2mesh CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR} From caa1c0206ea72ead153a68f33da7f24f27a195af Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Nov 2022 16:20:57 -0500 Subject: [PATCH 013/102] Update mvs-texturing --- SuperBuild/cmake/External-MvsTexturing.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SuperBuild/cmake/External-MvsTexturing.cmake b/SuperBuild/cmake/External-MvsTexturing.cmake index dd2d7c68..ce1daa2d 100644 --- a/SuperBuild/cmake/External-MvsTexturing.cmake +++ b/SuperBuild/cmake/External-MvsTexturing.cmake @@ -9,7 +9,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}/${_proj_name} GIT_REPOSITORY https://github.com/OpenDroneMap/mvs-texturing - GIT_TAG 290 + GIT_TAG 300 #--Update/Patch step---------- UPDATE_COMMAND "" #--Configure step------------- From 22b5f4f23698858b911c57a20ea777002128daa5 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Nov 2022 19:01:05 -0500 Subject: [PATCH 014/102] Revert mvstexturing --- SuperBuild/cmake/External-MvsTexturing.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SuperBuild/cmake/External-MvsTexturing.cmake b/SuperBuild/cmake/External-MvsTexturing.cmake index ce1daa2d..dd2d7c68 100644 --- a/SuperBuild/cmake/External-MvsTexturing.cmake +++ 
b/SuperBuild/cmake/External-MvsTexturing.cmake @@ -9,7 +9,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}/${_proj_name} GIT_REPOSITORY https://github.com/OpenDroneMap/mvs-texturing - GIT_TAG 300 + GIT_TAG 290 #--Update/Patch step---------- UPDATE_COMMAND "" #--Configure step------------- From 2732773eb19bba5e87ccbd2bd976f17753224c45 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 16 Nov 2022 14:00:16 -0500 Subject: [PATCH 015/102] Re-enable Multi-GPU openmvs --- SuperBuild/cmake/External-OpenMVS.cmake | 2 +- stages/openmvs.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/SuperBuild/cmake/External-OpenMVS.cmake b/SuperBuild/cmake/External-OpenMVS.cmake index 7450471d..4927ec6d 100644 --- a/SuperBuild/cmake/External-OpenMVS.cmake +++ b/SuperBuild/cmake/External-OpenMVS.cmake @@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS - GIT_TAG 291 + GIT_TAG 300 #--Update/Patch step---------- UPDATE_COMMAND "" #--Configure step------------- diff --git a/stages/openmvs.py b/stages/openmvs.py index 5e980526..8299fef9 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -77,8 +77,8 @@ class ODMOpenMVSStage(types.ODM_Stage): gpu_config = [] use_gpu = has_gpu(args) if use_gpu: - # gpu_config.append("--cuda-device -3") - gpu_config.append("--cuda-device -1") + gpu_config.append("--cuda-device -3") + # gpu_config.append("--cuda-device -1") else: gpu_config.append("--cuda-device -2") From 70b6e655a97d3b048b2449ab0eaeac9639f35328 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 17 Nov 2022 10:56:00 -0500 Subject: [PATCH 016/102] Keep --orthophoto-no-tiled --- opendm/config.py | 7 +++++++ opendm/orthophoto.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/opendm/config.py b/opendm/config.py index 2d5ceadf..2ea7a474 100755 --- a/opendm/config.py +++ 
b/opendm/config.py @@ -538,6 +538,13 @@ def config(argv=None, parser=None): help=('Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also. ' 'Default: %(default)s')) + parser.add_argument('--orthophoto-no-tiled', + action=StoreTrue, + nargs=0, + default=False, + help='Set this parameter if you want a striped GeoTIFF. ' + 'Default: %(default)s') + parser.add_argument('--orthophoto-png', action=StoreTrue, nargs=0, diff --git a/opendm/orthophoto.py b/opendm/orthophoto.py index 465a8aad..9b25e6d1 100644 --- a/opendm/orthophoto.py +++ b/opendm/orthophoto.py @@ -18,7 +18,7 @@ from osgeo import gdal def get_orthophoto_vars(args): return { - 'TILED': 'YES', + 'TILED': 'NO' if args.orthophoto_no_tiled else 'YES', 'COMPRESS': args.orthophoto_compression, 'PREDICTOR': '2' if args.orthophoto_compression in ['LZW', 'DEFLATE'] else '1', 'BIGTIFF': 'IF_SAFER', From 917380fb20cfcb45edf9a135105b1192e48808ea Mon Sep 17 00:00:00 2001 From: Stephen Mather <1174901+smathermather@users.noreply.github.com> Date: Fri, 18 Nov 2022 15:29:32 -0500 Subject: [PATCH 017/102] opendronemap available on edge --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5ea5adea..9e25d6a0 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,7 @@ run C:\Users\youruser\datasets\project [--additional --parameters --here] ODM is now available as a Snap Package from the Snap Store. 
To install you may use the Snap Store (available itself as a Snap Package) or the command line: ```bash -sudo snap install opendronemap +sudo snap install --edge opendronemap ``` To run, you will need a terminal window into which you can type: From af765a961d64775f399afac5419eb6d7c391ce25 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 22 Nov 2022 13:38:05 -0500 Subject: [PATCH 018/102] Use sqrt(2) for DEM radius step --- opendm/mesh.py | 2 +- stages/odm_dem.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/opendm/mesh.py b/opendm/mesh.py index fd7f21bc..6708ab55 100644 --- a/opendm/mesh.py +++ b/opendm/mesh.py @@ -21,7 +21,7 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, radius_steps = [dsm_radius] for _ in range(2): - radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary + radius_steps.append(radius_steps[-1] * math.sqrt(2)) # sqrt(2) is arbitrary log.ODM_INFO('Creating DSM for 2.5D mesh') diff --git a/stages/odm_dem.py b/stages/odm_dem.py index f9464c0c..36ecdd8e 100755 --- a/stages/odm_dem.py +++ b/stages/odm_dem.py @@ -1,4 +1,4 @@ -import os, json +import os, json, math from shutil import copyfile from opendm import io @@ -90,7 +90,7 @@ class ODMDEMStage(types.ODM_Stage): radius_steps = [(resolution / 100.0) / 2.0] for _ in range(args.dem_gapfill_steps - 1): - radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary, maybe there's a better value? + radius_steps.append(radius_steps[-1] * math.sqrt(2)) # sqrt(2) is arbitrary, maybe there's a better value? 
for product in products: commands.create_dem( From 1a0db4247e58814c0ad1119ff13dfacb41eb9527 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 22 Nov 2022 23:53:32 -0500 Subject: [PATCH 019/102] Graceful handling of deprecated args --- opendm/config.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/opendm/config.py b/opendm/config.py index 2ea7a474..864ad7e1 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -741,7 +741,15 @@ def config(argv=None, parser=None): 'If the images have been postprocessed and are already aligned, use this option. ' 'Default: %(default)s')) - args = parser.parse_args(argv) + args, unknown = parser.parse_known_args(argv) + DEPRECATED = ["--verbose", "--debug", "--time", "--resize-to", "--depthmap-resolution", "--pc-geometric", "--texturing-data-term", "--texturing-outlier-removal-type", "--texturing-tone-mapping"] + unknown_e = [p for p in unknown if p not in DEPRECATED] + if len(unknown_e) > 0: + raise parser.error("unrecognized arguments: %s" % " ".join(unknown_e)) + + for p in unknown: + if p in DEPRECATED: + log.ODM_WARNING("%s is no longer a valid argument and will be ignored!" % p) # check that the project path setting has been set properly if not args.project_path: From 5c33cfe62591f8b11fb57892a2a785a3d6d832c2 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 23 Nov 2022 12:20:52 -0500 Subject: [PATCH 020/102] Fix --pc-tile --- stages/openmvs.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/stages/openmvs.py b/stages/openmvs.py index 8299fef9..471bf897 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -167,19 +167,24 @@ class ODMOpenMVSStage(types.ODM_Stage): '-v 0', ] - # Do not filter - os.rename(scene_ply_unfiltered, scene_ply) + try: + system.run('"%s" "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config + gpu_config + extra_config))) + except: + log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." 
% sf) - if not io.file_exists(scene_ply): + if not io.file_exists(scene_ply_unfiltered): scene_ply_files.pop() log.ODM_WARNING("Could not compute PLY for subscene %s" % sf) + else: + # Do not filter + os.rename(scene_ply_unfiltered, scene_ply) else: log.ODM_WARNING("Found existing dense scene file %s" % scene_ply) # Merge log.ODM_INFO("Merging %s scene files" % len(scene_ply_files)) if len(scene_ply_files) == 0: - log.ODM_ERROR("Could not compute dense point cloud (no PLY files available).") + raise system.ExitException("Could not compute dense point cloud (no PLY files available).") if len(scene_ply_files) == 1: # Simply rename os.replace(scene_ply_files[0], tree.openmvs_model) @@ -189,6 +194,10 @@ class ODMOpenMVSStage(types.ODM_Stage): fast_merge_ply(scene_ply_files, tree.openmvs_model) else: scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply') + + if not os.path.exists(scene_dense_ply): + raise system.ExitException("Dense reconstruction failed. This could be due to poor georeferencing or insufficient image overlap.") + os.rename(scene_dense_ply, tree.openmvs_model) self.update_progress(95) From 07d81017800ac21d5e2361981297f2364514a440 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 24 Nov 2022 14:51:04 -0500 Subject: [PATCH 021/102] Downgrade cmake version on Windows build --- .github/workflows/publish-windows.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/publish-windows.yml b/.github/workflows/publish-windows.yml index 9e3f00cb..b9737f55 100644 --- a/.github/workflows/publish-windows.yml +++ b/.github/workflows/publish-windows.yml @@ -22,6 +22,10 @@ jobs: id: cuda-toolkit with: cuda: '11.4.0' + - name: Setup cmake + uses: jwlawson/actions-setup-cmake@v1.13 + with: + cmake-version: '3.24.x' - name: Extract code signing cert id: code_sign uses: timheuer/base64-to-file@v1 From be4d3d0cd1bd93666ae78c84424272a45a111964 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 25 Nov 2022 20:26:28 -0500 Subject: 
[PATCH 022/102] Revert OpenMVS to 291 --- SuperBuild/cmake/External-OpenMVS.cmake | 2 +- stages/openmvs.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/SuperBuild/cmake/External-OpenMVS.cmake b/SuperBuild/cmake/External-OpenMVS.cmake index 4927ec6d..7450471d 100644 --- a/SuperBuild/cmake/External-OpenMVS.cmake +++ b/SuperBuild/cmake/External-OpenMVS.cmake @@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS - GIT_TAG 300 + GIT_TAG 291 #--Update/Patch step---------- UPDATE_COMMAND "" #--Configure step------------- diff --git a/stages/openmvs.py b/stages/openmvs.py index 471bf897..d654cf9a 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -77,8 +77,8 @@ class ODMOpenMVSStage(types.ODM_Stage): gpu_config = [] use_gpu = has_gpu(args) if use_gpu: - gpu_config.append("--cuda-device -3") - # gpu_config.append("--cuda-device -1") + #gpu_config.append("--cuda-device -3") + gpu_config.append("--cuda-device -1") else: gpu_config.append("--cuda-device -2") From 8e567ab543a5c213694e410641afaf11ae70917b Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 25 Nov 2022 21:37:16 -0500 Subject: [PATCH 023/102] Revert "Revert OpenMVS to 291" This reverts commit be4d3d0cd1bd93666ae78c84424272a45a111964. 
--- SuperBuild/cmake/External-OpenMVS.cmake | 2 +- stages/openmvs.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/SuperBuild/cmake/External-OpenMVS.cmake b/SuperBuild/cmake/External-OpenMVS.cmake index 7450471d..4927ec6d 100644 --- a/SuperBuild/cmake/External-OpenMVS.cmake +++ b/SuperBuild/cmake/External-OpenMVS.cmake @@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS - GIT_TAG 291 + GIT_TAG 300 #--Update/Patch step---------- UPDATE_COMMAND "" #--Configure step------------- diff --git a/stages/openmvs.py b/stages/openmvs.py index d654cf9a..471bf897 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -77,8 +77,8 @@ class ODMOpenMVSStage(types.ODM_Stage): gpu_config = [] use_gpu = has_gpu(args) if use_gpu: - #gpu_config.append("--cuda-device -3") - gpu_config.append("--cuda-device -1") + gpu_config.append("--cuda-device -3") + # gpu_config.append("--cuda-device -1") else: gpu_config.append("--cuda-device -2") From f1400eea038bcb9fb6534faac8b75f8d1f46efba Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 25 Nov 2022 22:09:33 -0500 Subject: [PATCH 024/102] Remove min-resolution --- stages/openmvs.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/stages/openmvs.py b/stages/openmvs.py index 471bf897..13421948 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -65,7 +65,6 @@ class ODMOpenMVSStage(types.ODM_Stage): config = [ " --resolution-level %s" % int(resolution_level), '--dense-config-file "%s"' % densify_ini_file, - "--min-resolution %s" % depthmap_resolution, "--max-resolution %s" % int(outputs['undist_image_max_size']), "--max-threads %s" % args.max_concurrency, "--number-views-fuse %s" % number_views_fuse, @@ -158,7 +157,6 @@ class ODMOpenMVSStage(types.ODM_Stage): # Fuse config = [ '--resolution-level %s' % int(resolution_level), - '--min-resolution %s' % depthmap_resolution, '--max-resolution 
%s' % int(outputs['undist_image_max_size']), '--dense-config-file "%s"' % subscene_densify_ini_file, '--number-views-fuse %s' % number_views_fuse, From e9f0b14bec1ea6a33448992f290fe9dfd7088c8b Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Sat, 26 Nov 2022 10:34:43 -0500 Subject: [PATCH 025/102] Fix pc-rectify --- stages/odm_dem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stages/odm_dem.py b/stages/odm_dem.py index 36ecdd8e..67603591 100755 --- a/stages/odm_dem.py +++ b/stages/odm_dem.py @@ -72,7 +72,7 @@ class ODMDEMStage(types.ODM_Stage): self.update_progress(progress) if args.pc_rectify: - commands.rectify(dem_input, args.debug) + commands.rectify(dem_input, False) # Do we need to process anything here? if (args.dsm or args.dtm) and pc_model_found: From 5a129b6ef56c8453f7e8d17ad19b569187cfb6bd Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Sun, 27 Nov 2022 16:23:17 -0500 Subject: [PATCH 026/102] Less aggressive point visibility filtering --- stages/openmvs.py | 44 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/stages/openmvs.py b/stages/openmvs.py index 13421948..1295ec75 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -61,6 +61,7 @@ class ODMOpenMVSStage(types.ODM_Stage): number_views_fuse = 2 densify_ini_file = os.path.join(tree.openmvs, 'Densify.ini') subres_levels = 2 # The number of lower resolutions to process before estimating output resolution depthmap. 
+ filter_point_th = -20 config = [ " --resolution-level %s" % int(resolution_level), @@ -127,7 +128,8 @@ class ODMOpenMVSStage(types.ODM_Stage): f.write("Optimize = 0\n") config = [ - "--sub-scene-area 660000", + "--sub-scene-area 6000", + # "--sub-scene-area 660000", "--max-threads %s" % args.max_concurrency, '-w "%s"' % depthmaps_dir, "-v 0", @@ -174,8 +176,13 @@ class ODMOpenMVSStage(types.ODM_Stage): scene_ply_files.pop() log.ODM_WARNING("Could not compute PLY for subscene %s" % sf) else: - # Do not filter - os.rename(scene_ply_unfiltered, scene_ply) + # Filter + if args.pc_filter > 0: + system.run('"%s" "%s" --filter-point-cloud %s -v 0 %s' % (context.omvs_densify_path, scene_dense_mvs, filter_point_th, ' '.join(gpu_config))) + else: + # Just rename + log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply)) + os.rename(scene_ply_unfiltered, scene_ply) else: log.ODM_WARNING("Found existing dense scene file %s" % scene_ply) @@ -191,12 +198,35 @@ class ODMOpenMVSStage(types.ODM_Stage): # Merge fast_merge_ply(scene_ply_files, tree.openmvs_model) else: - scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply') + def skip_filtering(): + # Just rename + scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply') + if not os.path.exists(scene_dense_ply): + raise system.ExitException("Dense reconstruction failed. This could be due to poor georeferencing or insufficient image overlap.") - if not os.path.exists(scene_dense_ply): - raise system.ExitException("Dense reconstruction failed. 
This could be due to poor georeferencing or insufficient image overlap.") + log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model)) + os.rename(scene_dense_ply, tree.openmvs_model) - os.rename(scene_dense_ply, tree.openmvs_model) + # Filter all at once + if args.pc_filter > 0: + if os.path.exists(scene_dense): + config = [ + "--filter-point-cloud %s" % filter_point_th, + '-i "%s"' % scene_dense, + "-v 0" + ] + try: + system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config))) + except system.SubprocessException as e: + if e.errorCode == 137 or e.errorCode == 3221226505: + log.ODM_WARNING("OpenMVS filtering ran out of memory, visibility checks will be skipped.") + skip_filtering() + else: + raise e + else: + raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...") + else: + skip_filtering() self.update_progress(95) From f7f94f4ff17db27c3748effa48403284ec02c455 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Sun, 27 Nov 2022 16:43:24 -0500 Subject: [PATCH 027/102] GSD image scale factor always 2**-N --- opendm/gsd.py | 10 +++++++--- stages/openmvs.py | 3 +-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/opendm/gsd.py b/opendm/gsd.py index b954fde3..ebfc99ef 100644 --- a/opendm/gsd.py +++ b/opendm/gsd.py @@ -51,15 +51,19 @@ def image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimat :param reconstruction_json path to OpenSfM's reconstruction.json :param gsd_error_estimate percentage of estimated error in the GSD calculation to set an upper bound on resolution. :return A down-scale (<= 1) value to apply to images to achieve the target resolution by comparing the current GSD of the reconstruction. - If a GSD cannot be computed, it just returns 1. Returned scale values are never higher than 1. + If a GSD cannot be computed, it just returns 1. 
Returned scale values are never higher than 1 and are always obtained by dividing by 2 (e.g. 0.5, 0.25, etc.) """ gsd = opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=has_gcp) if gsd is not None and target_resolution > 0: gsd = gsd * (1 + gsd_error_estimate) - return min(1, gsd / target_resolution) + isf = min(1.0, abs(gsd) / target_resolution) + ret = 0.5 + while ret >= isf: + ret /= 2.0 + return ret * 2.0 else: - return 1 + return 1.0 def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, gsd_scaling = 1.0, ignore_gsd=False, diff --git a/stages/openmvs.py b/stages/openmvs.py index 1295ec75..6d9e6340 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -128,8 +128,7 @@ class ODMOpenMVSStage(types.ODM_Stage): f.write("Optimize = 0\n") config = [ - "--sub-scene-area 6000", - # "--sub-scene-area 660000", + "--sub-scene-area 660000", "--max-threads %s" % args.max_concurrency, '-w "%s"' % depthmaps_dir, "-v 0", From 054a4414a1ea3504de6c9c480f5259f6e9a3eb6a Mon Sep 17 00:00:00 2001 From: Shuo Zhong Date: Tue, 29 Nov 2022 16:15:07 -0500 Subject: [PATCH 028/102] upgrade laspy to 2.2.0 with laszip support, allows to directly read and write laszip file --- opendm/dem/commands.py | 25 +--------- .../extra_dimensions/distance_dimension.py | 2 +- .../extra_dimensions/extended_dimension.py | 2 +- .../extra_dimensions/partition_dimension.py | 2 +- opendm/dem/ground_rectification/io/las_io.py | 48 ++++++------------- requirements.txt | 2 +- 6 files changed, 20 insertions(+), 61 deletions(-) diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py index 56b80b05..47903a68 100755 --- a/opendm/dem/commands.py +++ b/opendm/dem/commands.py @@ -50,36 +50,13 @@ def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_poin start = datetime.now() try: - # Currently, no Python 2 lib that supports reading and writing LAZ, so we will do it manually until ODM is migrated to Python 3 - # When migration is done, we can 
move to pylas and avoid using PDAL for conversion - tempLasFile = os.path.join(os.path.dirname(lasFile), 'tmp.las') - - # Convert LAZ to LAS - cmd = [ - 'pdal', - 'translate', - '-i %s' % lasFile, - '-o %s' % tempLasFile - ] - system.run(' '.join(cmd)) - log.ODM_INFO("Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(lasFile, reclassify_threshold, min_area, min_points)) run_rectification( - input=tempLasFile, output=tempLasFile, debug=debug, \ + input=lasFile, output=lasFile, debug=debug, \ reclassify_plan='median', reclassify_threshold=reclassify_threshold, \ extend_plan='surrounding', extend_grid_distance=5, \ min_area=min_area, min_points=min_points) - # Convert LAS to LAZ - cmd = [ - 'pdal', - 'translate', - '-i %s' % tempLasFile, - '-o %s' % lasFile - ] - system.run(' '.join(cmd)) - os.remove(tempLasFile) - except Exception as e: raise Exception("Error rectifying ground in file %s: %s" % (lasFile, str(e))) diff --git a/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py index 33b56835..d2f72fdb 100755 --- a/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py @@ -35,7 +35,7 @@ class DistanceDimension(Dimension): return 'distance_to_ground' def get_las_type(self): - return 10 + return 'float64' def __calculate_angle(self, model): "Calculate the angle between the estimated plane and the XY plane" diff --git a/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py index 741d041b..c371e83b 100755 --- a/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py @@ -20,4 +20,4 @@ class ExtendedDimension(Dimension): return 'extended' def get_las_type(self): - return 3 + 
return 'uint16' diff --git a/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py index 8d39866b..b7fed6b6 100755 --- a/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py @@ -22,4 +22,4 @@ class PartitionDimension(Dimension): return self.name def get_las_type(self): - return 5 + return 'uint32' diff --git a/opendm/dem/ground_rectification/io/las_io.py b/opendm/dem/ground_rectification/io/las_io.py index 49ce0361..07f50729 100755 --- a/opendm/dem/ground_rectification/io/las_io.py +++ b/opendm/dem/ground_rectification/io/las_io.py @@ -1,52 +1,39 @@ # TODO: Move to pylas when project migrates to python3 -from laspy.file import File -from laspy.header import Header +import laspy import numpy as np from ..point_cloud import PointCloud def read_cloud(point_cloud_path): # Open point cloud and read its properties - las_file = File(point_cloud_path, mode='r') - header = (las_file.header.copy(), las_file.header.scale, las_file.header.offset,las_file.header.evlrs, las_file.header.vlrs) - [x_scale, y_scale, z_scale] = las_file.header.scale - [x_offset, y_offset, z_offset] = las_file.header.offset + las_file = laspy.read(point_cloud_path) + header = las_file.header - # Calculate the real coordinates - x = las_file.X * x_scale + x_offset - y = las_file.Y * y_scale + y_offset - z = las_file.Z * z_scale + z_offset + x = las_file.x.scaled_array() + y = las_file.y.scaled_array() + z = las_file.z.scaled_array() - cloud = PointCloud.with_dimensions(x, y, z, las_file.Classification, las_file.red, las_file.green, las_file.blue) - - # Close the file - las_file.close() + cloud = PointCloud.with_dimensions(x, y, z, las_file.classification.array, las_file.red, las_file.green, las_file.blue) # Return the result return header, cloud def write_cloud(header, point_cloud, output_point_cloud_path, 
write_extra_dimensions=False): - (h, scale, offset, evlrs, vlrs) = header - # Open output file - output_las_file = File(output_point_cloud_path, mode='w', header=h, evlrs=evlrs, vlrs=vlrs) + output_las_file = laspy.LasData(header) if write_extra_dimensions: - # Create new dimensions - for name, dimension in point_cloud.extra_dimensions_metadata.items(): - output_las_file.define_new_dimension(name=name, data_type=dimension.get_las_type(), description="Dimension added by Ground Extend") - + extra_dims = [laspy.ExtraBytesParams(name=name, type=dimension.get_las_type(), description="Dimension added by Ground Extend") for name, dimension in point_cloud.extra_dimensions_metadata.items()] + output_las_file.add_extra_dims(extra_dims) # Assign dimension values for dimension_name, values in point_cloud.extra_dimensions.items(): setattr(output_las_file, dimension_name, values) # Adapt points to scale and offset - [x_scale, y_scale, z_scale] = scale - [x_offset, y_offset, z_offset] = offset [x, y] = np.hsplit(point_cloud.xy, 2) - output_las_file.X = (x.ravel() - x_offset) / x_scale - output_las_file.Y = (y.ravel() - y_offset) / y_scale - output_las_file.Z = (point_cloud.z - z_offset) / z_scale + output_las_file.x = x.ravel() + output_las_file.y = y.ravel() + output_las_file.z = point_cloud.z # Set color [red, green, blue] = np.hsplit(point_cloud.rgb, 3) @@ -55,11 +42,6 @@ def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimens output_las_file.blue = blue.ravel() # Set classification - output_las_file.Classification = point_cloud.classification.astype(np.uint8) + output_las_file.classification = point_cloud.classification.astype(np.uint8) - # Set header - output_las_file.header.scale = scale - output_las_file.header.offset = offset - - # Close files - output_las_file.close() + output_las_file.write(output_point_cloud_path) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index c5441e9a..28d71347 100644 --- a/requirements.txt 
+++ b/requirements.txt @@ -7,7 +7,7 @@ ODMExifRead==3.0.4 Fiona==1.8.17 ; sys_platform == 'linux' or sys_platform == 'darwin' https://github.com/OpenDroneMap/windows-deps/raw/main/Fiona-1.8.19-cp38-cp38-win_amd64.whl ; sys_platform == 'win32' joblib==1.1.0 -laspy==1.7.0 +laspy[laszip]==2.3.0 lxml==4.6.1 matplotlib==3.3.3 networkx==2.5 From c51d52b05693c4feec5ab66cebc5625fea0a0ab3 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 30 Nov 2022 03:25:08 -0500 Subject: [PATCH 029/102] Downgrade cmake for Windows build test --- .github/workflows/test-build-prs.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/test-build-prs.yaml b/.github/workflows/test-build-prs.yaml index 4caa9bf6..caab9530 100644 --- a/.github/workflows/test-build-prs.yaml +++ b/.github/workflows/test-build-prs.yaml @@ -58,6 +58,10 @@ jobs: with: python-version: '3.8.1' architecture: 'x64' + - name: Setup cmake + uses: jwlawson/actions-setup-cmake@v1.13 + with: + cmake-version: '3.24.x' - name: Setup Visual C++ uses: ilammy/msvc-dev-cmd@v1 with: From 4cf7f159b73a189bfddb67b97e79968538ef250c Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 30 Nov 2022 21:33:20 -0500 Subject: [PATCH 030/102] Upgrade PDAL, compile PDAL's python bindings --- SuperBuild/CMakeLists.txt | 1 + SuperBuild/cmake/External-PDAL.cmake | 3 ++- SuperBuild/cmake/External-PDALPython.cmake | 30 ++++++++++++++++++++++ opendm/context.py | 2 +- 4 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 SuperBuild/cmake/External-PDALPython.cmake diff --git a/SuperBuild/CMakeLists.txt b/SuperBuild/CMakeLists.txt index 836af1db..af59a2e5 100644 --- a/SuperBuild/CMakeLists.txt +++ b/SuperBuild/CMakeLists.txt @@ -159,6 +159,7 @@ SETUP_EXTERNAL_PROJECT(Hexer 1.4 ON) set(custom_libs OpenSfM LASzip PDAL + PDALPython Untwine Entwine MvsTexturing diff --git a/SuperBuild/cmake/External-PDAL.cmake b/SuperBuild/cmake/External-PDAL.cmake index 4fda625c..4bdd956b 100644 --- 
a/SuperBuild/cmake/External-PDAL.cmake +++ b/SuperBuild/cmake/External-PDAL.cmake @@ -16,7 +16,7 @@ ExternalProject_Add(${_proj_name} STAMP_DIR ${_SB_BINARY_DIR}/stamp #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} - URL https://github.com/PDAL/PDAL/archive/refs/tags/2.3RC1.zip + URL https://github.com/PDAL/PDAL/archive/refs/tags/2.4.3.zip #--Update/Patch step---------- UPDATE_COMMAND "" #--Configure step------------- @@ -60,3 +60,4 @@ ExternalProject_Add(${_proj_name} LOG_CONFIGURE OFF LOG_BUILD OFF ) + diff --git a/SuperBuild/cmake/External-PDALPython.cmake b/SuperBuild/cmake/External-PDALPython.cmake new file mode 100644 index 00000000..be3e1e51 --- /dev/null +++ b/SuperBuild/cmake/External-PDALPython.cmake @@ -0,0 +1,30 @@ +set(_proj_name pdal-python) +set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}") + +ExternalProject_Add(${_proj_name} + DEPENDS pdal + PREFIX ${_SB_BINARY_DIR} + TMP_DIR ${_SB_BINARY_DIR}/tmp + STAMP_DIR ${_SB_BINARY_DIR}/stamp + #--Download step-------------- + DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} + GIT_REPOSITORY https://github.com/OpenDroneMap/pdal-python + GIT_TAG main + #--Update/Patch step---------- + UPDATE_COMMAND "" + #--Configure step------------- + SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name} + CMAKE_ARGS + -DPDAL_DIR=${SB_INSTALL_DIR}/lib/cmake/PDAL + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}/lib/python3.8/dist-packages + ${WIN32_CMAKE_ARGS} + #--Build step----------------- + BINARY_DIR ${_SB_BINARY_DIR} + #--Install step--------------- + INSTALL_DIR ${SB_INSTALL_DIR} + #--Output logging------------- + LOG_DOWNLOAD OFF + LOG_CONFIGURE OFF + LOG_BUILD OFF +) \ No newline at end of file diff --git a/opendm/context.py b/opendm/context.py index 293d6cb5..2fac61d3 100644 --- a/opendm/context.py +++ b/opendm/context.py @@ -14,7 +14,7 @@ python_packages_paths = [os.path.join(superbuild_path, p) for p in [ 'install/lib/python3.9/dist-packages', 'install/lib/python3.8/dist-packages', 
'install/lib/python3/dist-packages', - 'install/bin/opensfm' + 'install/bin/opensfm', ]] for p in python_packages_paths: sys.path.append(p) From c463136cb0c9de1200f750e8cffaff91ae3d5c54 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 30 Nov 2022 21:50:48 -0500 Subject: [PATCH 031/102] Update openmvs, bump version --- SuperBuild/cmake/External-OpenMVS.cmake | 2 +- VERSION | 2 +- stages/openmvs.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/SuperBuild/cmake/External-OpenMVS.cmake b/SuperBuild/cmake/External-OpenMVS.cmake index 4927ec6d..9cb0d298 100644 --- a/SuperBuild/cmake/External-OpenMVS.cmake +++ b/SuperBuild/cmake/External-OpenMVS.cmake @@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS - GIT_TAG 300 + GIT_TAG 301 #--Update/Patch step---------- UPDATE_COMMAND "" #--Configure step------------- diff --git a/VERSION b/VERSION index 4a36342f..cb2b00e4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.0 +3.0.1 diff --git a/stages/openmvs.py b/stages/openmvs.py index 6d9e6340..df37f6a0 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -77,8 +77,8 @@ class ODMOpenMVSStage(types.ODM_Stage): gpu_config = [] use_gpu = has_gpu(args) if use_gpu: - gpu_config.append("--cuda-device -3") - # gpu_config.append("--cuda-device -1") + #gpu_config.append("--cuda-device -3") + gpu_config.append("--cuda-device -1") else: gpu_config.append("--cuda-device -2") From 95712f4b4786a5f504855aeab41810e22b5a496e Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 1 Dec 2022 08:13:07 -0500 Subject: [PATCH 032/102] Do not build openmvs tests --- SuperBuild/cmake/External-OpenMVS.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/SuperBuild/cmake/External-OpenMVS.cmake b/SuperBuild/cmake/External-OpenMVS.cmake index 4927ec6d..c18004ca 100644 --- a/SuperBuild/cmake/External-OpenMVS.cmake +++ 
b/SuperBuild/cmake/External-OpenMVS.cmake @@ -64,6 +64,7 @@ ExternalProject_Add(${_proj_name} -DEIGEN3_INCLUDE_DIR=${SB_SOURCE_DIR}/eigen34/ -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX=${SB_INSTALL_DIR} + -DOpenMVS_ENABLE_TESTS=OFF -DOpenMVS_MAX_CUDA_COMPATIBILITY=ON ${GPU_CMAKE_ARGS} ${WIN32_CMAKE_ARGS} From be6b1ba7f0f6f760dc6667f1ccee91c6ee612c5c Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 1 Dec 2022 10:03:28 -0500 Subject: [PATCH 033/102] Revert "upgrade laspy to 2.2.0 with laszip support, allows to directly read and write laszip file" This reverts commit 054a4414a1ea3504de6c9c480f5259f6e9a3eb6a. --- opendm/dem/commands.py | 25 +++++++++- .../extra_dimensions/distance_dimension.py | 2 +- .../extra_dimensions/extended_dimension.py | 2 +- .../extra_dimensions/partition_dimension.py | 2 +- opendm/dem/ground_rectification/io/las_io.py | 48 +++++++++++++------ requirements.txt | 2 +- 6 files changed, 61 insertions(+), 20 deletions(-) diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py index 47903a68..56b80b05 100755 --- a/opendm/dem/commands.py +++ b/opendm/dem/commands.py @@ -50,13 +50,36 @@ def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_poin start = datetime.now() try: + # Currently, no Python 2 lib that supports reading and writing LAZ, so we will do it manually until ODM is migrated to Python 3 + # When migration is done, we can move to pylas and avoid using PDAL for conversion + tempLasFile = os.path.join(os.path.dirname(lasFile), 'tmp.las') + + # Convert LAZ to LAS + cmd = [ + 'pdal', + 'translate', + '-i %s' % lasFile, + '-o %s' % tempLasFile + ] + system.run(' '.join(cmd)) + log.ODM_INFO("Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(lasFile, reclassify_threshold, min_area, min_points)) run_rectification( - input=lasFile, output=lasFile, debug=debug, \ + input=tempLasFile, output=tempLasFile, debug=debug, \ reclassify_plan='median', 
reclassify_threshold=reclassify_threshold, \ extend_plan='surrounding', extend_grid_distance=5, \ min_area=min_area, min_points=min_points) + # Convert LAS to LAZ + cmd = [ + 'pdal', + 'translate', + '-i %s' % tempLasFile, + '-o %s' % lasFile + ] + system.run(' '.join(cmd)) + os.remove(tempLasFile) + except Exception as e: raise Exception("Error rectifying ground in file %s: %s" % (lasFile, str(e))) diff --git a/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py index d2f72fdb..33b56835 100755 --- a/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py @@ -35,7 +35,7 @@ class DistanceDimension(Dimension): return 'distance_to_ground' def get_las_type(self): - return 'float64' + return 10 def __calculate_angle(self, model): "Calculate the angle between the estimated plane and the XY plane" diff --git a/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py index c371e83b..741d041b 100755 --- a/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py @@ -20,4 +20,4 @@ class ExtendedDimension(Dimension): return 'extended' def get_las_type(self): - return 'uint16' + return 3 diff --git a/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py index b7fed6b6..8d39866b 100755 --- a/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py @@ -22,4 +22,4 @@ class PartitionDimension(Dimension): return self.name def get_las_type(self): - return 'uint32' + return 5 diff --git a/opendm/dem/ground_rectification/io/las_io.py 
b/opendm/dem/ground_rectification/io/las_io.py index 07f50729..49ce0361 100755 --- a/opendm/dem/ground_rectification/io/las_io.py +++ b/opendm/dem/ground_rectification/io/las_io.py @@ -1,39 +1,52 @@ # TODO: Move to pylas when project migrates to python3 -import laspy +from laspy.file import File +from laspy.header import Header import numpy as np from ..point_cloud import PointCloud def read_cloud(point_cloud_path): # Open point cloud and read its properties - las_file = laspy.read(point_cloud_path) - header = las_file.header + las_file = File(point_cloud_path, mode='r') + header = (las_file.header.copy(), las_file.header.scale, las_file.header.offset,las_file.header.evlrs, las_file.header.vlrs) + [x_scale, y_scale, z_scale] = las_file.header.scale + [x_offset, y_offset, z_offset] = las_file.header.offset - x = las_file.x.scaled_array() - y = las_file.y.scaled_array() - z = las_file.z.scaled_array() + # Calculate the real coordinates + x = las_file.X * x_scale + x_offset + y = las_file.Y * y_scale + y_offset + z = las_file.Z * z_scale + z_offset - cloud = PointCloud.with_dimensions(x, y, z, las_file.classification.array, las_file.red, las_file.green, las_file.blue) + cloud = PointCloud.with_dimensions(x, y, z, las_file.Classification, las_file.red, las_file.green, las_file.blue) + + # Close the file + las_file.close() # Return the result return header, cloud def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimensions=False): + (h, scale, offset, evlrs, vlrs) = header + # Open output file - output_las_file = laspy.LasData(header) + output_las_file = File(output_point_cloud_path, mode='w', header=h, evlrs=evlrs, vlrs=vlrs) if write_extra_dimensions: - extra_dims = [laspy.ExtraBytesParams(name=name, type=dimension.get_las_type(), description="Dimension added by Ground Extend") for name, dimension in point_cloud.extra_dimensions_metadata.items()] - output_las_file.add_extra_dims(extra_dims) + # Create new dimensions + for name, dimension in 
point_cloud.extra_dimensions_metadata.items(): + output_las_file.define_new_dimension(name=name, data_type=dimension.get_las_type(), description="Dimension added by Ground Extend") + # Assign dimension values for dimension_name, values in point_cloud.extra_dimensions.items(): setattr(output_las_file, dimension_name, values) # Adapt points to scale and offset + [x_scale, y_scale, z_scale] = scale + [x_offset, y_offset, z_offset] = offset [x, y] = np.hsplit(point_cloud.xy, 2) - output_las_file.x = x.ravel() - output_las_file.y = y.ravel() - output_las_file.z = point_cloud.z + output_las_file.X = (x.ravel() - x_offset) / x_scale + output_las_file.Y = (y.ravel() - y_offset) / y_scale + output_las_file.Z = (point_cloud.z - z_offset) / z_scale # Set color [red, green, blue] = np.hsplit(point_cloud.rgb, 3) @@ -42,6 +55,11 @@ def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimens output_las_file.blue = blue.ravel() # Set classification - output_las_file.classification = point_cloud.classification.astype(np.uint8) + output_las_file.Classification = point_cloud.classification.astype(np.uint8) - output_las_file.write(output_point_cloud_path) \ No newline at end of file + # Set header + output_las_file.header.scale = scale + output_las_file.header.offset = offset + + # Close files + output_las_file.close() diff --git a/requirements.txt b/requirements.txt index 28d71347..c5441e9a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ ODMExifRead==3.0.4 Fiona==1.8.17 ; sys_platform == 'linux' or sys_platform == 'darwin' https://github.com/OpenDroneMap/windows-deps/raw/main/Fiona-1.8.19-cp38-cp38-win_amd64.whl ; sys_platform == 'win32' joblib==1.1.0 -laspy[laszip]==2.3.0 +laspy==1.7.0 lxml==4.6.1 matplotlib==3.3.3 networkx==2.5 From 0e1ece841230ccd1416973b3687544923c1cc40c Mon Sep 17 00:00:00 2001 From: Shuo Zhong Date: Tue, 29 Nov 2022 16:15:07 -0500 Subject: [PATCH 034/102] upgrade laspy to 2.2.0 with laszip support, allows to directly 
read and write laszip file --- opendm/dem/commands.py | 25 +--------- .../extra_dimensions/distance_dimension.py | 2 +- .../extra_dimensions/extended_dimension.py | 2 +- .../extra_dimensions/partition_dimension.py | 2 +- opendm/dem/ground_rectification/io/las_io.py | 48 ++++++------------- requirements.txt | 2 +- 6 files changed, 20 insertions(+), 61 deletions(-) diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py index 56b80b05..47903a68 100755 --- a/opendm/dem/commands.py +++ b/opendm/dem/commands.py @@ -50,36 +50,13 @@ def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_poin start = datetime.now() try: - # Currently, no Python 2 lib that supports reading and writing LAZ, so we will do it manually until ODM is migrated to Python 3 - # When migration is done, we can move to pylas and avoid using PDAL for conversion - tempLasFile = os.path.join(os.path.dirname(lasFile), 'tmp.las') - - # Convert LAZ to LAS - cmd = [ - 'pdal', - 'translate', - '-i %s' % lasFile, - '-o %s' % tempLasFile - ] - system.run(' '.join(cmd)) - log.ODM_INFO("Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(lasFile, reclassify_threshold, min_area, min_points)) run_rectification( - input=tempLasFile, output=tempLasFile, debug=debug, \ + input=lasFile, output=lasFile, debug=debug, \ reclassify_plan='median', reclassify_threshold=reclassify_threshold, \ extend_plan='surrounding', extend_grid_distance=5, \ min_area=min_area, min_points=min_points) - # Convert LAS to LAZ - cmd = [ - 'pdal', - 'translate', - '-i %s' % tempLasFile, - '-o %s' % lasFile - ] - system.run(' '.join(cmd)) - os.remove(tempLasFile) - except Exception as e: raise Exception("Error rectifying ground in file %s: %s" % (lasFile, str(e))) diff --git a/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py index 33b56835..d2f72fdb 100755 --- 
a/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py @@ -35,7 +35,7 @@ class DistanceDimension(Dimension): return 'distance_to_ground' def get_las_type(self): - return 10 + return 'float64' def __calculate_angle(self, model): "Calculate the angle between the estimated plane and the XY plane" diff --git a/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py index 741d041b..c371e83b 100755 --- a/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py @@ -20,4 +20,4 @@ class ExtendedDimension(Dimension): return 'extended' def get_las_type(self): - return 3 + return 'uint16' diff --git a/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py index 8d39866b..b7fed6b6 100755 --- a/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py +++ b/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py @@ -22,4 +22,4 @@ class PartitionDimension(Dimension): return self.name def get_las_type(self): - return 5 + return 'uint32' diff --git a/opendm/dem/ground_rectification/io/las_io.py b/opendm/dem/ground_rectification/io/las_io.py index 49ce0361..07f50729 100755 --- a/opendm/dem/ground_rectification/io/las_io.py +++ b/opendm/dem/ground_rectification/io/las_io.py @@ -1,52 +1,39 @@ # TODO: Move to pylas when project migrates to python3 -from laspy.file import File -from laspy.header import Header +import laspy import numpy as np from ..point_cloud import PointCloud def read_cloud(point_cloud_path): # Open point cloud and read its properties - las_file = File(point_cloud_path, mode='r') - header = (las_file.header.copy(), las_file.header.scale, 
las_file.header.offset,las_file.header.evlrs, las_file.header.vlrs) - [x_scale, y_scale, z_scale] = las_file.header.scale - [x_offset, y_offset, z_offset] = las_file.header.offset + las_file = laspy.read(point_cloud_path) + header = las_file.header - # Calculate the real coordinates - x = las_file.X * x_scale + x_offset - y = las_file.Y * y_scale + y_offset - z = las_file.Z * z_scale + z_offset + x = las_file.x.scaled_array() + y = las_file.y.scaled_array() + z = las_file.z.scaled_array() - cloud = PointCloud.with_dimensions(x, y, z, las_file.Classification, las_file.red, las_file.green, las_file.blue) - - # Close the file - las_file.close() + cloud = PointCloud.with_dimensions(x, y, z, las_file.classification.array, las_file.red, las_file.green, las_file.blue) # Return the result return header, cloud def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimensions=False): - (h, scale, offset, evlrs, vlrs) = header - # Open output file - output_las_file = File(output_point_cloud_path, mode='w', header=h, evlrs=evlrs, vlrs=vlrs) + output_las_file = laspy.LasData(header) if write_extra_dimensions: - # Create new dimensions - for name, dimension in point_cloud.extra_dimensions_metadata.items(): - output_las_file.define_new_dimension(name=name, data_type=dimension.get_las_type(), description="Dimension added by Ground Extend") - + extra_dims = [laspy.ExtraBytesParams(name=name, type=dimension.get_las_type(), description="Dimension added by Ground Extend") for name, dimension in point_cloud.extra_dimensions_metadata.items()] + output_las_file.add_extra_dims(extra_dims) # Assign dimension values for dimension_name, values in point_cloud.extra_dimensions.items(): setattr(output_las_file, dimension_name, values) # Adapt points to scale and offset - [x_scale, y_scale, z_scale] = scale - [x_offset, y_offset, z_offset] = offset [x, y] = np.hsplit(point_cloud.xy, 2) - output_las_file.X = (x.ravel() - x_offset) / x_scale - output_las_file.Y = (y.ravel() - 
y_offset) / y_scale - output_las_file.Z = (point_cloud.z - z_offset) / z_scale + output_las_file.x = x.ravel() + output_las_file.y = y.ravel() + output_las_file.z = point_cloud.z # Set color [red, green, blue] = np.hsplit(point_cloud.rgb, 3) @@ -55,11 +42,6 @@ def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimens output_las_file.blue = blue.ravel() # Set classification - output_las_file.Classification = point_cloud.classification.astype(np.uint8) + output_las_file.classification = point_cloud.classification.astype(np.uint8) - # Set header - output_las_file.header.scale = scale - output_las_file.header.offset = offset - - # Close files - output_las_file.close() + output_las_file.write(output_point_cloud_path) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index c5441e9a..28d71347 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ ODMExifRead==3.0.4 Fiona==1.8.17 ; sys_platform == 'linux' or sys_platform == 'darwin' https://github.com/OpenDroneMap/windows-deps/raw/main/Fiona-1.8.19-cp38-cp38-win_amd64.whl ; sys_platform == 'win32' joblib==1.1.0 -laspy==1.7.0 +laspy[laszip]==2.3.0 lxml==4.6.1 matplotlib==3.3.3 networkx==2.5 From b60995a512782094ece5ed913a0453b42ac33b73 Mon Sep 17 00:00:00 2001 From: Thor Date: Thu, 1 Dec 2022 17:20:02 -0500 Subject: [PATCH 035/102] replace laszip dependency with lazrs (laszip rust implementation with arm64 support) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 28d71347..2483c092 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ ODMExifRead==3.0.4 Fiona==1.8.17 ; sys_platform == 'linux' or sys_platform == 'darwin' https://github.com/OpenDroneMap/windows-deps/raw/main/Fiona-1.8.19-cp38-cp38-win_amd64.whl ; sys_platform == 'win32' joblib==1.1.0 -laspy[laszip]==2.3.0 +laspy[lazrs]==2.3.0 lxml==4.6.1 matplotlib==3.3.3 networkx==2.5 From 
9ddb4554c4ec5c5662ae2610771c11f5489eb436 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 2 Dec 2022 16:53:38 -0500 Subject: [PATCH 036/102] PoC codem integration --- opendm/align.py | 34 +++++ opendm/config.py | 8 ++ opendm/types.py | 3 +- stages/dataset.py | 2 +- stages/odm_georeferencing.py | 246 +++++++++++++++++++---------------- 5 files changed, 176 insertions(+), 117 deletions(-) create mode 100644 opendm/align.py diff --git a/opendm/align.py b/opendm/align.py new file mode 100644 index 00000000..ab41a342 --- /dev/null +++ b/opendm/align.py @@ -0,0 +1,34 @@ +import os +import shutil +import codem +import dataclasses +from opendm import log + +def compute_alignment_matrix(input_laz, align_file, tmp_dir): + if os.path.exists(tmp_dir): + shutil.rmtree(tmp_dir) + os.mkdir(tmp_dir) + + conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, MIN_RESOLUTION=0.2, OUTPUT_DIR=tmp_dir)) # TODO: how to compute this + fnd_obj, aoi_obj = codem.preprocess(conf) + fnd_obj.prep() + aoi_obj.prep() + log.ODM_INFO("Aligning reconstruction to %s" % align_file) + log.ODM_INFO("Coarse registration...") + dsm_reg = codem.coarse_registration(fnd_obj, aoi_obj, conf) + log.ODM_INFO("Fine registration...") + icp_reg = codem.fine_registration(fnd_obj, aoi_obj, dsm_reg, conf) + + app_reg = codem.registration.ApplyRegistration( + fnd_obj, + aoi_obj, + icp_reg.registration_parameters, + icp_reg.residual_vectors, + icp_reg.residual_origins, + conf, + None, + ) + + return app_reg.get_registration_transformation() + # if os.path.exists(tmp_dir): + # shutil.rmtree(tmp_dir) \ No newline at end of file diff --git a/opendm/config.py b/opendm/config.py index 864ad7e1..b8a39721 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -470,6 +470,14 @@ def config(argv=None, parser=None): 'EPSG: or <+proj definition>\n' 'image_name geo_x geo_y geo_z [omega (degrees)] [phi (degrees)] [kappa (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n' 'Default: %(default)s')) + 
+ parser.add_argument('--align-to', + metavar='', + action=StoreValue, + default=None, + help=('Path to a GeoTIFF DEM or a LAS/LAZ point cloud ' + 'that the reconstruction outputs should be automatically aligned to. ' + 'Default: %(default)s')) parser.add_argument('--use-exif', action=StoreTrue, diff --git a/opendm/types.py b/opendm/types.py index af04a334..4a930f2c 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -241,7 +241,7 @@ class ODM_GeoRef(object): return (self.utm_east_offset, self.utm_north_offset) class ODM_Tree(object): - def __init__(self, root_path, gcp_file = None, geo_file = None): + def __init__(self, root_path, gcp_file = None, geo_file = None, align_file = None): # root path to the project self.root_path = io.absolute_path_file(root_path) self.input_images = os.path.join(self.root_path, 'images') @@ -296,6 +296,7 @@ class ODM_Tree(object): self.odm_georeferencing_gcp = gcp_file or io.find('gcp_list.txt', self.root_path) self.odm_georeferencing_gcp_utm = os.path.join(self.odm_georeferencing, 'gcp_list_utm.txt') self.odm_geo_file = geo_file or io.find('geo.txt', self.root_path) + self.odm_align_file = align_file or io.find('align.laz', self.root_path) self.odm_georeferencing_proj = 'proj.txt' self.odm_georeferencing_model_txt_geo = os.path.join( diff --git a/stages/dataset.py b/stages/dataset.py index a167b845..caf692b3 100644 --- a/stages/dataset.py +++ b/stages/dataset.py @@ -46,7 +46,7 @@ def load_images_database(database_file): class ODMLoadDatasetStage(types.ODM_Stage): def process(self, args, outputs): outputs['start_time'] = system.now_raw() - tree = types.ODM_Tree(args.project_path, args.gcp, args.geo) + tree = types.ODM_Tree(args.project_path, args.gcp, args.geo, args.align_to) outputs['tree'] = tree if io.file_exists(tree.benchmarking): diff --git a/stages/odm_georeferencing.py b/stages/odm_georeferencing.py index 062a7034..59f847e9 100644 --- a/stages/odm_georeferencing.py +++ b/stages/odm_georeferencing.py @@ -18,6 +18,7 @@ from 
opendm import point_cloud from opendm.multispectral import get_primary_band_name from opendm.osfm import OSFMContext from opendm.boundary import as_polygon, export_to_bounds_files +from opendm.align import compute_alignment_matrix class ODMGeoreferencingStage(types.ODM_Stage): def process(self, args, outputs): @@ -30,143 +31,158 @@ class ODMGeoreferencingStage(types.ODM_Stage): gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml") gcp_geojson_export_file = tree.path("odm_georeferencing", "ground_control_points.geojson") - if reconstruction.has_gcp() and (not io.file_exists(gcp_export_file) or self.rerun()): - octx = OSFMContext(tree.opensfm) - gcps = octx.ground_control_points(reconstruction.georef.proj4()) + # if reconstruction.has_gcp() and (not io.file_exists(gcp_export_file) or self.rerun()): + # octx = OSFMContext(tree.opensfm) + # gcps = octx.ground_control_points(reconstruction.georef.proj4()) - if len(gcps): - gcp_schema = { - 'geometry': 'Point', - 'properties': OrderedDict([ - ('id', 'str'), - ('observations_count', 'int'), - ('observations_list', 'str'), - ('error_x', 'float'), - ('error_y', 'float'), - ('error_z', 'float'), - ]) - } + # if len(gcps): + # gcp_schema = { + # 'geometry': 'Point', + # 'properties': OrderedDict([ + # ('id', 'str'), + # ('observations_count', 'int'), + # ('observations_list', 'str'), + # ('error_x', 'float'), + # ('error_y', 'float'), + # ('error_z', 'float'), + # ]) + # } - # Write GeoPackage - with fiona.open(gcp_export_file, 'w', driver="GPKG", - crs=fiona.crs.from_string(reconstruction.georef.proj4()), - schema=gcp_schema) as f: - for gcp in gcps: - f.write({ - 'geometry': { - 'type': 'Point', - 'coordinates': gcp['coordinates'], - }, - 'properties': OrderedDict([ - ('id', gcp['id']), - ('observations_count', len(gcp['observations'])), - ('observations_list', ",".join([obs['shot_id'] for obs in gcp['observations']])), - ('error_x', gcp['error'][0]), - ('error_y', gcp['error'][1]), - 
('error_z', gcp['error'][2]), - ]) - }) + # # Write GeoPackage + # with fiona.open(gcp_export_file, 'w', driver="GPKG", + # crs=fiona.crs.from_string(reconstruction.georef.proj4()), + # schema=gcp_schema) as f: + # for gcp in gcps: + # f.write({ + # 'geometry': { + # 'type': 'Point', + # 'coordinates': gcp['coordinates'], + # }, + # 'properties': OrderedDict([ + # ('id', gcp['id']), + # ('observations_count', len(gcp['observations'])), + # ('observations_list', ",".join([obs['shot_id'] for obs in gcp['observations']])), + # ('error_x', gcp['error'][0]), + # ('error_y', gcp['error'][1]), + # ('error_z', gcp['error'][2]), + # ]) + # }) - # Write GML - try: - system.run('ogr2ogr -of GML "{}" "{}"'.format(gcp_gml_export_file, gcp_export_file)) - except Exception as e: - log.ODM_WARNING("Cannot generate ground control points GML file: %s" % str(e)) + # # Write GML + # try: + # system.run('ogr2ogr -of GML "{}" "{}"'.format(gcp_gml_export_file, gcp_export_file)) + # except Exception as e: + # log.ODM_WARNING("Cannot generate ground control points GML file: %s" % str(e)) - # Write GeoJSON - geojson = { - 'type': 'FeatureCollection', - 'features': [] - } + # # Write GeoJSON + # geojson = { + # 'type': 'FeatureCollection', + # 'features': [] + # } - from_srs = CRS.from_proj4(reconstruction.georef.proj4()) - to_srs = CRS.from_epsg(4326) - transformer = location.transformer(from_srs, to_srs) + # from_srs = CRS.from_proj4(reconstruction.georef.proj4()) + # to_srs = CRS.from_epsg(4326) + # transformer = location.transformer(from_srs, to_srs) - for gcp in gcps: - properties = gcp.copy() - del properties['coordinates'] + # for gcp in gcps: + # properties = gcp.copy() + # del properties['coordinates'] - geojson['features'].append({ - 'type': 'Feature', - 'geometry': { - 'type': 'Point', - 'coordinates': transformer.TransformPoint(*gcp['coordinates']), - }, - 'properties': properties - }) + # geojson['features'].append({ + # 'type': 'Feature', + # 'geometry': { + # 'type': 'Point', 
+ # 'coordinates': transformer.TransformPoint(*gcp['coordinates']), + # }, + # 'properties': properties + # }) - with open(gcp_geojson_export_file, 'w') as f: - f.write(json.dumps(geojson, indent=4)) + # with open(gcp_geojson_export_file, 'w') as f: + # f.write(json.dumps(geojson, indent=4)) - else: - log.ODM_WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file) + # else: + # log.ODM_WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file) if not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun(): - cmd = ('pdal translate -i "%s" -o \"%s\"' % (tree.filtered_point_cloud, tree.odm_georeferencing_model_laz)) - stages = ["ferry"] - params = [ - '--filters.ferry.dimensions="views => UserData"', - '--writers.las.compression="lazip"', - ] + # cmd = ('pdal translate -i "%s" -o \"%s\"' % (tree.filtered_point_cloud, tree.odm_georeferencing_model_laz)) + # stages = ["ferry"] + # params = [ + # '--filters.ferry.dimensions="views => UserData"', + # '--writers.las.compression="lazip"', + # ] - if reconstruction.is_georeferenced(): - log.ODM_INFO("Georeferencing point cloud") + # if reconstruction.is_georeferenced(): + # log.ODM_INFO("Georeferencing point cloud") - stages.append("transformation") - params += [ - '--filters.transformation.matrix="1 0 0 %s 0 1 0 %s 0 0 1 0 0 0 0 1"' % reconstruction.georef.utm_offset(), - '--writers.las.offset_x=%s' % reconstruction.georef.utm_east_offset, - '--writers.las.offset_y=%s' % reconstruction.georef.utm_north_offset, - '--writers.las.scale_x=0.001', - '--writers.las.scale_y=0.001', - '--writers.las.scale_z=0.001', - '--writers.las.offset_z=0', - '--writers.las.a_srs="%s"' % reconstruction.georef.proj4() - ] + # stages.append("transformation") + # params += [ + # '--filters.transformation.matrix="1 0 0 %s 0 1 0 %s 0 0 1 0 0 0 0 1"' % reconstruction.georef.utm_offset(), + # '--writers.las.offset_x=%s' % reconstruction.georef.utm_east_offset, + # '--writers.las.offset_y=%s' % 
reconstruction.georef.utm_north_offset, + # '--writers.las.scale_x=0.001', + # '--writers.las.scale_y=0.001', + # '--writers.las.scale_z=0.001', + # '--writers.las.offset_z=0', + # '--writers.las.a_srs="%s"' % reconstruction.georef.proj4() + # ] - if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file): - log.ODM_INFO("Embedding GCP info in point cloud") - params += [ - '--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODM_GCP\\\", \\\"description\\\": \\\"Ground Control Points (GML)\\\"}"' % gcp_gml_export_file.replace(os.sep, "/") - ] + # if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file): + # log.ODM_INFO("Embedding GCP info in point cloud") + # params += [ + # '--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODM_GCP\\\", \\\"description\\\": \\\"Ground Control Points (GML)\\\"}"' % gcp_gml_export_file.replace(os.sep, "/") + # ] - system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) + # system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) - self.update_progress(50) + # self.update_progress(50) - if args.crop > 0: - log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud") - cropper = Cropper(tree.odm_georeferencing, 'odm_georeferenced_model') + # if args.crop > 0: + # log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud") + # cropper = Cropper(tree.odm_georeferencing, 'odm_georeferenced_model') - if args.fast_orthophoto: - decimation_step = 4 - else: - decimation_step = 40 + # if args.fast_orthophoto: + # decimation_step = 4 + # else: + # decimation_step = 40 - # More aggressive decimation for large datasets - if not args.fast_orthophoto: - decimation_step *= int(len(reconstruction.photos) / 1000) + 1 - decimation_step = min(decimation_step, 95) + # # More aggressive decimation for large datasets + # if not args.fast_orthophoto: + # decimation_step *= int(len(reconstruction.photos) / 1000) + 1 
+ # decimation_step = min(decimation_step, 95) - try: - cropper.create_bounds_gpkg(tree.odm_georeferencing_model_laz, args.crop, - decimation_step=decimation_step) - except: - log.ODM_WARNING("Cannot calculate crop bounds! We will skip cropping") - args.crop = 0 + # try: + # cropper.create_bounds_gpkg(tree.odm_georeferencing_model_laz, args.crop, + # decimation_step=decimation_step) + # except: + # log.ODM_WARNING("Cannot calculate crop bounds! We will skip cropping") + # args.crop = 0 - if 'boundary' in outputs and args.crop == 0: - log.ODM_INFO("Using boundary JSON as cropping area") + # if 'boundary' in outputs and args.crop == 0: + # log.ODM_INFO("Using boundary JSON as cropping area") - bounds_base, _ = os.path.splitext(tree.odm_georeferencing_model_laz) - bounds_json = bounds_base + ".bounds.geojson" - bounds_gpkg = bounds_base + ".bounds.gpkg" - export_to_bounds_files(outputs['boundary'], reconstruction.get_proj_srs(), bounds_json, bounds_gpkg) - else: - log.ODM_INFO("Converting point cloud (non-georeferenced)") - system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) - + # bounds_base, _ = os.path.splitext(tree.odm_georeferencing_model_laz) + # bounds_json = bounds_base + ".bounds.geojson" + # bounds_gpkg = bounds_base + ".bounds.gpkg" + # export_to_bounds_files(outputs['boundary'], reconstruction.get_proj_srs(), bounds_json, bounds_gpkg) + # else: + # log.ODM_INFO("Converting point cloud (non-georeferenced)") + # system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) + + + if tree.odm_align_file is not None: + tmp_dir = tree.path("odm_georeferencing", "codem") + a_matrix = compute_alignment_matrix(tree.odm_georeferencing_model_laz, tree.odm_align_file, tmp_dir) + if a_matrix is not None: + print(a_matrix) + exit(1) + else: + log.ODM_WARNING("Alignment to %s will be skipped." 
% tree.odm_align_file) + # Align + # - Compute alignment + # - Transform point cloud + # - Transform textured model(s) + # + point_cloud.post_point_cloud_steps(args, tree, self.rerun()) else: log.ODM_WARNING('Found a valid georeferenced model in: %s' From 020979da4f89b6409ceaa0c93567e22d5fc62804 Mon Sep 17 00:00:00 2001 From: Saijin-Naib Date: Mon, 5 Dec 2022 00:16:58 -0500 Subject: [PATCH 037/102] Add Sensor Readout: FUJIFILM X-T2 35ms for the FUJIFILM X-T2 Sensor Readout ISO:ISO-12,800 Shutter:1/10,000s (Forced Electronic Shutter) --- opendm/rollingshutter.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/opendm/rollingshutter.py b/opendm/rollingshutter.py index f8cde129..55a39877 100644 --- a/opendm/rollingshutter.py +++ b/opendm/rollingshutter.py @@ -28,6 +28,8 @@ RS_DATABASE = { 'teracube teracube one': 32 # TeraCube TeraCube_One TR1907Q Mobile Phone + 'fujifilm x-t2': 35 # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera + # Help us add more! # See: https://github.com/OpenDroneMap/RSCalibration for instructions @@ -72,3 +74,4 @@ def get_rolling_shutter_readout(photo, override_value=0): log.ODM_WARNING("Rolling shutter readout time for \"%s %s\" is not in our database, using default of %sms which might be incorrect. 
Use --rolling-shutter-readout to set an actual value (see https://github.com/OpenDroneMap/RSCalibration for instructions on how to calculate this value)" % (make, model, DEFAULT_RS_READOUT)) warn_db_missing[key] = True return float(DEFAULT_RS_READOUT) + From e91eb5dad9b966e5ae8fd95dad364f1972e1b847 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 5 Dec 2022 11:58:10 -0500 Subject: [PATCH 038/102] Commas --- opendm/rollingshutter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/opendm/rollingshutter.py b/opendm/rollingshutter.py index 55a39877..99c4fad2 100644 --- a/opendm/rollingshutter.py +++ b/opendm/rollingshutter.py @@ -26,9 +26,9 @@ RS_DATABASE = { 'gopro hero4 black': 30, # GoPro Hero 4 Black 'gopro hero8 black': 17, # GoPro Hero 8 Black - 'teracube teracube one': 32 # TeraCube TeraCube_One TR1907Q Mobile Phone + 'teracube teracube one': 32, # TeraCube TeraCube_One TR1907Q Mobile Phone - 'fujifilm x-t2': 35 # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera + 'fujifilm x-t2': 35, # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera # Help us add more! 
From 87880d24fbbbb13baa3af01f1e5864c6b228005c Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 5 Dec 2022 23:10:29 -0500 Subject: [PATCH 039/102] Remove broken action --- .github/workflows/publish-snap.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/publish-snap.yml b/.github/workflows/publish-snap.yml index 0b330981..4ee4cfa2 100644 --- a/.github/workflows/publish-snap.yml +++ b/.github/workflows/publish-snap.yml @@ -26,11 +26,6 @@ jobs: uses: diddlesnaps/snapcraft-multiarch-action@v1 with: architecture: ${{ matrix.architecture }} - - name: Review - uses: diddlesnaps/snapcraft-review-tools-action@v1 - with: - snap: ${{ steps.build.outputs.snap }} - isClassic: 'false' - name: Publish unstable builds to Edge if: github.ref == 'refs/heads/master' uses: snapcore/action-publish@v1 From b8e77f67500fd9cd5f0dbc80d6ed19e6cd5456cc Mon Sep 17 00:00:00 2001 From: Saijin-Naib Date: Tue, 6 Dec 2022 01:21:06 -0500 Subject: [PATCH 040/102] Add Fujifilm X-A5 Sensor Readout 186ms ISO-6400 1/2000s (Electronic Shutter) --- opendm/rollingshutter.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/opendm/rollingshutter.py b/opendm/rollingshutter.py index 99c4fad2..28c8d23d 100644 --- a/opendm/rollingshutter.py +++ b/opendm/rollingshutter.py @@ -28,6 +28,8 @@ RS_DATABASE = { 'teracube teracube one': 32, # TeraCube TeraCube_One TR1907Q Mobile Phone + 'fujifilm x-a5': 186, # FUJIFILM X-A5 Mirrorless Interchangeable Lens Camera + 'fujifilm x-t2': 35, # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera From 9a77a4e611ff5f9c2aa5c81f8f798f847c819d7c Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 7 Dec 2022 13:43:40 -0500 Subject: [PATCH 041/102] Point cloud, textured model transform --- opendm/align.py | 43 +++++++++++++++++++++++++------ opendm/io.py | 4 +++ stages/odm_georeferencing.py | 50 +++++++++++++++++++++++++++--------- 3 files changed, 77 insertions(+), 20 deletions(-) diff --git a/opendm/align.py b/opendm/align.py index 
ab41a342..ea226bc4 100644 --- a/opendm/align.py +++ b/opendm/align.py @@ -1,15 +1,18 @@ import os import shutil +import json import codem import dataclasses +import pdal +import numpy as np from opendm import log -def compute_alignment_matrix(input_laz, align_file, tmp_dir): - if os.path.exists(tmp_dir): - shutil.rmtree(tmp_dir) - os.mkdir(tmp_dir) +def compute_alignment_matrix(input_laz, align_file, stats_dir): + if os.path.exists(stats_dir): + shutil.rmtree(stats_dir) + os.mkdir(stats_dir) - conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, MIN_RESOLUTION=0.2, OUTPUT_DIR=tmp_dir)) # TODO: how to compute this + conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir)) fnd_obj, aoi_obj = codem.preprocess(conf) fnd_obj.prep() aoi_obj.prep() @@ -29,6 +32,30 @@ def compute_alignment_matrix(input_laz, align_file, tmp_dir): None, ) - return app_reg.get_registration_transformation() - # if os.path.exists(tmp_dir): - # shutil.rmtree(tmp_dir) \ No newline at end of file + reg = app_reg.get_registration_transformation() + matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4)) + return matrix + +def transform_point_cloud(input_laz, a_matrix, output_laz): + pipe = [ + input_laz, + { + 'type': 'filters.transformation', + 'matrix': " ".join(list(map(str, a_matrix.flatten()))), + }, + output_laz, + ] + p = pdal.Pipeline(json.dumps(pipe)) + p.execute() + +def transform_obj(input_obj, a_matrix, output_obj): + with open(input_obj, 'r') as fin: + with open(output_obj, 'w') as fout: + lines = fin.readlines() + for line in lines: + if line.startswith("v "): + v = np.fromstring(line.strip()[2:] + " 1", sep=' ', dtype=float) + vt = v.dot(a_matrix)[:3] + fout.write("v " + " ".join(map(str, list(vt))) + '\n') + else: + fout.write(line) \ No newline at end of file diff --git a/opendm/io.py b/opendm/io.py index b8be1af0..fc878edb 100644 --- a/opendm/io.py +++ b/opendm/io.py @@ -85,3 +85,7 @@ def 
path_or_json_string_to_dict(string): raise ValueError("{0} is not a valid JSON file.".format(string)) else: raise ValueError("{0} is not a valid JSON file or string.".format(string)) + +def touch(file): + with open(file, 'w') as fout: + fout.write("Done!\n") \ No newline at end of file diff --git a/stages/odm_georeferencing.py b/stages/odm_georeferencing.py index 59f847e9..47f81d59 100644 --- a/stages/odm_georeferencing.py +++ b/stages/odm_georeferencing.py @@ -18,7 +18,7 @@ from opendm import point_cloud from opendm.multispectral import get_primary_band_name from opendm.osfm import OSFMContext from opendm.boundary import as_polygon, export_to_bounds_files -from opendm.align import compute_alignment_matrix +from opendm.align import compute_alignment_matrix, transform_point_cloud, transform_obj class ODMGeoreferencingStage(types.ODM_Stage): def process(self, args, outputs): @@ -170,18 +170,44 @@ class ODMGeoreferencingStage(types.ODM_Stage): if tree.odm_align_file is not None: - tmp_dir = tree.path("odm_georeferencing", "codem") - a_matrix = compute_alignment_matrix(tree.odm_georeferencing_model_laz, tree.odm_align_file, tmp_dir) - if a_matrix is not None: - print(a_matrix) - exit(1) + alignment_file = os.path.join(tree.odm_georeferencing, 'alignment_done.txt') + + if not io.file_exists(alignment_file) or self.rerun(): + stats_dir = tree.path("opensfm", "stats", "codem") + a_matrix = compute_alignment_matrix(tree.odm_georeferencing_model_laz, tree.odm_align_file, stats_dir) + if a_matrix is not None: + log.ODM_INFO("Alignment matrix: %s" % a_matrix) + + # Align point cloud + unaligned_model = io.related_file_path(tree.odm_georeferencing_model_laz, postfix="_unaligned") + if not os.path.isfile(unaligned_model): + os.rename(tree.odm_georeferencing_model_laz, unaligned_model) + try: + transform_point_cloud(unaligned_model, a_matrix, tree.odm_georeferencing_model_laz) + log.ODM_INFO("Transformed %s" % tree.odm_georeferencing_model_laz) + except Exception as e: + 
log.ODM_WARNING("Cannot transform point cloud: %s" % str(e)) + os.rename(unaligned_model, tree.odm_georeferencing_model_laz) + + # Align textured models + for texturing in [tree.odm_texturing, tree.odm_25dtexturing]: + obj = os.path.join(texturing, "odm_textured_model_geo.obj") + if os.path.isfile(obj): + unaligned_obj = io.related_file_path(obj, postfix="_unaligned") + if not os.path.isfile(unaligned_obj): + os.rename(obj, unaligned_obj) + try: + transform_obj(unaligned_obj, a_matrix, obj) + log.ODM_INFO("Transformed %s" % obj) + except Exception as e: + log.ODM_WARNING("Cannot transform textured model: %s" % str(e)) + os.rename(unaligned_obj, obj) + else: + log.ODM_WARNING("Alignment to %s will be skipped." % tree.odm_align_file) + + io.touch(alignment_file) else: - log.ODM_WARNING("Alignment to %s will be skipped." % tree.odm_align_file) - # Align - # - Compute alignment - # - Transform point cloud - # - Transform textured model(s) - # + log.ODM_WARNING("Already computed alignment") point_cloud.post_point_cloud_steps(args, tree, self.rerun()) else: From 360ab3cc5ff085a2cc69fb3d518817c32980df6b Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 7 Dec 2022 15:20:09 -0500 Subject: [PATCH 042/102] PoC ortho,point cloud and textured model alignment working --- opendm/align.py | 6 +- stages/odm_georeferencing.py | 237 +++++++++++++++++------------------ 2 files changed, 122 insertions(+), 121 deletions(-) diff --git a/opendm/align.py b/opendm/align.py index ea226bc4..cd1b5df2 100644 --- a/opendm/align.py +++ b/opendm/align.py @@ -48,14 +48,16 @@ def transform_point_cloud(input_laz, a_matrix, output_laz): p = pdal.Pipeline(json.dumps(pipe)) p.execute() -def transform_obj(input_obj, a_matrix, output_obj): +def transform_obj(input_obj, a_matrix, geo_offset, output_obj): + g_off = np.array([geo_offset[0], geo_offset[1], 0, 0]) + with open(input_obj, 'r') as fin: with open(output_obj, 'w') as fout: lines = fin.readlines() for line in lines: if line.startswith("v "): 
v = np.fromstring(line.strip()[2:] + " 1", sep=' ', dtype=float) - vt = v.dot(a_matrix)[:3] + vt = (a_matrix.dot((v + g_off)) - g_off)[:3] fout.write("v " + " ".join(map(str, list(vt))) + '\n') else: fout.write(line) \ No newline at end of file diff --git a/stages/odm_georeferencing.py b/stages/odm_georeferencing.py index 47f81d59..4c5f7398 100644 --- a/stages/odm_georeferencing.py +++ b/stages/odm_georeferencing.py @@ -31,142 +31,142 @@ class ODMGeoreferencingStage(types.ODM_Stage): gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml") gcp_geojson_export_file = tree.path("odm_georeferencing", "ground_control_points.geojson") - # if reconstruction.has_gcp() and (not io.file_exists(gcp_export_file) or self.rerun()): - # octx = OSFMContext(tree.opensfm) - # gcps = octx.ground_control_points(reconstruction.georef.proj4()) + if reconstruction.has_gcp() and (not io.file_exists(gcp_export_file) or self.rerun()): + octx = OSFMContext(tree.opensfm) + gcps = octx.ground_control_points(reconstruction.georef.proj4()) - # if len(gcps): - # gcp_schema = { - # 'geometry': 'Point', - # 'properties': OrderedDict([ - # ('id', 'str'), - # ('observations_count', 'int'), - # ('observations_list', 'str'), - # ('error_x', 'float'), - # ('error_y', 'float'), - # ('error_z', 'float'), - # ]) - # } + if len(gcps): + gcp_schema = { + 'geometry': 'Point', + 'properties': OrderedDict([ + ('id', 'str'), + ('observations_count', 'int'), + ('observations_list', 'str'), + ('error_x', 'float'), + ('error_y', 'float'), + ('error_z', 'float'), + ]) + } - # # Write GeoPackage - # with fiona.open(gcp_export_file, 'w', driver="GPKG", - # crs=fiona.crs.from_string(reconstruction.georef.proj4()), - # schema=gcp_schema) as f: - # for gcp in gcps: - # f.write({ - # 'geometry': { - # 'type': 'Point', - # 'coordinates': gcp['coordinates'], - # }, - # 'properties': OrderedDict([ - # ('id', gcp['id']), - # ('observations_count', len(gcp['observations'])), - # ('observations_list', 
",".join([obs['shot_id'] for obs in gcp['observations']])), - # ('error_x', gcp['error'][0]), - # ('error_y', gcp['error'][1]), - # ('error_z', gcp['error'][2]), - # ]) - # }) + # Write GeoPackage + with fiona.open(gcp_export_file, 'w', driver="GPKG", + crs=fiona.crs.from_string(reconstruction.georef.proj4()), + schema=gcp_schema) as f: + for gcp in gcps: + f.write({ + 'geometry': { + 'type': 'Point', + 'coordinates': gcp['coordinates'], + }, + 'properties': OrderedDict([ + ('id', gcp['id']), + ('observations_count', len(gcp['observations'])), + ('observations_list', ",".join([obs['shot_id'] for obs in gcp['observations']])), + ('error_x', gcp['error'][0]), + ('error_y', gcp['error'][1]), + ('error_z', gcp['error'][2]), + ]) + }) - # # Write GML - # try: - # system.run('ogr2ogr -of GML "{}" "{}"'.format(gcp_gml_export_file, gcp_export_file)) - # except Exception as e: - # log.ODM_WARNING("Cannot generate ground control points GML file: %s" % str(e)) + # Write GML + try: + system.run('ogr2ogr -of GML "{}" "{}"'.format(gcp_gml_export_file, gcp_export_file)) + except Exception as e: + log.ODM_WARNING("Cannot generate ground control points GML file: %s" % str(e)) - # # Write GeoJSON - # geojson = { - # 'type': 'FeatureCollection', - # 'features': [] - # } + # Write GeoJSON + geojson = { + 'type': 'FeatureCollection', + 'features': [] + } - # from_srs = CRS.from_proj4(reconstruction.georef.proj4()) - # to_srs = CRS.from_epsg(4326) - # transformer = location.transformer(from_srs, to_srs) + from_srs = CRS.from_proj4(reconstruction.georef.proj4()) + to_srs = CRS.from_epsg(4326) + transformer = location.transformer(from_srs, to_srs) - # for gcp in gcps: - # properties = gcp.copy() - # del properties['coordinates'] + for gcp in gcps: + properties = gcp.copy() + del properties['coordinates'] - # geojson['features'].append({ - # 'type': 'Feature', - # 'geometry': { - # 'type': 'Point', - # 'coordinates': transformer.TransformPoint(*gcp['coordinates']), - # }, - # 'properties': 
properties - # }) + geojson['features'].append({ + 'type': 'Feature', + 'geometry': { + 'type': 'Point', + 'coordinates': transformer.TransformPoint(*gcp['coordinates']), + }, + 'properties': properties + }) - # with open(gcp_geojson_export_file, 'w') as f: - # f.write(json.dumps(geojson, indent=4)) + with open(gcp_geojson_export_file, 'w') as f: + f.write(json.dumps(geojson, indent=4)) - # else: - # log.ODM_WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file) + else: + log.ODM_WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file) if not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun(): - # cmd = ('pdal translate -i "%s" -o \"%s\"' % (tree.filtered_point_cloud, tree.odm_georeferencing_model_laz)) - # stages = ["ferry"] - # params = [ - # '--filters.ferry.dimensions="views => UserData"', - # '--writers.las.compression="lazip"', - # ] + cmd = ('pdal translate -i "%s" -o \"%s\"' % (tree.filtered_point_cloud, tree.odm_georeferencing_model_laz)) + stages = ["ferry"] + params = [ + '--filters.ferry.dimensions="views => UserData"', + '--writers.las.compression="lazip"', + ] - # if reconstruction.is_georeferenced(): - # log.ODM_INFO("Georeferencing point cloud") + if reconstruction.is_georeferenced(): + log.ODM_INFO("Georeferencing point cloud") - # stages.append("transformation") - # params += [ - # '--filters.transformation.matrix="1 0 0 %s 0 1 0 %s 0 0 1 0 0 0 0 1"' % reconstruction.georef.utm_offset(), - # '--writers.las.offset_x=%s' % reconstruction.georef.utm_east_offset, - # '--writers.las.offset_y=%s' % reconstruction.georef.utm_north_offset, - # '--writers.las.scale_x=0.001', - # '--writers.las.scale_y=0.001', - # '--writers.las.scale_z=0.001', - # '--writers.las.offset_z=0', - # '--writers.las.a_srs="%s"' % reconstruction.georef.proj4() - # ] + stages.append("transformation") + params += [ + '--filters.transformation.matrix="1 0 0 %s 0 1 0 %s 0 0 1 0 0 0 0 1"' % reconstruction.georef.utm_offset(), + 
'--writers.las.offset_x=%s' % reconstruction.georef.utm_east_offset, + '--writers.las.offset_y=%s' % reconstruction.georef.utm_north_offset, + '--writers.las.scale_x=0.001', + '--writers.las.scale_y=0.001', + '--writers.las.scale_z=0.001', + '--writers.las.offset_z=0', + '--writers.las.a_srs="%s"' % reconstruction.georef.proj4() + ] - # if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file): - # log.ODM_INFO("Embedding GCP info in point cloud") - # params += [ - # '--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODM_GCP\\\", \\\"description\\\": \\\"Ground Control Points (GML)\\\"}"' % gcp_gml_export_file.replace(os.sep, "/") - # ] + if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file): + log.ODM_INFO("Embedding GCP info in point cloud") + params += [ + '--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODM_GCP\\\", \\\"description\\\": \\\"Ground Control Points (GML)\\\"}"' % gcp_gml_export_file.replace(os.sep, "/") + ] - # system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) + system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) - # self.update_progress(50) + self.update_progress(50) - # if args.crop > 0: - # log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud") - # cropper = Cropper(tree.odm_georeferencing, 'odm_georeferenced_model') + if args.crop > 0: + log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud") + cropper = Cropper(tree.odm_georeferencing, 'odm_georeferenced_model') - # if args.fast_orthophoto: - # decimation_step = 4 - # else: - # decimation_step = 40 + if args.fast_orthophoto: + decimation_step = 4 + else: + decimation_step = 40 - # # More aggressive decimation for large datasets - # if not args.fast_orthophoto: - # decimation_step *= int(len(reconstruction.photos) / 1000) + 1 - # decimation_step = min(decimation_step, 95) + # More aggressive decimation for large datasets + if 
not args.fast_orthophoto: + decimation_step *= int(len(reconstruction.photos) / 1000) + 1 + decimation_step = min(decimation_step, 95) - # try: - # cropper.create_bounds_gpkg(tree.odm_georeferencing_model_laz, args.crop, - # decimation_step=decimation_step) - # except: - # log.ODM_WARNING("Cannot calculate crop bounds! We will skip cropping") - # args.crop = 0 + try: + cropper.create_bounds_gpkg(tree.odm_georeferencing_model_laz, args.crop, + decimation_step=decimation_step) + except: + log.ODM_WARNING("Cannot calculate crop bounds! We will skip cropping") + args.crop = 0 - # if 'boundary' in outputs and args.crop == 0: - # log.ODM_INFO("Using boundary JSON as cropping area") + if 'boundary' in outputs and args.crop == 0: + log.ODM_INFO("Using boundary JSON as cropping area") - # bounds_base, _ = os.path.splitext(tree.odm_georeferencing_model_laz) - # bounds_json = bounds_base + ".bounds.geojson" - # bounds_gpkg = bounds_base + ".bounds.gpkg" - # export_to_bounds_files(outputs['boundary'], reconstruction.get_proj_srs(), bounds_json, bounds_gpkg) - # else: - # log.ODM_INFO("Converting point cloud (non-georeferenced)") - # system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) + bounds_base, _ = os.path.splitext(tree.odm_georeferencing_model_laz) + bounds_json = bounds_base + ".bounds.geojson" + bounds_gpkg = bounds_base + ".bounds.gpkg" + export_to_bounds_files(outputs['boundary'], reconstruction.get_proj_srs(), bounds_json, bounds_gpkg) + else: + log.ODM_INFO("Converting point cloud (non-georeferenced)") + system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) if tree.odm_align_file is not None: @@ -180,8 +180,8 @@ class ODMGeoreferencingStage(types.ODM_Stage): # Align point cloud unaligned_model = io.related_file_path(tree.odm_georeferencing_model_laz, postfix="_unaligned") - if not os.path.isfile(unaligned_model): - os.rename(tree.odm_georeferencing_model_laz, unaligned_model) + os.rename(tree.odm_georeferencing_model_laz, unaligned_model) + 
try: transform_point_cloud(unaligned_model, a_matrix, tree.odm_georeferencing_model_laz) log.ODM_INFO("Transformed %s" % tree.odm_georeferencing_model_laz) @@ -194,10 +194,9 @@ class ODMGeoreferencingStage(types.ODM_Stage): obj = os.path.join(texturing, "odm_textured_model_geo.obj") if os.path.isfile(obj): unaligned_obj = io.related_file_path(obj, postfix="_unaligned") - if not os.path.isfile(unaligned_obj): - os.rename(obj, unaligned_obj) + os.rename(obj, unaligned_obj) try: - transform_obj(unaligned_obj, a_matrix, obj) + transform_obj(unaligned_obj, a_matrix, [reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset], obj) log.ODM_INFO("Transformed %s" % obj) except Exception as e: log.ODM_WARNING("Cannot transform textured model: %s" % str(e)) From d74f573dbf5b6c7b242de69161208e4f58b2eccf Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 9 Dec 2022 11:53:38 -0500 Subject: [PATCH 043/102] Handle TIFF inputs for --align --- opendm/align.py | 2 ++ opendm/types.py | 2 +- stages/mvstex.py | 4 ++++ stages/odm_georeferencing.py | 10 ++++++++-- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/opendm/align.py b/opendm/align.py index cd1b5df2..8b87f372 100644 --- a/opendm/align.py +++ b/opendm/align.py @@ -33,6 +33,8 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir): ) reg = app_reg.get_registration_transformation() + # print(dsm_reg.registration_parameters) + # print(icp_reg.registration_parameters) matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4)) return matrix diff --git a/opendm/types.py b/opendm/types.py index 4a930f2c..a3d769b6 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -296,7 +296,7 @@ class ODM_Tree(object): self.odm_georeferencing_gcp = gcp_file or io.find('gcp_list.txt', self.root_path) self.odm_georeferencing_gcp_utm = os.path.join(self.odm_georeferencing, 'gcp_list_utm.txt') self.odm_geo_file = geo_file or io.find('geo.txt', self.root_path) - self.odm_align_file = 
align_file or io.find('align.laz', self.root_path) + self.odm_align_file = align_file or io.find('align.laz', self.root_path) or io.find('align.tif', self.root_path) self.odm_georeferencing_proj = 'proj.txt' self.odm_georeferencing_model_txt_geo = os.path.join( diff --git a/stages/mvstex.py b/stages/mvstex.py index da8fd433..3f557627 100644 --- a/stages/mvstex.py +++ b/stages/mvstex.py @@ -68,11 +68,15 @@ class ODMMvsTexStage(types.ODM_Stage): system.mkdir_p(r['out_dir']) odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj) + unaligned_obj = io.related_file_path(odm_textured_model_obj, postfix="_unaligned") if not io.file_exists(odm_textured_model_obj) or self.rerun(): log.ODM_INFO('Writing MVS Textured file in: %s' % odm_textured_model_obj) + if os.path.isfile(unaligned_obj): + os.unlink(unaligned_obj) + # Format arguments to fit Mvs-Texturing app skipGlobalSeamLeveling = "" skipLocalSeamLeveling = "" diff --git a/stages/odm_georeferencing.py b/stages/odm_georeferencing.py index 4c5f7398..af0f07f7 100644 --- a/stages/odm_georeferencing.py +++ b/stages/odm_georeferencing.py @@ -30,6 +30,9 @@ class ODMGeoreferencingStage(types.ODM_Stage): gcp_export_file = tree.path("odm_georeferencing", "ground_control_points.gpkg") gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml") gcp_geojson_export_file = tree.path("odm_georeferencing", "ground_control_points.geojson") + unaligned_model = io.related_file_path(tree.odm_georeferencing_model_laz, postfix="_unaligned") + if os.path.isfile(unaligned_model) and self.rerun(): + os.unlink(unaligned_model) if reconstruction.has_gcp() and (not io.file_exists(gcp_export_file) or self.rerun()): octx = OSFMContext(tree.opensfm) @@ -179,7 +182,8 @@ class ODMGeoreferencingStage(types.ODM_Stage): log.ODM_INFO("Alignment matrix: %s" % a_matrix) # Align point cloud - unaligned_model = io.related_file_path(tree.odm_georeferencing_model_laz, postfix="_unaligned") + if 
os.path.isfile(unaligned_model): + os.rename(unaligned_model, tree.odm_georeferencing_model_laz) os.rename(tree.odm_georeferencing_model_laz, unaligned_model) try: @@ -188,12 +192,14 @@ class ODMGeoreferencingStage(types.ODM_Stage): except Exception as e: log.ODM_WARNING("Cannot transform point cloud: %s" % str(e)) os.rename(unaligned_model, tree.odm_georeferencing_model_laz) - + # Align textured models for texturing in [tree.odm_texturing, tree.odm_25dtexturing]: obj = os.path.join(texturing, "odm_textured_model_geo.obj") if os.path.isfile(obj): unaligned_obj = io.related_file_path(obj, postfix="_unaligned") + if os.path.isfile(unaligned_obj): + os.rename(unaligned_obj, obj) os.rename(obj, unaligned_obj) try: transform_obj(unaligned_obj, a_matrix, [reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset], obj) From 35db4ae89fb1035f4e22ceb4b339224e35cfa7ed Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 9 Dec 2022 12:39:09 -0500 Subject: [PATCH 044/102] Reproject alignment foundation if needed --- opendm/align.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/opendm/align.py b/opendm/align.py index 8b87f372..09ad88ce 100644 --- a/opendm/align.py +++ b/opendm/align.py @@ -5,13 +5,47 @@ import codem import dataclasses import pdal import numpy as np +from rasterio.crs import CRS from opendm import log +from opendm import io + +def get_point_cloud_crs(file): + pipeline = pdal.Pipeline(json.dumps([ file ])) + metadata = pipeline.quickinfo + + reader_metadata = [val for key, val in metadata.items() if "readers" in key] + crs = CRS.from_string(reader_metadata[0]["srs"]["horizontal"]) + return str(crs) + +def reproject_point_cloud(file, out_srs): + log.ODM_INFO("Reprojecting %s to %s" % (file, out_srs)) + out_file = io.related_file_path(file, postfix="_reprojected") + pipeline = pdal.Pipeline(json.dumps([ file, { + "type": "filters.reprojection", + "out_srs": out_srs + }, out_file])) + pipeline.execute() 
+ return out_file def compute_alignment_matrix(input_laz, align_file, stats_dir): if os.path.exists(stats_dir): shutil.rmtree(stats_dir) os.mkdir(stats_dir) + # Check if we need to reproject align file + input_crs = get_point_cloud_crs(input_laz) + log.ODM_INFO("Input CRS: %s" % input_crs) + + _, ext = os.path.splitext(align_file) + if ext.lower() in [".tif"]: + pass #TODO + elif ext.lower() in [".las", ".laz"]: + align_crs = get_point_cloud_crs(align_file) + log.ODM_INFO("Align CRS: %s" % align_crs) + if input_crs != get_point_cloud_crs(align_file): + # Reprojection needed + align_file = reproject_point_cloud(align_file, input_crs) + conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir)) fnd_obj, aoi_obj = codem.preprocess(conf) fnd_obj.prep() From 28c48c34e63314b49682cff9bad76d9ee9fb39c3 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Fri, 9 Dec 2022 13:13:21 -0500 Subject: [PATCH 045/102] Alignment DSM reprojection working --- opendm/align.py | 62 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 9 deletions(-) diff --git a/opendm/align.py b/opendm/align.py index 09ad88ce..ca33ce8e 100644 --- a/opendm/align.py +++ b/opendm/align.py @@ -5,9 +5,12 @@ import codem import dataclasses import pdal import numpy as np +import rasterio from rasterio.crs import CRS +from opendm.utils import double_quote from opendm import log from opendm import io +from opendm import system def get_point_cloud_crs(file): pipeline = pdal.Pipeline(json.dumps([ file ])) @@ -17,9 +20,12 @@ def get_point_cloud_crs(file): crs = CRS.from_string(reader_metadata[0]["srs"]["horizontal"]) return str(crs) +def get_raster_crs(file): + with rasterio.open(file, 'r') as f: + return str(f.crs) + def reproject_point_cloud(file, out_srs): - log.ODM_INFO("Reprojecting %s to %s" % (file, out_srs)) - out_file = io.related_file_path(file, postfix="_reprojected") + out_file = io.related_file_path(file, postfix="_reprojected_tmp") 
pipeline = pdal.Pipeline(json.dumps([ file, { "type": "filters.reprojection", "out_srs": out_srs @@ -27,6 +33,19 @@ def reproject_point_cloud(file, out_srs): pipeline.execute() return out_file +def reproject_raster(file, out_srs): + out_file = io.related_file_path(file, postfix="_reprojected") + kwargs = { + 'input': double_quote(file), + 'output': double_quote(out_file), + 'out_srs': out_srs, + } + system.run('gdalwarp ' + '-t_srs {out_srs} ' + '{input} ' + '{output} '.format(**kwargs)) + return out_file + def compute_alignment_matrix(input_laz, align_file, stats_dir): if os.path.exists(stats_dir): shutil.rmtree(stats_dir) @@ -37,14 +56,25 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir): log.ODM_INFO("Input CRS: %s" % input_crs) _, ext = os.path.splitext(align_file) + repr_func = None + if ext.lower() in [".tif"]: - pass #TODO + align_crs = get_raster_crs(align_file) + repr_func = reproject_raster elif ext.lower() in [".las", ".laz"]: align_crs = get_point_cloud_crs(align_file) - log.ODM_INFO("Align CRS: %s" % align_crs) - if input_crs != get_point_cloud_crs(align_file): - # Reprojection needed - align_file = reproject_point_cloud(align_file, input_crs) + repr_func = reproject_point_cloud + else: + log.ODM_WARNING("Unsupported alignment file: %s" % align_file) + return + + to_delete = [] + log.ODM_INFO("Align CRS: %s" % align_crs) + if input_crs != align_crs: + # Reprojection needed + log.ODM_INFO("Reprojecting %s to %s" % (align_file, input_crs)) + align_file = repr_func(align_file, input_crs) + to_delete.append(align_file) conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir)) fnd_obj, aoi_obj = codem.preprocess(conf) @@ -67,9 +97,23 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir): ) reg = app_reg.get_registration_transformation() - # print(dsm_reg.registration_parameters) - # print(icp_reg.registration_parameters) + + # Write JSON to stats folder + with open(os.path.join(stats_dir, 
"registration.json"), 'w') as f: + del dsm_reg.registration_parameters['matrix'] + del icp_reg.registration_parameters['matrix'] + + f.write(json.dumps({ + 'coarse': dsm_reg.registration_parameters, + 'fine': icp_reg.registration_parameters, + }, indent=4)) + matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4)) + + for f in to_delete: + if os.path.isfile(f): + os.unlink(f) + return matrix def transform_point_cloud(input_laz, a_matrix, output_laz): From 9f8eca2b83613e32b0bdd584e610caf76f8cf902 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 12 Dec 2022 20:09:29 +0000 Subject: [PATCH 046/102] Transform shots.geojson, bump opensfm, bump version --- SuperBuild/cmake/External-OpenSfM.cmake | 2 +- VERSION | 2 +- opendm/config.py | 4 ++-- opendm/shots.py | 7 ++++++- opendm/types.py | 3 +++ opendm/utils.py | 17 ++++++++++++++++- stages/dataset.py | 2 +- stages/odm_georeferencing.py | 23 +++++++++++++++++------ stages/odm_report.py | 17 +++++++++++++++-- 9 files changed, 62 insertions(+), 15 deletions(-) diff --git a/SuperBuild/cmake/External-OpenSfM.cmake b/SuperBuild/cmake/External-OpenSfM.cmake index d1801495..7ce9a002 100644 --- a/SuperBuild/cmake/External-OpenSfM.cmake +++ b/SuperBuild/cmake/External-OpenSfM.cmake @@ -25,7 +25,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/ - GIT_TAG 292 + GIT_TAG 302 #--Update/Patch step---------- UPDATE_COMMAND git submodule update --init --recursive #--Configure step------------- diff --git a/VERSION b/VERSION index cb2b00e4..b5021469 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.1 +3.0.2 diff --git a/opendm/config.py b/opendm/config.py index b8a39721..edb66b95 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -471,12 +471,12 @@ def config(argv=None, parser=None): 'image_name geo_x geo_y geo_z [omega (degrees)] [phi (degrees)] [kappa (degrees)] [horz accuracy (meters)] [vert 
accuracy (meters)]\n' 'Default: %(default)s')) - parser.add_argument('--align-to', + parser.add_argument('--align', metavar='', action=StoreValue, default=None, help=('Path to a GeoTIFF DEM or a LAS/LAZ point cloud ' - 'that the reconstruction outputs should be automatically aligned to. ' + 'that the reconstruction outputs should be automatically aligned to. Experimental. ' 'Default: %(default)s')) parser.add_argument('--use-exif', diff --git a/opendm/shots.py b/opendm/shots.py index 0440257e..b3956ee3 100644 --- a/opendm/shots.py +++ b/opendm/shots.py @@ -23,7 +23,7 @@ def get_origin(shot): """The origin of the pose in world coordinates.""" return -get_rotation_matrix(np.array(shot['rotation'])).T.dot(np.array(shot['translation'])) -def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None): +def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None, a_matrix=None): """ Extract shots from OpenSfM's reconstruction.json """ @@ -92,6 +92,11 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset utm_coords = [origin[0] + utm_offset[0], origin[1] + utm_offset[1], origin[2]] + + if a_matrix is not None: + rotation = list(np.array(rotation).dot(a_matrix[:3,:3])) + utm_coords = list(a_matrix.dot(np.hstack((np.array(utm_coords), 1)))[:-1]) + translation = utm_coords trans_coords = crstrans.TransformPoint(utm_coords[0], utm_coords[1], utm_coords[2]) diff --git a/opendm/types.py b/opendm/types.py index a3d769b6..b6d95f4a 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -307,6 +307,9 @@ class ODM_Tree(object): self.odm_georeferencing, 'odm_georeferenced_model.laz') self.odm_georeferencing_model_las = os.path.join( self.odm_georeferencing, 'odm_georeferenced_model.las') + self.odm_georeferencing_alignment_matrix = os.path.join( + self.odm_georeferencing, 'alignment_matrix.json' + ) # odm_orthophoto self.odm_orthophoto_render = 
os.path.join(self.odm_orthophoto, 'odm_orthophoto_render.tif') diff --git a/opendm/utils.py b/opendm/utils.py index 0729eb7c..85a32003 100644 --- a/opendm/utils.py +++ b/opendm/utils.py @@ -1,9 +1,18 @@ import os, shutil +import numpy as np +import json from opendm import log from opendm.photo import find_largest_photo_dims from osgeo import gdal from opendm.loghelpers import double_quote +class NumpyEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + return json.JSONEncoder.default(self, obj) + + def get_depthmap_resolution(args, photos): max_dims = find_largest_photo_dims(photos) min_dim = 320 # Never go lower than this @@ -98,4 +107,10 @@ def rm_r(path): elif os.path.exists(path): os.remove(path) except: - log.ODM_WARNING("Cannot remove %s" % path) \ No newline at end of file + log.ODM_WARNING("Cannot remove %s" % path) + +def np_to_json(arr): + return json.dumps(arr, cls=NumpyEncoder) + +def np_from_json(json_dump): + return np.asarray(json.loads(json_dump)) \ No newline at end of file diff --git a/stages/dataset.py b/stages/dataset.py index caf692b3..355fac4d 100644 --- a/stages/dataset.py +++ b/stages/dataset.py @@ -46,7 +46,7 @@ def load_images_database(database_file): class ODMLoadDatasetStage(types.ODM_Stage): def process(self, args, outputs): outputs['start_time'] = system.now_raw() - tree = types.ODM_Tree(args.project_path, args.gcp, args.geo, args.align_to) + tree = types.ODM_Tree(args.project_path, args.gcp, args.geo, args.align) outputs['tree'] = tree if io.file_exists(tree.benchmarking): diff --git a/stages/odm_georeferencing.py b/stages/odm_georeferencing.py index af0f07f7..563d0a91 100644 --- a/stages/odm_georeferencing.py +++ b/stages/odm_georeferencing.py @@ -1,4 +1,5 @@ import os +import shutil import struct import pipes import fiona @@ -19,6 +20,7 @@ from opendm.multispectral import get_primary_band_name from opendm.osfm import OSFMContext from opendm.boundary import as_polygon, 
export_to_bounds_files from opendm.align import compute_alignment_matrix, transform_point_cloud, transform_obj +from opendm.utils import np_to_json class ODMGeoreferencingStage(types.ODM_Stage): def process(self, args, outputs): @@ -172,11 +174,17 @@ class ODMGeoreferencingStage(types.ODM_Stage): system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params)) - if tree.odm_align_file is not None: - alignment_file = os.path.join(tree.odm_georeferencing, 'alignment_done.txt') + stats_dir = tree.path("opensfm", "stats", "codem") + if os.path.exists(stats_dir) and self.rerun(): + shutil.rmtree(stats_dir) + + if tree.odm_align_file is not None: + alignment_file_exists = io.file_exists(tree.odm_georeferencing_alignment_matrix) + + if not alignment_file_exists or self.rerun(): + if alignment_file_exists: + os.unlink(tree.odm_georeferencing_alignment_matrix) - if not io.file_exists(alignment_file) or self.rerun(): - stats_dir = tree.path("opensfm", "stats", "codem") a_matrix = compute_alignment_matrix(tree.odm_georeferencing_model_laz, tree.odm_align_file, stats_dir) if a_matrix is not None: log.ODM_INFO("Alignment matrix: %s" % a_matrix) @@ -207,12 +215,15 @@ class ODMGeoreferencingStage(types.ODM_Stage): except Exception as e: log.ODM_WARNING("Cannot transform textured model: %s" % str(e)) os.rename(unaligned_obj, obj) + + with open(tree.odm_georeferencing_alignment_matrix, "w") as f: + f.write(np_to_json(a_matrix)) else: log.ODM_WARNING("Alignment to %s will be skipped." 
% tree.odm_align_file) - - io.touch(alignment_file) else: log.ODM_WARNING("Already computed alignment") + elif io.file_exists(tree.odm_georeferencing_alignment_matrix): + os.unlink(tree.odm_georeferencing_alignment_matrix) point_cloud.post_point_cloud_steps(args, tree, self.rerun()) else: diff --git a/stages/odm_report.py b/stages/odm_report.py index 04d98cf3..19d9fe26 100644 --- a/stages/odm_report.py +++ b/stages/odm_report.py @@ -14,7 +14,7 @@ from opendm.point_cloud import export_info_json from opendm.cropper import Cropper from opendm.orthophoto import get_orthophoto_vars, get_max_memory, generate_png from opendm.tiles.tiler import generate_colored_hillshade -from opendm.utils import get_raster_stats +from opendm.utils import get_raster_stats, np_from_json def hms(seconds): h = seconds // 3600 @@ -49,7 +49,14 @@ class ODMReport(types.ODM_Stage): if not io.file_exists(shots_geojson) or self.rerun(): # Extract geographical camera shots if reconstruction.is_georeferenced(): - shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, utm_srs=reconstruction.get_proj_srs(), utm_offset=reconstruction.georef.utm_offset()) + # Check if alignment has been performed (we need to transform our shots if so) + a_matrix = None + if io.file_exists(tree.odm_georeferencing_alignment_matrix): + with open(tree.odm_georeferencing_alignment_matrix, 'r') as f: + a_matrix = np_from_json(f.read()) + log.ODM_INFO("Aligning shots to %s" % a_matrix) + + shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, utm_srs=reconstruction.get_proj_srs(), utm_offset=reconstruction.georef.utm_offset(), a_matrix=a_matrix) else: # Pseudo geo shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, pseudo_geotiff=tree.odm_orthophoto_tif) @@ -73,6 +80,7 @@ class ODMReport(types.ODM_Stage): odm_stats_json = os.path.join(tree.odm_report, "stats.json") octx = OSFMContext(tree.opensfm) osfm_stats_json = octx.path("stats", "stats.json") + codem_stats_json = 
octx.path("stats", "codem", "registration.json") odm_stats = None point_cloud_file = None views_dimension = None @@ -111,6 +119,11 @@ class ODMReport(types.ODM_Stage): 'average_gsd': gsd.opensfm_reconstruction_average_gsd(octx.recon_file(), use_all_shots=reconstruction.has_gcp()), } + # Add CODEM stats + if os.path.exists(codem_stats_json): + with open(codem_stats_json, 'r') as f: + odm_stats['align'] = json.loads(f.read()) + with open(odm_stats_json, 'w') as f: f.write(json.dumps(odm_stats)) else: From f728371074772d3ac83ad0c204f8030971a4b3b4 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 13 Dec 2022 11:08:11 -0500 Subject: [PATCH 047/102] Add CODEM dep --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 2483c092..f6629557 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,3 +31,4 @@ xmltodict==0.12.0 fpdf2==2.4.6 Shapely==1.7.1 onnxruntime==1.12.1 +codem==0.24.0 From e5bd090ba5b3e3b5f186ad6c9d0803fcc6619e1a Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 13 Dec 2022 11:21:19 -0500 Subject: [PATCH 048/102] Find align.las --- opendm/types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opendm/types.py b/opendm/types.py index b6d95f4a..be4b44ea 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -296,7 +296,7 @@ class ODM_Tree(object): self.odm_georeferencing_gcp = gcp_file or io.find('gcp_list.txt', self.root_path) self.odm_georeferencing_gcp_utm = os.path.join(self.odm_georeferencing, 'gcp_list_utm.txt') self.odm_geo_file = geo_file or io.find('geo.txt', self.root_path) - self.odm_align_file = align_file or io.find('align.laz', self.root_path) or io.find('align.tif', self.root_path) + self.odm_align_file = align_file or io.find('align.laz', self.root_path) or io.find('align.las', self.root_path) or io.find('align.tif', self.root_path) self.odm_georeferencing_proj = 'proj.txt' self.odm_georeferencing_model_txt_geo = os.path.join( From 
4137f20bd9654d9a1a481b01232a19485bbcaa9b Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 13 Dec 2022 11:32:09 -0500 Subject: [PATCH 049/102] Some optimization, exception handling --- opendm/align.py | 88 +++++++++++++++++++----------------- stages/odm_georeferencing.py | 7 ++- 2 files changed, 52 insertions(+), 43 deletions(-) diff --git a/opendm/align.py b/opendm/align.py index ca33ce8e..a5643ced 100644 --- a/opendm/align.py +++ b/opendm/align.py @@ -11,6 +11,7 @@ from opendm.utils import double_quote from opendm import log from opendm import io from opendm import system +from opendm.concurrency import get_max_memory def get_point_cloud_crs(file): pipeline = pdal.Pipeline(json.dumps([ file ])) @@ -34,16 +35,18 @@ def reproject_point_cloud(file, out_srs): return out_file def reproject_raster(file, out_srs): - out_file = io.related_file_path(file, postfix="_reprojected") + out_file = io.related_file_path(file, postfix="_reprojected_tmp") kwargs = { 'input': double_quote(file), 'output': double_quote(out_file), 'out_srs': out_srs, + 'max_memory': get_max_memory() } system.run('gdalwarp ' '-t_srs {out_srs} ' '{input} ' - '{output} '.format(**kwargs)) + '{output} ' + '--config GDAL_CACHEMAX {max_memory}% '.format(**kwargs)) return out_file def compute_alignment_matrix(input_laz, align_file, stats_dir): @@ -69,52 +72,53 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir): return to_delete = [] - log.ODM_INFO("Align CRS: %s" % align_crs) - if input_crs != align_crs: - # Reprojection needed - log.ODM_INFO("Reprojecting %s to %s" % (align_file, input_crs)) - align_file = repr_func(align_file, input_crs) - to_delete.append(align_file) - conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir)) - fnd_obj, aoi_obj = codem.preprocess(conf) - fnd_obj.prep() - aoi_obj.prep() - log.ODM_INFO("Aligning reconstruction to %s" % align_file) - log.ODM_INFO("Coarse registration...") - dsm_reg = codem.coarse_registration(fnd_obj, 
aoi_obj, conf) - log.ODM_INFO("Fine registration...") - icp_reg = codem.fine_registration(fnd_obj, aoi_obj, dsm_reg, conf) + try: + log.ODM_INFO("Align CRS: %s" % align_crs) + if input_crs != align_crs: + # Reprojection needed + log.ODM_INFO("Reprojecting %s to %s" % (align_file, input_crs)) + align_file = repr_func(align_file, input_crs) + to_delete.append(align_file) - app_reg = codem.registration.ApplyRegistration( - fnd_obj, - aoi_obj, - icp_reg.registration_parameters, - icp_reg.residual_vectors, - icp_reg.residual_origins, - conf, - None, - ) + conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir)) + fnd_obj, aoi_obj = codem.preprocess(conf) + fnd_obj.prep() + aoi_obj.prep() + log.ODM_INFO("Aligning reconstruction to %s" % align_file) + log.ODM_INFO("Coarse registration...") + dsm_reg = codem.coarse_registration(fnd_obj, aoi_obj, conf) + log.ODM_INFO("Fine registration...") + icp_reg = codem.fine_registration(fnd_obj, aoi_obj, dsm_reg, conf) - reg = app_reg.get_registration_transformation() - - # Write JSON to stats folder - with open(os.path.join(stats_dir, "registration.json"), 'w') as f: - del dsm_reg.registration_parameters['matrix'] - del icp_reg.registration_parameters['matrix'] + app_reg = codem.registration.ApplyRegistration( + fnd_obj, + aoi_obj, + icp_reg.registration_parameters, + icp_reg.residual_vectors, + icp_reg.residual_origins, + conf, + None, + ) - f.write(json.dumps({ - 'coarse': dsm_reg.registration_parameters, - 'fine': icp_reg.registration_parameters, - }, indent=4)) + reg = app_reg.get_registration_transformation() + + # Write JSON to stats folder + with open(os.path.join(stats_dir, "registration.json"), 'w') as f: + del dsm_reg.registration_parameters['matrix'] + del icp_reg.registration_parameters['matrix'] - matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4)) + f.write(json.dumps({ + 'coarse': dsm_reg.registration_parameters, + 'fine': icp_reg.registration_parameters, + }, 
indent=4)) - for f in to_delete: - if os.path.isfile(f): - os.unlink(f) - - return matrix + matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4)) + return matrix + finally: + for f in to_delete: + if os.path.isfile(f): + os.unlink(f) def transform_point_cloud(input_laz, a_matrix, output_laz): pipe = [ diff --git a/stages/odm_georeferencing.py b/stages/odm_georeferencing.py index 563d0a91..b86ba94b 100644 --- a/stages/odm_georeferencing.py +++ b/stages/odm_georeferencing.py @@ -185,7 +185,12 @@ class ODMGeoreferencingStage(types.ODM_Stage): if alignment_file_exists: os.unlink(tree.odm_georeferencing_alignment_matrix) - a_matrix = compute_alignment_matrix(tree.odm_georeferencing_model_laz, tree.odm_align_file, stats_dir) + a_matrix = None + try: + a_matrix = compute_alignment_matrix(tree.odm_georeferencing_model_laz, tree.odm_align_file, stats_dir) + except Exception as e: + log.ODM_WARNING("Cannot compute alignment matrix: %s" % str(e)) + if a_matrix is not None: log.ODM_INFO("Alignment matrix: %s" % a_matrix) From 8e72d30668efed473f32af56b54df67be59223da Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 13 Dec 2022 17:41:59 +0000 Subject: [PATCH 050/102] Add CODEM deps --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index f6629557..3084deb3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,3 +32,5 @@ fpdf2==2.4.6 Shapely==1.7.1 onnxruntime==1.12.1 codem==0.24.0 +trimesh==3.17.1 +pandas==1.5.2 From a330a8934d02b6ededb52fa0e6b226a008b19f93 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 13 Dec 2022 15:22:12 -0500 Subject: [PATCH 051/102] Set python path to build pdal-python --- SuperBuild/cmake/External-PDALPython.cmake | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/SuperBuild/cmake/External-PDALPython.cmake b/SuperBuild/cmake/External-PDALPython.cmake index be3e1e51..410b77bf 100644 --- a/SuperBuild/cmake/External-PDALPython.cmake +++ 
b/SuperBuild/cmake/External-PDALPython.cmake @@ -1,6 +1,11 @@ set(_proj_name pdal-python) set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}") +if (WIN32) + set(PP_EXTRA_ARGS -DPYTHON3_EXECUTABLE=${PYTHON_EXE_PATH} + -DPython3_NumPy_INCLUDE_DIRS=${PYTHON_HOME}/lib/site-packages/numpy/core/include) +endif() + ExternalProject_Add(${_proj_name} DEPENDS pdal PREFIX ${_SB_BINARY_DIR} @@ -19,6 +24,7 @@ ExternalProject_Add(${_proj_name} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}/lib/python3.8/dist-packages ${WIN32_CMAKE_ARGS} + ${PP_EXTRA_ARGS} #--Build step----------------- BINARY_DIR ${_SB_BINARY_DIR} #--Install step--------------- From fc0e0e46411eb19d1857b72d982b14bbcf4a2d8d Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Wed, 14 Dec 2022 21:57:30 +0100 Subject: [PATCH 052/102] Added video2dataset files --- opendm/video2dataset/__init__.py | 0 opendm/video2dataset/checkers.py | 109 +++++++++++++ opendm/video2dataset/parameters.py | 89 ++++++++++ opendm/video2dataset/requirements.txt | 3 + opendm/video2dataset/video2dataset.py | 223 ++++++++++++++++++++++++++ 5 files changed, 424 insertions(+) create mode 100644 opendm/video2dataset/__init__.py create mode 100644 opendm/video2dataset/checkers.py create mode 100644 opendm/video2dataset/parameters.py create mode 100644 opendm/video2dataset/requirements.txt create mode 100644 opendm/video2dataset/video2dataset.py diff --git a/opendm/video2dataset/__init__.py b/opendm/video2dataset/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/opendm/video2dataset/checkers.py b/opendm/video2dataset/checkers.py new file mode 100644 index 00000000..9b06c74a --- /dev/null +++ b/opendm/video2dataset/checkers.py @@ -0,0 +1,109 @@ +import cv2 +import numpy as np + +class PercentageBlurChecker: + def __init__(self, percentage): + self.percentage = percentage + self.cache = None + + def NeedPreProcess(self): + return True + + def PreProcess(self, video_path, start_frame, 
end_frame, width=800, height=600): + + # Open video file + cap = cv2.VideoCapture(video_path) + if (cap.isOpened() == False): + print("Error opening video stream or file") + return + + if start_frame is not None: + cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) + + tmp = [] + + frame_index = start_frame if start_frame is not None else 0 + while (cap.isOpened() and (end_frame is None or frame_index <= end_frame)): + + ret, frame = cap.read() + + if not ret: + break + + frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + frame_bw = cv2.resize(frame_bw, (width, height)) + var = cv2.Laplacian(frame_bw, cv2.CV_64F).var() + + tmp.append(var) + + frame_index += 1 + + cap.release() + + # Calculate threshold + self.threshold = np.percentile(tmp, self.percentage * 100) + start_frame = start_frame if start_frame is not None else 0 + + # Fill cache with frame blur scores and indices + self.cache = {i + start_frame: v for i, v in enumerate(tmp)} + + return + + def IsBlur(self, image_bw, id): + + if self.cache is None: + return 0, True + + if id not in self.cache: + return 0, True + + return self.cache[id], self.cache[id] < self.threshold + +class ThresholdBlurChecker: + def __init__(self, threshold): + self.threshold = threshold + + def NeedPreProcess(self): + return False + + def PreProcess(self, video_path, start_frame, end_frame, width=800, height=600): + return + + def IsBlur(self, image_bw, id): + var = cv2.Laplacian(image_bw, cv2.CV_64F).var() + return var, var < self.threshold + +class SimilarityChecker: + def __init__(self, threshold, max_features=500): + self.threshold = threshold + self.max_features = max_features + self.last_image = None + self.last_image_id = None + self.last_image_features = None + + def IsSimilar(self, image_bw, id): + + if self.last_image is None: + self.last_image = image_bw + self.last_image_id = id + self.last_image_features = cv2.goodFeaturesToTrack(image_bw, self.max_features, 0.01, 10) + return 0, False, None + + # Detect features + 
features, status, _ = cv2.calcOpticalFlowPyrLK(self.last_image, image_bw, self.last_image_features, None) + + # Filter out the "bad" features (i.e. those that are not tracked successfully) + good_features = features[status == 1] + good_features2 = self.last_image_features[status == 1] + + # Calculate the difference between the locations of the good features in the two frames + distance = np.average(np.abs(good_features2 - good_features)) + + res = distance < self.threshold + + if (not res): + self.last_image = image_bw + self.last_image_id = id + self.last_image_features = cv2.goodFeaturesToTrack(image_bw, self.max_features, 0.01, 10) + + return distance, res, self.last_image_id diff --git a/opendm/video2dataset/parameters.py b/opendm/video2dataset/parameters.py new file mode 100644 index 00000000..8b5c8b9e --- /dev/null +++ b/opendm/video2dataset/parameters.py @@ -0,0 +1,89 @@ + +import argparse +import os + +class Parameters: + def __init__(self): + + ap = argparse.ArgumentParser() + ap.add_argument("-i", "--input", required=True, help="path to input video file") + ap.add_argument("-o", "--output", required=True, help="path to output directory") + ap.add_argument("-s", "--start", type=int, help="start frame index") + ap.add_argument("-e", "--end", type=int, help="end frame index") + ap.add_argument("-or", "--output-resolution", type=str, help="Override output resolution (ex. 
640x480)") + ap.add_argument("-bp", "--blur-percentage", type=float, help="discard the lowest X percent of frames based on blur score (allowed values from 0.0 to 1.0)") + ap.add_argument("-bt", "--blur-threshold", type=float, help="blur measures that fall below this value will be considered 'blurry' (to be used in exclusion with -bp)") + ap.add_argument("-dt", "--distance-threshold", type=float, help="distance measures that fall below this value will be considered 'similar'") + ap.add_argument("-ff", "--frame-format", type=str, default="jpg", help="frame format (jpg, png, tiff, etc.)") + ap.add_argument("-st", "--stats-file", type=str, help="Save statistics to csv file") + args = vars(ap.parse_args()) + + if (not self.ValidateParameters(args)): + print("Invalid parameters") + exit() + + if not os.path.exists(args["output"]): + os.makedirs(args["output"]) + + self.input = args["input"] + self.output = args["output"] + self.start = args["start"] if args["start"] else 0 + self.end = args["end"] if args["end"] else None + self.blur_percentage = args["blur_percentage"] if args["blur_percentage"] else None + self.blur_threshold = args["blur_threshold"] if args["blur_threshold"] else None + self.distance_threshold = args["distance_threshold"] if args["distance_threshold"] else None + self.frame_format = args["frame_format"] + self.output_resolution = tuple(map(int, args["output_resolution"].split("x"))) if args["output_resolution"] else None + self.stats_file = args["stats_file"] if args["stats_file"] else None + + # We will resize the image to this size before processing + self.internal_width = 800 + self.internal_height = 600 + + # Validate parameters + def ValidateParameters(self, args): + + if not os.path.exists(args["input"]): + print("Input file does not exist") + return False + + if not os.path.exists(args["output"]): + os.makedirs(args["output"]) + + if args["start"] and args["start"] < 0: + print("Start frame index must be greater than 0") + return False + + if 
args["end"]: + if args["end"] < 0: + print("End frame index must be greater than 0") + return False + + if args["end"] < args["start"]: + print("End frame index must be greater than start frame index") + return False + + if args["blur_percentage"] and (args["blur_percentage"] < 0 or args["blur_percentage"] > 1): + print("Blur percentage must be in the range 0.0 to 1.0") + return False + + if args["blur_threshold"] and args["blur_threshold"] < 0: + print("Blur threshold must be greater than 0") + return False + + if args["distance_threshold"] and args["distance_threshold"] < 0: + print("Distance threshold must be greater than 0") + return False + + if args["output_resolution"]: + segs = args["output_resolution"].split("x") + if (len(segs) != 2): + print("Output resolution must be in the format WxH") + return False + + if (int(segs[0]) <= 0 or int(segs[1]) <= 0): + print("Output resolution must be in the format WxH") + return False + + return True + diff --git a/opendm/video2dataset/requirements.txt b/opendm/video2dataset/requirements.txt new file mode 100644 index 00000000..49332489 --- /dev/null +++ b/opendm/video2dataset/requirements.txt @@ -0,0 +1,3 @@ +opencv-python +opencv-contrib-python +numpy \ No newline at end of file diff --git a/opendm/video2dataset/video2dataset.py b/opendm/video2dataset/video2dataset.py new file mode 100644 index 00000000..cf142755 --- /dev/null +++ b/opendm/video2dataset/video2dataset.py @@ -0,0 +1,223 @@ +from math import floor +import time +import cv2 +import os + +from checkers import PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker + +# Input parameters: +# input = path to input video file +# output = path to output directory +# start = start frame index +# end = end frame index +# output_resolution = Override output resolution (ex. 
640x480) +# blur_percentage = discard the lowest X percent of frames based on blur score (allowed values from 0.0 to 1.0) +# blur_threshold = blur measures that fall below this value will be considered 'blurry' (to be used in exclusion with -bp) +# distance_threshold = distance measures that fall below this value will be considered 'similar' +# frame_format = frame format (jpg, png, tiff, etc.) +# stats_file = Save statistics to csv file +# internal_width = We will resize the image to this width before processing +# internal_height = We will resize the image to this height before processing + +class Parameters: + + def __init__(self): + + self.input = None + self.output = None + self.start = 0 + self.end = None + self.blur_percentage = None + self.blur_threshold = None + self.distance_threshold = None + self.frame_format = "jpg" + self.output_resolution = None + self.stats_file = None + + # We will resize the image to this size before processing + self.internal_width = 800 + self.internal_height = 600 + + +class Video2Dataset: + + def __init__(self, parameters): + + if not self.ValidateParameters(parameters): + raise Exception("Invalid parameters") + + self.parameters = parameters + + # We prioritize blur threshold over blur percentage. 
+ if parameters.blur_threshold is not None: + self.blur_checker = ThresholdBlurChecker(parameters.blur_threshold) + else: + if parameters.blur_percentage is not None: + self.blur_checker = PercentageBlurChecker(parameters.blur_percentage) + else: + self.blur_checker = None + + self.similarity_checker = SimilarityChecker(parameters.distance_threshold) if parameters.distance_threshold is not None else None + self.frame_index = parameters.start + self.orientation = None + self.f = None + + # Validate parameters + def ValidateParameters(self, args): + + if not os.path.exists(args.input): + print("Input file does not exist") + return False + + if not os.path.exists(args.output): + os.makedirs(args.output) + + if args.start and args.start < 0: + print("Start frame index must be greater than 0") + return False + + if args.end: + if args.end < 0: + print("End frame index must be greater than 0") + return False + + if args.end < args.start: + print("End frame index must be greater than start frame index") + return False + + if args.blur_percentage and (args.blur_percentage < 0 or args.blur_percentage > 1): + print("Blur percentage must be in the range 0.0 to 1.0") + return False + + if args.blur_threshold and args.blur_threshold < 0: + print("Blur threshold must be greater than 0") + return False + + if args.distance_threshold and args.distance_threshold < 0: + print("Distance threshold must be greater than 0") + return False + + if args.output_resolution: + segs = args.output_resolution.split("x") + if (len(segs) != 2): + print("Output resolution must be in the format WxH") + return False + + if (int(segs[0]) <= 0 or int(segs[1]) <= 0): + print("Output resolution must be in the format WxH") + return False + + return True + + def ProcessVideo(self): + + start = time.time() + + if (self.blur_checker is not None and self.blur_checker.NeedPreProcess()): + print("Preprocessing video...") + self.blur_checker.PreProcess(self.parameters.input, self.parameters.start, 
self.parameters.end, self.parameters.internal_width, self.parameters.internal_height) + end = time.time() + print("Preprocessing time: {:.2f}s".format(end - start)) + print("Calculated threshold is {}".format(self.blur_checker.threshold)) + + # open video file + cap = cv2.VideoCapture(self.parameters.input) + if (cap.isOpened() == False): + print("Error opening video stream or file") + return + + frames_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + if (self.parameters.stats_file is not None): + self.f = open(self.parameters.stats_file, "w") + if (self.blur_checker is not None): + self.f.write("blur_threshold;{}\n\n".format(self.blur_checker.threshold).replace(".", ",")) + self.f.write("frame_index;blur_score;is_blurry;last_frame_index;similarity_score;is_similar;written\n") + + if (self.parameters.start is not None): + cap.set(cv2.CAP_PROP_POS_FRAMES, self.parameters.start) + self.frame_index = self.parameters.start + start_frame = self.parameters.start + else: + start_frame = 0 + + frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else frames_count - start_frame + + while (cap.isOpened()): + ret, frame = cap.read() + + if not ret: + break + + if (self.parameters.end is not None and self.frame_index > self.parameters.end): + break + + # Calculate progress percentage + progress = floor((self.frame_index - start_frame + 1) / frames_to_process * 100) + print("[{:3d}%] Processing frame {}/{}: ".format(progress, self.frame_index - start_frame + 1, frames_to_process), end="") + + stats = self.ProcessFrame(frame) + + if stats is not None and self.parameters.stats_file is not None: + self.WriteStats(stats) + + cap.release() + if (self.f is not None): + self.f.close() + + end = time.time() + print("Total processing time: {:.2f}s".format(end - start)) + + + def ProcessFrame(self, frame): + + res = {"frame_index": self.frame_index} + + frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + frame_bw = cv2.resize(frame_bw, 
(self.parameters.internal_width, self.parameters.internal_height)) + + if (self.blur_checker is not None): + blur_score, is_blurry = self.blur_checker.IsBlur(frame_bw, self.frame_index) + res["blur_score"] = blur_score + res["is_blurry"] = is_blurry + + if is_blurry: + print ("blurry, skipping") + self.frame_index += 1 + return res + + if (self.similarity_checker is not None): + similarity_score, is_similar, last_frame_index = self.similarity_checker.IsSimilar(frame_bw, self.frame_index) + res["similarity_score"] = similarity_score + res["is_similar"] = is_similar + res["last_frame_index"] = last_frame_index + + if is_similar: + print ("similar to {}, skipping".format(self.similarity_checker.last_image_id)) + self.frame_index += 1 + return res + + self.SaveFrame(frame) + res["written"] = True + self.frame_index += 1 + + print ("saved") + + return res + + def SaveFrame(self, frame): + + if (self.parameters.output_resolution is not None): + frame = cv2.resize(frame, self.parameters.output_resolution) + + cv2.imwrite(os.path.join(self.parameters.output, + "frame_{}.{}".format(self.frame_index, self.parameters.frame_format)), frame) + + def WriteStats(self, stats): + self.f.write("{};{};{};{};{};{};{}\n".format(stats["frame_index"], + stats["blur_score"] if "blur_score" in stats else "", + stats["is_blurry"] if "is_blurry" in stats else "", + stats["last_frame_index"] if "last_frame_index" in stats else "", + stats["similarity_score"] if "similarity_score" in stats else "", + stats["is_similar"] if "is_similar" in stats else "", + stats["written"] if "written" in stats else "").replace(".", ",")) \ No newline at end of file From 1f1365708c698405e4e57ed8737ea622c5312dd3 Mon Sep 17 00:00:00 2001 From: twchambers Date: Thu, 15 Dec 2022 10:26:01 +0000 Subject: [PATCH 053/102] Update rollingshutter.py Added calibration value for Mavic Pro (=64). 
Value is valid for both Mavic Pro and Mavic Pro Platinum, as both use camera 'DJI FC220' --- opendm/rollingshutter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/opendm/rollingshutter.py b/opendm/rollingshutter.py index 28c8d23d..b8227f61 100644 --- a/opendm/rollingshutter.py +++ b/opendm/rollingshutter.py @@ -15,6 +15,7 @@ RS_DATABASE = { 'dji fc3170': 27, # DJI Mavic Air 2 'dji fc3411': 32, # DJI Mavic Air 2S + 'dji fc220': 64, # DJI Mavic Pro (Platinum) 'hasselblad l1d-20c': lambda p: 47 if p.get_capture_megapixels() < 17 else 56, # DJI Mavic 2 Pro (at 16:10 => 16.8MP 47ms, at 3:2 => 19.9MP 56ms. 4:3 has 17.7MP with same image height as 3:2 which can be concluded as same sensor readout) 'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms) From 1f76738e1874ac08d48cd296684e794c5b143984 Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Thu, 15 Dec 2022 16:00:12 +0100 Subject: [PATCH 054/102] Removed old files --- opendm/video2dataset/parameters.py | 89 --------------------------- opendm/video2dataset/requirements.txt | 3 - 2 files changed, 92 deletions(-) delete mode 100644 opendm/video2dataset/parameters.py delete mode 100644 opendm/video2dataset/requirements.txt diff --git a/opendm/video2dataset/parameters.py b/opendm/video2dataset/parameters.py deleted file mode 100644 index 8b5c8b9e..00000000 --- a/opendm/video2dataset/parameters.py +++ /dev/null @@ -1,89 +0,0 @@ - -import argparse -import os - -class Parameters: - def __init__(self): - - ap = argparse.ArgumentParser() - ap.add_argument("-i", "--input", required=True, help="path to input video file") - ap.add_argument("-o", "--output", required=True, help="path to output directory") - ap.add_argument("-s", "--start", type=int, help="start frame index") - ap.add_argument("-e", "--end", type=int, help="end frame index") - ap.add_argument("-or", "--output-resolution", type=str, help="Override output resolution (ex. 
640x480)") - ap.add_argument("-bp", "--blur-percentage", type=float, help="discard the lowest X percent of frames based on blur score (allowed values from 0.0 to 1.0)") - ap.add_argument("-bt", "--blur-threshold", type=float, help="blur measures that fall below this value will be considered 'blurry' (to be used in exclusion with -bp)") - ap.add_argument("-dt", "--distance-threshold", type=float, help="distance measures that fall below this value will be considered 'similar'") - ap.add_argument("-ff", "--frame-format", type=str, default="jpg", help="frame format (jpg, png, tiff, etc.)") - ap.add_argument("-st", "--stats-file", type=str, help="Save statistics to csv file") - args = vars(ap.parse_args()) - - if (not self.ValidateParameters(args)): - print("Invalid parameters") - exit() - - if not os.path.exists(args["output"]): - os.makedirs(args["output"]) - - self.input = args["input"] - self.output = args["output"] - self.start = args["start"] if args["start"] else 0 - self.end = args["end"] if args["end"] else None - self.blur_percentage = args["blur_percentage"] if args["blur_percentage"] else None - self.blur_threshold = args["blur_threshold"] if args["blur_threshold"] else None - self.distance_threshold = args["distance_threshold"] if args["distance_threshold"] else None - self.frame_format = args["frame_format"] - self.output_resolution = tuple(map(int, args["output_resolution"].split("x"))) if args["output_resolution"] else None - self.stats_file = args["stats_file"] if args["stats_file"] else None - - # We will resize the image to this size before processing - self.internal_width = 800 - self.internal_height = 600 - - # Validate parameters - def ValidateParameters(self, args): - - if not os.path.exists(args["input"]): - print("Input file does not exist") - return False - - if not os.path.exists(args["output"]): - os.makedirs(args["output"]) - - if args["start"] and args["start"] < 0: - print("Start frame index must be greater than 0") - return False - - if 
args["end"]: - if args["end"] < 0: - print("End frame index must be greater than 0") - return False - - if args["end"] < args["start"]: - print("End frame index must be greater than start frame index") - return False - - if args["blur_percentage"] and (args["blur_percentage"] < 0 or args["blur_percentage"] > 1): - print("Blur percentage must be in the range 0.0 to 1.0") - return False - - if args["blur_threshold"] and args["blur_threshold"] < 0: - print("Blur threshold must be greater than 0") - return False - - if args["distance_threshold"] and args["distance_threshold"] < 0: - print("Distance threshold must be greater than 0") - return False - - if args["output_resolution"]: - segs = args["output_resolution"].split("x") - if (len(segs) != 2): - print("Output resolution must be in the format WxH") - return False - - if (int(segs[0]) <= 0 or int(segs[1]) <= 0): - print("Output resolution must be in the format WxH") - return False - - return True - diff --git a/opendm/video2dataset/requirements.txt b/opendm/video2dataset/requirements.txt deleted file mode 100644 index 49332489..00000000 --- a/opendm/video2dataset/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -opencv-python -opencv-contrib-python -numpy \ No newline at end of file From c60763a312a25b77852ff334ed38e6beaf96f1c1 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 15 Dec 2022 12:42:50 -0500 Subject: [PATCH 055/102] Rename folder, minor import fix --- opendm/{video2dataset => video}/__init__.py | 0 opendm/{video2dataset => video}/checkers.py | 0 opendm/{video2dataset => video}/video2dataset.py | 2 +- 3 files changed, 1 insertion(+), 1 deletion(-) rename opendm/{video2dataset => video}/__init__.py (100%) rename opendm/{video2dataset => video}/checkers.py (100%) rename opendm/{video2dataset => video}/video2dataset.py (98%) diff --git a/opendm/video2dataset/__init__.py b/opendm/video/__init__.py similarity index 100% rename from opendm/video2dataset/__init__.py rename to opendm/video/__init__.py diff 
--git a/opendm/video2dataset/checkers.py b/opendm/video/checkers.py similarity index 100% rename from opendm/video2dataset/checkers.py rename to opendm/video/checkers.py diff --git a/opendm/video2dataset/video2dataset.py b/opendm/video/video2dataset.py similarity index 98% rename from opendm/video2dataset/video2dataset.py rename to opendm/video/video2dataset.py index cf142755..78ed18b8 100644 --- a/opendm/video2dataset/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -3,7 +3,7 @@ import time import cv2 import os -from checkers import PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker +from .checkers import PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker # Input parameters: # input = path to input video file From 49a755491065e4fa03c581c3f3b4561fbd9ff48d Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Thu, 15 Dec 2022 19:01:22 +0100 Subject: [PATCH 056/102] Added threshold parameter in init --- opendm/video/checkers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/opendm/video/checkers.py b/opendm/video/checkers.py index 9b06c74a..b67390f9 100644 --- a/opendm/video/checkers.py +++ b/opendm/video/checkers.py @@ -5,6 +5,7 @@ class PercentageBlurChecker: def __init__(self, percentage): self.percentage = percentage self.cache = None + self.threshold = None def NeedPreProcess(self): return True From 62abebb74159f982fd2f0fa7a2b9a9db088d0049 Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Thu, 15 Dec 2022 19:12:59 +0100 Subject: [PATCH 057/102] Added exception when PreProcess fails --- opendm/video/checkers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/opendm/video/checkers.py b/opendm/video/checkers.py index b67390f9..53aa2c68 100644 --- a/opendm/video/checkers.py +++ b/opendm/video/checkers.py @@ -15,8 +15,7 @@ class PercentageBlurChecker: # Open video file cap = cv2.VideoCapture(video_path) if (cap.isOpened() == False): - print("Error opening video stream or file") - return + raise Exception("Error opening 
video stream or file") if start_frame is not None: cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) From d4846ad7d0392e296175aaddc3d8065c21c10c0a Mon Sep 17 00:00:00 2001 From: HeDo Date: Fri, 16 Dec 2022 01:07:42 +0100 Subject: [PATCH 058/102] Switched WITH_FFMPEG flag to ON --- SuperBuild/cmake/External-OpenCV.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SuperBuild/cmake/External-OpenCV.cmake b/SuperBuild/cmake/External-OpenCV.cmake index 812d4873..293d788a 100644 --- a/SuperBuild/cmake/External-OpenCV.cmake +++ b/SuperBuild/cmake/External-OpenCV.cmake @@ -55,7 +55,7 @@ ExternalProject_Add(${_proj_name} -DBUILD_opencv_photo=ON -DBUILD_opencv_legacy=ON -DBUILD_opencv_python3=ON - -DWITH_FFMPEG=OFF + -DWITH_FFMPEG=ON -DWITH_CUDA=OFF -DWITH_GTK=OFF -DWITH_VTK=OFF From 70155e35e8dc31bf63347479b0e6b7f9cdbf91ca Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 20 Dec 2022 10:50:42 -0500 Subject: [PATCH 059/102] Add --feature-quality help warning --- opendm/gpu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opendm/gpu.py b/opendm/gpu.py index c92cad12..fa0712dc 100644 --- a/opendm/gpu.py +++ b/opendm/gpu.py @@ -26,7 +26,7 @@ def has_popsift_and_can_handle_texsize(width, height): from opensfm import pypopsift fits = pypopsift.fits_texture(int(width * 1.02), int(height * 1.02)) if not fits: - log.ODM_WARNING("Image size (%sx%spx) would not fit in GPU memory, falling back to CPU" % (width, height)) + log.ODM_WARNING("Image size (%sx%spx) would not fit in GPU memory, try lowering --feature-quality. 
Falling back to CPU" % (width, height)) return fits except (ModuleNotFoundError, ImportError): return False From 214b6ef9afa0f84644722d4f42ed1476ee3ba4a8 Mon Sep 17 00:00:00 2001 From: Julien <6372605+fetzu@users.noreply.github.com> Date: Sat, 24 Dec 2022 15:23:32 +0100 Subject: [PATCH 060/102] feat: added "DJI Mavic Air 1" to rollingshutter.py --- opendm/rollingshutter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/opendm/rollingshutter.py b/opendm/rollingshutter.py index b8227f61..908af1cb 100644 --- a/opendm/rollingshutter.py +++ b/opendm/rollingshutter.py @@ -12,6 +12,7 @@ RS_DATABASE = { 'dji fc6310': 33, # Phantom 4 Professional 'dji fc7203': 20, # Mavic Mini v1 + 'dji fc2103': 32, # DJI Mavic Air 1 'dji fc3170': 27, # DJI Mavic Air 2 'dji fc3411': 32, # DJI Mavic Air 2S From 3d08c09ac9da56852a63826e939b90934c18d356 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 28 Dec 2022 14:21:29 -0500 Subject: [PATCH 061/102] Cleanup orthophoto_render.tif --- stages/odm_orthophoto.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stages/odm_orthophoto.py b/stages/odm_orthophoto.py index 39e2bf60..16efdd7c 100644 --- a/stages/odm_orthophoto.py +++ b/stages/odm_orthophoto.py @@ -165,5 +165,5 @@ class ODMOrthoPhotoStage(types.ODM_Stage): else: log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_tif) - if args.optimize_disk_space and io.file_exists(tree.odm_orthophoto_render): + if io.file_exists(tree.odm_orthophoto_render): os.remove(tree.odm_orthophoto_render) From 8410186b663645e5480b058a57fde477aead0c40 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 3 Jan 2023 08:07:54 -0500 Subject: [PATCH 062/102] Don't let PDAL resolve canonical paths while searching for drivers --- Dockerfile | 3 ++- gpu.Dockerfile | 3 ++- portable.Dockerfile | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index dbe6e7ae..85708413 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,7 +29,8 @@ 
FROM ubuntu:21.04 # Env variables ENV DEBIAN_FRONTEND=noninteractive \ PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \ - LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \ + PDAL_DRIVER_PATH="/code/SuperBuild/install/bin" WORKDIR /code diff --git a/gpu.Dockerfile b/gpu.Dockerfile index b7e6d7ec..e37c76a7 100644 --- a/gpu.Dockerfile +++ b/gpu.Dockerfile @@ -27,7 +27,8 @@ FROM nvidia/cuda:11.2.0-runtime-ubuntu20.04 # Env variables ENV DEBIAN_FRONTEND=noninteractive \ PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \ - LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \ + PDAL_DRIVER_PATH="/code/SuperBuild/install/bin" WORKDIR /code diff --git a/portable.Dockerfile b/portable.Dockerfile index c4b288f5..75c9aa11 100644 --- a/portable.Dockerfile +++ b/portable.Dockerfile @@ -29,7 +29,8 @@ FROM ubuntu:21.04 # Env variables ENV DEBIAN_FRONTEND=noninteractive \ PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \ - LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \ + PDAL_DRIVER_PATH="/code/SuperBuild/install/bin" WORKDIR /code From 9efea5a966d38feacf94c5e560b2f12128d9d83e Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Mon, 9 Jan 2023 11:45:22 +0100 Subject: [PATCH 063/102] Added black frame detection, srt parser and exif tags population --- opendm/video/checkers.py | 84 ++++++++- opendm/video/parameters.py | 141 ++++++++++++++ opendm/video/srtparser.py | 124 ++++++++++++ 
opendm/video/video2dataset.py | 344 ++++++++++++++++++++-------------- 4 files changed, 555 insertions(+), 138 deletions(-) create mode 100644 opendm/video/parameters.py create mode 100644 opendm/video/srtparser.py diff --git a/opendm/video/checkers.py b/opendm/video/checkers.py index 53aa2c68..1eadec40 100644 --- a/opendm/video/checkers.py +++ b/opendm/video/checkers.py @@ -5,7 +5,6 @@ class PercentageBlurChecker: def __init__(self, percentage): self.percentage = percentage self.cache = None - self.threshold = None def NeedPreProcess(self): return True @@ -15,7 +14,8 @@ class PercentageBlurChecker: # Open video file cap = cv2.VideoCapture(video_path) if (cap.isOpened() == False): - raise Exception("Error opening video stream or file") + print("Error opening video stream or file") + return if start_frame is not None: cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) @@ -107,3 +107,83 @@ class SimilarityChecker: self.last_image_features = cv2.goodFeaturesToTrack(image_bw, self.max_features, 0.01, 10) return distance, res, self.last_image_id + + +class NaiveBlackFrameChecker: + def __init__(self, threshold): + self.threshold = threshold + + def PreProcess(self, video_path, start_frame, end_frame, width=800, height=600): + return + + def NeedPreProcess(self): + return False + + def IsBlack(self, image_bw, id): + return np.average(image_bw) < self.threshold + + +class BlackFrameChecker: + def __init__(self, picture_black_ratio_th=0.98, pixel_black_th=0.30): + self.picture_black_ratio_th = picture_black_ratio_th if picture_black_ratio_th is not None else 0.98 + self.pixel_black_th = pixel_black_th if pixel_black_th is not None else 0.30 + self.luminance_minimum_value = None + self.luminance_range_size = None + self.absolute_threshold = None + + def NeedPreProcess(self): + return True + + def PreProcess(self, video_path, start_frame, end_frame, width=800, height=600): + # Open video file + cap = cv2.VideoCapture(video_path) + + # Set frame start and end indices + 
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) + frame_end = end_frame + if end_frame == -1: + frame_end = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + # Initialize luminance range size and minimum value + self.luminance_range_size = 0 + self.luminance_minimum_value = 255 + + frame_index = start_frame if start_frame is not None else 0 + + # Read and process frames from video file + while (cap.isOpened() and (end_frame is None or frame_index <= end_frame)): + + ret, frame = cap.read() + if not ret: + break + + # Resize frame to specified width and height + frame = cv2.resize(frame, (width, height)) + + # Convert frame to grayscale + gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + gray_frame_min = gray_frame.min() + gray_frame_max = gray_frame.max() + + # Update luminance range size and minimum value + self.luminance_range_size = max(self.luminance_range_size, gray_frame_max - gray_frame_min) + self.luminance_minimum_value = min(self.luminance_minimum_value, gray_frame_min) + + frame_index += 1 + + # Calculate absolute threshold for considering a pixel "black" + self.absolute_threshold = self.luminance_minimum_value + self.pixel_black_th * self.luminance_range_size + + # Close video file + cap.release() + + def IsBlack(self, image_bw, id): + + # Count number of pixels < self.absolute_threshold + nb_black_pixels = np.sum(image_bw < self.absolute_threshold) + + # Calculate ratio of black pixels + ratio_black_pixels = nb_black_pixels / (image_bw.shape[0] * image_bw.shape[1]) + + # Check if ratio of black pixels is above threshold + return ratio_black_pixels >= self.picture_black_ratio_th \ No newline at end of file diff --git a/opendm/video/parameters.py b/opendm/video/parameters.py new file mode 100644 index 00000000..13a9dc16 --- /dev/null +++ b/opendm/video/parameters.py @@ -0,0 +1,141 @@ + +import argparse +import datetime +import os + +class Parameters: + + input = None + output = None + start = None + end = None + output_resolution = None + blur_percentage = 
None + blur_threshold = None + distance_threshold = None + black_ratio_threshold = None + pixel_black_threshold = None + use_srt = None + timezone = None + frame_format = None + stats_file = None + + def __init__(self, args): + + # "input" -> path to input video file(s), use ',' to separate multiple files") + # "output" -> path to output directory") + # "start" -> start frame index") + # "end" -> end frame index") + # "output-resolution" -> Override output resolution (ex. 640x480)") + # "blur-percentage" -> discard the lowest X percent of frames based on blur score (allowed values from 0.0 to 1.0)") + # "blur-threshold" -> blur measures that fall below this value will be considered 'blurry' (to be used in exclusion with --blur-percentage)") + # "distance-threshold" -> distance measures that fall below this value will be considered 'similar'") + # "black-ratio-threshold" -> Set the threshold for considering a frame 'black'. Express the minimum value for the ratio: nb_black_pixels / nb_pixels. Default value is 0.98") + # "pixel-black-threshold" -> Set the threshold for considering a pixel 'black'. The threshold expresses the maximum pixel luminance value for which a pixel is considered 'black'. Good value is 0.30 (30%)") + # "use-srt" -> Use SRT files for extracting metadata (same name as video file with .srt extension)") + # "timezone" -> UTC timezone offset (ex. -5 for EST). 
Default to local timezone") + # "frame-format" -> frame format (jpg, png, tiff, etc.)") + # "stats-file" -> Save statistics to csv file") + + if (not self.ValidateParameters(args)): + print("Invalid parameters") + exit() + + if not os.path.exists(args["output"]): + os.makedirs(args["output"]) + + self.input = args["input"].split(",") + self.output = args["output"] + + self.start = args["start"] if args["start"] else 0 + self.end = args["end"] if args["end"] else None + + self.blur_percentage = args["blur_percentage"] if args["blur_percentage"] else None + self.blur_threshold = args["blur_threshold"] if args["blur_threshold"] else None + + self.distance_threshold = args["distance_threshold"] if args["distance_threshold"] else None + + self.black_ratio_threshold = args["black_ratio_threshold"] if args["black_ratio_threshold"] else None + self.pixel_black_threshold = args["pixel_black_threshold"] if args["pixel_black_threshold"] else None + + self.use_srt = args["use_srt"] + + self.utc_offset = datetime.timedelta(hours=int(args["timezone"])) if args["timezone"] != "local" else datetime.datetime.now() - datetime.datetime.utcnow() + + self.frame_format = args["frame_format"] + self.output_resolution = tuple(map(int, args["output_resolution"].split("x"))) if args["output_resolution"] else None + + self.stats_file = args["stats_file"] if args["stats_file"] else None + + # We will resize the image to this size before processing + self.internal_width = 800 + self.internal_height = 600 + + # Validate parameters + def ValidateParameters(self, args): + + # input can be a list of files comma separated, check for each file + files = args["input"].split(",") + for file in files: + if not os.path.exists(file): + print("Input file does not exist: " + file) + return False + + if not os.path.exists(args["output"]): + os.makedirs(args["output"]) + + if args["start"] and args["start"] < 0: + print("Start frame index must be greater than 0") + return False + + if args["end"]: + if 
args["end"] < 0: + print("End frame index must be greater than 0") + return False + + if args["end"] < args["start"]: + print("End frame index must be greater than start frame index") + return False + + if args["timezone"] and args["timezone"] != "local": + try: + val = int(args["timezone"]) + if val < -12 or val > 14: + print("Timezone must be in the range -12 to 14") + return False + except ValueError: + print("Timezone must be a valid integer") + return False + + if args["blur_percentage"] and (args["blur_percentage"] < 0 or args["blur_percentage"] > 1): + print("Blur percentage must be in the range 0.0 to 1.0") + return False + + if args["blur_threshold"] and args["blur_threshold"] < 0: + print("Blur threshold must be greater than 0") + return False + + if args["distance_threshold"] and args["distance_threshold"] < 0: + print("Distance threshold must be greater than 0") + return False + + if args["black_ratio_threshold"] and (args["black_ratio_threshold"] < 0 or args["black_ratio_threshold"] > 1): + print("Black ratio threshold must be in the range 0.0 to 1.0") + return False + + if args["pixel_black_threshold"] and (args["pixel_black_threshold"] < 0 or args["pixel_black_threshold"] > 1): + print("Pixel black threshold must be in the range 0.0 to 1.0") + return False + + if args["output_resolution"]: + segs = args["output_resolution"].split("x") + if (len(segs) != 2): + print("Output resolution must be in the format WxH") + return False + + if (int(segs[0]) <= 0 or int(segs[1]) <= 0): + print("Output resolution must be in the format WxH") + return False + + return True + diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py new file mode 100644 index 00000000..28e6f0ae --- /dev/null +++ b/opendm/video/srtparser.py @@ -0,0 +1,124 @@ +from datetime import datetime +import re +class SrtFileParser: + def __init__(self, filename, utc_offset): + self.filename = filename + self.data = [] + self.utc_offset = utc_offset + + def get_entry(self, timestamp): + 
if not self.data: + self.parse() + + for entry in self.data: + if entry["timestamp"] <= timestamp: + return entry + + return None + + def parse(self): + + # The SRT file is formatted this way: + + # 1 + # 00:00:00,000 --> 00:00:00,016 + # SrtCnt : 1, DiffTime : 16ms + # 2023-01-06 18:56:48,380,821 + # [iso : 3200] [shutter : 1/60.0] [fnum : 280] [ev : 0] [ct : 3925] [color_md : default] [focal_len : 240] [latitude: 0.000000] [longitude: 0.000000] [altitude: 0.000000] + + with open(self.filename, 'r') as f: + + for line in f: + + # Check if line is empty + if not line.strip(): + if srtcnt is not None: + self.data.append({ + "srtcnt": srtcnt, + "difftime": difftime, + "timestamp": timestamp, + "iso": iso, + "shutter": shutter, + "fnum": fnum, + "ev": ev, + "ct": ct, + "color_md": color_md, + "focal_len": focal_len, + "latitude": latitude, + "longitude": longitude, + "altitude": altitude + }) + srtcnt = None + difftime = None + timestamp = None + iso = None + shutter = None + fnum = None + ev = None + ct = None + color_md = None + focal_len = None + latitude = None + longitude = None + altitude = None + continue + + # Remove the html font tag + line = re.sub('<[^<]+?>', '', line) + + match = re.search("SrtCnt : (\d+)", line) + if match: + srtcnt = int(match.group(1)) + + match = re.search("DiffTime : (\d+)ms", line) + if match: + difftime = int(match.group(1)) + + match = re.search("(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})", line) + if match: + timestamp = match.group(1) + timestamp = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S,%f") + # The timestamp is in local time, so we need to subtract the UTC offset + timestamp = timestamp - self.utc_offset + + match = re.search("iso : (\d+)", line) + if match: + iso = int(match.group(1)) + + match = re.search("shutter : (\d+/\d+.\d+)", line) + if match: + shutter = match.group(1) + + match = re.search("fnum : (\d+)", line) + if match: + fnum = int(match.group(1)) + + match = re.search("ev : (\d+)", line) + if match: + ev 
= int(match.group(1)) + + match = re.search("ct : (\d+)", line) + if match: + ct = int(match.group(1)) + + match = re.search("color_md : (\w+)", line) + if match: + color_md = match.group(1) + + match = re.search("focal_len : (\d+)", line) + if match: + focal_len = int(match.group(1)) + + match = re.search("latitude: (\d+.\d+)", line) + if match: + latitude = float(match.group(1)) + + match = re.search("longitude: (\d+.\d+)", line) + if match: + longitude = float(match.group(1)) + + match = re.search("altitude: (\d+.\d+)", line) + if match: + altitude = float(match.group(1)) + + diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 78ed18b8..9f1634b8 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -1,51 +1,18 @@ +import datetime from math import floor import time import cv2 import os - -from .checkers import PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker - -# Input parameters: -# input = path to input video file -# output = path to output directory -# start = start frame index -# end = end frame index -# output_resolution = Override output resolution (ex. 640x480) -# blur_percentage = discard the lowest X percent of frames based on blur score (allowed values from 0.0 to 1.0) -# blur_threshold = blur measures that fall below this value will be considered 'blurry' (to be used in exclusion with -bp) -# distance_threshold = distance measures that fall below this value will be considered 'similar' -# frame_format = frame format (jpg, png, tiff, etc.) 
-# stats_file = Save statistics to csv file -# internal_width = We will resize the image to this width before processing -# internal_height = We will resize the image to this height before processing - -class Parameters: - - def __init__(self): - - self.input = None - self.output = None - self.start = 0 - self.end = None - self.blur_percentage = None - self.blur_threshold = None - self.distance_threshold = None - self.frame_format = "jpg" - self.output_resolution = None - self.stats_file = None - - # We will resize the image to this size before processing - self.internal_width = 800 - self.internal_height = 600 - +from opendm.video.parameters import Parameters +import pymediainfo +import collections +from exif import Image, Orientation +from checkers import BlackFrameChecker, PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker +from srtparser import SrtFileParser class Video2Dataset: - def __init__(self, parameters): - - if not self.ValidateParameters(parameters): - raise Exception("Invalid parameters") - + def __init__(self, parameters: Parameters): self.parameters = parameters # We prioritize blur threshold over blur percentage. 
@@ -58,120 +25,117 @@ class Video2Dataset: self.blur_checker = None self.similarity_checker = SimilarityChecker(parameters.distance_threshold) if parameters.distance_threshold is not None else None + self.black_checker = BlackFrameChecker(parameters.black_ratio_threshold, parameters.pixel_black_threshold) if parameters.black_ratio_threshold is not None or parameters.pixel_black_threshold is not None else None + self.frame_index = parameters.start - self.orientation = None self.f = None - # Validate parameters - def ValidateParameters(self, args): - - if not os.path.exists(args.input): - print("Input file does not exist") - return False - - if not os.path.exists(args.output): - os.makedirs(args.output) - - if args.start and args.start < 0: - print("Start frame index must be greater than 0") - return False - - if args.end: - if args.end < 0: - print("End frame index must be greater than 0") - return False - - if args.end < args.start: - print("End frame index must be greater than start frame index") - return False - - if args.blur_percentage and (args.blur_percentage < 0 or args.blur_percentage > 1): - print("Blur percentage must be in the range 0.0 to 1.0") - return False - - if args.blur_threshold and args.blur_threshold < 0: - print("Blur threshold must be greater than 0") - return False - - if args.distance_threshold and args.distance_threshold < 0: - print("Distance threshold must be greater than 0") - return False - - if args.output_resolution: - segs = args.output_resolution.split("x") - if (len(segs) != 2): - print("Output resolution must be in the format WxH") - return False - - if (int(segs[0]) <= 0 or int(segs[1]) <= 0): - print("Output resolution must be in the format WxH") - return False - - return True def ProcessVideo(self): start = time.time() - if (self.blur_checker is not None and self.blur_checker.NeedPreProcess()): - print("Preprocessing video...") - self.blur_checker.PreProcess(self.parameters.input, self.parameters.start, self.parameters.end, 
self.parameters.internal_width, self.parameters.internal_height) - end = time.time() - print("Preprocessing time: {:.2f}s".format(end - start)) - print("Calculated threshold is {}".format(self.blur_checker.threshold)) - - # open video file - cap = cv2.VideoCapture(self.parameters.input) - if (cap.isOpened() == False): - print("Error opening video stream or file") - return - - frames_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - if (self.parameters.stats_file is not None): self.f = open(self.parameters.stats_file, "w") - if (self.blur_checker is not None): - self.f.write("blur_threshold;{}\n\n".format(self.blur_checker.threshold).replace(".", ",")) - self.f.write("frame_index;blur_score;is_blurry;last_frame_index;similarity_score;is_similar;written\n") + self.f.write("global_idx;file_name;frame_index;blur_score;is_blurry;is_black;last_frame_index;similarity_score;is_similar;written\n") - if (self.parameters.start is not None): - cap.set(cv2.CAP_PROP_POS_FRAMES, self.parameters.start) - self.frame_index = self.parameters.start - start_frame = self.parameters.start - else: - start_frame = 0 + self.global_idx = 0 - frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else frames_count - start_frame + # foreach input file + for input_file in self.parameters.input: - while (cap.isOpened()): - ret, frame = cap.read() + # get file name + file_name = os.path.basename(input_file) - if not ret: - break + print("Processing video: {}".format(input_file)) - if (self.parameters.end is not None and self.frame_index > self.parameters.end): - break + if self.parameters.use_srt: + srt_file = os.path.splitext(input_file)[0] + ".srt" + if os.path.exists(srt_file): + print("Loading SRT file: {}".format(srt_file)) + srt_parser = SrtFileParser(srt_file, self.parameters.utc_offset) + srt_parser.parse() + else: + srt_file = os.path.splitext(input_file)[0] + ".SRT" + if os.path.exists(srt_file): + print("Loading SRT file: {}".format(srt_file)) + 
srt_parser = SrtFileParser(srt_file, self.parameters.utc_offset) + srt_parser.parse() + else: + print("SRT file not found: {}".format(srt_file)) + srt_parser = None + else: + srt_parser = None - # Calculate progress percentage - progress = floor((self.frame_index - start_frame + 1) / frames_to_process * 100) - print("[{:3d}%] Processing frame {}/{}: ".format(progress, self.frame_index - start_frame + 1, frames_to_process), end="") + # get video info + video_info = self.GetVideoInfo(input_file) - stats = self.ProcessFrame(frame) + print(video_info) - if stats is not None and self.parameters.stats_file is not None: - self.WriteStats(stats) + if (self.blur_checker is not None and self.blur_checker.NeedPreProcess()): + print("Preprocessing for blur checker...") + self.blur_checker.PreProcess(input_file, self.parameters.start, self.parameters.end, self.parameters.internal_width, self.parameters.internal_height) + end = time.time() + print("Preprocessing time: {:.2f}s".format(end - start)) + print("Calculated threshold is {}".format(self.blur_checker.threshold)) - cap.release() - if (self.f is not None): + if (self.black_checker is not None and self.black_checker.NeedPreProcess()): + start2 = time.time() + print("Preprocessing for black checker...") + self.black_checker.PreProcess(input_file, self.parameters.start, self.parameters.end, self.parameters.internal_width, self.parameters.internal_height) + end = time.time() + print("Preprocessing time: {:.2f}s".format(end - start2)) + print("Calculated luminance_range_size is {}".format(self.black_checker.luminance_range_size)) + print("Calculated luminance_minimum_value is {}".format(self.black_checker.luminance_minimum_value)) + print("Calculated absolute_threshold is {}".format(self.black_checker.absolute_threshold)) + + # open video file + cap = cv2.VideoCapture(input_file) + if (not cap.isOpened()): + print("Error opening video stream or file") + return + + frames_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + if 
(self.parameters.start is not None): + cap.set(cv2.CAP_PROP_POS_FRAMES, self.parameters.start) + self.frame_index = self.parameters.start + start_frame = self.parameters.start + else: + start_frame = 0 + + frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else frames_count - start_frame + + while (cap.isOpened()): + ret, frame = cap.read() + + if not ret: + break + + if (self.parameters.end is not None and self.frame_index > self.parameters.end): + break + + # Calculate progress percentage + progress = floor((self.frame_index - start_frame + 1) / frames_to_process * 100) + print("[{}][{:3d}%] Processing frame {}/{}: ".format(file_name, progress, self.frame_index - start_frame + 1, frames_to_process), end="") + + stats = self.ProcessFrame(frame, video_info, srt_parser) + + if stats is not None and self.parameters.stats_file is not None: + self.WriteStats(input_file, stats) + + cap.release() + + if self.f is not None: self.f.close() end = time.time() print("Total processing time: {:.2f}s".format(end - start)) - def ProcessFrame(self, frame): + def ProcessFrame(self, frame, video_info, srt_parser): - res = {"frame_index": self.frame_index} + res = {"frame_index": self.frame_index, "global_idx": self.global_idx} frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) frame_bw = cv2.resize(frame_bw, (self.parameters.internal_width, self.parameters.internal_height)) @@ -186,6 +150,15 @@ class Video2Dataset: self.frame_index += 1 return res + if (self.black_checker is not None): + is_black = self.black_checker.IsBlack(frame_bw, self.frame_index) + res["is_black"] = is_black + + if is_black: + print ("black, skipping") + self.frame_index += 1 + return res + if (self.similarity_checker is not None): similarity_score, is_similar, last_frame_index = self.similarity_checker.IsSimilar(frame_bw, self.frame_index) res["similarity_score"] = similarity_score @@ -197,27 +170,126 @@ class Video2Dataset: self.frame_index += 1 return res - 
self.SaveFrame(frame) + self.SaveFrame(frame, video_info, srt_parser) res["written"] = True self.frame_index += 1 + self.global_idx += 1 print ("saved") return res - def SaveFrame(self, frame): + def SaveFrame(self, frame, video_info, srt_parser: SrtFileParser): if (self.parameters.output_resolution is not None): frame = cv2.resize(frame, self.parameters.output_resolution) - cv2.imwrite(os.path.join(self.parameters.output, - "frame_{}.{}".format(self.frame_index, self.parameters.frame_format)), frame) + path = os.path.join(self.parameters.output, + "frame_{}_{}.{}".format(self.global_idx, self.frame_index, self.parameters.frame_format)) - def WriteStats(self, stats): - self.f.write("{};{};{};{};{};{};{}\n".format(stats["frame_index"], + _, buf = cv2.imencode('.' + self.parameters.frame_format, frame) + + img = Image(buf.tobytes()) + + start_time = (video_info.start_time if video_info.start_time is not None \ + else srt_parser.data[0].timestamp if srt_parser is not None \ + else datetime.datetime.now()) + self.parameters.utc_offset + + elapsed_time = start_time + datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate)) + + # Set datetime_original + img.datetime_original = elapsed_time.strftime('%Y:%m:%d %H:%M:%S') + img.datetime_digitized = elapsed_time.strftime('%Y:%m:%d %H:%M:%S') + img.datetime = elapsed_time.strftime('%Y:%m:%d %H:%M:%S') + img.pixel_x_dimension = frame.shape[1] + img.pixel_y_dimension = frame.shape[0] + img.orientation = video_info.orientation if video_info.orientation is not None else Orientation.TOP_LEFT + img.software = "Video2Dataset" + + if video_info.model is not None: + img.model = video_info.model + + entry = srt_parser.get_entry(elapsed_time) if srt_parser is not None else None + + if (entry is not None): + + img.exposure_time = 1 / float(entry["shutter"].split("/")[1]) + img.focal_length = entry["focal_len"] + img.f_number = entry["fnum"] + img.latitude = entry["latitude"] + img.longitude = entry["longitude"] + 
img.altitude = entry["altitude"] + img.photographic_sensitivity = entry["iso"] + + with open(path, "wb") as f: + f.write(img.get_file()) + + def WriteStats(self, input_file, stats): + self.f.write("{};{};{};{};{};{};{};{};{};{}\n".format( + stats["global_idx"], + input_file, + stats["frame_index"], stats["blur_score"] if "blur_score" in stats else "", stats["is_blurry"] if "is_blurry" in stats else "", + stats["is_black"] if "is_black" in stats else "", stats["last_frame_index"] if "last_frame_index" in stats else "", stats["similarity_score"] if "similarity_score" in stats else "", stats["is_similar"] if "is_similar" in stats else "", - stats["written"] if "written" in stats else "").replace(".", ",")) \ No newline at end of file + stats["written"] if "written" in stats else "").replace(".", ",")) + + def GetVideoInfo(self, input_file): + + video = cv2.VideoCapture(input_file) + + total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + frame_rate = video.get(cv2.CAP_PROP_FPS) + start_time, orientation, model = self.GetVideoMetadata(input_file) + + video.release() + + return collections.namedtuple("VideoInfo", ["total_frames", "frame_rate", "start_time", "orientation", "model"])(total_frames, frame_rate, start_time, orientation, model) + + def GetVideoMetadata(self, input_file): + + try: + + metadata = pymediainfo.MediaInfo.parse(input_file).to_data() + + start_time = None + orientation = Orientation.TOP_LEFT + performer = None + + if metadata is not None and 'tracks' in metadata: + # Check if it is safe to access the first element of the tracks list + if len(metadata['tracks']) > 0: + + start_time = metadata['tracks'][0].get('encoded_date') or \ + metadata['tracks'][0].get('tagged_date') or \ + metadata['tracks'][0].get('file_creation_date') + + start_time = datetime.datetime.strptime(start_time, '%Z %Y-%m-%d %H:%M:%S') + + performer = metadata['tracks'][0].get('performer') + + # Check if it is safe to access the second element of the tracks list + if 
len(metadata['tracks']) > 1: + + orientation = metadata['tracks'][1].get('rotation') + + if orientation is not None: + orientation = int(float(orientation)) + + if orientation == 0: + orientation = Orientation.TOP_LEFT + elif orientation == 90: + orientation = Orientation.LEFT_BOTTOM + elif orientation == 180: + orientation = Orientation.BOTTOM_RIGHT + elif orientation == 270: + orientation = Orientation.RIGHT_TOP + + return start_time, orientation, performer + + except Exception as e: + + return start_time, orientation, performer \ No newline at end of file From 8cb70002f570d69c8510c485e202da83ff94ed77 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 9 Jan 2023 14:05:38 -0500 Subject: [PATCH 064/102] Multispectral split-merge, GCP names fix, bump version --- SuperBuild/cmake/External-OpenSfM.cmake | 2 +- VERSION | 2 +- opendm/dem/commands.py | 2 +- opendm/orthophoto.py | 4 ++-- opendm/system.py | 10 ++++++++++ opendm/types.py | 2 +- stages/splitmerge.py | 16 +++++++++++++++- 7 files changed, 31 insertions(+), 7 deletions(-) diff --git a/SuperBuild/cmake/External-OpenSfM.cmake b/SuperBuild/cmake/External-OpenSfM.cmake index 7ce9a002..428675c9 100644 --- a/SuperBuild/cmake/External-OpenSfM.cmake +++ b/SuperBuild/cmake/External-OpenSfM.cmake @@ -25,7 +25,7 @@ ExternalProject_Add(${_proj_name} #--Download step-------------- DOWNLOAD_DIR ${SB_DOWNLOAD_DIR} GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/ - GIT_TAG 302 + GIT_TAG 303 #--Update/Patch step---------- UPDATE_COMMAND git submodule update --init --recursive #--Configure step------------- diff --git a/VERSION b/VERSION index b5021469..75a22a26 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.2 +3.0.3 diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py index 47903a68..d8d194f1 100755 --- a/opendm/dem/commands.py +++ b/opendm/dem/commands.py @@ -58,7 +58,7 @@ def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_poin min_area=min_area, min_points=min_points) except 
Exception as e: - raise Exception("Error rectifying ground in file %s: %s" % (lasFile, str(e))) + log.ODM_WARNING("Error rectifying ground in file %s: %s" % (lasFile, str(e))) log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start)) return lasFile diff --git a/opendm/orthophoto.py b/opendm/orthophoto.py index 9b25e6d1..79865c71 100644 --- a/opendm/orthophoto.py +++ b/opendm/orthophoto.py @@ -216,8 +216,8 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}): left, bottom, right, top = src.bounds xs.extend([left, right]) ys.extend([bottom, top]) - if src.profile["count"] < 4: - raise ValueError("Inputs must be at least 4-band rasters") + if src.profile["count"] < 2: + raise ValueError("Inputs must be at least 2-band rasters") dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys) log.ODM_INFO("Output bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n)) diff --git a/opendm/system.py b/opendm/system.py index 4feff283..adeeb3f5 100644 --- a/opendm/system.py +++ b/opendm/system.py @@ -144,3 +144,13 @@ def which(program): p=os.path.join(p,program) if os.path.exists(p) and os.access(p,os.X_OK): return p + +def link_file(src, dst): + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + + if not os.path.isfile(dst): + if sys.platform == 'win32': + os.link(src, dst) + else: + os.symlink(os.path.relpath(os.path.abspath(src), os.path.dirname(os.path.abspath(dst))), dst) diff --git a/opendm/types.py b/opendm/types.py index be4b44ea..06a10bc0 100644 --- a/opendm/types.py +++ b/opendm/types.py @@ -113,7 +113,7 @@ class ODM_Reconstruction(object): # Convert GCP file to a UTM projection since the rest of the pipeline # does not handle other SRS well. 
rejected_entries = [] - utm_gcp = GCPFile(gcp.create_utm_copy(output_gcp_file, filenames=[p.filename for p in self.photos], rejected_entries=rejected_entries, include_extras=False)) + utm_gcp = GCPFile(gcp.create_utm_copy(output_gcp_file, filenames=[p.filename for p in self.photos], rejected_entries=rejected_entries, include_extras=True)) if not utm_gcp.exists(): raise RuntimeError("Could not project GCP file to UTM. Please double check your GCP file for mistakes.") diff --git a/stages/splitmerge.py b/stages/splitmerge.py index ce70f512..74f6afea 100644 --- a/stages/splitmerge.py +++ b/stages/splitmerge.py @@ -20,6 +20,7 @@ from opendm import point_cloud from opendm.utils import double_quote from opendm.tiles.tiler import generate_dem_tiles from opendm.cogeo import convert_to_cogeo +from opendm import multispectral class ODMSplitStage(types.ODM_Stage): def process(self, args, outputs): @@ -88,12 +89,12 @@ class ODMSplitStage(types.ODM_Stage): for sp in submodel_paths: sp_octx = OSFMContext(sp) + submodel_images_dir = os.path.abspath(sp_octx.path("..", "images")) # Copy filtered GCP file if needed # One in OpenSfM's directory, one in the submodel project directory if reconstruction.gcp and reconstruction.gcp.exists(): submodel_gcp_file = os.path.abspath(sp_octx.path("..", "gcp_list.txt")) - submodel_images_dir = os.path.abspath(sp_octx.path("..", "images")) if reconstruction.gcp.make_filtered_copy(submodel_gcp_file, submodel_images_dir): log.ODM_INFO("Copied filtered GCP file to %s" % submodel_gcp_file) @@ -107,6 +108,19 @@ class ODMSplitStage(types.ODM_Stage): io.copy(tree.odm_geo_file, geo_dst_path) log.ODM_INFO("Copied GEO file to %s" % geo_dst_path) + # If this is a multispectral dataset, + # we need to link the multispectral images + if reconstruction.multi_camera: + submodel_images = os.listdir(submodel_images_dir) + + primary_band_name = multispectral.get_primary_band_name(reconstruction.multi_camera, args.primary_band) + _, p2s = 
multispectral.compute_band_maps(reconstruction.multi_camera, primary_band_name) + for filename in p2s: + if filename in submodel_images: + secondary_band_photos = p2s[filename] + for p in secondary_band_photos: + system.link_file(os.path.join(tree.dataset_raw, p.filename), submodel_images_dir) + # Reconstruct each submodel log.ODM_INFO("Dataset has been split into %s submodels. Reconstructing each submodel..." % len(submodel_paths)) self.update_progress(25) From 697727aeefc47c7431439a5b4a15c5e6ae050f45 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 9 Jan 2023 14:08:49 -0500 Subject: [PATCH 065/102] Move info log --- opendm/dem/commands.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py index d8d194f1..dbba8794 100755 --- a/opendm/dem/commands.py +++ b/opendm/dem/commands.py @@ -56,11 +56,10 @@ def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_poin reclassify_plan='median', reclassify_threshold=reclassify_threshold, \ extend_plan='surrounding', extend_grid_distance=5, \ min_area=min_area, min_points=min_points) - + log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start)) except Exception as e: log.ODM_WARNING("Error rectifying ground in file %s: %s" % (lasFile, str(e))) - log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start)) return lasFile error = None From 0c0aa24dd9da0732a7beff245086dc34538c0df2 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 9 Jan 2023 15:57:26 -0500 Subject: [PATCH 066/102] More logical split value for multispec --- stages/splitmerge.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stages/splitmerge.py b/stages/splitmerge.py index 74f6afea..8e1ca322 100644 --- a/stages/splitmerge.py +++ b/stages/splitmerge.py @@ -55,11 +55,13 @@ class ODMSplitStage(types.ODM_Stage): log.ODM_INFO("Setting max-concurrency to %s to better handle remote splits" % args.max_concurrency) log.ODM_INFO("Large 
dataset detected (%s photos) and split set at %s. Preparing split merge." % (len(photos), args.split)) + multiplier = (1.0 / len(reconstruction.multi_camera)) if reconstruction.multi_camera else 1.0 + config = [ "submodels_relpath: " + os.path.join("..", "submodels", "opensfm"), "submodel_relpath_template: " + os.path.join("..", "submodels", "submodel_%04d", "opensfm"), "submodel_images_relpath_template: " + os.path.join("..", "submodels", "submodel_%04d", "images"), - "submodel_size: %s" % args.split, + "submodel_size: %s" % max(2, int(float(args.split) * multiplier)), "submodel_overlap: %s" % args.split_overlap, ] From e997e13e750cc1e1898597de8822890d1009cbfe Mon Sep 17 00:00:00 2001 From: Stephen Vincent Mather Date: Mon, 9 Jan 2023 23:59:11 -0500 Subject: [PATCH 067/102] add --pc-skip-geometric --- opendm/config.py | 7 +++++++ stages/openmvs.py | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/opendm/config.py b/opendm/config.py index edb66b95..9e73e466 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -389,6 +389,13 @@ def config(argv=None, parser=None): help='Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. ' 'Default: %(default)s') + parser.add_argument('--pc-skip-geometric', + action=StoreTrue, + nargs=0, + default=False, + help='Geometric estimates improve the accuracy of the point cloud by computing geometrically consistent depthmaps but may not be usable in larger datasets. This flag disables geometric estimates. 
' + 'Default: %(default)s') + parser.add_argument('--pc-tile', action=StoreTrue, nargs=0, diff --git a/stages/openmvs.py b/stages/openmvs.py index df37f6a0..ea0c590e 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -86,12 +86,16 @@ class ODMOpenMVSStage(types.ODM_Stage): config.append("--fusion-mode 1") extra_config = [] - + + if args.pc_skip_geometric: + extra_config.append("--geometric-iters 0") + masks_dir = os.path.join(tree.opensfm, "undistorted", "masks") masks = os.path.exists(masks_dir) and len(os.listdir(masks_dir)) > 0 if masks: extra_config.append("--ignore-mask-label 0") + sharp = args.pc_geometric with open(densify_ini_file, 'w+') as f: f.write("Optimize = 7\n") From 999434e1610ccab3794d8ebc3d42cc176ff967c7 Mon Sep 17 00:00:00 2001 From: Stephen Vincent Mather Date: Tue, 10 Jan 2023 00:04:43 -0500 Subject: [PATCH 068/102] add missing logic --- stages/openmvs.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stages/openmvs.py b/stages/openmvs.py index ea0c590e..75293eb5 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -95,12 +95,12 @@ class ODMOpenMVSStage(types.ODM_Stage): if masks: extra_config.append("--ignore-mask-label 0") - sharp = args.pc_geometric + sharp = args.pc_skip_geometric with open(densify_ini_file, 'w+') as f: - f.write("Optimize = 7\n") + f.write("Optimize = %s\n" % (3 if sharp else 7)) def run_densify(): - system.run('"%s" "%s" %s' % (context.omvs_densify_path, + .run('"%s" "%s" %s' % (context.omvs_densify_path, openmvs_scene_file, ' '.join(config + gpu_config + extra_config))) try: From 3f6b8b49362ba2df412b4aaaf55764bbf51b7a23 Mon Sep 17 00:00:00 2001 From: Stephen Vincent Mather Date: Tue, 10 Jan 2023 00:05:42 -0500 Subject: [PATCH 069/102] typo fix:wq --- stages/openmvs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stages/openmvs.py b/stages/openmvs.py index 75293eb5..4133042e 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -100,7 +100,7 @@ class 
ODMOpenMVSStage(types.ODM_Stage): f.write("Optimize = %s\n" % (3 if sharp else 7)) def run_densify(): - .run('"%s" "%s" %s' % (context.omvs_densify_path, + system.run('"%s" "%s" %s' % (context.omvs_densify_path, openmvs_scene_file, ' '.join(config + gpu_config + extra_config))) try: From 2e362c22380801905de57d98fa03efa4b5307602 Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Tue, 10 Jan 2023 12:19:28 +0100 Subject: [PATCH 070/102] Added modules for video --- requirements.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/requirements.txt b/requirements.txt index 3084deb3..c016a6e7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,3 +34,7 @@ onnxruntime==1.12.1 codem==0.24.0 trimesh==3.17.1 pandas==1.5.2 + +# for video support +pymediainfo==6.0.1 +piexif==1.1.3 \ No newline at end of file From a74337f8fea99ba1b7e62c0811b9c982839d6841 Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Tue, 10 Jan 2023 12:19:50 +0100 Subject: [PATCH 071/102] Fixed srt parser --- opendm/video/srtparser.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index 28e6f0ae..a69a11d6 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta import re class SrtFileParser: def __init__(self, filename, utc_offset): @@ -10,11 +10,15 @@ class SrtFileParser: if not self.data: self.parse() + # check min and max + if timestamp < self.min or timestamp > self.max: + return None + for entry in self.data: if entry["timestamp"] <= timestamp: return entry - return None + return self.data[len(self.data) - 1] def parse(self): @@ -26,6 +30,9 @@ class SrtFileParser: # 2023-01-06 18:56:48,380,821 # [iso : 3200] [shutter : 1/60.0] [fnum : 280] [ev : 0] [ct : 3925] [color_md : default] [focal_len : 240] [latitude: 0.000000] [longitude: 0.000000] [altitude: 0.000000] + self.min = datetime.max + self.max 
= datetime.min + with open(self.filename, 'r') as f: for line in f: @@ -48,6 +55,10 @@ class SrtFileParser: "longitude": longitude, "altitude": altitude }) + self.min = min(self.min, timestamp) + # account for the difftime milliseconds to get the actual max + self.max = max(self.max, timestamp + timedelta(milliseconds=difftime)) + srtcnt = None difftime = None timestamp = None @@ -112,13 +123,20 @@ class SrtFileParser: match = re.search("latitude: (\d+.\d+)", line) if match: latitude = float(match.group(1)) + latitude = latitude if latitude != 0 else None match = re.search("longitude: (\d+.\d+)", line) if match: longitude = float(match.group(1)) + longitude = longitude if longitude != 0 else None match = re.search("altitude: (\d+.\d+)", line) if match: altitude = float(match.group(1)) + altitude = altitude if altitude != 0 else None + self.data.reverse() + + self.max = self.max.replace(microsecond=0) + self.min = self.min.replace(microsecond=0) From 02257a62cd00e8dca36a7942e7563df27a00478e Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Tue, 10 Jan 2023 12:20:08 +0100 Subject: [PATCH 072/102] Switch to pillow + timezone fixes --- opendm/video/video2dataset.py | 162 ++++++++++++++++++++++------------ 1 file changed, 108 insertions(+), 54 deletions(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 9f1634b8..759494e2 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -1,18 +1,21 @@ +from opendm.video.parameters import Parameters import datetime +from fractions import Fraction +import io from math import floor import time import cv2 import os -from opendm.video.parameters import Parameters import pymediainfo import collections -from exif import Image, Orientation +from PIL import Image from checkers import BlackFrameChecker, PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker from srtparser import SrtFileParser +import piexif class Video2Dataset: - def __init__(self, parameters: Parameters): + 
def __init__(self, parameters : Parameters): self.parameters = parameters # We prioritize blur threshold over blur percentage. @@ -46,20 +49,25 @@ class Video2Dataset: # get file name file_name = os.path.basename(input_file) - print("Processing video: {}".format(input_file)) + # get video info + video_info = self.GetVideoInfo(input_file) + print(video_info) + + utc_offset = self.parameters.utc_offset if self.parameters.utc_offset is not None else video_info.utc_offset + if self.parameters.use_srt: srt_file = os.path.splitext(input_file)[0] + ".srt" if os.path.exists(srt_file): print("Loading SRT file: {}".format(srt_file)) - srt_parser = SrtFileParser(srt_file, self.parameters.utc_offset) + srt_parser = SrtFileParser(srt_file, utc_offset) srt_parser.parse() else: srt_file = os.path.splitext(input_file)[0] + ".SRT" if os.path.exists(srt_file): print("Loading SRT file: {}".format(srt_file)) - srt_parser = SrtFileParser(srt_file, self.parameters.utc_offset) + srt_parser = SrtFileParser(srt_file, utc_offset) srt_parser.parse() else: print("SRT file not found: {}".format(srt_file)) @@ -67,11 +75,6 @@ class Video2Dataset: else: srt_parser = None - # get video info - video_info = self.GetVideoInfo(input_file) - - print(video_info) - if (self.blur_checker is not None and self.blur_checker.NeedPreProcess()): print("Preprocessing for blur checker...") self.blur_checker.PreProcess(input_file, self.parameters.start, self.parameters.end, self.parameters.internal_width, self.parameters.internal_height) @@ -189,40 +192,65 @@ class Video2Dataset: _, buf = cv2.imencode('.' 
+ self.parameters.frame_format, frame) - img = Image(buf.tobytes()) + start_time_utc = video_info.start_time_utc if video_info.start_time_utc is not None \ + else srt_parser.data[0].timestamp if srt_parser is not None \ + else datetime.datetime.now() - start_time = (video_info.start_time if video_info.start_time is not None \ - else srt_parser.data[0].timestamp if srt_parser is not None \ - else datetime.datetime.now()) + self.parameters.utc_offset + elapsed_time_utc = start_time_utc + datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate)) + elapsed_time = elapsed_time_utc + srt_parser.utc_offset - elapsed_time = start_time + datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate)) + img = Image.open(io.BytesIO(buf)) - # Set datetime_original - img.datetime_original = elapsed_time.strftime('%Y:%m:%d %H:%M:%S') - img.datetime_digitized = elapsed_time.strftime('%Y:%m:%d %H:%M:%S') - img.datetime = elapsed_time.strftime('%Y:%m:%d %H:%M:%S') - img.pixel_x_dimension = frame.shape[1] - img.pixel_y_dimension = frame.shape[0] - img.orientation = video_info.orientation if video_info.orientation is not None else Orientation.TOP_LEFT - img.software = "Video2Dataset" + # Exif dict contains the following keys: '0th', 'Exif', 'GPS', '1st', 'thumbnail' + + # Set the EXIF metadata + exif_dict = { + "0th": { + piexif.ImageIFD.Software: "ODM", + piexif.ImageIFD.DateTime: elapsed_time.strftime('%Y:%m:%d %H:%M:%S'), + piexif.ImageIFD.XResolution: (frame.shape[1], 1), + piexif.ImageIFD.YResolution: (frame.shape[0], 1), + }, + "Exif": { + piexif.ExifIFD.DateTimeOriginal: elapsed_time.strftime('%Y:%m:%d %H:%M:%S'), + piexif.ExifIFD.DateTimeDigitized: elapsed_time.strftime('%Y:%m:%d %H:%M:%S'), + piexif.ExifIFD.PixelXDimension: (frame.shape[1], 1), + piexif.ExifIFD.PixelYDimension: (frame.shape[0], 1), + }} + + if video_info.orientation is not None: + exif_dict["0th"][piexif.ImageIFD.Orientation] = video_info.orientation if video_info.model is not None: - 
img.model = video_info.model + exif_dict["Exif"][piexif.ImageIFD.Model] = video_info.model - entry = srt_parser.get_entry(elapsed_time) if srt_parser is not None else None + entry = srt_parser.get_entry(elapsed_time_utc) if srt_parser is not None else None - if (entry is not None): + if entry is not None: + segs = entry["shutter"].split("/") + exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (int(float(segs[0])), int(float(segs[1]))) + exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (entry["focal_len"], 1) + exif_dict["Exif"][piexif.ExifIFD.FNumber] = (entry["fnum"], 1) + exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = (entry["iso"], 1) - img.exposure_time = 1 / float(entry["shutter"].split("/")[1]) - img.focal_length = entry["focal_len"] - img.f_number = entry["fnum"] - img.latitude = entry["latitude"] - img.longitude = entry["longitude"] - img.altitude = entry["altitude"] - img.photographic_sensitivity = entry["iso"] + exif_dict["GPS"] = { + piexif.GPSIFD.GPSMapDatum: "WGS-84", + piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d %H:%M:%S') + } + + if (entry["latitude"] is not None): + exif_dict["GPS"][piexif.GPSIFD.GPSLatitude] = FloatToRational(entry["latitude"]) + + if (entry["longitude"] is not None): + exif_dict["GPS"][piexif.GPSIFD.GPSLongitude] = FloatToRational(entry["longitude"]) + + if (entry["altitude"] is not None): + exif_dict["GPS"][piexif.GPSIFD.GPSAltitude] = FloatToRational(entry["altitude"]) + + + exif_bytes = piexif.dump(exif_dict) + img.save(path, exif=exif_bytes) - with open(path, "wb") as f: - f.write(img.get_file()) def WriteStats(self, input_file, stats): self.f.write("{};{};{};{};{};{};{};{};{};{}\n".format( @@ -237,17 +265,19 @@ class Video2Dataset: stats["is_similar"] if "is_similar" in stats else "", stats["written"] if "written" in stats else "").replace(".", ",")) + def GetVideoInfo(self, input_file): video = cv2.VideoCapture(input_file) total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) frame_rate = 
video.get(cv2.CAP_PROP_FPS) - start_time, orientation, model = self.GetVideoMetadata(input_file) + start_time_utc, utc_offset, orientation, model = self.GetVideoMetadata(input_file) video.release() - return collections.namedtuple("VideoInfo", ["total_frames", "frame_rate", "start_time", "orientation", "model"])(total_frames, frame_rate, start_time, orientation, model) + return collections.namedtuple("VideoInfo", ["total_frames", "frame_rate", "start_time_utc", "utc_offset", "orientation", "model"])(total_frames, frame_rate, start_time_utc, utc_offset, orientation, model) + def GetVideoMetadata(self, input_file): @@ -255,19 +285,28 @@ class Video2Dataset: metadata = pymediainfo.MediaInfo.parse(input_file).to_data() - start_time = None - orientation = Orientation.TOP_LEFT + start_time_utc = None + orientation = 1 performer = None + utc_offset = None if metadata is not None and 'tracks' in metadata: # Check if it is safe to access the first element of the tracks list if len(metadata['tracks']) > 0: - start_time = metadata['tracks'][0].get('encoded_date') or \ - metadata['tracks'][0].get('tagged_date') or \ - metadata['tracks'][0].get('file_creation_date') + start_time_utc = metadata['tracks'][0].get('encoded_date') or \ + metadata['tracks'][0].get('tagged_date') - start_time = datetime.datetime.strptime(start_time, '%Z %Y-%m-%d %H:%M:%S') + start_time_utc = datetime.datetime.strptime(start_time_utc, '%Z %Y-%m-%d %H:%M:%S') if start_time_utc is not None else None + + file_creation_date_utc = metadata['tracks'][0].get('file_creation_date') + file_creation_date_utc = datetime.datetime.strptime(file_creation_date_utc, '%Z %Y-%m-%d %H:%M:%S.%f') if file_creation_date_utc is not None else None + + file_creation_date = metadata['tracks'][0].get('file_creation_date__local') + file_creation_date = datetime.datetime.strptime(file_creation_date, '%Y-%m-%d %H:%M:%S.%f') if file_creation_date is not None else None + + if file_creation_date_utc is not None and file_creation_date is 
not None: + utc_offset = file_creation_date - file_creation_date_utc performer = metadata['tracks'][0].get('performer') @@ -279,17 +318,32 @@ class Video2Dataset: if orientation is not None: orientation = int(float(orientation)) - if orientation == 0: - orientation = Orientation.TOP_LEFT - elif orientation == 90: - orientation = Orientation.LEFT_BOTTOM - elif orientation == 180: - orientation = Orientation.BOTTOM_RIGHT - elif orientation == 270: - orientation = Orientation.RIGHT_TOP + # The 8 EXIF orientation values are numbered 1 to 8. + # 1 = 0°: the correct orientation, no adjustment is required. + # 2 = 0°, mirrored: image has been flipped back-to-front. + # 3 = 180°: image is upside down. + # 4 = 180°, mirrored: image has been flipped back-to-front and is upside down. + # 5 = 90°: image has been flipped back-to-front and is on its side. + # 6 = 90°, mirrored: image is on its side. + # 7 = 270°: image has been flipped back-to-front and is on its far side. + # 8 = 270°, mirrored: image is on its far side. 
- return start_time, orientation, performer + if orientation == 0: + orientation = 1 + elif orientation == 90: + orientation = 8 + elif orientation == 180: + orientation = 3 + elif orientation == 270: + orientation = 6 + + return start_time_utc, utc_offset, orientation, performer except Exception as e: - return start_time, orientation, performer \ No newline at end of file + return start_time_utc, utc_offset, orientation, performer + + +def FloatToRational(f): + f = Fraction(f).limit_denominator() + return (f.numerator, f.denominator) \ No newline at end of file From aa82f767477f332eb71ace7989a3ade6d2fce0d5 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 10 Jan 2023 09:38:13 -0500 Subject: [PATCH 073/102] Syntax fix, always optimize --- opendm/config.py | 2 +- stages/openmvs.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/opendm/config.py b/opendm/config.py index 9e73e466..966a4f2e 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -389,7 +389,7 @@ def config(argv=None, parser=None): help='Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. 
' 'Default: %(default)s') - parser.add_argument('--pc-skip-geometric + parser.add_argument('--pc-skip-geometric', action=StoreTrue, nargs=0, default=False, diff --git a/stages/openmvs.py b/stages/openmvs.py index 4133042e..eec1bba4 100644 --- a/stages/openmvs.py +++ b/stages/openmvs.py @@ -95,9 +95,8 @@ class ODMOpenMVSStage(types.ODM_Stage): if masks: extra_config.append("--ignore-mask-label 0") - sharp = args.pc_skip_geometric with open(densify_ini_file, 'w+') as f: - f.write("Optimize = %s\n" % (3 if sharp else 7)) + f.write("Optimize = 7\n") def run_densify(): system.run('"%s" "%s" %s' % (context.omvs_densify_path, From 6e50ed8fcd708a2cf8727a621f37fda863feca63 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 11 Jan 2023 13:16:04 -0500 Subject: [PATCH 074/102] Add --texturing-single-material --- opendm/config.py | 7 + opendm/objpacker/__init__.py | 1 + opendm/objpacker/imagepacker/__init__.py | 1 + opendm/objpacker/imagepacker/imagepacker.py | 240 ++++++++++++++++++++ opendm/objpacker/imagepacker/utils.py | 55 +++++ opendm/objpacker/objpacker.py | 228 +++++++++++++++++++ opendm/system.py | 17 ++ stages/mvstex.py | 23 +- 8 files changed, 571 insertions(+), 1 deletion(-) create mode 100644 opendm/objpacker/__init__.py create mode 100644 opendm/objpacker/imagepacker/__init__.py create mode 100644 opendm/objpacker/imagepacker/imagepacker.py create mode 100644 opendm/objpacker/imagepacker/utils.py create mode 100644 opendm/objpacker/objpacker.py diff --git a/opendm/config.py b/opendm/config.py index 966a4f2e..e9de9e5b 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -454,6 +454,13 @@ def config(argv=None, parser=None): help=('Keep faces in the mesh that are not seen in any camera. ' 'Default: %(default)s')) + parser.add_argument('--texturing-single-material', + action=StoreTrue, + nargs=0, + default=False, + help=('Generate OBJs that have a single material and a single texture file instead of multiple ones. 
' + 'Default: %(default)s')) + parser.add_argument('--gcp', metavar='', action=StoreValue, diff --git a/opendm/objpacker/__init__.py b/opendm/objpacker/__init__.py new file mode 100644 index 00000000..f3bbb024 --- /dev/null +++ b/opendm/objpacker/__init__.py @@ -0,0 +1 @@ +from .objpacker import obj_pack \ No newline at end of file diff --git a/opendm/objpacker/imagepacker/__init__.py b/opendm/objpacker/imagepacker/__init__.py new file mode 100644 index 00000000..d89deac5 --- /dev/null +++ b/opendm/objpacker/imagepacker/__init__.py @@ -0,0 +1 @@ +from .imagepacker import pack diff --git a/opendm/objpacker/imagepacker/imagepacker.py b/opendm/objpacker/imagepacker/imagepacker.py new file mode 100644 index 00000000..a0a589b2 --- /dev/null +++ b/opendm/objpacker/imagepacker/imagepacker.py @@ -0,0 +1,240 @@ +#! /usr/bin/python + +# The MIT License (MIT) + +# Copyright (c) 2015 Luke Gaynor + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import rasterio +import numpy as np +#from PIL import Image, ImageDraw +import math + +# Based off of the great writeup, demo and code at: +# http://codeincomplete.com/posts/2011/5/7/bin_packing/ + +class Block(): + """A rectangular block, to be packed""" + def __init__(self, w, h, data=None, padding=0): + self.w = w + self.h = h + self.x = None + self.y = None + self.fit = None + self.data = data + self.padding = padding # not implemented yet + + def __str__(self): + return "({x},{y}) ({w}x{h}): {data}".format( + x=self.x,y=self.y, w=self.w,h=self.h, data=self.data) + + +class _BlockNode(): + """A BlockPacker node""" + def __init__(self, x, y, w, h, used=False, right=None, down=None): + self.x = x + self.y = y + self.w = w + self.h = h + self.used = used + self.right = right + self.down = down + + def __repr__(self): + return "({x},{y}) ({w}x{h})".format(x=self.x,y=self.y,w=self.w,h=self.h) + + +class BlockPacker(): + """Packs blocks of varying sizes into a single, larger block""" + def __init__(self): + self.root = None + + def fit(self, blocks): + nblocks = len(blocks) + w = blocks[0].w# if nblocks > 0 else 0 + h = blocks[0].h# if nblocks > 0 else 0 + + self.root = _BlockNode(0,0, w,h) + + for block in blocks: + node = self.find_node(self.root, block.w, block.h) + if node: + # print("split") + node_fit = self.split_node(node, block.w, block.h) + block.x = node_fit.x + block.y = node_fit.y + else: + # print("grow") + node_fit = self.grow_node(block.w, block.h) + block.x = node_fit.x + block.y = node_fit.y + + def find_node(self, root, w, h): + if root.used: + # raise Exception("used") + node = self.find_node(root.right, w, h) + if node: + return node + return self.find_node(root.down, w, h) + elif w <= root.w and h <= root.h: + return root + else: + return None + + def split_node(self, node, w, h): + node.used = True + node.down = _BlockNode( + node.x, node.y + h, + node.w, node.h - h + ) + node.right = _BlockNode( + node.x + w, node.y, + node.w - w, h + ) + 
return node + + def grow_node(self, w, h): + can_grow_down = w <= self.root.w + can_grow_right = h <= self.root.h + + # try to keep the packing square + should_grow_right = can_grow_right and self.root.h >= (self.root.w + w) + should_grow_down = can_grow_down and self.root.w >= (self.root.h + h) + + if should_grow_right: + return self.grow_right(w, h) + elif should_grow_down: + return self.grow_down(w, h) + elif can_grow_right: + return self.grow_right(w, h) + elif can_grow_down: + return self.grow_down(w, h) + else: + raise Exception("no valid expansion avaliable!") + + def grow_right(self, w, h): + old_root = self.root + self.root = _BlockNode( + 0, 0, + old_root.w + w, old_root.h, + down=old_root, + right=_BlockNode(self.root.w, 0, w, self.root.h), + used=True + ) + + node = self.find_node(self.root, w, h) + if node: + return self.split_node(node, w, h) + else: + return None + + def grow_down(self, w, h): + old_root = self.root + self.root = _BlockNode( + 0, 0, + old_root.w, old_root.h + h, + down=_BlockNode(0, self.root.h, self.root.w, h), + right=old_root, + used=True + ) + + node = self.find_node(self.root, w, h) + if node: + return self.split_node(node, w, h) + else: + return None + + +def crop_by_extents(image, extent): + if min(extent.min_x,extent.min_y) < 0 or max(extent.max_x,extent.max_y) > 1: + print("\tWARNING! 
UV Coordinates lying outside of [0:1] space!") + + _, h, w = image.shape + minx = max(math.floor(extent.min_x*w), 0) + miny = max(math.floor(extent.min_y*h), 0) + maxx = min(math.ceil(extent.max_x*w), w) + maxy = min(math.ceil(extent.max_y*h), h) + + image = image[:, miny:maxy, minx:maxx] + delta_w = maxx - minx + delta_h = maxy - miny + + # offset from origin x, y, horizontal scale, vertical scale + changes = (minx, miny, delta_w / w, delta_h / h) + + return (image, changes) + +def pack(obj, background=(0,0,0,0), format="PNG", extents=None): + blocks = [] + image_name_map = {} + profile = None + + for mat in obj['materials']: + filename = obj['materials'][mat] + + with rasterio.open(filename, 'r') as f: + profile = f.profile + image = f.read() + + image = np.flip(image, axis=1) + + changes = None + if extents and extents[mat]: + image, changes = crop_by_extents(image, extents[mat]) + + image_name_map[filename] = image + _, h, w = image.shape + + # using filename so we can pass back UV info without storing it in image + blocks.append(Block(w, h, data=(filename, mat, changes))) + + # sort by width, descending (widest first) + blocks.sort(key=lambda block: -block.w) + + packer = BlockPacker() + packer.fit(blocks) + + # output_image = Image.new("RGBA", (packer.root.w, packer.root.h)) + output_image = np.zeros((profile['count'], packer.root.h, packer.root.w), dtype=profile['dtype']) + + uv_changes = {} + for block in blocks: + fname, mat, changes = block.data + image = image_name_map[fname] + _, im_h, im_w = image.shape + + uv_changes[mat] = { + "offset": ( + # should be in [0, 1] range + (block.x - (changes[0] if changes else 0))/output_image.shape[2], + # UV origin is bottom left, PIL assumes top left! 
+ (block.y - (changes[1] if changes else 0))/output_image.shape[1] + ), + + "aspect": ( + ((1/changes[2]) if changes else 1) * (im_w/output_image.shape[2]), + ((1/changes[3]) if changes else 1) * (im_h/output_image.shape[1]) + ), + } + + output_image[:, block.y:block.y + im_h, block.x:block.x + im_w] = image + output_image = np.flip(output_image, axis=1) + + return output_image, uv_changes, profile diff --git a/opendm/objpacker/imagepacker/utils.py b/opendm/objpacker/imagepacker/utils.py new file mode 100644 index 00000000..072d27d4 --- /dev/null +++ b/opendm/objpacker/imagepacker/utils.py @@ -0,0 +1,55 @@ +#! /usr/bin/python + +# The MIT License (MIT) + +# Copyright (c) 2015 Luke Gaynor + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +class AABB(): + def __init__(self, min_x=None, min_y=None, max_x=None, max_y=None): + self.min_x = min_x + self.min_y = min_y + self.max_x = max_x + self.max_y = max_y + + self.to_tile = False # TODO: remove? 
+ + def add(self, x,y): + self.min_x = min(self.min_x, x) if self.min_x is not None else x + self.min_y = min(self.min_y, y) if self.min_y is not None else y + self.max_x = max(self.max_x, x) if self.max_x is not None else x + self.max_y = max(self.max_y, y) if self.max_y is not None else y + + def uv_wrap(self): + return (self.max_x - self.min_x, self.max_y - self.min_y) + + def tiling(self): + if self.min_x and self.max_x and self.min_y and self.max_y: + if self.min_x < 0 or self.min_y < 0 or self.max_x > 1 or self.max_y > 1: + return (self.max_x - self.min_x, self.max_y - self.min_y) + return None + + def __repr__(self): + return "({},{}) ({},{})".format( + self.min_x, + self.min_y, + self.max_x, + self.max_y + ) \ No newline at end of file diff --git a/opendm/objpacker/objpacker.py b/opendm/objpacker/objpacker.py new file mode 100644 index 00000000..533c310d --- /dev/null +++ b/opendm/objpacker/objpacker.py @@ -0,0 +1,228 @@ +import os +import rasterio +import warnings +import numpy as np +from .imagepacker.utils import AABB +from .imagepacker import pack + +warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning) + +def load_obj(obj_path, _info=print): + if not os.path.isfile(obj_path): + raise IOError("Cannot open %s" % obj_path) + + obj_base_path = os.path.dirname(os.path.abspath(obj_path)) + obj = { + 'filename': os.path.basename(obj_path), + 'root_dir': os.path.dirname(os.path.abspath(obj_path)), + 'mtl_filenames': [], + 'materials': {}, + } + uvs = [] + + faces = {} + current_material = "_" + + with open(obj_path) as f: + _info("Loading %s" % obj_path) + + for line in f: + if line.startswith("mtllib "): + # Materials + mtl_file = "".join(line.split()[1:]).strip() + obj['materials'].update(load_mtl(mtl_file, obj_base_path, _info=_info)) + obj['mtl_filenames'].append(mtl_file) + # elif line.startswith("v "): + # # Vertices + # vertices.append(list(map(float, line.split()[1:4]))) + elif line.startswith("vt "): + # UVs + 
uvs.append(list(map(float, line.split()[1:3]))) + # elif line.startswith("vn "): + # normals.append(list(map(float, line.split()[1:4]))) + elif line.startswith("usemtl "): + mtl_name = "".join(line.split()[1:]).strip() + if not mtl_name in obj['materials']: + raise Exception("%s material is missing" % mtl_name) + + current_material = mtl_name + elif line.startswith("f "): + if current_material not in faces: + faces[current_material] = [] + + a,b,c = line.split()[1:] + at = int(a.split("/")[1]) + bt = int(b.split("/")[1]) + ct = int(c.split("/")[1]) + faces[current_material].append((at - 1, bt - 1, ct - 1)) + + obj['uvs'] = np.array(uvs, dtype=np.float32) + obj['faces'] = faces + + return obj + +def load_mtl(mtl_file, obj_base_path, _info=print): + mtl_file = os.path.join(obj_base_path, mtl_file) + + if not os.path.isfile(mtl_file): + raise IOError("Cannot open %s" % mtl_file) + + mats = {} + current_mtl = "" + + with open(mtl_file) as f: + for line in f: + if line.startswith("newmtl "): + current_mtl = "".join(line.split()[1:]).strip() + elif line.startswith("map_Kd ") and current_mtl: + map_kd_filename = "".join(line.split()[1:]).strip() + map_kd = os.path.join(obj_base_path, map_kd_filename) + if not os.path.isfile(map_kd): + raise IOError("Cannot open %s" % map_kd) + + mats[current_mtl] = map_kd + return mats + + +def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=print): + with open(obj_file) as f: + obj_lines = f.readlines() + + out_lines = [] + uv_lines = [] + current_material = None + + printed_mtllib = False + printed_usemtl = False + + _info("Transforming UV coordinates") + + for line_idx, line in enumerate(obj_lines): + if line.startswith("mtllib"): + if not printed_mtllib: + out_lines.append("mtllib %s\n" % mtl_file) + printed_mtllib = True + else: + out_lines.append("# \n") + elif line.startswith("usemtl"): + if not printed_usemtl: + out_lines.append("usemtl %s\n" % single_mat) + printed_usemtl = True + else: + 
out_lines.append("# \n") + current_material = line[7:].strip() + elif line.startswith("vt"): + uv_lines.append(line_idx) + out_lines.append(line) + elif line.startswith("f"): + for v in line[2:].split(): + parts = v.split("/") + if len(parts) >= 2 and parts[1]: + uv_idx = int(parts[1]) - 1 # uv indexes start from 1 + uv_line_idx = uv_lines[uv_idx] + uv_line = obj_lines[uv_line_idx][3:] + uv = [float(uv.strip()) for uv in uv_line.split()] + + if current_material and current_material in uv_changes: + changes = uv_changes[current_material] + uv[0] = uv[0] * changes["aspect"][0] + changes["offset"][0] + uv[1] = uv[1] * changes["aspect"][1] + changes["offset"][1] + out_lines[uv_line_idx] = "vt %s %s\n" % (uv[0], uv[1]) + out_lines.append(line) + else: + out_lines.append(line) + + out_file = os.path.join(output_dir, os.path.basename(obj_file)) + _info("Writing %s" % out_file) + + with open(out_file, 'w') as f: + f.writelines(out_lines) + +def write_output_tex(img, profile, path, _info=print): + _, h, w = img.shape + profile['width'] = w + profile['height'] = h + + _info("Writing %s (%sx%s pixels)" % (path, w, h)) + with rasterio.open(path, 'w', **profile) as dst: + for b in range(1, img.shape[0] + 1): + dst.write(img[b - 1], b) + + sidecar = path + '.aux.xml' + if os.path.isfile(sidecar): + os.unlink(sidecar) + +def write_output_mtl(src_mtl, mat_file, dst_mtl): + with open(src_mtl, 'r') as src: + lines = src.readlines() + + out = [] + found_map = False + single_mat = None + + for l in lines: + if l.startswith("newmtl"): + single_mat = "".join(l.split()[1:]).strip() + out.append(l) + elif l.startswith("map_Kd"): + out.append("map_Kd %s\n" % mat_file) + break + else: + out.append(l) + + with open(dst_mtl, 'w') as dst: + dst.write("".join(out)) + + if single_mat is None: + raise Exception("Could not find material name in file") + + return single_mat + +def obj_pack(obj_file, output_dir=None, _info=print): + if not output_dir: + output_dir = 
os.path.join(os.path.dirname(os.path.abspath(obj_file)), "packed") + + obj = load_obj(obj_file, _info=_info) + if not obj['mtl_filenames']: + raise Exception("No MTL files found, nothing to do") + + if os.path.abspath(obj_file) == os.path.abspath(os.path.join(output_dir, os.path.basename(obj_file))): + raise Exception("This will overwrite %s. Choose a different output directory" % obj_file) + + if len(obj['mtl_filenames']) <= 1 and len(obj['materials']) <= 1: + raise Exception("File already has a single material, nothing to do") + + # Compute AABB for UVs + _info("Computing texture bounds") + extents = {} + for material in obj['materials']: + bounds = AABB() + + faces = obj['faces'][material] + for f in faces: + for uv_idx in f: + uv = obj['uvs'][uv_idx] + bounds.add(uv[0], uv[1]) + + extents[material] = bounds + + _info("Binary packing...") + output_image, uv_changes, profile = pack(obj, extents=extents) + mtl_file = obj['mtl_filenames'][0] + mat_file = os.path.basename(obj['materials'][next(iter(obj['materials']))]) + + if not os.path.isdir(output_dir): + os.mkdir(output_dir) + + write_output_tex(output_image, profile, os.path.join(output_dir, mat_file), _info=_info) + single_mat = write_output_mtl(os.path.join(obj['root_dir'], mtl_file), mat_file, os.path.join(output_dir, mtl_file)) + write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=_info) + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser(description="Packs textured .OBJ Wavefront files into a single materials") + parser.add_argument("obj", help="Path to the .OBJ file") + parser.add_argument("-o","--output-dir", help="Output directory") + args = parser.parse_args() + + obj_pack(args.obj, args.output_dir) \ No newline at end of file diff --git a/opendm/system.py b/opendm/system.py index adeeb3f5..1d0b0ab2 100644 --- a/opendm/system.py +++ b/opendm/system.py @@ -7,6 +7,7 @@ import subprocess import string import signal import io +import shutil from 
collections import deque from opendm import context @@ -154,3 +155,19 @@ def link_file(src, dst): os.link(src, dst) else: os.symlink(os.path.relpath(os.path.abspath(src), os.path.dirname(os.path.abspath(dst))), dst) + +def move_files(src, dst): + if not os.path.isdir(dst): + raise IOError("Not a directory: %s" % dst) + + for f in os.listdir(src): + if os.path.isfile(os.path.join(src, f)): + shutil.move(os.path.join(src, f), dst) + +def delete_files(folder): + if not os.path.isdir(folder): + return + + for f in os.listdir(folder): + if os.path.isfile(os.path.join(folder, f)): + os.unlink(os.path.join(folder, f)) \ No newline at end of file diff --git a/stages/mvstex.py b/stages/mvstex.py index 3f557627..46b6820a 100644 --- a/stages/mvstex.py +++ b/stages/mvstex.py @@ -7,6 +7,7 @@ from opendm import context from opendm import types from opendm.multispectral import get_primary_band_name from opendm.photo import find_largest_photo_dim +from opendm.objpacker import obj_pack class ODMMvsTexStage(types.ODM_Stage): def process(self, args, outputs): @@ -129,6 +130,26 @@ class ODMMvsTexStage(types.ODM_Stage): '{labelingFile} ' '{maxTextureSize} '.format(**kwargs)) + # Single material? 
+ if r['primary'] and args.texturing_single_material: + log.ODM_INFO("Packing to single material") + + packed_dir = os.path.join(r['out_dir'], 'packed') + if io.dir_exists(packed_dir): + log.ODM_INFO("Removing old packed directory {}".format(packed_dir)) + shutil.rmtree(packed_dir) + + try: + obj_pack(os.path.join(r['out_dir'], tree.odm_textured_model_obj), packed_dir, _info=log.ODM_INFO) + + # Move packed/* into texturing folder + system.delete_files(r['out_dir']) + system.move_files(packed_dir, r['out_dir']) + if os.path.isdir(packed_dir): + os.rmdir(packed_dir) + except Exception as e: + log.ODM_WARNING(str(e)) + # Backward compatibility: copy odm_textured_model_geo.mtl to odm_textured_model.mtl # for certain older WebODM clients which expect a odm_textured_model.mtl # to be present for visualization @@ -137,7 +158,7 @@ class ODMMvsTexStage(types.ODM_Stage): if io.file_exists(geo_mtl): nongeo_mtl = os.path.join(r['out_dir'], 'odm_textured_model.mtl') shutil.copy(geo_mtl, nongeo_mtl) - + progress += progress_per_run self.update_progress(progress) else: From 15131be6cc88daf277b4f63bf95730e70b1982e2 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 11 Jan 2023 13:49:07 -0500 Subject: [PATCH 075/102] Don't accidentally delete texture label file --- opendm/system.py | 5 +++-- stages/mvstex.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/opendm/system.py b/opendm/system.py index 1d0b0ab2..ade8ccd0 100644 --- a/opendm/system.py +++ b/opendm/system.py @@ -164,10 +164,11 @@ def move_files(src, dst): if os.path.isfile(os.path.join(src, f)): shutil.move(os.path.join(src, f), dst) -def delete_files(folder): +def delete_files(folder, exclude=()): if not os.path.isdir(folder): return for f in os.listdir(folder): if os.path.isfile(os.path.join(folder, f)): - os.unlink(os.path.join(folder, f)) \ No newline at end of file + if not exclude or not f.endswith(exclude): + os.unlink(os.path.join(folder, f)) \ No newline at end of file diff --git 
a/stages/mvstex.py b/stages/mvstex.py index 46b6820a..0e07151f 100644 --- a/stages/mvstex.py +++ b/stages/mvstex.py @@ -143,7 +143,7 @@ class ODMMvsTexStage(types.ODM_Stage): obj_pack(os.path.join(r['out_dir'], tree.odm_textured_model_obj), packed_dir, _info=log.ODM_INFO) # Move packed/* into texturing folder - system.delete_files(r['out_dir']) + system.delete_files(r['out_dir'], (".vec", )) system.move_files(packed_dir, r['out_dir']) if os.path.isdir(packed_dir): os.rmdir(packed_dir) From 2b2875bec6f4ba358530e293caec80d011b71ea9 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 11 Jan 2023 15:48:37 -0500 Subject: [PATCH 076/102] Fix width/height --- opendm/objpacker/objpacker.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/opendm/objpacker/objpacker.py b/opendm/objpacker/objpacker.py index 533c310d..75ddf863 100644 --- a/opendm/objpacker/objpacker.py +++ b/opendm/objpacker/objpacker.py @@ -2,8 +2,12 @@ import os import rasterio import warnings import numpy as np -from .imagepacker.utils import AABB -from .imagepacker import pack +try: + from .imagepacker.utils import AABB + from .imagepacker import pack +except ImportError: + from imagepacker.utils import AABB + from imagepacker import pack warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning) @@ -139,10 +143,13 @@ def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _i f.writelines(out_lines) def write_output_tex(img, profile, path, _info=print): - _, h, w = img.shape + _, w, h = img.shape profile['width'] = w profile['height'] = h + if 'tiled' in profile: + profile['tiled'] = False + _info("Writing %s (%sx%s pixels)" % (path, w, h)) with rasterio.open(path, 'w', **profile) as dst: for b in range(1, img.shape[0] + 1): From 9f17e8451abac64a283d87f0f443d1388cebdea2 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 11 Jan 2023 15:57:59 -0500 Subject: [PATCH 077/102] Only pack textures where needed --- 
stages/mvstex.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stages/mvstex.py b/stages/mvstex.py index 0e07151f..bd0d3acc 100644 --- a/stages/mvstex.py +++ b/stages/mvstex.py @@ -131,7 +131,7 @@ class ODMMvsTexStage(types.ODM_Stage): '{maxTextureSize} '.format(**kwargs)) # Single material? - if r['primary'] and args.texturing_single_material: + if args.texturing_single_material and r['primary'] and (not r['nadir'] or args.skip_3dmodel): log.ODM_INFO("Packing to single material") packed_dir = os.path.join(r['out_dir'], 'packed') From 64544f3c4154520004800674494b3acd6a3cff00 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 11 Jan 2023 16:00:15 -0500 Subject: [PATCH 078/102] Cleanup --- opendm/objpacker/imagepacker/imagepacker.py | 3 +-- opendm/objpacker/imagepacker/utils.py | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/opendm/objpacker/imagepacker/imagepacker.py b/opendm/objpacker/imagepacker/imagepacker.py index a0a589b2..9031d28d 100644 --- a/opendm/objpacker/imagepacker/imagepacker.py +++ b/opendm/objpacker/imagepacker/imagepacker.py @@ -24,7 +24,6 @@ import rasterio import numpy as np -#from PIL import Image, ImageDraw import math # Based off of the great writeup, demo and code at: @@ -204,7 +203,7 @@ def pack(obj, background=(0,0,0,0), format="PNG", extents=None): # using filename so we can pass back UV info without storing it in image blocks.append(Block(w, h, data=(filename, mat, changes))) - +use_3dmesh # sort by width, descending (widest first) blocks.sort(key=lambda block: -block.w) diff --git a/opendm/objpacker/imagepacker/utils.py b/opendm/objpacker/imagepacker/utils.py index 072d27d4..8124648c 100644 --- a/opendm/objpacker/imagepacker/utils.py +++ b/opendm/objpacker/imagepacker/utils.py @@ -29,8 +29,6 @@ class AABB(): self.max_x = max_x self.max_y = max_y - self.to_tile = False # TODO: remove? 
- def add(self, x,y): self.min_x = min(self.min_x, x) if self.min_x is not None else x self.min_y = min(self.min_y, y) if self.min_y is not None else y From 280ba2c50b1d04d2512ec93f6fc5bce8792b4247 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 11 Jan 2023 16:01:48 -0500 Subject: [PATCH 079/102] Remove typo --- opendm/objpacker/imagepacker/imagepacker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opendm/objpacker/imagepacker/imagepacker.py b/opendm/objpacker/imagepacker/imagepacker.py index 9031d28d..2090d73f 100644 --- a/opendm/objpacker/imagepacker/imagepacker.py +++ b/opendm/objpacker/imagepacker/imagepacker.py @@ -203,7 +203,7 @@ def pack(obj, background=(0,0,0,0), format="PNG", extents=None): # using filename so we can pass back UV info without storing it in image blocks.append(Block(w, h, data=(filename, mat, changes))) -use_3dmesh + # sort by width, descending (widest first) blocks.sort(key=lambda block: -block.w) From 3680b54d6429929d4ce2aede48cc24f1bde6bb5c Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 12 Jan 2023 11:41:16 -0500 Subject: [PATCH 080/102] Tweak file glob pattern in Windows build --- .github/workflows/publish-windows.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-windows.yml b/.github/workflows/publish-windows.yml index b9737f55..7c0cacd0 100644 --- a/.github/workflows/publish-windows.yml +++ b/.github/workflows/publish-windows.yml @@ -53,7 +53,7 @@ jobs: if: startsWith(github.ref, 'refs/tags/') with: repo_token: ${{ secrets.GITHUB_TOKEN }} - file: dist\*.exe + file: dist/*.exe file_glob: true tag: ${{ github.ref }} overwrite: true From 7c4da76280738d8727685253c1adff6dda7f9660 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 12 Jan 2023 12:45:20 -0500 Subject: [PATCH 081/102] Include python3.8 folder in setup --- innosetup.iss | 1 + 1 file changed, 1 insertion(+) diff --git a/innosetup.iss b/innosetup.iss index 945e1fee..ed877007 100644 --- 
a/innosetup.iss +++ b/innosetup.iss @@ -43,6 +43,7 @@ Source: "licenses\*"; DestDir: "{app}\licenses"; Flags: ignoreversion recursesub Source: "opendm\*"; DestDir: "{app}\opendm"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs Source: "stages\*"; DestDir: "{app}\stages"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs Source: "SuperBuild\install\bin\*"; DestDir: "{app}\SuperBuild\install\bin"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs +Source: "SuperBuild\install\lib\python3.8\*"; DestDir: "{app}\SuperBuild\install\lib\python3.8\*"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs Source: "venv\*"; DestDir: "{app}\venv"; Excludes: "__pycache__,pyvenv.cfg"; Flags: ignoreversion recursesubdirs createallsubdirs Source: "python38\*"; DestDir: "{app}\python38"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs Source: "console.bat"; DestDir: "{app}"; Flags: ignoreversion From 60125010f29273af4cce58f6f985538936a4b0b9 Mon Sep 17 00:00:00 2001 From: Luca Di Leo Date: Fri, 13 Jan 2023 12:08:16 +0100 Subject: [PATCH 082/102] Removed pymediainfo, added output limit, improved srt parser and fixed gps exif writing --- opendm/video/parameters.py | 31 ++-- opendm/video/srtparser.py | 55 +++++--- opendm/video/video2dataset.py | 257 ++++++++++++++++------------------ requirements.txt | 1 - 4 files changed, 165 insertions(+), 179 deletions(-) diff --git a/opendm/video/parameters.py b/opendm/video/parameters.py index 13a9dc16..f5cbc557 100644 --- a/opendm/video/parameters.py +++ b/opendm/video/parameters.py @@ -19,6 +19,7 @@ class Parameters: timezone = None frame_format = None stats_file = None + limit = None def __init__(self, args): @@ -27,13 +28,12 @@ class Parameters: # "start" -> start frame index") # "end" -> end frame index") # "output-resolution" -> Override output resolution (ex. 
640x480)") - # "blur-percentage" -> discard the lowest X percent of frames based on blur score (allowed values from 0.0 to 1.0)") - # "blur-threshold" -> blur measures that fall below this value will be considered 'blurry' (to be used in exclusion with --blur-percentage)") + # "blur-threshold" -> blur measures that fall below this value will be considered 'blurry'. Good value is 300 # "distance-threshold" -> distance measures that fall below this value will be considered 'similar'") # "black-ratio-threshold" -> Set the threshold for considering a frame 'black'. Express the minimum value for the ratio: nb_black_pixels / nb_pixels. Default value is 0.98") # "pixel-black-threshold" -> Set the threshold for considering a pixel 'black'. The threshold expresses the maximum pixel luminance value for which a pixel is considered 'black'. Good value is 0.30 (30%)") # "use-srt" -> Use SRT files for extracting metadata (same name as video file with .srt extension)") - # "timezone" -> UTC timezone offset (ex. -5 for EST). 
Default to local timezone") + # "limit" -> Maximum number of output frames # "frame-format" -> frame format (jpg, png, tiff, etc.)") # "stats-file" -> Save statistics to csv file") @@ -50,7 +50,8 @@ class Parameters: self.start = args["start"] if args["start"] else 0 self.end = args["end"] if args["end"] else None - self.blur_percentage = args["blur_percentage"] if args["blur_percentage"] else None + self.limit = args["limit"] if args["limit"] else None + self.blur_threshold = args["blur_threshold"] if args["blur_threshold"] else None self.distance_threshold = args["distance_threshold"] if args["distance_threshold"] else None @@ -60,8 +61,6 @@ class Parameters: self.use_srt = args["use_srt"] - self.utc_offset = datetime.timedelta(hours=int(args["timezone"])) if args["timezone"] != "local" else datetime.datetime.now() - datetime.datetime.utcnow() - self.frame_format = args["frame_format"] self.output_resolution = tuple(map(int, args["output_resolution"].split("x"))) if args["output_resolution"] else None @@ -88,29 +87,19 @@ class Parameters: print("Start frame index must be greater than 0") return False + if args["limit"] and args["limit"] < 0: + print("Limit must be greater than 0") + return False + if args["end"]: if args["end"] < 0: print("End frame index must be greater than 0") return False - if args["end"] < args["start"]: + if args["start"] is not None and args["end"] < args["start"]: print("End frame index must be greater than start frame index") return False - if args["timezone"] and args["timezone"] != "local": - try: - val = int(args["timezone"]) - if val < -12 or val > 14: - print("Timezone must be in the range -12 to 14") - return False - except ValueError: - print("Timezone must be a valid integer") - return False - - if args["blur_percentage"] and (args["blur_percentage"] < 0 or args["blur_percentage"] > 1): - print("Blur percentage must be in the range 0.0 to 1.0") - return False - if args["blur_threshold"] and args["blur_threshold"] < 0: print("Blur 
threshold must be greater than 0") return False diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index a69a11d6..1ab8c0cf 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -1,24 +1,23 @@ -from datetime import datetime, timedelta +from datetime import datetime import re class SrtFileParser: - def __init__(self, filename, utc_offset): + def __init__(self, filename): self.filename = filename self.data = [] - self.utc_offset = utc_offset - def get_entry(self, timestamp): + def get_entry(self, timestamp: datetime): if not self.data: self.parse() # check min and max - if timestamp < self.min or timestamp > self.max: + if timestamp < self.data[0]["start"] or timestamp > self.data[len(self.data) - 1]["end"]: return None for entry in self.data: - if entry["timestamp"] <= timestamp: + if entry["start"] <= timestamp and entry["end"] >= timestamp: return entry - return self.data[len(self.data) - 1] + return None def parse(self): @@ -30,17 +29,32 @@ class SrtFileParser: # 2023-01-06 18:56:48,380,821 # [iso : 3200] [shutter : 1/60.0] [fnum : 280] [ev : 0] [ct : 3925] [color_md : default] [focal_len : 240] [latitude: 0.000000] [longitude: 0.000000] [altitude: 0.000000] - self.min = datetime.max - self.max = datetime.min - with open(self.filename, 'r') as f: + srtcnt = None + difftime = None + timestamp = None + iso = None + shutter = None + fnum = None + ev = None + ct = None + color_md = None + focal_len = None + latitude = None + longitude = None + altitude = None + start = None + end = None + for line in f: # Check if line is empty if not line.strip(): if srtcnt is not None: self.data.append({ + "start": start, + "end": end, "srtcnt": srtcnt, "difftime": difftime, "timestamp": timestamp, @@ -55,9 +69,6 @@ class SrtFileParser: "longitude": longitude, "altitude": altitude }) - self.min = min(self.min, timestamp) - # account for the difftime milliseconds to get the actual max - self.max = max(self.max, timestamp + 
timedelta(milliseconds=difftime)) srtcnt = None difftime = None @@ -72,11 +83,21 @@ class SrtFileParser: latitude = None longitude = None altitude = None + start = None + end = None + continue # Remove the html font tag line = re.sub('<[^<]+?>', '', line) + # Search this "00:00:00,000 --> 00:00:00,016" + match = re.search("(\d{2}:\d{2}:\d{2},\d{3}) --> (\d{2}:\d{2}:\d{2},\d{3})", line) + if match: + start = datetime.strptime(match.group(1), "%H:%M:%S,%f") + end = datetime.strptime(match.group(2), "%H:%M:%S,%f") + + match = re.search("SrtCnt : (\d+)", line) if match: srtcnt = int(match.group(1)) @@ -89,8 +110,6 @@ class SrtFileParser: if match: timestamp = match.group(1) timestamp = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S,%f") - # The timestamp is in local time, so we need to subtract the UTC offset - timestamp = timestamp - self.utc_offset match = re.search("iso : (\d+)", line) if match: @@ -134,9 +153,3 @@ class SrtFileParser: if match: altitude = float(match.group(1)) altitude = altitude if altitude != 0 else None - - self.data.reverse() - - self.max = self.max.replace(microsecond=0) - self.min = self.min.replace(microsecond=0) - diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 759494e2..102098fa 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -2,31 +2,21 @@ from opendm.video.parameters import Parameters import datetime from fractions import Fraction import io -from math import floor +from math import ceil, floor import time import cv2 import os -import pymediainfo import collections from PIL import Image from checkers import BlackFrameChecker, PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker from srtparser import SrtFileParser import piexif - class Video2Dataset: def __init__(self, parameters : Parameters): self.parameters = parameters - # We prioritize blur threshold over blur percentage. 
- if parameters.blur_threshold is not None: - self.blur_checker = ThresholdBlurChecker(parameters.blur_threshold) - else: - if parameters.blur_percentage is not None: - self.blur_checker = PercentageBlurChecker(parameters.blur_percentage) - else: - self.blur_checker = None - + self.blur_checker = ThresholdBlurChecker(parameters.blur_threshold) if parameters.blur_threshold is not None else None self.similarity_checker = SimilarityChecker(parameters.distance_threshold) if parameters.distance_threshold is not None else None self.black_checker = BlackFrameChecker(parameters.black_ratio_threshold, parameters.pixel_black_threshold) if parameters.black_ratio_threshold is not None or parameters.pixel_black_threshold is not None else None @@ -52,26 +42,26 @@ class Video2Dataset: print("Processing video: {}".format(input_file)) # get video info - video_info = self.GetVideoInfo(input_file) + video_info = get_video_info(input_file) print(video_info) - utc_offset = self.parameters.utc_offset if self.parameters.utc_offset is not None else video_info.utc_offset - if self.parameters.use_srt: - srt_file = os.path.splitext(input_file)[0] + ".srt" - if os.path.exists(srt_file): - print("Loading SRT file: {}".format(srt_file)) - srt_parser = SrtFileParser(srt_file, utc_offset) - srt_parser.parse() - else: - srt_file = os.path.splitext(input_file)[0] + ".SRT" + + name = os.path.splitext(input_file)[0] + + srt_files = [name + ".srt", name + ".SRT"] + srt_parser = None + + for srt_file in srt_files: if os.path.exists(srt_file): print("Loading SRT file: {}".format(srt_file)) - srt_parser = SrtFileParser(srt_file, utc_offset) - srt_parser.parse() - else: - print("SRT file not found: {}".format(srt_file)) - srt_parser = None + try: + srt_parser = SrtFileParser(srt_file) + srt_parser.parse() + break + except Exception as e: + print("Error parsing SRT file: {}".format(e)) + srt_parser = None else: srt_parser = None @@ -98,8 +88,6 @@ class Video2Dataset: print("Error opening video stream or 
file") return - frames_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - if (self.parameters.start is not None): cap.set(cv2.CAP_PROP_POS_FRAMES, self.parameters.start) self.frame_index = self.parameters.start @@ -107,7 +95,9 @@ class Video2Dataset: else: start_frame = 0 - frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else frames_count - start_frame + frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else video_info.total_frames - start_frame + + output_file_paths = [] while (cap.isOpened()): ret, frame = cap.read() @@ -127,11 +117,19 @@ class Video2Dataset: if stats is not None and self.parameters.stats_file is not None: self.WriteStats(input_file, stats) + # Add element to array + if stats is not None and "written" in stats.keys(): + output_file_paths.append(stats["path"]) + cap.release() if self.f is not None: self.f.close() + if self.parameters.limit is not None and self.global_idx >= self.parameters.limit: + print("Limit of {} frames reached, trimming dataset to {} frames".format(self.parameters.limit, self.global_idx)) + limit_files(output_file_paths, self.parameters.limit) + end = time.time() print("Total processing time: {:.2f}s".format(end - start)) @@ -173,8 +171,9 @@ class Video2Dataset: self.frame_index += 1 return res - self.SaveFrame(frame, video_info, srt_parser) + path = self.SaveFrame(frame, video_info, srt_parser) res["written"] = True + res["path"] = path self.frame_index += 1 self.global_idx += 1 @@ -192,40 +191,38 @@ class Video2Dataset: _, buf = cv2.imencode('.' 
+ self.parameters.frame_format, frame) - start_time_utc = video_info.start_time_utc if video_info.start_time_utc is not None \ - else srt_parser.data[0].timestamp if srt_parser is not None \ - else datetime.datetime.now() + #start_time_utc = video_info.start_time_utc if video_info.start_time_utc is not None \ + # else srt_parser.data[0].timestamp if srt_parser is not None \ + # else datetime.datetime.now() - elapsed_time_utc = start_time_utc + datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate)) - elapsed_time = elapsed_time_utc + srt_parser.utc_offset + #elapsed_time_utc = start_time_utc + datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate)) + #elapsed_time = elapsed_time_utc + srt_parser.utc_offset if srt_parser is not None else elapsed_time_utc + + delta = datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate)) + # convert to datetime + elapsed_time = datetime.datetime(1900, 1, 1) + delta img = Image.open(io.BytesIO(buf)) - # Exif dict contains the following keys: '0th', 'Exif', 'GPS', '1st', 'thumbnail' + entry = srt_parser.get_entry(elapsed_time) if srt_parser is not None else None + elapsed_time_str = (elapsed_time + (datetime.datetime.now() - datetime.datetime(1900, 1, 1))).strftime("%Y:%m:%d %H:%M:%S.%f") + # Exif dict contains the following keys: '0th', 'Exif', 'GPS', '1st', 'thumbnail' # Set the EXIF metadata exif_dict = { "0th": { piexif.ImageIFD.Software: "ODM", - piexif.ImageIFD.DateTime: elapsed_time.strftime('%Y:%m:%d %H:%M:%S'), + piexif.ImageIFD.DateTime: elapsed_time_str, piexif.ImageIFD.XResolution: (frame.shape[1], 1), piexif.ImageIFD.YResolution: (frame.shape[0], 1), }, "Exif": { - piexif.ExifIFD.DateTimeOriginal: elapsed_time.strftime('%Y:%m:%d %H:%M:%S'), - piexif.ExifIFD.DateTimeDigitized: elapsed_time.strftime('%Y:%m:%d %H:%M:%S'), + piexif.ExifIFD.DateTimeOriginal: elapsed_time_str, + piexif.ExifIFD.DateTimeDigitized: elapsed_time_str, piexif.ExifIFD.PixelXDimension: (frame.shape[1], 1), 
piexif.ExifIFD.PixelYDimension: (frame.shape[0], 1), }} - if video_info.orientation is not None: - exif_dict["0th"][piexif.ImageIFD.Orientation] = video_info.orientation - - if video_info.model is not None: - exif_dict["Exif"][piexif.ImageIFD.Model] = video_info.model - - entry = srt_parser.get_entry(elapsed_time_utc) if srt_parser is not None else None - if entry is not None: segs = entry["shutter"].split("/") exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (int(float(segs[0])), int(float(segs[1]))) @@ -233,24 +230,14 @@ class Video2Dataset: exif_dict["Exif"][piexif.ExifIFD.FNumber] = (entry["fnum"], 1) exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = (entry["iso"], 1) - exif_dict["GPS"] = { - piexif.GPSIFD.GPSMapDatum: "WGS-84", - piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d %H:%M:%S') - } - - if (entry["latitude"] is not None): - exif_dict["GPS"][piexif.GPSIFD.GPSLatitude] = FloatToRational(entry["latitude"]) - - if (entry["longitude"] is not None): - exif_dict["GPS"][piexif.GPSIFD.GPSLongitude] = FloatToRational(entry["longitude"]) - - if (entry["altitude"] is not None): - exif_dict["GPS"][piexif.GPSIFD.GPSAltitude] = FloatToRational(entry["altitude"]) + exif_dict["GPS"] = get_gps_location(elapsed_time, entry["latitude"], entry["longitude"], entry["altitude"]) exif_bytes = piexif.dump(exif_dict) img.save(path, exif=exif_bytes) + return path + def WriteStats(self, input_file, stats): self.f.write("{};{};{};{};{};{};{};{};{};{}\n".format( @@ -266,84 +253,82 @@ class Video2Dataset: stats["written"] if "written" in stats else "").replace(".", ",")) - def GetVideoInfo(self, input_file): +def get_video_info(input_file): - video = cv2.VideoCapture(input_file) + video = cv2.VideoCapture(input_file) - total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) - frame_rate = video.get(cv2.CAP_PROP_FPS) - start_time_utc, utc_offset, orientation, model = self.GetVideoMetadata(input_file) + total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + frame_rate 
= video.get(cv2.CAP_PROP_FPS) - video.release() + video.release() - return collections.namedtuple("VideoInfo", ["total_frames", "frame_rate", "start_time_utc", "utc_offset", "orientation", "model"])(total_frames, frame_rate, start_time_utc, utc_offset, orientation, model) + return collections.namedtuple("VideoInfo", ["total_frames", "frame_rate"])(total_frames, frame_rate) - - def GetVideoMetadata(self, input_file): - - try: - - metadata = pymediainfo.MediaInfo.parse(input_file).to_data() - - start_time_utc = None - orientation = 1 - performer = None - utc_offset = None - - if metadata is not None and 'tracks' in metadata: - # Check if it is safe to access the first element of the tracks list - if len(metadata['tracks']) > 0: - - start_time_utc = metadata['tracks'][0].get('encoded_date') or \ - metadata['tracks'][0].get('tagged_date') - - start_time_utc = datetime.datetime.strptime(start_time_utc, '%Z %Y-%m-%d %H:%M:%S') if start_time_utc is not None else None - - file_creation_date_utc = metadata['tracks'][0].get('file_creation_date') - file_creation_date_utc = datetime.datetime.strptime(file_creation_date_utc, '%Z %Y-%m-%d %H:%M:%S.%f') if file_creation_date_utc is not None else None - - file_creation_date = metadata['tracks'][0].get('file_creation_date__local') - file_creation_date = datetime.datetime.strptime(file_creation_date, '%Y-%m-%d %H:%M:%S.%f') if file_creation_date is not None else None - - if file_creation_date_utc is not None and file_creation_date is not None: - utc_offset = file_creation_date - file_creation_date_utc - - performer = metadata['tracks'][0].get('performer') - - # Check if it is safe to access the second element of the tracks list - if len(metadata['tracks']) > 1: - - orientation = metadata['tracks'][1].get('rotation') - - if orientation is not None: - orientation = int(float(orientation)) - - # The 8 EXIF orientation values are numbered 1 to 8. - # 1 = 0°: the correct orientation, no adjustment is required. 
- # 2 = 0°, mirrored: image has been flipped back-to-front. - # 3 = 180°: image is upside down. - # 4 = 180°, mirrored: image has been flipped back-to-front and is upside down. - # 5 = 90°: image has been flipped back-to-front and is on its side. - # 6 = 90°, mirrored: image is on its side. - # 7 = 270°: image has been flipped back-to-front and is on its far side. - # 8 = 270°, mirrored: image is on its far side. - - if orientation == 0: - orientation = 1 - elif orientation == 90: - orientation = 8 - elif orientation == 180: - orientation = 3 - elif orientation == 270: - orientation = 6 - - return start_time_utc, utc_offset, orientation, performer - - except Exception as e: - - return start_time_utc, utc_offset, orientation, performer - - -def FloatToRational(f): +def float_to_rational(f): f = Fraction(f).limit_denominator() - return (f.numerator, f.denominator) \ No newline at end of file + return (f.numerator, f.denominator) + +def limit_files(paths, limit): + + cnt = len(paths) + num_to_delete = cnt - limit + + if num_to_delete <= 0: + return + + skip = floor(num_to_delete / limit) if num_to_delete > cnt else ceil(cnt / num_to_delete) + + to_keep = [] + + for i in range(len(paths)): + if i % skip == 0: + os.remove(paths[i]) + else: + to_keep.append(paths[i]) + + limit_files(to_keep, limit) + +def to_deg(value, loc): + """convert decimal coordinates into degrees, munutes and seconds tuple + Keyword arguments: value is float gps-value, loc is direction list ["S", "N"] or ["W", "E"] + return: tuple like (25, 13, 48.343 ,'N') + """ + if value < 0: + loc_value = loc[0] + elif value > 0: + loc_value = loc[1] + else: + loc_value = "" + abs_value = abs(value) + deg = int(abs_value) + t1 = (abs_value-deg)*60 + min = int(t1) + sec = round((t1 - min)* 60, 5) + return (deg, min, sec, loc_value) + +def get_gps_location(elapsed_time, lat, lng, altitude): + + lat_deg = to_deg(lat, ["S", "N"]) + lng_deg = to_deg(lng, ["W", "E"]) + + exiv_lat = (float_to_rational(lat_deg[0]), 
float_to_rational(lat_deg[1]), float_to_rational(lat_deg[2])) + exiv_lng = (float_to_rational(lng_deg[0]), float_to_rational(lng_deg[1]), float_to_rational(lng_deg[2])) + + gps_ifd = { + piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0), + piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d %H:%M:%S.%f') + } + + if altitude is not None: + gps_ifd[piexif.GPSIFD.GPSAltitudeRef] = 0 + gps_ifd[piexif.GPSIFD.GPSAltitude] = float_to_rational(round(altitude)) + + if lat is not None: + gps_ifd[piexif.GPSIFD.GPSLatitudeRef] = lat_deg[3] + gps_ifd[piexif.GPSIFD.GPSLatitude] = exiv_lat + + if lng is not None: + gps_ifd[piexif.GPSIFD.GPSLongitudeRef] = lng_deg[3] + gps_ifd[piexif.GPSIFD.GPSLongitude] = exiv_lng + + return gps_ifd \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index c016a6e7..e8f31569 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,5 +36,4 @@ trimesh==3.17.1 pandas==1.5.2 # for video support -pymediainfo==6.0.1 piexif==1.1.3 \ No newline at end of file From 305ec0730f8bf18dd12a0a7ee5e4e86b22cd0f3c Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Mon, 16 Jan 2023 21:12:14 -0500 Subject: [PATCH 083/102] Fix weakly_canonical: The device is not ready --- win32env.bat | 1 + 1 file changed, 1 insertion(+) diff --git a/win32env.bat b/win32env.bat index d00e0192..36c35d17 100644 --- a/win32env.bat +++ b/win32env.bat @@ -14,6 +14,7 @@ set GDAL_DATA=%GDALBASE%\data\gdal set GDAL_DRIVER_PATH=%GDALBASE%\gdalplugins set OSFMBASE=%ODMBASE%SuperBuild\install\bin\opensfm\bin set SBBIN=%ODMBASE%SuperBuild\install\bin +set PDAL_DRIVER_PATH=%ODMBASE%SuperBuild\install\bin set PATH=%GDALBASE%;%SBBIN%;%OSFMBASE% set PROJ_LIB=%GDALBASE%\data\proj From 3296aa7a51a93c9c6ac793ee6e8f3d3d99ae8d06 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 11:44:02 -0500 Subject: [PATCH 084/102] Some cleanup --- opendm/video/parameters.py | 109 +++++----------------------------- opendm/video/video2dataset.py | 7 ++- requirements.txt | 4 
+- 3 files changed, 19 insertions(+), 101 deletions(-) diff --git a/opendm/video/parameters.py b/opendm/video/parameters.py index f5cbc557..7119960c 100644 --- a/opendm/video/parameters.py +++ b/opendm/video/parameters.py @@ -5,22 +5,6 @@ import os class Parameters: - input = None - output = None - start = None - end = None - output_resolution = None - blur_percentage = None - blur_threshold = None - distance_threshold = None - black_ratio_threshold = None - pixel_black_threshold = None - use_srt = None - timezone = None - frame_format = None - stats_file = None - limit = None - def __init__(self, args): # "input" -> path to input video file(s), use ',' to separate multiple files") @@ -37,94 +21,29 @@ class Parameters: # "frame-format" -> frame format (jpg, png, tiff, etc.)") # "stats-file" -> Save statistics to csv file") - if (not self.ValidateParameters(args)): - print("Invalid parameters") - exit() - if not os.path.exists(args["output"]): os.makedirs(args["output"]) self.input = args["input"].split(",") self.output = args["output"] + self.start = args.get("start", 0) + self.end = args.get("end", None) + self.limit = args.get("limit", None) + self.blur_threshold = args.get("blur_threshold", None) + self.distance_threshold = args.get("distance_threshold", None) + self.black_ratio_threshold = args.get("black_ratio_threshold", None) + self.pixel_black_threshold = args.get("pixel_black_threshold", None) + self.use_srt = "use_srt" in args + self.frame_format = args.get("frame_format", "jpg") + + # TODO: use max dimension + self.output_resolution = None + #self.output_resolution = tuple(map(int, args["output_resolution"].split("x"))) if args["output_resolution"] else None - self.start = args["start"] if args["start"] else 0 - self.end = args["end"] if args["end"] else None - - self.limit = args["limit"] if args["limit"] else None - - self.blur_threshold = args["blur_threshold"] if args["blur_threshold"] else None - - self.distance_threshold = 
args["distance_threshold"] if args["distance_threshold"] else None - - self.black_ratio_threshold = args["black_ratio_threshold"] if args["black_ratio_threshold"] else None - self.pixel_black_threshold = args["pixel_black_threshold"] if args["pixel_black_threshold"] else None - - self.use_srt = args["use_srt"] - - self.frame_format = args["frame_format"] - self.output_resolution = tuple(map(int, args["output_resolution"].split("x"))) if args["output_resolution"] else None - - self.stats_file = args["stats_file"] if args["stats_file"] else None + self.stats_file = args.get("stats_file", None) # We will resize the image to this size before processing self.internal_width = 800 self.internal_height = 600 - # Validate parameters - def ValidateParameters(self, args): - - # input can be a list of files comma separated, check for each file - files = args["input"].split(",") - for file in files: - if not os.path.exists(file): - print("Input file does not exist: " + file) - return False - - if not os.path.exists(args["output"]): - os.makedirs(args["output"]) - - if args["start"] and args["start"] < 0: - print("Start frame index must be greater than 0") - return False - - if args["limit"] and args["limit"] < 0: - print("Limit must be greater than 0") - return False - - if args["end"]: - if args["end"] < 0: - print("End frame index must be greater than 0") - return False - - if args["start"] is not None and args["end"] < args["start"]: - print("End frame index must be greater than start frame index") - return False - - if args["blur_threshold"] and args["blur_threshold"] < 0: - print("Blur threshold must be greater than 0") - return False - - if args["distance_threshold"] and args["distance_threshold"] < 0: - print("Distance threshold must be greater than 0") - return False - - if args["black_ratio_threshold"] and (args["black_ratio_threshold"] < 0 or args["black_ratio_threshold"] > 1): - print("Black ratio threshold must be in the range 0.0 to 1.0") - return False - - if 
args["pixel_black_threshold"] and (args["pixel_black_threshold"] < 0 or args["pixel_black_threshold"] > 1): - print("Pixel black threshold must be in the range 0.0 to 1.0") - return False - - if args["output_resolution"]: - segs = args["output_resolution"].split("x") - if (len(segs) != 2): - print("Output resolution must be in the format WxH") - return False - - if (int(segs[0]) <= 0 or int(segs[1]) <= 0): - print("Output resolution must be in the format WxH") - return False - - return True diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 102098fa..8a476123 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -1,4 +1,3 @@ -from opendm.video.parameters import Parameters import datetime from fractions import Fraction import io @@ -8,9 +7,11 @@ import cv2 import os import collections from PIL import Image -from checkers import BlackFrameChecker, PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker -from srtparser import SrtFileParser import piexif +from opendm.video.srtparser import SrtFileParser +from opendm.video.parameters import Parameters +from opendm.video.checkers import BlackFrameChecker, PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker + class Video2Dataset: def __init__(self, parameters : Parameters): diff --git a/requirements.txt b/requirements.txt index e8f31569..481dc910 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,6 +34,4 @@ onnxruntime==1.12.1 codem==0.24.0 trimesh==3.17.1 pandas==1.5.2 - -# for video support -piexif==1.1.3 \ No newline at end of file +piexif==1.1.3 From 270da0b757fdab2b2bde0da605904b4f5339c142 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 12:09:52 -0500 Subject: [PATCH 085/102] Use max_dimension instead of output_resolution --- opendm/video/parameters.py | 10 +++++----- opendm/video/video2dataset.py | 12 ++++++++---- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/opendm/video/parameters.py 
b/opendm/video/parameters.py index 7119960c..9fc7c97f 100644 --- a/opendm/video/parameters.py +++ b/opendm/video/parameters.py @@ -24,7 +24,10 @@ class Parameters: if not os.path.exists(args["output"]): os.makedirs(args["output"]) - self.input = args["input"].split(",") + self.input = args["input"] + if isinstance(self.input, str): + self.input = [self.input] + self.output = args["output"] self.start = args.get("start", 0) self.end = args.get("end", None) @@ -35,10 +38,7 @@ class Parameters: self.pixel_black_threshold = args.get("pixel_black_threshold", None) self.use_srt = "use_srt" in args self.frame_format = args.get("frame_format", "jpg") - - # TODO: use max dimension - self.output_resolution = None - #self.output_resolution = tuple(map(int, args["output_resolution"].split("x"))) if args["output_resolution"] else None + self.max_dimension = args.get("max_dimension", None) self.stats_file = args.get("stats_file", None) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 8a476123..97e8da39 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -128,7 +128,7 @@ class Video2Dataset: self.f.close() if self.parameters.limit is not None and self.global_idx >= self.parameters.limit: - print("Limit of {} frames reached, trimming dataset to {} frames".format(self.parameters.limit, self.global_idx)) + print("Limit of {} frames reached, trimming dataset".format(self.parameters.limit)) limit_files(output_file_paths, self.parameters.limit) end = time.time() @@ -183,9 +183,13 @@ class Video2Dataset: return res def SaveFrame(self, frame, video_info, srt_parser: SrtFileParser): - - if (self.parameters.output_resolution is not None): - frame = cv2.resize(frame, self.parameters.output_resolution) + max_dim = self.parameters.max_dimension + if max_dim is not None: + h, w, _ = frame.shape + if max_dim < w or max_dim < h: + m = max(w, h) + factor = max_dim / m + frame = cv2.resize(frame, (int(ceil(w * factor)), int(ceil(h * 
factor)))) path = os.path.join(self.parameters.output, "frame_{}_{}.{}".format(self.global_idx, self.frame_index, self.parameters.frame_format)) From 0ede11b788b9046f034cc8cc4876aeeb2f661a48 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 13:10:35 -0500 Subject: [PATCH 086/102] Use log, tune down verbose output --- opendm/video/video2dataset.py | 52 ++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 97e8da39..f9f98db3 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -8,6 +8,7 @@ import os import collections from PIL import Image import piexif +from opendm import log from opendm.video.srtparser import SrtFileParser from opendm.video.parameters import Parameters from opendm.video.checkers import BlackFrameChecker, PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker @@ -40,11 +41,11 @@ class Video2Dataset: # get file name file_name = os.path.basename(input_file) - print("Processing video: {}".format(input_file)) + log.ODM_INFO("Processing video: {}".format(input_file)) # get video info video_info = get_video_info(input_file) - print(video_info) + log.ODM_INFO(video_info) if self.parameters.use_srt: @@ -55,38 +56,38 @@ class Video2Dataset: for srt_file in srt_files: if os.path.exists(srt_file): - print("Loading SRT file: {}".format(srt_file)) + log.ODM_INFO("Loading SRT file: {}".format(srt_file)) try: srt_parser = SrtFileParser(srt_file) srt_parser.parse() break except Exception as e: - print("Error parsing SRT file: {}".format(e)) + log.ODM_INFO("Error parsing SRT file: {}".format(e)) srt_parser = None else: srt_parser = None if (self.blur_checker is not None and self.blur_checker.NeedPreProcess()): - print("Preprocessing for blur checker...") + log.ODM_INFO("Preprocessing for blur checker...") self.blur_checker.PreProcess(input_file, self.parameters.start, self.parameters.end, 
self.parameters.internal_width, self.parameters.internal_height) end = time.time() - print("Preprocessing time: {:.2f}s".format(end - start)) - print("Calculated threshold is {}".format(self.blur_checker.threshold)) + log.ODM_INFO("Preprocessing time: {:.2f}s".format(end - start)) + log.ODM_INFO("Calculated threshold is {}".format(self.blur_checker.threshold)) if (self.black_checker is not None and self.black_checker.NeedPreProcess()): start2 = time.time() - print("Preprocessing for black checker...") + log.ODM_INFO("Preprocessing for black checker...") self.black_checker.PreProcess(input_file, self.parameters.start, self.parameters.end, self.parameters.internal_width, self.parameters.internal_height) end = time.time() - print("Preprocessing time: {:.2f}s".format(end - start2)) - print("Calculated luminance_range_size is {}".format(self.black_checker.luminance_range_size)) - print("Calculated luminance_minimum_value is {}".format(self.black_checker.luminance_minimum_value)) - print("Calculated absolute_threshold is {}".format(self.black_checker.absolute_threshold)) + log.ODM_INFO("Preprocessing time: {:.2f}s".format(end - start2)) + log.ODM_INFO("Calculated luminance_range_size is {}".format(self.black_checker.luminance_range_size)) + log.ODM_INFO("Calculated luminance_minimum_value is {}".format(self.black_checker.luminance_minimum_value)) + log.ODM_INFO("Calculated absolute_threshold is {}".format(self.black_checker.absolute_threshold)) # open video file cap = cv2.VideoCapture(input_file) if (not cap.isOpened()): - print("Error opening video stream or file") + log.ODM_INFO("Error opening video stream or file") return if (self.parameters.start is not None): @@ -99,7 +100,7 @@ class Video2Dataset: frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else video_info.total_frames - start_frame output_file_paths = [] - + progress = 0 while (cap.isOpened()): ret, frame = cap.read() @@ -110,8 +111,10 @@ class Video2Dataset: 
break # Calculate progress percentage + prev_progress = progress progress = floor((self.frame_index - start_frame + 1) / frames_to_process * 100) - print("[{}][{:3d}%] Processing frame {}/{}: ".format(file_name, progress, self.frame_index - start_frame + 1, frames_to_process), end="") + if progress != prev_progress: + print("[{}][{:3d}%] Processing frame {}/{}: ".format(file_name, progress, self.frame_index - start_frame + 1, frames_to_process), end="\r") stats = self.ProcessFrame(frame, video_info, srt_parser) @@ -128,11 +131,12 @@ class Video2Dataset: self.f.close() if self.parameters.limit is not None and self.global_idx >= self.parameters.limit: - print("Limit of {} frames reached, trimming dataset".format(self.parameters.limit)) - limit_files(output_file_paths, self.parameters.limit) + log.ODM_INFO("Limit of {} frames reached, trimming dataset".format(self.parameters.limit)) + output_file_paths = limit_files(output_file_paths, self.parameters.limit) end = time.time() - print("Total processing time: {:.2f}s".format(end - start)) + log.ODM_INFO("Total processing time: {:.2f}s".format(end - start)) + return output_file_paths def ProcessFrame(self, frame, video_info, srt_parser): @@ -148,7 +152,7 @@ class Video2Dataset: res["is_blurry"] = is_blurry if is_blurry: - print ("blurry, skipping") + # print ("blurry, skipping") self.frame_index += 1 return res @@ -157,7 +161,7 @@ class Video2Dataset: res["is_black"] = is_black if is_black: - print ("black, skipping") + # print ("black, skipping") self.frame_index += 1 return res @@ -168,7 +172,7 @@ class Video2Dataset: res["last_frame_index"] = last_frame_index if is_similar: - print ("similar to {}, skipping".format(self.similarity_checker.last_image_id)) + # print ("similar to {}, skipping".format(self.similarity_checker.last_image_id)) self.frame_index += 1 return res @@ -178,8 +182,6 @@ class Video2Dataset: self.frame_index += 1 self.global_idx += 1 - print ("saved") - return res def SaveFrame(self, frame, 
video_info, srt_parser: SrtFileParser): @@ -279,7 +281,7 @@ def limit_files(paths, limit): num_to_delete = cnt - limit if num_to_delete <= 0: - return + return paths skip = floor(num_to_delete / limit) if num_to_delete > cnt else ceil(cnt / num_to_delete) @@ -291,7 +293,7 @@ def limit_files(paths, limit): else: to_keep.append(paths[i]) - limit_files(to_keep, limit) + return limit_files(to_keep, limit) def to_deg(value, loc): """convert decimal coordinates into degrees, munutes and seconds tuple From f0e716f28b293bf29028a1d763095088415925f8 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 13:48:54 -0500 Subject: [PATCH 087/102] Fixed start date --- opendm/video/srtparser.py | 7 ++++++- opendm/video/video2dataset.py | 3 ++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index 1ab8c0cf..d234cf22 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -21,7 +21,7 @@ class SrtFileParser: def parse(self): - # The SRT file is formatted this way: + # SRT metadata is not standarized, we support the following formats: # 1 # 00:00:00,000 --> 00:00:00,016 @@ -29,6 +29,11 @@ class SrtFileParser: # 2023-01-06 18:56:48,380,821 # [iso : 3200] [shutter : 1/60.0] [fnum : 280] [ev : 0] [ct : 3925] [color_md : default] [focal_len : 240] [latitude: 0.000000] [longitude: 0.000000] [altitude: 0.000000] + # DJI Mavic Mini + # 1 + # 00:00:00,000 --> 00:00:01,000 + # F/2.8, SS 206.14, ISO 150, EV 0, GPS (-82.6669, 27.7716, 10), D 2.80m, H 0.00m, H.S 0.00m/s, V.S 0.00m/s + with open(self.filename, 'r') as f: srtcnt = None diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index f9f98db3..3af17b25 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -29,6 +29,7 @@ class Video2Dataset: def ProcessVideo(self): start = time.time() + self.date_now = datetime.datetime.now() if (self.parameters.stats_file is not None): self.f = 
open(self.parameters.stats_file, "w") @@ -212,7 +213,7 @@ class Video2Dataset: img = Image.open(io.BytesIO(buf)) entry = srt_parser.get_entry(elapsed_time) if srt_parser is not None else None - elapsed_time_str = (elapsed_time + (datetime.datetime.now() - datetime.datetime(1900, 1, 1))).strftime("%Y:%m:%d %H:%M:%S.%f") + elapsed_time_str = (elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1))).strftime("%Y:%m:%d %H:%M:%S.%f") # Exif dict contains the following keys: '0th', 'Exif', 'GPS', '1st', 'thumbnail' # Set the EXIF metadata From 0118fcb5a6d175114be781f1b1f0c23139cfe665 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 14:13:06 -0500 Subject: [PATCH 088/102] Fix EXIF DateTimeOriginal population --- opendm/video/srtparser.py | 1 + opendm/video/video2dataset.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index d234cf22..81839daf 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -23,6 +23,7 @@ class SrtFileParser: # SRT metadata is not standarized, we support the following formats: + # DJI mavic air 2 # 1 # 00:00:00,000 --> 00:00:00,016 # SrtCnt : 1, DiffTime : 16ms diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 3af17b25..cb2193a6 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -213,7 +213,7 @@ class Video2Dataset: img = Image.open(io.BytesIO(buf)) entry = srt_parser.get_entry(elapsed_time) if srt_parser is not None else None - elapsed_time_str = (elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1))).strftime("%Y:%m:%d %H:%M:%S.%f") + elapsed_time_str = (elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1))).strftime("%Y:%m:%d %H:%M:%S") # Exif dict contains the following keys: '0th', 'Exif', 'GPS', '1st', 'thumbnail' # Set the EXIF metadata From a46f56731d2603531b03e2955681ca1339a6b8ce Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 
2023 14:43:01 -0500 Subject: [PATCH 089/102] Add Exif SubSecTime, no lng without lat --- opendm/video/video2dataset.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index cb2193a6..753c3fcf 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -213,7 +213,10 @@ class Video2Dataset: img = Image.open(io.BytesIO(buf)) entry = srt_parser.get_entry(elapsed_time) if srt_parser is not None else None - elapsed_time_str = (elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1))).strftime("%Y:%m:%d %H:%M:%S") + + exif_time = (elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1))) + elapsed_time_str = exif_time.strftime("%Y:%m:%d %H:%M:%S") + subsec_time_str = exif_time.strftime("%f") # Exif dict contains the following keys: '0th', 'Exif', 'GPS', '1st', 'thumbnail' # Set the EXIF metadata @@ -227,6 +230,7 @@ class Video2Dataset: "Exif": { piexif.ExifIFD.DateTimeOriginal: elapsed_time_str, piexif.ExifIFD.DateTimeDigitized: elapsed_time_str, + piexif.ExifIFD.SubSecTime: subsec_time_str, piexif.ExifIFD.PixelXDimension: (frame.shape[1], 1), piexif.ExifIFD.PixelYDimension: (frame.shape[0], 1), }} @@ -324,18 +328,16 @@ def get_gps_location(elapsed_time, lat, lng, altitude): gps_ifd = { piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0), - piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d %H:%M:%S.%f') + piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d %H:%M:%S') } if altitude is not None: gps_ifd[piexif.GPSIFD.GPSAltitudeRef] = 0 gps_ifd[piexif.GPSIFD.GPSAltitude] = float_to_rational(round(altitude)) - if lat is not None: + if lat is not None and lng is not None: gps_ifd[piexif.GPSIFD.GPSLatitudeRef] = lat_deg[3] gps_ifd[piexif.GPSIFD.GPSLatitude] = exiv_lat - - if lng is not None: gps_ifd[piexif.GPSIFD.GPSLongitudeRef] = lng_deg[3] gps_ifd[piexif.GPSIFD.GPSLongitude] = exiv_lng From 1106635d09daea35c7d7408753f0178577225101 Mon Sep 17 
00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 14:44:02 -0500 Subject: [PATCH 090/102] No altitude without lat/lng --- opendm/video/video2dataset.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 753c3fcf..0001fe4b 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -331,14 +331,13 @@ def get_gps_location(elapsed_time, lat, lng, altitude): piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d %H:%M:%S') } - if altitude is not None: - gps_ifd[piexif.GPSIFD.GPSAltitudeRef] = 0 - gps_ifd[piexif.GPSIFD.GPSAltitude] = float_to_rational(round(altitude)) - if lat is not None and lng is not None: gps_ifd[piexif.GPSIFD.GPSLatitudeRef] = lat_deg[3] gps_ifd[piexif.GPSIFD.GPSLatitude] = exiv_lat gps_ifd[piexif.GPSIFD.GPSLongitudeRef] = lng_deg[3] gps_ifd[piexif.GPSIFD.GPSLongitude] = exiv_lng + if altitude is not None: + gps_ifd[piexif.GPSIFD.GPSAltitudeRef] = 0 + gps_ifd[piexif.GPSIFD.GPSAltitude] = float_to_rational(round(altitude)) return gps_ifd \ No newline at end of file From 0da6067796640d317b9bea9a2e23b46b24921b6b Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 15:48:29 -0500 Subject: [PATCH 091/102] Mavic Mini SRT support, cleanup, fixes --- opendm/video/srtparser.py | 117 ++++++++++++++-------------------- opendm/video/video2dataset.py | 37 ++++------- 2 files changed, 60 insertions(+), 94 deletions(-) diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index 81839daf..21c2bd41 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -1,5 +1,26 @@ from datetime import datetime import re + + +def match_single(regexes, line, dtype=int): + if isinstance(regexes, str): + regexes = [(regexes, dtype)] + + for i in range(len(regexes)): + if isinstance(regexes[i], str): + regexes[i] = (regexes[i], dtype) + + try: + for r, transform in regexes: + match = re.search(r, line) + if match: 
+ res = match.group(1) + return transform(res) + except Exception as e: + log.ODM_WARNING("Cannot parse SRT line \"%s\": %s", (line, str(e))) + + return None + class SrtFileParser: def __init__(self, filename): self.filename = filename @@ -37,15 +58,9 @@ class SrtFileParser: with open(self.filename, 'r') as f: - srtcnt = None - difftime = None - timestamp = None iso = None shutter = None fnum = None - ev = None - ct = None - color_md = None focal_len = None latitude = None longitude = None @@ -57,34 +72,23 @@ class SrtFileParser: # Check if line is empty if not line.strip(): - if srtcnt is not None: + if start is not None: self.data.append({ "start": start, "end": end, - "srtcnt": srtcnt, - "difftime": difftime, - "timestamp": timestamp, "iso": iso, "shutter": shutter, "fnum": fnum, - "ev": ev, - "ct": ct, - "color_md": color_md, "focal_len": focal_len, "latitude": latitude, "longitude": longitude, "altitude": altitude }) - srtcnt = None - difftime = None - timestamp = None iso = None shutter = None fnum = None - ev = None ct = None - color_md = None focal_len = None latitude = None longitude = None @@ -94,7 +98,7 @@ class SrtFileParser: continue - # Remove the html font tag + # Remove html tags line = re.sub('<[^<]+?>', '', line) # Search this "00:00:00,000 --> 00:00:00,016" @@ -103,59 +107,34 @@ class SrtFileParser: start = datetime.strptime(match.group(1), "%H:%M:%S,%f") end = datetime.strptime(match.group(2), "%H:%M:%S,%f") + iso = match_single([ + "iso : (\d+)", + "ISO (\d+)" + ], line) - match = re.search("SrtCnt : (\d+)", line) - if match: - srtcnt = int(match.group(1)) + shutter = match_single([ + "shutter : \d+/(\d+\.?\d*)" + "SS (\d+\.?\d*)" + ], line) - match = re.search("DiffTime : (\d+)ms", line) - if match: - difftime = int(match.group(1)) + fnum = match_single([ + ("fnum : (\d+)", lambda v: float(v)/100.0), + ("F/([\d\.]+)", float), + ], line) - match = re.search("(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})", line) - if match: - timestamp = 
match.group(1) - timestamp = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S,%f") + focal_len = match_single("focal_len : (\d+)", line) - match = re.search("iso : (\d+)", line) - if match: - iso = int(match.group(1)) + latitude = match_single([ + ("latitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None), + ("GPS \(([\d\.\-]+),? [\d\.\-]+,? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None), + ], line) - match = re.search("shutter : (\d+/\d+.\d+)", line) - if match: - shutter = match.group(1) + longitude = match_single([ + ("longitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None), + ("GPS \(([\d\.\-]+),? [\d\.\-]+,? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None), + ], line) - match = re.search("fnum : (\d+)", line) - if match: - fnum = int(match.group(1)) - - match = re.search("ev : (\d+)", line) - if match: - ev = int(match.group(1)) - - match = re.search("ct : (\d+)", line) - if match: - ct = int(match.group(1)) - - match = re.search("color_md : (\w+)", line) - if match: - color_md = match.group(1) - - match = re.search("focal_len : (\d+)", line) - if match: - focal_len = int(match.group(1)) - - match = re.search("latitude: (\d+.\d+)", line) - if match: - latitude = float(match.group(1)) - latitude = latitude if latitude != 0 else None - - match = re.search("longitude: (\d+.\d+)", line) - if match: - longitude = float(match.group(1)) - longitude = longitude if longitude != 0 else None - - match = re.search("altitude: (\d+.\d+)", line) - if match: - altitude = float(match.group(1)) - altitude = altitude if altitude != 0 else None + altitude = match_single([ + ("altitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None), + ("GPS \([\d\.\-]+,? [\d\.\-]+,? 
([\d\.\-]+)\)", lambda v: float(v) if v != 0 else None), + ], line) \ No newline at end of file diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 0001fe4b..0504c2dc 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -68,16 +68,9 @@ class Video2Dataset: else: srt_parser = None - if (self.blur_checker is not None and self.blur_checker.NeedPreProcess()): - log.ODM_INFO("Preprocessing for blur checker...") - self.blur_checker.PreProcess(input_file, self.parameters.start, self.parameters.end, self.parameters.internal_width, self.parameters.internal_height) - end = time.time() - log.ODM_INFO("Preprocessing time: {:.2f}s".format(end - start)) - log.ODM_INFO("Calculated threshold is {}".format(self.blur_checker.threshold)) - if (self.black_checker is not None and self.black_checker.NeedPreProcess()): start2 = time.time() - log.ODM_INFO("Preprocessing for black checker...") + log.ODM_INFO("Preprocessing for black frame checker... this might take a bit") self.black_checker.PreProcess(input_file, self.parameters.start, self.parameters.end, self.parameters.internal_width, self.parameters.internal_height) end = time.time() log.ODM_INFO("Preprocessing time: {:.2f}s".format(end - start2)) @@ -199,15 +192,7 @@ class Video2Dataset: _, buf = cv2.imencode('.' 
+ self.parameters.frame_format, frame) - #start_time_utc = video_info.start_time_utc if video_info.start_time_utc is not None \ - # else srt_parser.data[0].timestamp if srt_parser is not None \ - # else datetime.datetime.now() - - #elapsed_time_utc = start_time_utc + datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate)) - #elapsed_time = elapsed_time_utc + srt_parser.utc_offset if srt_parser is not None else elapsed_time_utc - delta = datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate)) - # convert to datetime elapsed_time = datetime.datetime(1900, 1, 1) + delta img = Image.open(io.BytesIO(buf)) @@ -236,14 +221,16 @@ class Video2Dataset: }} if entry is not None: - segs = entry["shutter"].split("/") - exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (int(float(segs[0])), int(float(segs[1]))) - exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (entry["focal_len"], 1) - exif_dict["Exif"][piexif.ExifIFD.FNumber] = (entry["fnum"], 1) - exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = (entry["iso"], 1) - - exif_dict["GPS"] = get_gps_location(elapsed_time, entry["latitude"], entry["longitude"], entry["altitude"]) - + if entry["shutter"] is not None: + exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (1, int(entry["shutter"])) + if entry["focal_len"] is not None: + exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (entry["focal_len"], 1) + if entry["fnum"] is not None: + exif_dict["Exif"][piexif.ExifIFD.FNumber] = float_to_rational(entry["fnum"]) + if entry["iso"] is not None: + exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = (entry["iso"], 1) + if entry["latitude"] is not None and entry["longitude"] is not None: + exif_dict["GPS"] = get_gps_location(elapsed_time, entry["latitude"], entry["longitude"], entry.get("altitude")) exif_bytes = piexif.dump(exif_dict) img.save(path, exif=exif_bytes) @@ -328,7 +315,7 @@ def get_gps_location(elapsed_time, lat, lng, altitude): gps_ifd = { piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0), - 
piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d %H:%M:%S') + piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d') } if lat is not None and lng is not None: From d1997b6270fcfd0440493a78c293a0c18455a69c Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 16:13:27 -0500 Subject: [PATCH 092/102] Fix lat/lon parsing for mavic mini, frame named after input file --- opendm/video/srtparser.py | 2 +- opendm/video/video2dataset.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index 21c2bd41..c8c1a8ec 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -126,7 +126,7 @@ class SrtFileParser: latitude = match_single([ ("latitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None), - ("GPS \(([\d\.\-]+),? [\d\.\-]+,? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None), + ("GPS \([\d\.\-]+,? ([\d\.\-]+),? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None), ], line) longitude = match_single([ diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 0504c2dc..cef4fc68 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -188,7 +188,7 @@ class Video2Dataset: frame = cv2.resize(frame, (int(ceil(w * factor)), int(ceil(h * factor)))) path = os.path.join(self.parameters.output, - "frame_{}_{}.{}".format(self.global_idx, self.frame_index, self.parameters.frame_format)) + "{}_{}_{}.{}".format(video_info.basename, self.global_idx, self.frame_index, self.parameters.frame_format)) _, buf = cv2.imencode('.' 
+ self.parameters.frame_format, frame) @@ -255,13 +255,14 @@ class Video2Dataset: def get_video_info(input_file): video = cv2.VideoCapture(input_file) + basename = os.path.splitext(os.path.basename(input_file))[0] total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) frame_rate = video.get(cv2.CAP_PROP_FPS) video.release() - return collections.namedtuple("VideoInfo", ["total_frames", "frame_rate"])(total_frames, frame_rate) + return collections.namedtuple("VideoInfo", ["total_frames", "frame_rate", "basename"])(total_frames, frame_rate, basename) def float_to_rational(f): f = Fraction(f).limit_denominator() From 17121d8d80259915e3a2760f3d75d19962b30550 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 18 Jan 2023 16:34:06 -0500 Subject: [PATCH 093/102] Fix EXIF rationals --- opendm/video/video2dataset.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index cef4fc68..67d84b7a 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -27,9 +27,8 @@ class Video2Dataset: def ProcessVideo(self): - + self.date_now = None start = time.time() - self.date_now = datetime.datetime.now() if (self.parameters.stats_file is not None): self.f = open(self.parameters.stats_file, "w") @@ -39,7 +38,6 @@ class Video2Dataset: # foreach input file for input_file in self.parameters.input: - # get file name file_name = os.path.basename(input_file) log.ODM_INFO("Processing video: {}".format(input_file)) @@ -48,6 +46,17 @@ class Video2Dataset: video_info = get_video_info(input_file) log.ODM_INFO(video_info) + # Set pseudo start time + if self.date_now is None: + try: + self.date_now = datetime.datetime.fromtimestamp(os.path.getmtime(input_file)) + except: + self.date_now = datetime.datetime.now() + else: + self.date_now += datetime.timedelta(seconds=video_info.total_frames * video_info.frame_rate) + + log.ODM_INFO("Use pseudo start time: %s" % 
self.date_now) + if self.parameters.use_srt: name = os.path.splitext(input_file)[0] @@ -216,19 +225,19 @@ class Video2Dataset: piexif.ExifIFD.DateTimeOriginal: elapsed_time_str, piexif.ExifIFD.DateTimeDigitized: elapsed_time_str, piexif.ExifIFD.SubSecTime: subsec_time_str, - piexif.ExifIFD.PixelXDimension: (frame.shape[1], 1), - piexif.ExifIFD.PixelYDimension: (frame.shape[0], 1), + piexif.ExifIFD.PixelXDimension: frame.shape[1], + piexif.ExifIFD.PixelYDimension: frame.shape[0], }} if entry is not None: if entry["shutter"] is not None: exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (1, int(entry["shutter"])) if entry["focal_len"] is not None: - exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (entry["focal_len"], 1) + exif_dict["Exif"][piexif.ExifIFD.FocalLength] = entry["focal_len"] if entry["fnum"] is not None: exif_dict["Exif"][piexif.ExifIFD.FNumber] = float_to_rational(entry["fnum"]) if entry["iso"] is not None: - exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = (entry["iso"], 1) + exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = entry["iso"] if entry["latitude"] is not None and entry["longitude"] is not None: exif_dict["GPS"] = get_gps_location(elapsed_time, entry["latitude"], entry["longitude"], entry.get("altitude")) From 8b0b63d357eb858bfab1fc8c429d2be77c6709de Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 19 Jan 2023 10:27:18 -0500 Subject: [PATCH 094/102] Fix date now, limit with multiple input files --- opendm/video/video2dataset.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 67d84b7a..439d5c01 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -36,6 +36,8 @@ class Video2Dataset: self.global_idx = 0 + output_file_paths = [] + # foreach input file for input_file in self.parameters.input: # get file name @@ -53,7 +55,7 @@ class Video2Dataset: except: self.date_now = datetime.datetime.now() else: - self.date_now += 
datetime.timedelta(seconds=video_info.total_frames * video_info.frame_rate) + self.date_now += datetime.timedelta(seconds=video_info.total_frames / video_info.frame_rate) log.ODM_INFO("Use pseudo start time: %s" % self.date_now) @@ -102,7 +104,6 @@ class Video2Dataset: frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else video_info.total_frames - start_frame - output_file_paths = [] progress = 0 while (cap.isOpened()): ret, frame = cap.read() From 7e9a1c38861851ebe9b153b9be3bb12c4c3a9193 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 19 Jan 2023 13:51:58 -0500 Subject: [PATCH 095/102] Add GPS interpolation, cleanup, fixes --- opendm/config.py | 14 +++++++ opendm/location.py | 14 ++++++- opendm/video/checkers.py | 65 +-------------------------------- opendm/video/parameters.py | 7 +--- opendm/video/srtparser.py | 69 ++++++++++++++++++++++++++++++++++- opendm/video/video2dataset.py | 24 ++++++++---- 6 files changed, 115 insertions(+), 78 deletions(-) diff --git a/opendm/config.py b/opendm/config.py index e9de9e5b..140d2715 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -653,6 +653,20 @@ def config(argv=None, parser=None): version='ODM {0}'.format(__version__), help='Displays version number and exits. ') + parser.add_argument('--video-limit', + type=int, + action=StoreValue, + default=500, + metavar='', + help='Maximum number of frames to extract from video files for processing. Default: %(default)s') + + parser.add_argument('--video-resolution', + type=int, + action=StoreValue, + default=4000, + metavar='', + help='The maximum output resolution of extracted video frames in pixels. 
Default: %(default)s') + parser.add_argument('--split', type=int, action=StoreValue, diff --git a/opendm/location.py b/opendm/location.py index 46e9b550..64c5c07a 100644 --- a/opendm/location.py +++ b/opendm/location.py @@ -119,7 +119,6 @@ def parse_srs_header(header): :param header (str) line :return Proj object """ - log.ODM_INFO('Parsing SRS header: %s' % header) header = header.strip() ref = header.split(' ') @@ -155,4 +154,15 @@ def parse_srs_header(header): 'Modify your input and try again.' % header) raise RuntimeError(e) - return srs \ No newline at end of file + return srs + +def utm_srs_from_ll(lon, lat): + utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(lon, lat) + return parse_srs_header("WGS84 UTM %s%s" % (utm_zone, hemisphere)) + +def utm_transformers_from_ll(lon, lat): + source_srs = CRS.from_epsg(4326) + target_srs = utm_srs_from_ll(lon, lat) + ll_to_utm = transformer(source_srs, target_srs) + utm_to_ll = transformer(target_srs, source_srs) + return ll_to_utm, utm_to_ll \ No newline at end of file diff --git a/opendm/video/checkers.py b/opendm/video/checkers.py index 1eadec40..1036689f 100644 --- a/opendm/video/checkers.py +++ b/opendm/video/checkers.py @@ -1,64 +1,6 @@ import cv2 import numpy as np -class PercentageBlurChecker: - def __init__(self, percentage): - self.percentage = percentage - self.cache = None - - def NeedPreProcess(self): - return True - - def PreProcess(self, video_path, start_frame, end_frame, width=800, height=600): - - # Open video file - cap = cv2.VideoCapture(video_path) - if (cap.isOpened() == False): - print("Error opening video stream or file") - return - - if start_frame is not None: - cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) - - tmp = [] - - frame_index = start_frame if start_frame is not None else 0 - while (cap.isOpened() and (end_frame is None or frame_index <= end_frame)): - - ret, frame = cap.read() - - if not ret: - break - - frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - frame_bw = 
cv2.resize(frame_bw, (width, height)) - var = cv2.Laplacian(frame_bw, cv2.CV_64F).var() - - tmp.append(var) - - frame_index += 1 - - cap.release() - - # Calculate threshold - self.threshold = np.percentile(tmp, self.percentage * 100) - start_frame = start_frame if start_frame is not None else 0 - - # Fill cache with frame blur scores and indices - self.cache = {i + start_frame: v for i, v in enumerate(tmp)} - - return - - def IsBlur(self, image_bw, id): - - if self.cache is None: - return 0, True - - if id not in self.cache: - return 0, True - - return self.cache[id], self.cache[id] < self.threshold - class ThresholdBlurChecker: def __init__(self, threshold): self.threshold = threshold @@ -66,7 +8,7 @@ class ThresholdBlurChecker: def NeedPreProcess(self): return False - def PreProcess(self, video_path, start_frame, end_frame, width=800, height=600): + def PreProcess(self, video_path, start_frame, end_frame): return def IsBlur(self, image_bw, id): @@ -134,7 +76,7 @@ class BlackFrameChecker: def NeedPreProcess(self): return True - def PreProcess(self, video_path, start_frame, end_frame, width=800, height=600): + def PreProcess(self, video_path, start_frame, end_frame): # Open video file cap = cv2.VideoCapture(video_path) @@ -157,9 +99,6 @@ class BlackFrameChecker: if not ret: break - # Resize frame to specified width and height - frame = cv2.resize(frame, (width, height)) - # Convert frame to grayscale gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray_frame_min = gray_frame.min() diff --git a/opendm/video/parameters.py b/opendm/video/parameters.py index 9fc7c97f..56194f12 100644 --- a/opendm/video/parameters.py +++ b/opendm/video/parameters.py @@ -11,7 +11,7 @@ class Parameters: # "output" -> path to output directory") # "start" -> start frame index") # "end" -> end frame index") - # "output-resolution" -> Override output resolution (ex. 640x480)") + # "output-resolution" -> Override output resolution (ex. 
1024)") # "blur-threshold" -> blur measures that fall below this value will be considered 'blurry'. Good value is 300 # "distance-threshold" -> distance measures that fall below this value will be considered 'similar'") # "black-ratio-threshold" -> Set the threshold for considering a frame 'black'. Express the minimum value for the ratio: nb_black_pixels / nb_pixels. Default value is 0.98") @@ -43,7 +43,4 @@ class Parameters: self.stats_file = args.get("stats_file", None) # We will resize the image to this size before processing - self.internal_width = 800 - self.internal_height = 600 - - + self.internal_resolution = 800 diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index c8c1a8ec..81ac67e2 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -1,4 +1,6 @@ from datetime import datetime +from opendm import location, log +from pyproj import Transformer, CRS import re @@ -25,6 +27,9 @@ class SrtFileParser: def __init__(self, filename): self.filename = filename self.data = [] + self.gps_data = [] + self.ll_to_utm = None + self.utm_to_ll = None def get_entry(self, timestamp: datetime): if not self.data: @@ -40,6 +45,68 @@ class SrtFileParser: return None + def get_gps(self, timestamp): + if not self.data: + self.parse() + + # Initialize on first call + prev_coords = None + + if not self.gps_data: + for d in self.data: + lat, lon, alt = d.get('latitude'), d.get('longitude'), d.get('altitude') + tm = d.get('start') + + if lat is not None and lon is not None: + if self.ll_to_utm is None: + self.ll_to_utm, self.utm_to_ll = location.utm_transformers_from_ll(lon, lat) + + coords = self.ll_to_utm.TransformPoint(lon, lat, alt) + + # First or new (in X/Y only) + add = (not len(self.gps_data)) or (coords[0], coords[1]) != (self.gps_data[-1][1][0], self.gps_data[-1][1][1]) + if add: + self.gps_data.append((tm, coords)) + + # No data available + if not len(self.gps_data) or self.gps_data[0][0] > timestamp: + return None + + # Interpolate + 
start = None + for i in range(len(self.gps_data)): + tm, coords = self.gps_data[i] + + # Perfect match + if timestamp == tm: + return self.utm_to_ll.TransformPoint(*coords) + + elif tm > timestamp: + end = i + start = i - 1 + if start < 0: + return None + + gd_s = self.gps_data[start] + gd_e = self.gps_data[end] + sx, sy, sz = gd_s[1] + ex, ey, ez = gd_e[1] + + dt = (gd_e[0] - gd_s[0]).total_seconds() + if dt >= 10: + return None + + dx = (ex - sx) / dt + dy = (ey - sy) / dt + dz = (ez - sz) / dt + t = (timestamp - gd_s[0]).total_seconds() + + return self.utm_to_ll.TransformPoint( + sx + dx * t, + sy + dy * t, + sz + dz * t + ) + def parse(self): # SRT metadata is not standarized, we support the following formats: @@ -102,7 +169,7 @@ class SrtFileParser: line = re.sub('<[^<]+?>', '', line) # Search this "00:00:00,000 --> 00:00:00,016" - match = re.search("(\d{2}:\d{2}:\d{2},\d{3}) --> (\d{2}:\d{2}:\d{2},\d{3})", line) + match = re.search("(\d{2}:\d{2}:\d{2},\d+) --> (\d{2}:\d{2}:\d{2},\d+)", line) if match: start = datetime.strptime(match.group(1), "%H:%M:%S,%f") end = datetime.strptime(match.group(2), "%H:%M:%S,%f") diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 439d5c01..16c5d773 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -11,7 +11,7 @@ import piexif from opendm import log from opendm.video.srtparser import SrtFileParser from opendm.video.parameters import Parameters -from opendm.video.checkers import BlackFrameChecker, PercentageBlurChecker, SimilarityChecker, ThresholdBlurChecker +from opendm.video.checkers import BlackFrameChecker, SimilarityChecker, ThresholdBlurChecker class Video2Dataset: @@ -82,7 +82,7 @@ class Video2Dataset: if (self.black_checker is not None and self.black_checker.NeedPreProcess()): start2 = time.time() log.ODM_INFO("Preprocessing for black frame checker... 
this might take a bit") - self.black_checker.PreProcess(input_file, self.parameters.start, self.parameters.end, self.parameters.internal_width, self.parameters.internal_height) + self.black_checker.PreProcess(input_file, self.parameters.start, self.parameters.end) end = time.time() log.ODM_INFO("Preprocessing time: {:.2f}s".format(end - start2)) log.ODM_INFO("Calculated luminance_range_size is {}".format(self.black_checker.luminance_range_size)) @@ -148,8 +148,14 @@ class Video2Dataset: res = {"frame_index": self.frame_index, "global_idx": self.global_idx} frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - frame_bw = cv2.resize(frame_bw, (self.parameters.internal_width, self.parameters.internal_height)) + h, w = frame_bw.shape + resolution = self.parameters.internal_resolution + if resolution < w or resolution < h: + m = max(w, h) + factor = resolution / m + frame_bw = cv2.resize(frame_bw, (int(ceil(w * factor)), int(ceil(h * factor)))) + if (self.blur_checker is not None): blur_score, is_blurry = self.blur_checker.IsBlur(frame_bw, self.frame_index) res["blur_score"] = blur_score @@ -206,9 +212,12 @@ class Video2Dataset: elapsed_time = datetime.datetime(1900, 1, 1) + delta img = Image.open(io.BytesIO(buf)) - - entry = srt_parser.get_entry(elapsed_time) if srt_parser is not None else None + entry = gps_coords = None + if srt_parser is not None: + entry = srt_parser.get_entry(elapsed_time) + gps_coords = srt_parser.get_gps(elapsed_time) + exif_time = (elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1))) elapsed_time_str = exif_time.strftime("%Y:%m:%d %H:%M:%S") subsec_time_str = exif_time.strftime("%f") @@ -239,8 +248,9 @@ class Video2Dataset: exif_dict["Exif"][piexif.ExifIFD.FNumber] = float_to_rational(entry["fnum"]) if entry["iso"] is not None: exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = entry["iso"] - if entry["latitude"] is not None and entry["longitude"] is not None: - exif_dict["GPS"] = get_gps_location(elapsed_time, entry["latitude"], 
entry["longitude"], entry.get("altitude")) + + if gps_coords is not None: + exif_dict["GPS"] = get_gps_location(elapsed_time, gps_coords[1], gps_coords[0], gps_coords[2]) exif_bytes = piexif.dump(exif_dict) img.save(path, exif=exif_bytes) From 993274292041ae4e0af748214c5a9011dc98cc5d Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 19 Jan 2023 14:17:11 -0500 Subject: [PATCH 096/102] Remove import --- opendm/video/srtparser.py | 1 - 1 file changed, 1 deletion(-) diff --git a/opendm/video/srtparser.py b/opendm/video/srtparser.py index 81ac67e2..7e52db43 100644 --- a/opendm/video/srtparser.py +++ b/opendm/video/srtparser.py @@ -1,6 +1,5 @@ from datetime import datetime from opendm import location, log -from pyproj import Transformer, CRS import re From 64b687a3a6b4bf7dea903e1716ed722e13ee50ac Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Thu, 19 Jan 2023 15:28:42 -0500 Subject: [PATCH 097/102] Fix limit_files --- opendm/video/video2dataset.py | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 16c5d773..0f07fba3 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -7,6 +7,7 @@ import cv2 import os import collections from PIL import Image +import numpy as np import piexif from opendm import log from opendm.video.srtparser import SrtFileParser @@ -154,8 +155,8 @@ class Video2Dataset: if resolution < w or resolution < h: m = max(w, h) factor = resolution / m - frame_bw = cv2.resize(frame_bw, (int(ceil(w * factor)), int(ceil(h * factor)))) - + frame_bw = cv2.resize(frame_bw, (int(ceil(w * factor)), int(ceil(h * factor))), interpolation=cv2.INTER_NEAREST) + if (self.blur_checker is not None): blur_score, is_blurry = self.blur_checker.IsBlur(frame_bw, self.frame_index) res["blur_score"] = blur_score @@ -201,7 +202,7 @@ class Video2Dataset: if max_dim < w or max_dim < h: m = max(w, h) factor = max_dim / m - frame = 
cv2.resize(frame, (int(ceil(w * factor)), int(ceil(h * factor)))) + frame = cv2.resize(frame, (int(ceil(w * factor)), int(ceil(h * factor))), interpolation=cv2.INTER_AREA) path = os.path.join(self.parameters.output, "{}_{}_{}.{}".format(video_info.basename, self.global_idx, self.frame_index, self.parameters.frame_format)) @@ -289,24 +290,21 @@ def float_to_rational(f): return (f.numerator, f.denominator) def limit_files(paths, limit): - - cnt = len(paths) - num_to_delete = cnt - limit - - if num_to_delete <= 0: + if len(paths) <= limit: return paths - - skip = floor(num_to_delete / limit) if num_to_delete > cnt else ceil(cnt / num_to_delete) - + to_keep = [] + all_idxes = np.arange(0, len(paths)) + keep_idxes = np.linspace(0, len(paths) - 1, limit, dtype=int) + remove_idxes = set(all_idxes) - set(keep_idxes) - for i in range(len(paths)): - if i % skip == 0: - os.remove(paths[i]) - else: - to_keep.append(paths[i]) + p = np.array(paths) + to_keep = list(p[keep_idxes]) - return limit_files(to_keep, limit) + for idx in remove_idxes: + os.remove(paths[idx]) + + return to_keep def to_deg(value, loc): """convert decimal coordinates into degrees, munutes and seconds tuple From 67787f0059f0b4ae89e4a8ba2a189bebed0e16bb Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 24 Jan 2023 10:39:24 -0500 Subject: [PATCH 098/102] Extract video frames in load dataset --- opendm/config.py | 2 +- opendm/context.py | 1 + opendm/video/video2dataset.py | 2 +- stages/dataset.py | 59 ++++++++++++++++++++++++++++++++--- 4 files changed, 58 insertions(+), 6 deletions(-) diff --git a/opendm/config.py b/opendm/config.py index 140d2715..c7953fb7 100755 --- a/opendm/config.py +++ b/opendm/config.py @@ -658,7 +658,7 @@ def config(argv=None, parser=None): action=StoreValue, default=500, metavar='', - help='Maximum number of frames to extract from video files for processing. Default: %(default)s') + help='Maximum number of frames to extract from video files for processing. Set to 0 for no limit. 
Default: %(default)s') parser.add_argument('--video-resolution', type=int, diff --git a/opendm/context.py b/opendm/context.py index 2fac61d3..7bafdd5d 100644 --- a/opendm/context.py +++ b/opendm/context.py @@ -41,6 +41,7 @@ settings_path = os.path.join(root_path, 'settings.yaml') # Define supported image extensions supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp'} +supported_video_extensions = {'.mp4', '.mov'} # Define the number of cores num_cores = multiprocessing.cpu_count() diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 0f07fba3..82eb96c0 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -135,7 +135,7 @@ class Video2Dataset: if self.f is not None: self.f.close() - if self.parameters.limit is not None and self.global_idx >= self.parameters.limit: + if self.parameters.limit is not None and self.parameters.limit > 0 and self.global_idx >= self.parameters.limit: log.ODM_INFO("Limit of {} frames reached, trimming dataset".format(self.parameters.limit)) output_file_paths = limit_files(output_file_paths, self.parameters.limit) diff --git a/stages/dataset.py b/stages/dataset.py index 355fac4d..cdd8b792 100644 --- a/stages/dataset.py +++ b/stages/dataset.py @@ -15,6 +15,7 @@ from opendm import ai from opendm.skyremoval.skyfilter import SkyFilter from opendm.bgfilter import BgFilter from opendm.concurrency import parallel_map +from opendm.video.video2dataset import Parameters, Video2Dataset def save_images_database(photos, database_file): with open(database_file, 'w') as f: @@ -58,22 +59,25 @@ class ODMLoadDatasetStage(types.ODM_Stage): except Exception as e: log.ODM_WARNING("Cannot write benchmark file: %s" % str(e)) - # check if the image filename is supported - def valid_image_filename(filename): + def valid_filename(filename, supported_extensions): (pathfn, ext) = os.path.splitext(filename) - return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask" + return 
ext.lower() in supported_extensions and pathfn[-5:] != "_mask" # Get supported images from dir def get_images(in_dir): entries = os.listdir(in_dir) valid, rejects = [], [] for f in entries: - if valid_image_filename(f): + if valid_filename(f, context.supported_extensions): valid.append(f) else: rejects.append(f) return valid, rejects + def search_video_files(in_dir): + entries = os.listdir(in_dir) + return [os.path.join(in_dir, f) for f in entries if valid_filename(f, context.supported_video_extensions)] + def find_mask(photo_path, masks): (pathfn, ext) = os.path.splitext(os.path.basename(photo_path)) k = "{}_mask".format(pathfn) @@ -85,6 +89,8 @@ class ODMLoadDatasetStage(types.ODM_Stage): return mask else: log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask)) + + # get images directory images_dir = tree.dataset_raw @@ -100,6 +106,51 @@ class ODMLoadDatasetStage(types.ODM_Stage): if not os.path.exists(images_dir): raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. 
The current is set to: %s" % (images_dir, args.project_path)) + # Check if we need to extract video frames + frames_db_file = os.path.join(images_dir, 'frames.json') + if not os.path.exists(frames_db_file) or self.rerun(): + video_files = search_video_files(images_dir) + + # If we're re-running the pipeline, and frames have been extracted during a previous run + # we need to remove those before re-extracting them + if len(video_files) > 0 and os.path.exists(frames_db_file) and self.rerun(): + log.ODM_INFO("Re-run, removing previously extracted video frames") + frames = [] + try: + with open(frames_db_file, 'r') as f: + frames = json.loads(f.read()) + except Exception as e: + log.ODM_WARNING("Cannot check previous video extraction: %s" % str(e)) + + for f in frames: + fp = os.path.join(images_dir, f) + if os.path.isfile(fp): + os.remove(fp) + + if len(video_files) > 0: + log.ODM_INFO("Found video files (%s), extracting frames" % len(video_files)) + + try: + params = Parameters({ + "input": video_files, + "output": images_dir, + + "blur_threshold": 0.2, + "distance_threshold": 10, + "black_ratio_threshold": 0.98, + "pixel_black_threshold": 0.30, + "use_srt": True, + "max_dimension": args.video_resolution, + "limit": args.video_limit, + }) + v2d = Video2Dataset(params) + frames = v2d.ProcessVideo() + + with open(frames_db_file, 'w') as f: + f.write(json.dumps([os.path.basename(f) for f in frames])) + except Exception as e: + log.ODM_WARNING("Could not extract video frames: %s" % str(e)) + files, rejects = get_images(images_dir) if files: # create ODMPhoto list From 4139c951c5c4ec37955b6e7629c3362b090137b4 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 24 Jan 2023 10:40:10 -0500 Subject: [PATCH 099/102] Bump version --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 75a22a26..b0f2dcb3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.3 +3.0.4 From f6c448745db9e30185b76b3eb15f160f04095b39 Mon Sep 17 00:00:00 
2001 From: Piero Toffanin Date: Tue, 24 Jan 2023 11:08:21 -0500 Subject: [PATCH 100/102] Fix focal length EXIF --- opendm/video/video2dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 82eb96c0..7161bd64 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -244,7 +244,7 @@ class Video2Dataset: if entry["shutter"] is not None: exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (1, int(entry["shutter"])) if entry["focal_len"] is not None: - exif_dict["Exif"][piexif.ExifIFD.FocalLength] = entry["focal_len"] + exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (entry["focal_len"], 100) if entry["fnum"] is not None: exif_dict["Exif"][piexif.ExifIFD.FNumber] = float_to_rational(entry["fnum"]) if entry["iso"] is not None: From 23028f3add7824920e23ce7f10be8d18534ea6fe Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 24 Jan 2023 17:52:39 -0500 Subject: [PATCH 101/102] Fix blur threshold, populate make/model, remove JPEG compression --- opendm/video/video2dataset.py | 4 +++- stages/dataset.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/opendm/video/video2dataset.py b/opendm/video/video2dataset.py index 7161bd64..8f977dfa 100644 --- a/opendm/video/video2dataset.py +++ b/opendm/video/video2dataset.py @@ -231,6 +231,8 @@ class Video2Dataset: piexif.ImageIFD.DateTime: elapsed_time_str, piexif.ImageIFD.XResolution: (frame.shape[1], 1), piexif.ImageIFD.YResolution: (frame.shape[0], 1), + piexif.ImageIFD.Make: "DJI" if video_info.basename.lower().startswith("dji") else "Unknown", + piexif.ImageIFD.Model: "Unknown" }, "Exif": { piexif.ExifIFD.DateTimeOriginal: elapsed_time_str, @@ -254,7 +256,7 @@ class Video2Dataset: exif_dict["GPS"] = get_gps_location(elapsed_time, gps_coords[1], gps_coords[0], gps_coords[2]) exif_bytes = piexif.dump(exif_dict) - img.save(path, exif=exif_bytes) + img.save(path, exif=exif_bytes, quality=95) return 
path diff --git a/stages/dataset.py b/stages/dataset.py index cdd8b792..c7b84ab9 100644 --- a/stages/dataset.py +++ b/stages/dataset.py @@ -135,7 +135,7 @@ class ODMLoadDatasetStage(types.ODM_Stage): "input": video_files, "output": images_dir, - "blur_threshold": 0.2, + "blur_threshold": 300, "distance_threshold": 10, "black_ratio_threshold": 0.98, "pixel_black_threshold": 0.30, From dc997d3bbb621e02cd0f5b8f672a257a55922ee7 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 24 Jan 2023 20:52:12 -0500 Subject: [PATCH 102/102] Readme note --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 9e25d6a0..c1bbbb79 100644 --- a/README.md +++ b/README.md @@ -259,6 +259,10 @@ Experimental flags need to be enabled in Docker to use the ```--squash``` flag. After this, you must restart docker. +## Video Support + +Starting from version 3.0.4, ODM can automatically extract images from video files (.mp4 or .mov). Just place one or more video files into the `images` folder and run the program as usual. Subtitles files (.srt) with GPS information are also supported. Place .srt files in the `images` folder, making sure that the filenames match. For example, `my_video.mp4` ==> `my_video.srt` (case-sensitive). + ## Developers Help improve our software! We welcome contributions from everyone, whether to add new features, improve speed, fix existing bugs or add support for more cameras. Check our [code of conduct](https://github.com/OpenDroneMap/documents/blob/master/CONDUCT.md), the [contributing guidelines](https://github.com/OpenDroneMap/documents/blob/master/CONTRIBUTING.md) and [how decisions are made](https://github.com/OpenDroneMap/documents/blob/master/GOVERNANCE.md#how-decisions-are-made).