diff --git a/configure.py b/configure.py
index c1526046..80b09c6a 100644
--- a/configure.py
+++ b/configure.py
@@ -1,9 +1,13 @@
import sys, platform
-if sys.platform != 'win32':
+
+if sys.platform != "win32":
print("This script is for Windows only! Use configure.sh instead.")
exit(1)
if sys.version_info.major != 3 or sys.version_info.minor != 8:
- print("You need to use Python 3.8.x (due to the requirements.txt). You are using %s instead." % platform.python_version())
+ print(
+ "You need to use Python 3.8.x (due to the requirements.txt). You are using %s instead."
+ % platform.python_version()
+ )
exit(1)
import argparse
@@ -11,32 +15,41 @@ import subprocess
import os
import stat
import urllib.request
-import shutil
+import shutil
import zipfile
from venv import EnvBuilder
-parser = argparse.ArgumentParser(description='ODM Windows Configure Script')
-parser.add_argument('action',
- type=str,
- choices=["build", "clean", "dist", "vcpkg_export"],
- help='Action: %(choices)s')
-parser.add_argument('--build-vcpkg',
- type=bool,
- help='Build VCPKG environment from scratch instead of downloading prebuilt one.')
-parser.add_argument('--vcpkg-archive-url',
- type=str,
- default='https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/vcpkg-export-250.zip',
- required=False,
- help='Path to VCPKG export archive')
-parser.add_argument('--code-sign-cert-path',
- type=str,
- default='',
- required=False,
- help='Path to pfx code signing certificate')
+parser = argparse.ArgumentParser(description="ODM Windows Configure Script")
+parser.add_argument(
+ "action",
+ type=str,
+ choices=["build", "clean", "dist", "vcpkg_export"],
+ help="Action: %(choices)s",
+)
+parser.add_argument(
+ "--build-vcpkg",
+ type=bool,
+ help="Build VCPKG environment from scratch instead of downloading prebuilt one.",
+)
+parser.add_argument(
+ "--vcpkg-archive-url",
+ type=str,
+ default="https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/vcpkg-export-250.zip",
+ required=False,
+ help="Path to VCPKG export archive",
+)
+parser.add_argument(
+ "--code-sign-cert-path",
+ type=str,
+ default="",
+ required=False,
+ help="Path to pfx code signing certificate",
+)
args = parser.parse_args()
+
def run(cmd, cwd=os.getcwd()):
env = os.environ.copy()
print(cmd)
@@ -45,6 +58,7 @@ def run(cmd, cwd=os.getcwd()):
if retcode != 0:
raise Exception("Command returned %s" % retcode)
+
# https://izziswift.com/shutil-rmtree-fails-on-windows-with-access-is-denied/
def rmtree(top):
for root, dirs, files in os.walk(top, topdown=False):
@@ -56,11 +70,13 @@ def rmtree(top):
os.rmdir(os.path.join(root, name))
os.rmdir(top)
+
def vcpkg_requirements():
with open("vcpkg-requirements.txt") as f:
pckgs = list(filter(lambda l: len(l) > 0, map(str.strip, f.read().split("\n"))))
return pckgs
+
def build():
# Create python virtual env
if not os.path.isdir("venv"):
@@ -69,7 +85,7 @@ def build():
ebuilder.create("venv")
run("venv\\Scripts\\pip install --ignore-installed -r requirements.txt")
-
+
# Download / build VCPKG environment
if not os.path.isdir("vcpkg"):
if args.build_vcpkg:
@@ -81,7 +97,9 @@ def build():
else:
if not os.path.exists("vcpkg-env.zip"):
print("Downloading %s" % args.vcpkg_archive_url)
- with urllib.request.urlopen(args.vcpkg_archive_url) as response, open( "vcpkg-env.zip", 'wb') as out_file:
+ with urllib.request.urlopen(args.vcpkg_archive_url) as response, open(
+ "vcpkg-env.zip", "wb"
+ ) as out_file:
shutil.copyfileobj(response, out_file)
if not os.path.exists("vcpkg"):
print("Extracting vcpkg-env.zip --> vcpkg/")
@@ -92,20 +110,27 @@ def build():
if os.path.exists(top_dir):
os.rename(top_dir, "vcpkg")
else:
- print("Warning! Something looks wrong in the VCPKG archive... check the vcpkg/ directory.")
+ print(
+ "Warning! Something looks wrong in the VCPKG archive... check the vcpkg/ directory."
+ )
safe_remove("vcpkg-env.zip")
- if not os.path.exists(os.path.join("SuperBuild", "build")) or not os.path.exists(os.path.join("SuperBuild", "install")):
+ if not os.path.exists(os.path.join("SuperBuild", "build")) or not os.path.exists(
+ os.path.join("SuperBuild", "install")
+ ):
print("Compiling SuperBuild")
-
+
build_dir = os.path.join("SuperBuild", "build")
if not os.path.isdir(build_dir):
os.mkdir(build_dir)
- toolchain_file = os.path.join(os.getcwd(), "vcpkg", "scripts", "buildsystems", "vcpkg.cmake")
- run("cmake .. -DCMAKE_TOOLCHAIN_FILE=\"%s\"" % toolchain_file, cwd=build_dir)
+ toolchain_file = os.path.join(
+ os.getcwd(), "vcpkg", "scripts", "buildsystems", "vcpkg.cmake"
+ )
+ run('cmake .. -DCMAKE_TOOLCHAIN_FILE="%s"' % toolchain_file, cwd=build_dir)
run("cmake --build . --config Release", cwd=build_dir)
+
def vcpkg_export():
if not os.path.exists("vcpkg"):
print("vcpkg directory does not exist. Did you build the environment?")
@@ -115,16 +140,19 @@ def vcpkg_export():
out = "vcpkg-export-%s" % odm_version().replace(".", "")
run("vcpkg\\vcpkg export %s --output=%s --zip" % (" ".join(pkgs), out))
+
def odm_version():
with open("VERSION") as f:
return f.read().split("\n")[0].strip()
+
def safe_remove(path):
if os.path.isdir(path):
rmtree(path)
elif os.path.isfile(path):
os.remove(path)
+
def clean():
safe_remove("vcpkg-download.zip")
safe_remove("vcpkg")
@@ -134,6 +162,7 @@ def clean():
safe_remove(os.path.join("SuperBuild", "src"))
safe_remove(os.path.join("SuperBuild", "install"))
+
def dist():
if not os.path.exists("SuperBuild\\install"):
print("You need to run configure.py build before you can run dist")
@@ -147,7 +176,9 @@ def dist():
if not os.path.isfile(vcredist_path):
vcredist_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/VC_redist.x64.zip"
print("Downloading %s" % vcredist_url)
- with urllib.request.urlopen(vcredist_url) as response, open(vcredist_path, 'wb') as out_file:
+ with urllib.request.urlopen(vcredist_url) as response, open(
+ vcredist_path, "wb"
+ ) as out_file:
shutil.copyfileobj(response, out_file)
print("Extracting --> vc_redist.x64.exe")
@@ -160,9 +191,11 @@ def dist():
python_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/python-3.8.1-embed-amd64-less-pth.zip"
if not os.path.exists(pythonzip_path):
print("Downloading %s" % python_url)
- with urllib.request.urlopen(python_url) as response, open( pythonzip_path, 'wb') as out_file:
+ with urllib.request.urlopen(python_url) as response, open(
+ pythonzip_path, "wb"
+ ) as out_file:
shutil.copyfileobj(response, out_file)
-
+
os.mkdir("python38")
print("Extracting --> python38/")
@@ -174,7 +207,9 @@ def dist():
signtool_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/signtool.exe"
if not os.path.exists(signtool_path):
print("Downloading %s" % signtool_url)
- with urllib.request.urlopen(signtool_url) as response, open(signtool_path, 'wb') as out_file:
+ with urllib.request.urlopen(signtool_url) as response, open(
+ signtool_path, "wb"
+ ) as out_file:
shutil.copyfileobj(response, out_file)
# Download innosetup
@@ -183,7 +218,9 @@ def dist():
innosetup_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/innosetup-portable-win32-6.0.5-3.zip"
if not os.path.exists(innosetupzip_path):
print("Downloading %s" % innosetup_url)
- with urllib.request.urlopen(innosetup_url) as response, open(innosetupzip_path, 'wb') as out_file:
+ with urllib.request.urlopen(innosetup_url) as response, open(
+ innosetupzip_path, "wb"
+ ) as out_file:
shutil.copyfileobj(response, out_file)
os.mkdir("innosetup")
@@ -193,20 +230,24 @@ def dist():
z.extractall("innosetup")
# Run
- cs_flags = '/DSKIP_SIGN=1'
+ cs_flags = "/DSKIP_SIGN=1"
if args.code_sign_cert_path:
- cs_flags = '"/Ssigntool=%s sign /f %s /fd SHA1 /t http://timestamp.sectigo.com $f"' % (signtool_path, args.code_sign_cert_path)
- run("innosetup\\iscc /Qp " + cs_flags + " \"innosetup.iss\"")
+ cs_flags = (
+ '"/Ssigntool=%s sign /f %s /fd SHA1 /t http://timestamp.sectigo.com $f"'
+ % (signtool_path, args.code_sign_cert_path)
+ )
+ run("innosetup\\iscc /Qp " + cs_flags + ' "innosetup.iss"')
print("Done! Setup created in dist/")
-if args.action == 'build':
+
+if args.action == "build":
build()
-elif args.action == 'vcpkg_export':
+elif args.action == "vcpkg_export":
vcpkg_export()
-elif args.action == 'dist':
+elif args.action == "dist":
dist()
-elif args.action == 'clean':
+elif args.action == "clean":
clean()
else:
args.print_help()
diff --git a/contrib/blender/common.py b/contrib/blender/common.py
index fe4924b3..039806a9 100644
--- a/contrib/blender/common.py
+++ b/contrib/blender/common.py
@@ -1,22 +1,21 @@
import bpy
+
def loadMesh(file):
- bpy.utils.register_module('materials_utils')
+ bpy.utils.register_module("materials_utils")
- bpy.ops.import_scene.obj(filepath=file,
- axis_forward='Y',
- axis_up='Z')
+ bpy.ops.import_scene.obj(filepath=file, axis_forward="Y", axis_up="Z")
bpy.ops.xps_tools.convert_to_cycles_all()
model = bpy.data.objects[-1]
- minX = float('inf')
- maxX = float('-inf')
- minY = float('inf')
- maxY = float('-inf')
- minZ = float('inf')
- maxZ = float('-inf')
+ minX = float("inf")
+ maxX = float("-inf")
+ minY = float("inf")
+ maxY = float("-inf")
+ minZ = float("inf")
+ maxZ = float("-inf")
for coord in model.bound_box:
x = coord[0]
y = coord[1]
@@ -28,17 +27,21 @@ def loadMesh(file):
minZ = min(z, minZ)
maxZ = max(z, maxZ)
- model.location[2] += (maxZ - minZ)/2
+ model.location[2] += (maxZ - minZ) / 2
- surfaceShaderType = 'ShaderNodeEmission'
- surfaceShaderName = 'Emission'
+ surfaceShaderType = "ShaderNodeEmission"
+ surfaceShaderName = "Emission"
for m in bpy.data.materials:
nt = m.node_tree
- nt.nodes.remove(nt.nodes['Color Mult'])
- nt.nodes.remove(nt.nodes['Diffuse BSDF'])
+ nt.nodes.remove(nt.nodes["Color Mult"])
+ nt.nodes.remove(nt.nodes["Diffuse BSDF"])
nt.nodes.new(surfaceShaderType)
- nt.links.new(nt.nodes['Material Output'].inputs[0],
- nt.nodes[surfaceShaderName].outputs[0])
- nt.links.new(nt.nodes[surfaceShaderName].inputs[0],
- nt.nodes['Diffuse Texture'].outputs[0])
+ nt.links.new(
+ nt.nodes["Material Output"].inputs[0],
+ nt.nodes[surfaceShaderName].outputs[0],
+ )
+ nt.links.new(
+ nt.nodes[surfaceShaderName].inputs[0],
+ nt.nodes["Diffuse Texture"].outputs[0],
+ )
diff --git a/contrib/blender/odm_photo.py b/contrib/blender/odm_photo.py
index b63cdcf3..451f1240 100644
--- a/contrib/blender/odm_photo.py
+++ b/contrib/blender/odm_photo.py
@@ -16,50 +16,53 @@ from common import loadMesh
def main():
- if len(sys.argv) < 5 or sys.argv[-2] != '--':
- sys.exit('Please provide the ODM project path.')
+ if len(sys.argv) < 5 or sys.argv[-2] != "--":
+ sys.exit("Please provide the ODM project path.")
projectHome = sys.argv[-1]
- loadMesh(projectHome +
- '/odm_texturing/odm_textured_model_geo.obj')
+ loadMesh(projectHome + "/odm_texturing/odm_textured_model_geo.obj")
blendName = bpy.path.display_name_from_filepath(bpy.data.filepath)
- fileName = projectHome + '/odm_photo/odm_' + blendName
- render = bpy.data.scenes['Scene'].render
+ fileName = projectHome + "/odm_photo/odm_" + blendName
+ render = bpy.data.scenes["Scene"].render
render.filepath = fileName
bpy.ops.render.render(write_still=True)
width = render.resolution_x
height = render.resolution_y
- if(render.use_multiview):
- writeExif(fileName+render.views[0].file_suffix+'.jpg', width, height)
- writeExif(fileName+render.views[1].file_suffix+'.jpg', width, height)
+ if render.use_multiview:
+ writeExif(fileName + render.views[0].file_suffix + ".jpg", width, height)
+ writeExif(fileName + render.views[1].file_suffix + ".jpg", width, height)
else:
- writeExif(fileName+'.jpg', width, height)
+ writeExif(fileName + ".jpg", width, height)
def writeExif(fileName, width, height):
w = str(width)
h = str(height)
- subprocess.run(['exiftool',
- '-overwrite_original',
- '-CroppedAreaImageWidthPixels=' + w,
- '-CroppedAreaImageHeightPixels=' + h,
- '-FullPanoWidthPixels=' + w,
- '-FullPanoHeightPixels=' + h,
- '-CroppedAreaLeftPixels=0',
- '-CroppedAreaTopPixels=0',
- '-ProjectionType=equirectangular',
- '-UsePanoramaViewer=True',
- '-PoseHeadingDegrees=0',
- '-LargestValidInteriorRectLeft=0',
- '-LargestValidInteriorRectTop=0',
- '-LargestValidInteriorRectWidth=' + w,
- '-LargestValidInteriorRectHeight=' + h,
- fileName])
+ subprocess.run(
+ [
+ "exiftool",
+ "-overwrite_original",
+ "-CroppedAreaImageWidthPixels=" + w,
+ "-CroppedAreaImageHeightPixels=" + h,
+ "-FullPanoWidthPixels=" + w,
+ "-FullPanoHeightPixels=" + h,
+ "-CroppedAreaLeftPixels=0",
+ "-CroppedAreaTopPixels=0",
+ "-ProjectionType=equirectangular",
+ "-UsePanoramaViewer=True",
+ "-PoseHeadingDegrees=0",
+ "-LargestValidInteriorRectLeft=0",
+ "-LargestValidInteriorRectTop=0",
+ "-LargestValidInteriorRectWidth=" + w,
+ "-LargestValidInteriorRectHeight=" + h,
+ fileName,
+ ]
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/contrib/blender/odm_video.py b/contrib/blender/odm_video.py
index ef58648e..8d3245b4 100644
--- a/contrib/blender/odm_video.py
+++ b/contrib/blender/odm_video.py
@@ -13,101 +13,105 @@ from common import loadMesh
def main():
- if len(sys.argv) < 7 or sys.argv[-4] != '--':
- sys.exit('Please provide the ODM project path, camera waypoints (xyz format), and number of frames.')
+ if len(sys.argv) < 7 or sys.argv[-4] != "--":
+ sys.exit(
+ "Please provide the ODM project path, camera waypoints (xyz format), and number of frames."
+ )
projectHome = sys.argv[-3]
waypointFile = sys.argv[-2]
numFrames = int(sys.argv[-1])
- loadMesh(projectHome +
- '/odm_texturing/odm_textured_model_geo.obj')
+ loadMesh(projectHome + "/odm_texturing/odm_textured_model_geo.obj")
waypoints = loadWaypoints(waypointFile)
numWaypoints = len(waypoints)
- scene = bpy.data.scenes['Scene']
+ scene = bpy.data.scenes["Scene"]
# create path thru waypoints
- curve = bpy.data.curves.new(name='CameraPath', type='CURVE')
- curve.dimensions = '3D'
- curve.twist_mode = 'Z_UP'
- nurbs = curve.splines.new('NURBS')
- nurbs.points.add(numWaypoints-1)
+ curve = bpy.data.curves.new(name="CameraPath", type="CURVE")
+ curve.dimensions = "3D"
+ curve.twist_mode = "Z_UP"
+ nurbs = curve.splines.new("NURBS")
+ nurbs.points.add(numWaypoints - 1)
weight = 1
for i in range(numWaypoints):
- nurbs.points[i].co[0] = waypoints[i][0]
- nurbs.points[i].co[1] = waypoints[i][1]
- nurbs.points[i].co[2] = waypoints[i][2]
- nurbs.points[i].co[3] = weight
+ nurbs.points[i].co[0] = waypoints[i][0]
+ nurbs.points[i].co[1] = waypoints[i][1]
+ nurbs.points[i].co[2] = waypoints[i][2]
+ nurbs.points[i].co[3] = weight
nurbs.use_endpoint_u = True
- path = bpy.data.objects.new(name='CameraPath', object_data=curve)
+ path = bpy.data.objects.new(name="CameraPath", object_data=curve)
scene.objects.link(path)
- camera = bpy.data.objects['Camera']
+ camera = bpy.data.objects["Camera"]
camera.location[0] = 0
camera.location[1] = 0
camera.location[2] = 0
- followPath = camera.constraints.new(type='FOLLOW_PATH')
- followPath.name = 'CameraFollowPath'
+ followPath = camera.constraints.new(type="FOLLOW_PATH")
+ followPath.name = "CameraFollowPath"
followPath.target = path
followPath.use_curve_follow = True
animateContext = bpy.context.copy()
- animateContext['constraint'] = followPath
- bpy.ops.constraint.followpath_path_animate(animateContext,
- constraint='CameraFollowPath',
- frame_start=0,
- length=numFrames)
+ animateContext["constraint"] = followPath
+ bpy.ops.constraint.followpath_path_animate(
+ animateContext, constraint="CameraFollowPath", frame_start=0, length=numFrames
+ )
blendName = bpy.path.display_name_from_filepath(bpy.data.filepath)
- fileName = projectHome + '/odm_video/odm_' + blendName.replace('photo', 'video')
+ fileName = projectHome + "/odm_video/odm_" + blendName.replace("photo", "video")
scene.frame_start = 0
scene.frame_end = numFrames
render = scene.render
- render.filepath = fileName + '.mp4'
- render.image_settings.file_format = 'FFMPEG'
- if(render.use_multiview):
- render.image_settings.stereo_3d_format.display_mode = 'TOPBOTTOM'
- render.image_settings.views_format = 'STEREO_3D'
- render.views[0].file_suffix = ''
- format3d = 'top-bottom'
+ render.filepath = fileName + ".mp4"
+ render.image_settings.file_format = "FFMPEG"
+ if render.use_multiview:
+ render.image_settings.stereo_3d_format.display_mode = "TOPBOTTOM"
+ render.image_settings.views_format = "STEREO_3D"
+ render.views[0].file_suffix = ""
+ format3d = "top-bottom"
else:
width = render.resolution_x
height = render.resolution_y
- format3d = 'none'
+ format3d = "none"
render.resolution_x = 4096
render.resolution_y = 2048
- render.ffmpeg.audio_codec = 'AAC'
- render.ffmpeg.codec = 'H264'
- render.ffmpeg.format = 'MPEG4'
+ render.ffmpeg.audio_codec = "AAC"
+ render.ffmpeg.codec = "H264"
+ render.ffmpeg.format = "MPEG4"
render.ffmpeg.video_bitrate = 45000
bpy.ops.render.render(animation=True)
- writeMetadata(fileName+'.mp4', format3d)
+ writeMetadata(fileName + ".mp4", format3d)
def loadWaypoints(filename):
waypoints = []
with open(filename) as f:
for line in f:
- xyz = line.split()
- waypoints.append((float(xyz[0]), float(xyz[1]), float(xyz[2])))
+ xyz = line.split()
+ waypoints.append((float(xyz[0]), float(xyz[1]), float(xyz[2])))
return waypoints
def writeMetadata(filename, format3d):
- subprocess.run(['python',
- 'spatialmedia',
- '-i',
- '--stereo='+format3d,
- filename,
- filename+'.injected'])
+ subprocess.run(
+ [
+ "python",
+ "spatialmedia",
+ "-i",
+ "--stereo=" + format3d,
+ filename,
+ filename + ".injected",
+ ]
+ )
# check metadata injector was successful
- if os.path.exists(filename+'.injected'):
+ if os.path.exists(filename + ".injected"):
os.remove(filename)
- os.rename(filename+'.injected', filename)
+ os.rename(filename + ".injected", filename)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/contrib/dem-blend/dem-blend.py b/contrib/dem-blend/dem-blend.py
index 17752504..f73e62e2 100755
--- a/contrib/dem-blend/dem-blend.py
+++ b/contrib/dem-blend/dem-blend.py
@@ -5,15 +5,16 @@
import os
import glob
import sys
+
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
import argparse
from opendm.dem import merge
-parser = argparse.ArgumentParser(description='Merge and blend DEMs using OpenDroneMap\'s approach.')
-parser.add_argument('input_dems',
- type=str,
- help='Path to input dems (.tif)')
+parser = argparse.ArgumentParser(
+ description="Merge and blend DEMs using OpenDroneMap's approach."
+)
+parser.add_argument("input_dems", type=str, help="Path to input dems (.tif)")
args = parser.parse_args()
@@ -21,10 +22,8 @@ if not os.path.exists(args.input_dems):
print("%s does not exist" % args.input_dems)
exit(1)
-output_dem = os.path.join(args.input_dems, 'merged_blended_dem.tif')
-input_dem_path = os.path.join(args.input_dems, '*.tif')
+output_dem = os.path.join(args.input_dems, "merged_blended_dem.tif")
+input_dem_path = os.path.join(args.input_dems, "*.tif")
input_dems = glob.glob(input_dem_path)
-merge.euclidean_merge_dems(input_dems
- ,output_dem=output_dem
- )
+merge.euclidean_merge_dems(input_dems, output_dem=output_dem)
diff --git a/contrib/exif-binner/exif_binner.py b/contrib/exif-binner/exif_binner.py
index 276793f0..6a98bca7 100755
--- a/contrib/exif-binner/exif_binner.py
+++ b/contrib/exif-binner/exif_binner.py
@@ -17,7 +17,7 @@ import argparse
# other imports
import PIL
from PIL import Image, ExifTags
-from tqdm import tqdm # optional: see "swap with this for no tqdm" below
+from tqdm import tqdm # optional: see "swap with this for no tqdm" below
parser = argparse.ArgumentParser()
@@ -26,16 +26,50 @@ parser.add_argument("file_dir", help="input folder of images")
parser.add_argument("output_dir", help="output folder to copy images to")
# args with defaults
-parser.add_argument("-b", "--bands", help="number of expected bands per capture", type=int, default=5)
-parser.add_argument("-s", "--sequential", help="use sequential capture group in filenames rather than original capture ID", type=bool, default=True)
-parser.add_argument("-z", "--zero_pad", help="if using sequential capture groups, zero-pad the group number to this many digits. 0 for no padding, -1 for auto padding", type=int, default=5)
-parser.add_argument("-w", "--whitespace_replace", help="replace whitespace characters with this character", type=str, default="-")
+parser.add_argument(
+ "-b", "--bands", help="number of expected bands per capture", type=int, default=5
+)
+parser.add_argument(
+ "-s",
+ "--sequential",
+ help="use sequential capture group in filenames rather than original capture ID",
+ type=bool,
+ default=True,
+)
+parser.add_argument(
+ "-z",
+ "--zero_pad",
+ help="if using sequential capture groups, zero-pad the group number to this many digits. 0 for no padding, -1 for auto padding",
+ type=int,
+ default=5,
+)
+parser.add_argument(
+ "-w",
+ "--whitespace_replace",
+ help="replace whitespace characters with this character",
+ type=str,
+ default="-",
+)
# optional args no defaults
-parser.add_argument("-l", "--logfile", help="write image metadata used to this CSV file", type=str)
-parser.add_argument("-r", "--replace_filename", help="use this instead of using the original filename in new filenames", type=str)
-parser.add_argument("-f", "--force", help="don't ask for confirmation", action="store_true")
-parser.add_argument("-g", "--no_grouping", help="do not apply grouping, only validate and add band name", action="store_true")
+parser.add_argument(
+ "-l", "--logfile", help="write image metadata used to this CSV file", type=str
+)
+parser.add_argument(
+ "-r",
+ "--replace_filename",
+ help="use this instead of using the original filename in new filenames",
+ type=str,
+)
+parser.add_argument(
+ "-f", "--force", help="don't ask for confirmation", action="store_true"
+)
+parser.add_argument(
+ "-g",
+ "--no_grouping",
+ help="do not apply grouping, only validate and add band name",
+ action="store_true",
+)
args = parser.parse_args()
file_dir = args.file_dir
@@ -53,9 +87,19 @@ auto_zero_pad = len(str(math.ceil(float(file_count) / float(expected_bands))))
if args.zero_pad >= 1:
if int("9" * args.zero_pad) < math.ceil(float(file_count) / float(expected_bands)):
- raise ValueError("Zero pad must have more digits than maximum capture groups! Attempted to pad " + str(args.zero_pad) + " digits with "
- + str(file_count) + " files and " + str(expected_bands) + " bands (up to " + str(math.ceil(float(file_count) / float(expected_bands)))
- + " capture groups possible, try at least " + str(auto_zero_pad) + " digits to zero pad)")
+ raise ValueError(
+ "Zero pad must have more digits than maximum capture groups! Attempted to pad "
+ + str(args.zero_pad)
+ + " digits with "
+ + str(file_count)
+ + " files and "
+ + str(expected_bands)
+ + " bands (up to "
+ + str(math.ceil(float(file_count) / float(expected_bands)))
+ + " capture groups possible, try at least "
+ + str(auto_zero_pad)
+ + " digits to zero pad)"
+ )
if args.force is False:
print("Input dir: " + str(file_dir) + " (" + str(file_count) + " files)")
@@ -84,7 +128,15 @@ print("Indexing images ...")
for filename in tqdm(os.listdir(file_dir)):
old_path = os.path.join(file_dir, filename)
file_name, file_ext = os.path.splitext(filename)
- image_entry = {"name": filename, "valid": True, "band": "-", "ID": "-", "group": 0, "DateTime": "-", "error": "-"} # dashes to ensure CSV exports properly, can be blank
+ image_entry = {
+ "name": filename,
+ "valid": True,
+ "band": "-",
+ "ID": "-",
+ "group": 0,
+ "DateTime": "-",
+ "error": "-",
+ } # dashes to ensure CSV exports properly, can be blank
try:
img = Image.open(old_path)
except PIL.UnidentifiedImageError as img_err:
@@ -102,9 +154,9 @@ for filename in tqdm(os.listdir(file_dir)):
# print(ExifTags.TAGS[key] + ":" + str(val)) # debugging
if ExifTags.TAGS[key] == "XMLPacket":
# find bandname
-            bandname_start = val.find(b'<Camera:BandName>')
-            bandname_end = val.find(b'</Camera:BandName>')
- bandname_coded = val[(bandname_start + 17):bandname_end]
+            bandname_start = val.find(b"<Camera:BandName>")
+            bandname_end = val.find(b"</Camera:BandName>")
+ bandname_coded = val[(bandname_start + 17) : bandname_end]
bandname = bandname_coded.decode("UTF-8")
image_entry["band"] = str(bandname)
# find capture ID
@@ -112,7 +164,9 @@ for filename in tqdm(os.listdir(file_dir)):
if ExifTags.TAGS[key] == "DateTime":
image_entry["DateTime"] = str(val)
image_entry["band"].replace(" ", "-")
- if len(image_entry["band"]) >= 99: # if it's too long, wrong value (RGB pic has none)
+ if (
+ len(image_entry["band"]) >= 99
+ ): # if it's too long, wrong value (RGB pic has none)
# no exif present
no_exif_n += 1
image_entry["valid"] = False
@@ -121,7 +175,9 @@ for filename in tqdm(os.listdir(file_dir)):
no_exif_n += 1
image_entry["valid"] = False
image_entry["error"] = "No Capture ID found"
- if (file_ext.lower() in [".jpg", ".jpeg"]) and (image_entry["band"] == "-"): # hack for DJI RGB jpgs
+ if (file_ext.lower() in [".jpg", ".jpeg"]) and (
+ image_entry["band"] == "-"
+ ): # hack for DJI RGB jpgs
# handle = open(old_path, 'rb').read()
        # xmp_start = handle.find(b'<x:xmpmeta')
-        run("ddb info --format geojson --geometry polygon \"%s\" > \"%s\"" % (f, geojson))
- run("ogr2ogr \"%s\" \"%s\"" % (gpkg, geojson))
+ run('ddb info --format geojson --geometry polygon "%s" > "%s"' % (f, geojson))
+ run('ogr2ogr "%s" "%s"' % (gpkg, geojson))
log.ODM_INFO("Computing cutlines")
@@ -79,23 +85,17 @@ for f in projected_images:
name, _ = os.path.splitext(os.path.basename(f))
cutline_file = os.path.join(tmp_path, "%s_cutline.gpkg" % name)
bounds_file_path = os.path.join(tmp_path, "%s.gpkg" % name)
-
- compute_cutline(f,
- bounds_file_path,
- cutline_file,
- 4,
- scale=1)
+
+ compute_cutline(f, bounds_file_path, cutline_file, 4, scale=1)
cut_raster = os.path.join(tmp_path, "%s_cut.tif" % name)
- orthophoto.compute_mask_raster(f, cutline_file,
- cut_raster,
- blend_distance=20, only_max_coords_feature=True)
+ orthophoto.compute_mask_raster(
+ f, cutline_file, cut_raster, blend_distance=20, only_max_coords_feature=True
+ )
feathered_raster = os.path.join(tmp_path, "%s_feathered.tif" % name)
- orthophoto.feather_raster(f, feathered_raster,
- blend_distance=20
- )
+ orthophoto.feather_raster(f, feathered_raster, blend_distance=20)
all_orthos_and_ortho_cuts.append([feathered_raster, cut_raster])
@@ -104,23 +104,26 @@ log.ODM_INFO("Merging...")
if len(all_orthos_and_ortho_cuts) > 1:
# TODO: histogram matching via rasterio
# currently parts have different color tones
- output_file = os.path.join(cwd_path, 'mergepreview.tif')
+ output_file = os.path.join(cwd_path, "mergepreview.tif")
if os.path.isfile(output_file):
os.remove(output_file)
- orthophoto.merge(all_orthos_and_ortho_cuts, output_file, {
- 'TILED': 'YES',
- 'COMPRESS': 'LZW',
- 'PREDICTOR': '2',
- 'BIGTIFF': 'IF_SAFER',
- 'BLOCKXSIZE': 512,
- 'BLOCKYSIZE': 512
- })
-
+ orthophoto.merge(
+ all_orthos_and_ortho_cuts,
+ output_file,
+ {
+ "TILED": "YES",
+ "COMPRESS": "LZW",
+ "PREDICTOR": "2",
+ "BIGTIFF": "IF_SAFER",
+ "BLOCKXSIZE": 512,
+ "BLOCKYSIZE": 512,
+ },
+ )
log.ODM_INFO("Wrote %s" % output_file)
shutil.rmtree(tmp_path)
else:
log.ODM_ERROR("Error: no orthos found to merge")
- exit(1)
\ No newline at end of file
+ exit(1)
diff --git a/contrib/ndvi/agricultural_indices.py b/contrib/ndvi/agricultural_indices.py
index 91c6c636..d9286a7b 100755
--- a/contrib/ndvi/agricultural_indices.py
+++ b/contrib/ndvi/agricultural_indices.py
@@ -9,49 +9,58 @@
import numpy
import argparse
import os.path
+
try:
from osgeo import gdal
from osgeo import osr
except ImportError:
- raise ImportError("You need to install python-gdal : \
+ raise ImportError(
+ "You need to install python-gdal : \
run `sudo apt-get install libgdal-dev` \
# Check Gdal version with \
gdal-config --version \
#install corresponding gdal version with pip : \
- pip3 install GDAL==2.4.0")
+ pip3 install GDAL==2.4.0"
+ )
def parse_args():
- argument_parser = argparse.ArgumentParser('Createa from a multispectral orthophoto \
-a Geotif with NDVI, NDRE and GNDVI agricultural indices')
+ argument_parser = argparse.ArgumentParser(
+        "Create from a multispectral orthophoto \
+a GeoTiff with NDVI, NDRE and GNDVI agricultural indices"
+ )
- argument_parser.add_argument("orthophoto", metavar="",
- type=argparse.FileType('r'),
- help="The CIR orthophoto. Must be a GeoTiff.")
- argument_parser.add_argument("-red", type=int,
- help="Red band number")
- argument_parser.add_argument("-green", type=int,
- help="Green band number")
- argument_parser.add_argument("-blue", type=int,
- help="Blue band number")
- argument_parser.add_argument("-re", type=int,
- help="RedEdge band number")
- argument_parser.add_argument("-nir", type=int,
- help="NIR band number")
- argument_parser.add_argument("out", metavar="",
- type=argparse.FileType('w'),
- help="The output file.")
- argument_parser.add_argument("--overwrite", "-o",
- action='store_true',
- default=False,
- help="Will overwrite output file if it exists. ")
+ argument_parser.add_argument(
+ "orthophoto",
+ metavar="",
+ type=argparse.FileType("r"),
+ help="The CIR orthophoto. Must be a GeoTiff.",
+ )
+ argument_parser.add_argument("-red", type=int, help="Red band number")
+ argument_parser.add_argument("-green", type=int, help="Green band number")
+ argument_parser.add_argument("-blue", type=int, help="Blue band number")
+ argument_parser.add_argument("-re", type=int, help="RedEdge band number")
+ argument_parser.add_argument("-nir", type=int, help="NIR band number")
+ argument_parser.add_argument(
+ "out",
+ metavar="",
+ type=argparse.FileType("w"),
+ help="The output file.",
+ )
+ argument_parser.add_argument(
+ "--overwrite",
+ "-o",
+ action="store_true",
+ default=False,
+ help="Will overwrite output file if it exists. ",
+ )
return argument_parser.parse_args()
if __name__ == "__main__":
-
+
# Suppress/hide warning when dividing by zero
- numpy.seterr(divide='ignore', invalid='ignore')
+ numpy.seterr(divide="ignore", invalid="ignore")
rootdir = os.path.dirname(os.path.abspath(__file__))
@@ -69,37 +78,48 @@ if __name__ == "__main__":
# parse out bands
print("Reading rasters")
- red_matrix=orthophoto[args.red-1].astype(float)
- green_matrix=orthophoto[args.green-1].astype(float)
- blue_matrix=orthophoto[args.blue-1].astype(float)
- re_matrix=orthophoto[args.re-1].astype(float)
- nir_matrix=orthophoto[args.nir-1].astype(float)
+ red_matrix = orthophoto[args.red - 1].astype(float)
+ green_matrix = orthophoto[args.green - 1].astype(float)
+ blue_matrix = orthophoto[args.blue - 1].astype(float)
+ re_matrix = orthophoto[args.re - 1].astype(float)
+ nir_matrix = orthophoto[args.nir - 1].astype(float)
outfile = args.out
# NDVI
print("Computing NDVI")
- #ndvi = calc_ndvi(nir_matrix, red_matrix)
- ndvi = (nir_matrix.astype(float) - red_matrix.astype(float)) / (nir_matrix + red_matrix)
+ # ndvi = calc_ndvi(nir_matrix, red_matrix)
+ ndvi = (nir_matrix.astype(float) - red_matrix.astype(float)) / (
+ nir_matrix + red_matrix
+ )
# NDRE
print("Computing NDRE")
- #ndre = calc_ndre(nir_matrix, re_matrix)
- ndre = (nir_matrix.astype(float) - re_matrix.astype(float)) / (nir_matrix + re_matrix)
+ # ndre = calc_ndre(nir_matrix, re_matrix)
+ ndre = (nir_matrix.astype(float) - re_matrix.astype(float)) / (
+ nir_matrix + re_matrix
+ )
- # GNDVI
+ # GNDVI
print("Computing GNDVI")
- #gndvi = calc_gndvi(nir_matrix, green_matrix)
- gndvi = (nir_matrix.astype(float) - green_matrix.astype(float)) / (nir_matrix + green_matrix)
+ # gndvi = calc_gndvi(nir_matrix, green_matrix)
+ gndvi = (nir_matrix.astype(float) - green_matrix.astype(float)) / (
+ nir_matrix + green_matrix
+ )
__import__("IPython").embed()
print("Saving Files")
# export raster
- for name, matrix in zip(['ndvi', 'ndre', 'gndvi' ] ,[ndvi,ndre,gndvi] ):
+ for name, matrix in zip(["ndvi", "ndre", "gndvi"], [ndvi, ndre, gndvi]):
print(name)
- out_driver = gdal.GetDriverByName('GTiff')\
- .Create(name+'_'+outfile.name, int(ndvi.shape[1]), int(ndvi.shape[0]), 1, gdal.GDT_Float32)
+ out_driver = gdal.GetDriverByName("GTiff").Create(
+ name + "_" + outfile.name,
+ int(ndvi.shape[1]),
+ int(ndvi.shape[0]),
+ 1,
+ gdal.GDT_Float32,
+ )
outband = out_driver.GetRasterBand(1)
outband.SetDescription(name.capitalize())
outband.WriteArray(matrix)
@@ -108,5 +128,3 @@ if __name__ == "__main__":
out_driver.SetProjection(outcrs.ExportToWkt())
out_driver.SetGeoTransform(raster.GetGeoTransform())
outband.FlushCache()
-
-
diff --git a/contrib/ndvi/ndvi.py b/contrib/ndvi/ndvi.py
index ab457f2d..ba74147d 100644
--- a/contrib/ndvi/ndvi.py
+++ b/contrib/ndvi/ndvi.py
@@ -4,31 +4,41 @@
import numpy
import argparse
import os.path
+
try:
from osgeo import gdal
from osgeo import osr
except ImportError:
- raise ImportError("You need to install python-gdal. run `apt-get install python-gdal`")
+ raise ImportError(
+ "You need to install python-gdal. run `apt-get install python-gdal`"
+ )
exit()
def parse_args():
p = argparse.ArgumentParser("A script that calculates the NDVI of a CIR orthophoto")
- p.add_argument("orthophoto", metavar="",
- type=argparse.FileType('r'),
- help="The CIR orthophoto. Must be a GeoTiff.")
- p.add_argument("nir", metavar="N", type=int,
- help="NIR band number")
- p.add_argument("vis", metavar="N", type=int,
- help="Vis band number")
- p.add_argument("out", metavar="",
- type=argparse.FileType('w'),
- help="The output file. Also must be in GeoTiff format")
- p.add_argument("--overwrite", "-o",
- action='store_true',
- default=False,
- help="Will overwrite output file if it exists. ")
+ p.add_argument(
+ "orthophoto",
+ metavar="",
+ type=argparse.FileType("r"),
+ help="The CIR orthophoto. Must be a GeoTiff.",
+ )
+ p.add_argument("nir", metavar="N", type=int, help="NIR band number")
+ p.add_argument("vis", metavar="N", type=int, help="Vis band number")
+ p.add_argument(
+ "out",
+ metavar="",
+ type=argparse.FileType("w"),
+ help="The output file. Also must be in GeoTiff format",
+ )
+ p.add_argument(
+ "--overwrite",
+ "-o",
+ action="store_true",
+ default=False,
+ help="Will overwrite output file if it exists. ",
+ )
return p.parse_args()
@@ -44,7 +54,10 @@ def calc_ndvi(nir, vis):
# for each cell, calculate ndvi (masking out where divide by 0)
ndvi = numpy.empty(nir.shape, dtype=float)
mask = numpy.not_equal((nirb + visb), 0.0)
- return numpy.choose(mask, (-1.0, numpy.true_divide(numpy.subtract(nirb, visb), numpy.add(nirb, visb))))
+ return numpy.choose(
+ mask,
+ (-1.0, numpy.true_divide(numpy.subtract(nirb, visb), numpy.add(nirb, visb))),
+ )
if __name__ == "__main__":
@@ -71,8 +84,9 @@ if __name__ == "__main__":
ndvi = calc_ndvi(nirb, visb)
# export raster
- out_driver = gdal.GetDriverByName('GTiff')\
- .Create(outfile.name, int(ndvi.shape[1]), int(ndvi.shape[0]), 1, gdal.GDT_Float32)
+ out_driver = gdal.GetDriverByName("GTiff").Create(
+ outfile.name, int(ndvi.shape[1]), int(ndvi.shape[0]), 1, gdal.GDT_Float32
+ )
outband = out_driver.GetRasterBand(1)
outband.WriteArray(ndvi)
outcrs = osr.SpatialReference()
diff --git a/contrib/ndvi/rename_sentera_agx710_multispectral_tif.py b/contrib/ndvi/rename_sentera_agx710_multispectral_tif.py
index d9415b17..fb0ee69e 100644
--- a/contrib/ndvi/rename_sentera_agx710_multispectral_tif.py
+++ b/contrib/ndvi/rename_sentera_agx710_multispectral_tif.py
@@ -4,57 +4,76 @@
import argparse
import sys
+
try:
from osgeo import gdal
except ImportError:
- raise ImportError("You need to install python-gdal : \
+ raise ImportError(
+ "You need to install python-gdal : \
run `sudo apt-get install libgdal-dev` \
# Check Gdal version with \
gdal-config --version \
#install corresponding gdal version with pip : \
- pip3 install GDAL==2.4.0")
+ pip3 install GDAL==2.4.0"
+ )
+
def parse_args():
- """ Parse arguments """
+ """Parse arguments"""
argument_parser = argparse.ArgumentParser(
- "A script that rename inplace Sentera AGX710 Geotiff orthophoto. ")
- argument_parser.add_argument("orthophoto", metavar="",
- type=argparse.FileType('r'),
- help="The input orthophoto. Must be a GeoTiff.")
+        "A script that renames in place a Sentera AGX710 GeoTiff orthophoto."
+ )
+ argument_parser.add_argument(
+ "orthophoto",
+ metavar="",
+ type=argparse.FileType("r"),
+ help="The input orthophoto. Must be a GeoTiff.",
+ )
return argument_parser.parse_args()
def rename_sentera_agx710_layers(name):
- """ Only rename Geotif built from Sentera AGX710 images with ODM """
+ """Only rename Geotif built from Sentera AGX710 images with ODM"""
if raster.RasterCount != 7:
- raise ImportError(F'File {name} does not have 7 layers as a regular\
- Geotif built from Sentera AGX710 images with ODM')
+ raise ImportError(
+ f"File {name} does not have 7 layers as a regular\
+ Geotif built from Sentera AGX710 images with ODM"
+ )
- if 'RedGreenBlue' in raster.GetRasterBand(1).GetDescription() and \
- 'RedEdgeGarbageNIR' in raster.GetRasterBand(2).GetDescription():
+ if (
+ "RedGreenBlue" in raster.GetRasterBand(1).GetDescription()
+ and "RedEdgeGarbageNIR" in raster.GetRasterBand(2).GetDescription()
+ ):
- print("Sentera AGX710 Geotiff file has been detected.\
- Layers are name are :")
- print("RedGreenBlue for Band 1\nRedEdgeGarbageNIR for Band 2\
- \nNone for Band 3\nNone for Band 4\nNone for Band 5\nNone for Band 6")
+ print(
+ "Sentera AGX710 Geotiff file has been detected.\
+                Layer names are:"
+ )
+ print(
+ "RedGreenBlue for Band 1\nRedEdgeGarbageNIR for Band 2\
+ \nNone for Band 3\nNone for Band 4\nNone for Band 5\nNone for Band 6"
+ )
print("\nAfter renaming bands will be :")
- print("Red for Band 1\nGreen for Band 2\nBlue for Band 3\n\
- RedEdge for Band 4\nGarbage for Band 5\nNIR for Band 6")
+ print(
+ "Red for Band 1\nGreen for Band 2\nBlue for Band 3\n\
+ RedEdge for Band 4\nGarbage for Band 5\nNIR for Band 6"
+ )
answer = input(
- "Are you sure you want to rename the layers of the input file ? [yes/no] ")
- if answer =='yes':
- raster.GetRasterBand(1).SetDescription('Red')
- raster.GetRasterBand(2).SetDescription('Green')
- raster.GetRasterBand(3).SetDescription('Blue')
- raster.GetRasterBand(4).SetDescription('RedEdge')
- raster.GetRasterBand(5).SetDescription('Garbage')
- raster.GetRasterBand(6).SetDescription('NIR')
+ "Are you sure you want to rename the layers of the input file ? [yes/no] "
+ )
+ if answer == "yes":
+ raster.GetRasterBand(1).SetDescription("Red")
+ raster.GetRasterBand(2).SetDescription("Green")
+ raster.GetRasterBand(3).SetDescription("Blue")
+ raster.GetRasterBand(4).SetDescription("RedEdge")
+ raster.GetRasterBand(5).SetDescription("Garbage")
+ raster.GetRasterBand(6).SetDescription("NIR")
# raster.GetRasterBand(7).SetDescription('Alpha')
else:
print("No renaming")
- else :
- print(F'No need for band renaming in {name}')
+ else:
+ print(f"No need for band renaming in {name}")
sys.exit()
diff --git a/contrib/orthorectify/orthorectify.py b/contrib/orthorectify/orthorectify.py
index 9f0ee753..ec9a3569 100755
--- a/contrib/orthorectify/orthorectify.py
+++ b/contrib/orthorectify/orthorectify.py
@@ -4,6 +4,7 @@
import os
import sys
+
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
from math import sqrt
@@ -19,62 +20,88 @@ default_dem_path = "odm_dem/dsm.tif"
default_outdir = "orthorectified"
default_image_list = "img_list.txt"
-parser = argparse.ArgumentParser(description='Orthorectification Tool')
-parser.add_argument('dataset',
- type=str,
- help='Path to ODM dataset')
-parser.add_argument('--dem',
- type=str,
- default=default_dem_path,
- help='Absolute path to DEM to use to orthorectify images. Default: %(default)s')
-parser.add_argument('--no-alpha',
- type=bool,
- help="Don't output an alpha channel")
-parser.add_argument('--interpolation',
- type=str,
- choices=('nearest', 'bilinear'),
- default='bilinear',
- help="Type of interpolation to use to sample pixel values.Default: %(default)s")
-parser.add_argument('--outdir',
- type=str,
- default=default_outdir,
- help="Output directory where to store results. Default: %(default)s")
-parser.add_argument('--image-list',
- type=str,
- default=default_image_list,
- help="Path to file that contains the list of image filenames to orthorectify. By default all images in a dataset are processed. Default: %(default)s")
-parser.add_argument('--images',
- type=str,
- default="",
- help="Comma-separated list of filenames to rectify. Use as an alternative to --image-list. Default: process all images.")
-parser.add_argument('--threads',
- type=int,
- default=multiprocessing.cpu_count(),
- help="Number of CPU processes to use. Default: %(default)s")
-parser.add_argument('--skip-visibility-test',
- type=bool,
- help="Skip visibility testing (faster but leaves artifacts due to relief displacement)")
+parser = argparse.ArgumentParser(description="Orthorectification Tool")
+parser.add_argument("dataset", type=str, help="Path to ODM dataset")
+parser.add_argument(
+ "--dem",
+ type=str,
+ default=default_dem_path,
+ help="Absolute path to DEM to use to orthorectify images. Default: %(default)s",
+)
+parser.add_argument("--no-alpha", type=bool, help="Don't output an alpha channel")
+parser.add_argument(
+ "--interpolation",
+ type=str,
+ choices=("nearest", "bilinear"),
+ default="bilinear",
+    help="Type of interpolation to use to sample pixel values. Default: %(default)s",
+)
+parser.add_argument(
+ "--outdir",
+ type=str,
+ default=default_outdir,
+ help="Output directory where to store results. Default: %(default)s",
+)
+parser.add_argument(
+ "--image-list",
+ type=str,
+ default=default_image_list,
+ help="Path to file that contains the list of image filenames to orthorectify. By default all images in a dataset are processed. Default: %(default)s",
+)
+parser.add_argument(
+ "--images",
+ type=str,
+ default="",
+ help="Comma-separated list of filenames to rectify. Use as an alternative to --image-list. Default: process all images.",
+)
+parser.add_argument(
+ "--threads",
+ type=int,
+ default=multiprocessing.cpu_count(),
+ help="Number of CPU processes to use. Default: %(default)s",
+)
+parser.add_argument(
+ "--skip-visibility-test",
+ type=bool,
+ help="Skip visibility testing (faster but leaves artifacts due to relief displacement)",
+)
args = parser.parse_args()
dataset_path = args.dataset
-dem_path = os.path.join(dataset_path, default_dem_path) if args.dem == default_dem_path else args.dem
+dem_path = (
+ os.path.join(dataset_path, default_dem_path)
+ if args.dem == default_dem_path
+ else args.dem
+)
interpolation = args.interpolation
with_alpha = not args.no_alpha
-image_list = os.path.join(dataset_path, default_image_list) if args.image_list == default_image_list else args.image_list
+image_list = (
+ os.path.join(dataset_path, default_image_list)
+ if args.image_list == default_image_list
+ else args.image_list
+)
-cwd_path = os.path.join(dataset_path, default_outdir) if args.outdir == default_outdir else args.outdir
+cwd_path = (
+ os.path.join(dataset_path, default_outdir)
+ if args.outdir == default_outdir
+ else args.outdir
+)
if not os.path.exists(cwd_path):
os.makedirs(cwd_path)
-target_images = [] # all
+target_images = [] # all
if args.images:
target_images = list(map(str.strip, args.images.split(",")))
print("Processing %s images" % len(target_images))
elif args.image_list:
with open(image_list) as f:
- target_images = list(filter(lambda filename: filename != '', map(str.strip, f.read().split("\n"))))
+ target_images = list(
+ filter(
+ lambda filename: filename != "", map(str.strip, f.read().split("\n"))
+ )
+ )
print("Processing %s images" % len(target_images))
if not os.path.exists(dem_path):
@@ -91,31 +118,32 @@ def bilinear_interpolate(im, x, y):
y0 = np.floor(y).astype(int)
y1 = y0 + 1
- x0 = np.clip(x0, 0, im.shape[1]-1)
- x1 = np.clip(x1, 0, im.shape[1]-1)
- y0 = np.clip(y0, 0, im.shape[0]-1)
- y1 = np.clip(y1, 0, im.shape[0]-1)
+ x0 = np.clip(x0, 0, im.shape[1] - 1)
+ x1 = np.clip(x1, 0, im.shape[1] - 1)
+ y0 = np.clip(y0, 0, im.shape[0] - 1)
+ y1 = np.clip(y1, 0, im.shape[0] - 1)
- Ia = im[ y0, x0 ]
- Ib = im[ y1, x0 ]
- Ic = im[ y0, x1 ]
- Id = im[ y1, x1 ]
+ Ia = im[y0, x0]
+ Ib = im[y1, x0]
+ Ic = im[y0, x1]
+ Id = im[y1, x1]
- wa = (x1-x) * (y1-y)
- wb = (x1-x) * (y-y0)
- wc = (x-x0) * (y1-y)
- wd = (x-x0) * (y-y0)
+ wa = (x1 - x) * (y1 - y)
+ wb = (x1 - x) * (y - y0)
+ wc = (x - x0) * (y1 - y)
+ wd = (x - x0) * (y - y0)
+
+ return wa * Ia + wb * Ib + wc * Ic + wd * Id
- return wa*Ia + wb*Ib + wc*Ic + wd*Id
# Read DEM
print("Reading DEM: %s" % dem_path)
with rasterio.open(dem_path) as dem_raster:
dem = dem_raster.read()[0]
- dem_has_nodata = dem_raster.profile.get('nodata') is not None
+ dem_has_nodata = dem_raster.profile.get("nodata") is not None
if dem_has_nodata:
- m = ma.array(dem, mask=dem==dem_raster.nodata)
+ m = ma.array(dem, mask=dem == dem_raster.nodata)
dem_min_value = m.min()
dem_max_value = m.max()
else:
@@ -124,10 +152,10 @@ with rasterio.open(dem_path) as dem_raster:
print("DEM Minimum: %s" % dem_min_value)
print("DEM Maximum: %s" % dem_max_value)
-
+
h, w = dem.shape
- crs = dem_raster.profile.get('crs')
+ crs = dem_raster.profile.get("crs")
dem_offset_x, dem_offset_y = (0, 0)
if crs:
@@ -138,20 +166,23 @@ with rasterio.open(dem_path) as dem_raster:
if not os.path.exists(coords_file):
print("Whoops! Cannot find %s (we need that!)" % coords_file)
exit(1)
-
+
with open(coords_file) as f:
- l = f.readline() # discard
+ l = f.readline() # discard
# second line is a northing/easting offset
l = f.readline().rstrip()
dem_offset_x, dem_offset_y = map(float, l.split(" "))
-
+
print("DEM offset: (%s, %s)" % (dem_offset_x, dem_offset_y))
print("DEM dimensions: %sx%s pixels" % (w, h))
# Read reconstruction
- udata = dataset.UndistortedDataSet(dataset.DataSet(os.path.join(dataset_path, "opensfm")), undistorted_data_path=os.path.join(dataset_path, "opensfm", "undistorted"))
+ udata = dataset.UndistortedDataSet(
+ dataset.DataSet(os.path.join(dataset_path, "opensfm")),
+ undistorted_data_path=os.path.join(dataset_path, "opensfm", "undistorted"),
+ )
reconstructions = udata.load_undistorted_reconstruction()
if len(reconstructions) == 0:
raise Exception("No reconstructions available")
@@ -168,7 +199,9 @@ with rasterio.open(dem_path) as dem_raster:
r = shot.pose.get_rotation_matrix()
Xs, Ys, Zs = shot.pose.get_origin()
- cam_grid_y, cam_grid_x = dem_raster.index(Xs + dem_offset_x, Ys + dem_offset_y)
+ cam_grid_y, cam_grid_x = dem_raster.index(
+ Xs + dem_offset_x, Ys + dem_offset_y
+ )
a1 = r[0][0]
b1 = r[0][1]
@@ -185,8 +218,10 @@ with rasterio.open(dem_path) as dem_raster:
for j in range(0, h):
for i in range(0, w):
- distance_map[j][i] = sqrt((cam_grid_x - i) ** 2 + (cam_grid_y - j) ** 2)
- distance_map[distance_map==0] = 1e-7
+ distance_map[j][i] = sqrt(
+ (cam_grid_x - i) ** 2 + (cam_grid_y - j) ** 2
+ )
+ distance_map[distance_map == 0] = 1e-7
print("Camera pose: (%f, %f, %f)" % (Xs, Ys, Zs))
@@ -195,7 +230,7 @@ with rasterio.open(dem_path) as dem_raster:
half_img_h = (img_h - 1) / 2.0
print("Image dimensions: %sx%s pixels" % (img_w, img_h))
f = shot.camera.focal * max(img_h, img_w)
- has_nodata = dem_raster.profile.get('nodata') is not None
+ has_nodata = dem_raster.profile.get("nodata") is not None
def process_pixels(step):
imgout = np.full((num_bands, dem_bbox_h, dem_bbox_w), np.nan)
@@ -226,9 +261,9 @@ with rasterio.open(dem_path) as dem_raster:
Ya -= dem_offset_y
# Colinearity function http://web.pdx.edu/~jduh/courses/geog493f14/Week03.pdf
- dx = (Xa - Xs)
- dy = (Ya - Ys)
- dz = (Za - Zs)
+ dx = Xa - Xs
+ dy = Ya - Ys
+ dz = Za - Zs
den = a3 * dx + b3 * dy + c3 * dz
x = half_img_w - (f * (a1 * dx + b1 * dy + c1 * dz) / den)
@@ -237,12 +272,29 @@ with rasterio.open(dem_path) as dem_raster:
if x >= 0 and y >= 0 and x <= img_w - 1 and y <= img_h - 1:
# Visibility test
if not args.skip_visibility_test:
- check_dem_points = np.column_stack(line(i, j, cam_grid_x, cam_grid_y))
- check_dem_points = check_dem_points[np.all(np.logical_and(np.array([0, 0]) <= check_dem_points, check_dem_points < [w, h]), axis=1)]
+ check_dem_points = np.column_stack(
+ line(i, j, cam_grid_x, cam_grid_y)
+ )
+ check_dem_points = check_dem_points[
+ np.all(
+ np.logical_and(
+ np.array([0, 0]) <= check_dem_points,
+ check_dem_points < [w, h],
+ ),
+ axis=1,
+ )
+ ]
visible = True
for p in check_dem_points:
- ray_z = Zs + (distance_map[p[1]][p[0]] / distance_map[j][i]) * dz
+ ray_z = (
+ Zs
+ + (
+ distance_map[p[1]][p[0]]
+ / distance_map[j][i]
+ )
+ * dz
+ )
if ray_z > dem_max_value:
break
@@ -252,7 +304,7 @@ with rasterio.open(dem_path) as dem_raster:
if not visible:
continue
- if interpolation == 'bilinear':
+ if interpolation == "bilinear":
xi = img_w - 1 - x
yi = img_h - 1 - y
values = bilinear_interpolate(shot_image, xi, yi)
@@ -294,9 +346,54 @@ with rasterio.open(dem_path) as dem_raster:
:param cpy principal point Y (image coordinates)
"""
Za = dem_min_value
- m = (a3*b1*cpy - a1*b3*cpy - (a3*b2 - a2*b3)*cpx - (a2*b1 - a1*b2)*f)
- Xa = dem_offset_x + (m*Xs + (b3*c1*cpy - b1*c3*cpy - (b3*c2 - b2*c3)*cpx - (b2*c1 - b1*c2)*f)*Za - (b3*c1*cpy - b1*c3*cpy - (b3*c2 - b2*c3)*cpx - (b2*c1 - b1*c2)*f)*Zs)/m
- Ya = dem_offset_y + (m*Ys - (a3*c1*cpy - a1*c3*cpy - (a3*c2 - a2*c3)*cpx - (a2*c1 - a1*c2)*f)*Za + (a3*c1*cpy - a1*c3*cpy - (a3*c2 - a2*c3)*cpx - (a2*c1 - a1*c2)*f)*Zs)/m
+ m = (
+ a3 * b1 * cpy
+ - a1 * b3 * cpy
+ - (a3 * b2 - a2 * b3) * cpx
+ - (a2 * b1 - a1 * b2) * f
+ )
+ Xa = (
+ dem_offset_x
+ + (
+ m * Xs
+ + (
+ b3 * c1 * cpy
+ - b1 * c3 * cpy
+ - (b3 * c2 - b2 * c3) * cpx
+ - (b2 * c1 - b1 * c2) * f
+ )
+ * Za
+ - (
+ b3 * c1 * cpy
+ - b1 * c3 * cpy
+ - (b3 * c2 - b2 * c3) * cpx
+ - (b2 * c1 - b1 * c2) * f
+ )
+ * Zs
+ )
+ / m
+ )
+ Ya = (
+ dem_offset_y
+ + (
+ m * Ys
+ - (
+ a3 * c1 * cpy
+ - a1 * c3 * cpy
+ - (a3 * c2 - a2 * c3) * cpx
+ - (a2 * c1 - a1 * c2) * f
+ )
+ * Za
+ + (
+ a3 * c1 * cpy
+ - a1 * c3 * cpy
+ - (a3 * c2 - a2 * c3) * cpx
+ - (a2 * c1 - a1 * c2) * f
+ )
+ * Zs
+ )
+ / m
+ )
y, x = dem_raster.index(Xa, Ya)
return (x, y)
@@ -313,11 +410,21 @@ with rasterio.open(dem_path) as dem_raster:
dem_bbox_miny = min(h - 1, max(0, dem_bbox_y.min()))
dem_bbox_maxx = min(w - 1, max(0, dem_bbox_x.max()))
dem_bbox_maxy = min(h - 1, max(0, dem_bbox_y.max()))
-
+
dem_bbox_w = 1 + dem_bbox_maxx - dem_bbox_minx
dem_bbox_h = 1 + dem_bbox_maxy - dem_bbox_miny
- print("Iterating over DEM box: [(%s, %s), (%s, %s)] (%sx%s pixels)" % (dem_bbox_minx, dem_bbox_miny, dem_bbox_maxx, dem_bbox_maxy, dem_bbox_w, dem_bbox_h))
+ print(
+ "Iterating over DEM box: [(%s, %s), (%s, %s)] (%sx%s pixels)"
+ % (
+ dem_bbox_minx,
+ dem_bbox_miny,
+ dem_bbox_maxx,
+ dem_bbox_maxy,
+ dem_bbox_w,
+ dem_bbox_h,
+ )
+ )
if max_workers > 1:
with multiprocessing.Pool(max_workers) as p:
@@ -325,7 +432,9 @@ with rasterio.open(dem_path) as dem_raster:
else:
results = [process_pixels(0)]
- results = list(filter(lambda r: r[1][0] <= r[1][2] and r[1][1] <= r[1][3], results))
+ results = list(
+ filter(lambda r: r[1][0] <= r[1][2] and r[1][1] <= r[1][3], results)
+ )
# Merge image
imgout, _ = results[0]
@@ -335,7 +444,7 @@ with rasterio.open(dem_path) as dem_raster:
resimg, _ = results[j % max_workers]
for b in range(num_bands):
imgout[b][im_j] = resimg[b][im_j]
-
+
# Merge bounds
minx = dem_bbox_w
miny = dem_bbox_h
@@ -347,10 +456,10 @@ with rasterio.open(dem_path) as dem_raster:
miny = min(bounds[1], miny)
maxx = max(bounds[2], maxx)
maxy = max(bounds[3], maxy)
-
+
print("Output bounds: (%s, %s), (%s, %s) pixels" % (minx, miny, maxx, maxy))
if minx <= maxx and miny <= maxy:
- imgout = imgout[:,miny:maxy+1,minx:maxx+1]
+ imgout = imgout[:, miny : maxy + 1, minx : maxx + 1]
if with_alpha:
alpha = np.zeros((imgout.shape[1], imgout.shape[2]), dtype=np.uint8)
@@ -361,26 +470,34 @@ with rasterio.open(dem_path) as dem_raster:
# Cast
imgout = imgout.astype(shot_image.dtype)
- dem_transform = dem_raster.profile['transform']
- offset_x, offset_y = dem_raster.xy(dem_bbox_miny + miny, dem_bbox_minx + minx, offset='ul')
-
+ dem_transform = dem_raster.profile["transform"]
+ offset_x, offset_y = dem_raster.xy(
+ dem_bbox_miny + miny, dem_bbox_minx + minx, offset="ul"
+ )
+
profile = {
- 'driver': 'GTiff',
- 'width': imgout.shape[2],
- 'height': imgout.shape[1],
- 'count': num_bands + 1 if with_alpha else num_bands,
- 'dtype': imgout.dtype.name,
- 'transform': rasterio.transform.Affine(dem_transform[0], dem_transform[1], offset_x,
- dem_transform[3], dem_transform[4], offset_y),
- 'nodata': None,
- 'crs': crs
+ "driver": "GTiff",
+ "width": imgout.shape[2],
+ "height": imgout.shape[1],
+ "count": num_bands + 1 if with_alpha else num_bands,
+ "dtype": imgout.dtype.name,
+ "transform": rasterio.transform.Affine(
+ dem_transform[0],
+ dem_transform[1],
+ offset_x,
+ dem_transform[3],
+ dem_transform[4],
+ offset_y,
+ ),
+ "nodata": None,
+ "crs": crs,
}
outfile = os.path.join(cwd_path, shot.id)
if not outfile.endswith(".tif"):
outfile = outfile + ".tif"
- with rasterio.open(outfile, 'w', BIGTIFF="IF_SAFER", **profile) as wout:
+ with rasterio.open(outfile, "w", BIGTIFF="IF_SAFER", **profile) as wout:
for b in range(num_bands):
wout.write(imgout[b], b + 1)
if with_alpha:
diff --git a/contrib/pc2dem/pc2dem.py b/contrib/pc2dem/pc2dem.py
index dc9743fe..ffa28db8 100755
--- a/contrib/pc2dem/pc2dem.py
+++ b/contrib/pc2dem/pc2dem.py
@@ -4,33 +4,42 @@
import os
import sys
+
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
import argparse
import multiprocessing
from opendm.dem import commands
-parser = argparse.ArgumentParser(description='Generate DEMs from point clouds using ODM\'s algorithm.')
-parser.add_argument('point_cloud',
- type=str,
- help='Path to point cloud file (.las, .laz, .ply)')
-parser.add_argument('--type',
- type=str,
- choices=("dsm", "dtm"),
- default="dsm",
- help="Type of DEM. Default: %(default)s")
-parser.add_argument('--resolution',
- type=float,
- default=0.05,
- help='Resolution in m/px. Default: %(default)s')
-parser.add_argument('--gapfill-steps',
- default=3,
- type=int,
- help='Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. '
- 'Starting with a radius equal to the output resolution, N different DEMs are generated with '
- 'progressively bigger radius using the inverse distance weighted (IDW) algorithm '
- 'and merged together. Remaining gaps are then merged using nearest neighbor interpolation. '
- 'Default: %(default)s')
+parser = argparse.ArgumentParser(
+ description="Generate DEMs from point clouds using ODM's algorithm."
+)
+parser.add_argument(
+ "point_cloud", type=str, help="Path to point cloud file (.las, .laz, .ply)"
+)
+parser.add_argument(
+ "--type",
+ type=str,
+ choices=("dsm", "dtm"),
+ default="dsm",
+ help="Type of DEM. Default: %(default)s",
+)
+parser.add_argument(
+ "--resolution",
+ type=float,
+ default=0.05,
+ help="Resolution in m/px. Default: %(default)s",
+)
+parser.add_argument(
+ "--gapfill-steps",
+ default=3,
+ type=int,
+ help="Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. "
+ "Starting with a radius equal to the output resolution, N different DEMs are generated with "
+ "progressively bigger radius using the inverse distance weighted (IDW) algorithm "
+ "and merged together. Remaining gaps are then merged using nearest neighbor interpolation. "
+ "Default: %(default)s",
+)
args = parser.parse_args()
if not os.path.exists(args.point_cloud):
@@ -41,15 +50,18 @@ outdir = os.path.dirname(args.point_cloud)
radius_steps = [args.resolution / 2.0]
for _ in range(args.gapfill_steps - 1):
- radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary, maybe there's a better value?
+ radius_steps.append(
+ radius_steps[-1] * 2
+ ) # 2 is arbitrary, maybe there's a better value?
-commands.create_dem(args.point_cloud,
- args.type,
- output_type='idw' if args.type == 'dtm' else 'max',
- radiuses=list(map(str, radius_steps)),
- gapfill=args.gapfill_steps > 0,
- outdir=outdir,
- resolution=args.resolution,
- decimation=1,
- max_workers=multiprocessing.cpu_count()
- )
\ No newline at end of file
+commands.create_dem(
+ args.point_cloud,
+ args.type,
+ output_type="idw" if args.type == "dtm" else "max",
+ radiuses=list(map(str, radius_steps)),
+ gapfill=args.gapfill_steps > 0,
+ outdir=outdir,
+ resolution=args.resolution,
+ decimation=1,
+ max_workers=multiprocessing.cpu_count(),
+)
diff --git a/contrib/resize/resize.py b/contrib/resize/resize.py
index bed6ec1b..e83fa12b 100644
--- a/contrib/resize/resize.py
+++ b/contrib/resize/resize.py
@@ -7,35 +7,46 @@ import piexif
import multiprocessing
from multiprocessing.pool import ThreadPool
import sys
+
sys.path.append("../../")
from opendm.gcp import GCPFile
-parser = argparse.ArgumentParser(description='Exif Image Resize')
-parser.add_argument('--input', '-i',
- metavar='',
- required=True,
- help='Path to input image/GCP or image folder')
-parser.add_argument('--output', '-o',
- metavar='',
- required=True,
- help='Path to output image/GCP or image folder')
-parser.add_argument('--force', '-f',
- action='store_true',
- default=False,
- help='Overwrite results')
-parser.add_argument('amount',
- metavar='',
- type=str,
- help='Pixel of largest side or percentage to resize images by')
+parser = argparse.ArgumentParser(description="Exif Image Resize")
+parser.add_argument(
+ "--input",
+ "-i",
+ metavar="",
+ required=True,
+ help="Path to input image/GCP or image folder",
+)
+parser.add_argument(
+ "--output",
+ "-o",
+ metavar="",
+ required=True,
+ help="Path to output image/GCP or image folder",
+)
+parser.add_argument(
+ "--force", "-f", action="store_true", default=False, help="Overwrite results"
+)
+parser.add_argument(
+ "amount",
+ metavar="",
+ type=str,
+ help="Pixel of largest side or percentage to resize images by",
+)
args = parser.parse_args()
+
def die(msg):
print(msg)
exit(1)
+
class nonloc:
errors = 0
+
def resize_image(image_path, out_path, resize_to, out_path_is_file=False):
"""
:param image_path: path to the image
@@ -64,24 +75,36 @@ def resize_image(image_path, out_path, resize_to, out_path_is_file=False):
im.thumbnail((resized_width, resized_height), Image.LANCZOS)
driver = ext[1:].upper()
- if driver == 'JPG':
- driver = 'JPEG'
+ if driver == "JPG":
+ driver = "JPEG"
- if 'exif' in im.info:
- exif_dict = piexif.load(im.info['exif'])
- exif_dict['Exif'][piexif.ExifIFD.PixelXDimension] = resized_width
- exif_dict['Exif'][piexif.ExifIFD.PixelYDimension] = resized_height
- im.save(resized_image_path, driver, exif=piexif.dump(exif_dict), quality=100)
+ if "exif" in im.info:
+ exif_dict = piexif.load(im.info["exif"])
+ exif_dict["Exif"][piexif.ExifIFD.PixelXDimension] = resized_width
+ exif_dict["Exif"][piexif.ExifIFD.PixelYDimension] = resized_height
+ im.save(
+ resized_image_path, driver, exif=piexif.dump(exif_dict), quality=100
+ )
else:
im.save(resized_image_path, driver, quality=100)
im.close()
- print("{} ({}x{}) --> {} ({}x{})".format(image_path, width, height, resized_image_path, resized_width, resized_height))
+ print(
+ "{} ({}x{}) --> {} ({}x{})".format(
+ image_path,
+ width,
+ height,
+ resized_image_path,
+ resized_width,
+ resized_height,
+ )
+ )
except (IOError, ValueError) as e:
print("Error: Cannot resize {}: {}.".format(image_path, str(e)))
nonloc.errors += 1
+
def resize_gcp(gcp_path, out_path, resize_to, out_path_is_file=False):
"""
:param gcp_path: path to the GCP
@@ -110,6 +133,7 @@ def resize_gcp(gcp_path, out_path, resize_to, out_path_is_file=False):
print("Error: Cannot resize {}: {}.".format(gcp_path, str(e)))
nonloc.errors += 1
+
if not args.amount.endswith("%"):
args.amount = float(args.amount)
if args.amount <= 0:
@@ -157,13 +181,15 @@ if create_dir:
pool = ThreadPool(processes=multiprocessing.cpu_count())
+
def resize(file):
_, ext = os.path.splitext(file)
if ext.lower() == ".txt":
return resize_gcp(file, args.output, args.amount, not create_dir)
else:
return resize_image(file, args.output, args.amount, not create_dir)
+
+
pool.map(resize, files + gcps)
print("Process completed, {} errors.".format(nonloc.errors))
-
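
A minimal sketch of the largest-side resize convention suggested by the "amount" help text above; this is an assumption for illustration (the actual ratio handling lives in resize_image), and the numbers are made up.

def target_size(width, height, largest_side):
    # Scale so the longer side becomes largest_side while preserving aspect ratio
    ratio = float(largest_side) / max(width, height)
    return int(width * ratio), int(height * ratio)

print(target_size(4000, 3000, 2048))  # (2048, 1536)
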
diff --git a/contrib/visveg/vegind.py b/contrib/visveg/vegind.py
index 42dcb62d..90503b46 100644
--- a/contrib/visveg/vegind.py
+++ b/contrib/visveg/vegind.py
@@ -3,28 +3,34 @@
import rasterio, os, sys
import numpy as np
+
class bcolors:
- OKBLUE = '\033[94m'
- OKGREEN = '\033[92m'
- WARNING = '\033[93m'
- FAIL = '\033[91m'
- ENDC = '\033[0m'
- BOLD = '\033[1m'
- UNDERLINE = '\033[4m'
+ OKBLUE = "\033[94m"
+ OKGREEN = "\033[92m"
+ WARNING = "\033[93m"
+ FAIL = "\033[91m"
+ ENDC = "\033[0m"
+ BOLD = "\033[1m"
+ UNDERLINE = "\033[4m"
+
try:
file = sys.argv[1]
typ = sys.argv[2]
(fileRoot, fileExt) = os.path.splitext(file)
outFileName = fileRoot + "_" + typ + fileExt
- if typ not in ['vari', 'tgi', 'ngrdi']:
+ if typ not in ["vari", "tgi", "ngrdi"]:
raise IndexError
except (TypeError, IndexError, NameError):
- print bcolors.FAIL + 'Arguments messed up. Check arguments order and index name' + bcolors.ENDC
- print 'Usage: ./vegind.py orto index'
- print ' orto - filepath to RGB orthophoto'
- print ' index - Vegetation Index'
- print bcolors.OKGREEN + 'Available indexes: vari, ngrdi, tgi' + bcolors.ENDC
+ print(
+ bcolors.FAIL
+ + "Arguments messed up. Check arguments order and index name"
+ + bcolors.ENDC
+ )
+ print("Usage: ./vegind.py orto index")
+ print(" orto - filepath to RGB orthophoto")
+ print(" index - Vegetation Index")
+ print(bcolors.OKGREEN + "Available indexes: vari, ngrdi, tgi" + bcolors.ENDC)
sys.exit()
@@ -38,12 +44,13 @@ def calcNgrdi(red, green):
:param green: green visible channel
:return: ngrdi index array
"""
- mask = np.not_equal(np.add(red,green), 0.0)
- return np.choose(mask, (-9999.0, np.true_divide(
- np.subtract(green,red),
- np.add(red,green))))
+ mask = np.not_equal(np.add(red, green), 0.0)
+ return np.choose(
+ mask, (-9999.0, np.true_divide(np.subtract(green, red), np.add(red, green)))
+ )
-def calcVari(red,green,blue):
+
+def calcVari(red, green, blue):
"""
Calculates Visible Atmospheric Resistant Index
Gitelson, A.A., Kaufman, Y.J., Stark, R., Rundquist, D., 2002.
@@ -54,10 +61,19 @@ def calcVari(red,green,blue):
:param blue: blue visible channel
:return: vari index array, that will be saved to tiff
"""
- mask = np.not_equal(np.subtract(np.add(green,red),blue), 0.0)
- return np.choose(mask, (-9999.0, np.true_divide(np.subtract(green,red),np.subtract(np.add(green,red),blue))))
+ mask = np.not_equal(np.subtract(np.add(green, red), blue), 0.0)
+ return np.choose(
+ mask,
+ (
+ -9999.0,
+ np.true_divide(
+ np.subtract(green, red), np.subtract(np.add(green, red), blue)
+ ),
+ ),
+ )
-def calcTgi(red,green,blue):
+
+def calcTgi(red, green, blue):
"""
Calculates Triangular Greenness Index
Hunt, E. Raymond Jr.; Doraiswamy, Paul C.; McMurtrey, James E.; Daughtry, Craig S.T.; Perry, Eileen M.; and Akhmedov, Bakhyt,
@@ -69,8 +85,12 @@ def calcTgi(red,green,blue):
:param blue: blue channel
:return: tgi index array, that will be saved to tiff
"""
- mask = np.not_equal(green-red+blue-255.0, 0.0)
- return np.choose(mask, (-9999.0, np.subtract(green, np.multiply(0.39,red), np.multiply(0.61, blue))))
+ mask = np.not_equal(green - red + blue - 255.0, 0.0)
+    return np.choose(
+        mask,
+        # avoid the 3-argument np.subtract (its third argument is "out"):
+        # TGI = green - 0.39 * red - 0.61 * blue
+        (-9999.0, green - np.multiply(0.39, red) - np.multiply(0.61, blue)),
+    )
+
try:
with rasterio.Env():
@@ -80,16 +100,16 @@ try:
red = np.float32(ds.read(1))
green = np.float32(ds.read(2))
blue = np.float32(ds.read(3))
- np.seterr(divide='ignore', invalid='ignore')
- if typ == 'ngrdi':
- indeks = calcNgrdi(red,green)
- elif typ == 'vari':
+ np.seterr(divide="ignore", invalid="ignore")
+ if typ == "ngrdi":
+ indeks = calcNgrdi(red, green)
+ elif typ == "vari":
indeks = calcVari(red, green, blue)
- elif typ == 'tgi':
+ elif typ == "tgi":
indeks = calcTgi(red, green, blue)
- with rasterio.open(outFileName, 'w', BIGTIFF="IF_SAFER", **profile) as dst:
+ with rasterio.open(outFileName, "w", BIGTIFF="IF_SAFER", **profile) as dst:
dst.write(indeks.astype(rasterio.float32), 1)
except rasterio.errors.RasterioIOError:
- print bcolors.FAIL + 'Orthophoto file not found or access denied' + bcolors.ENDC
+ print(bcolors.FAIL + "Orthophoto file not found or access denied" + bcolors.ENDC)
sys.exit()
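
A tiny numeric check (outside the patch) of the VARI expression reformatted above, using plain floats instead of rasterio bands; the pixel values are made up.

red, green, blue = 90.0, 120.0, 60.0
vari = (green - red) / ((green + red) - blue)  # ratio calcVari computes when the denominator is nonzero
print(vari)  # 0.2
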
diff --git a/opendm/ai.py b/opendm/ai.py
index 9660e8c0..413253e7 100644
--- a/opendm/ai.py
+++ b/opendm/ai.py
@@ -7,6 +7,7 @@ import sys
import rawpy
import cv2
+
def read_image(img_path):
if img_path[-4:].lower() in [".dng", ".raw", ".nef"]:
try:
@@ -20,24 +21,24 @@ def read_image(img_path):
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
+
return img
-def get_model(namespace, url, version, name = "model.onnx"):
+def get_model(namespace, url, version, name="model.onnx"):
version = version.replace(".", "_")
base_dir = os.path.join(os.path.dirname(__file__), "..")
- if sys.platform == 'win32':
- base_dir = os.path.join(os.getenv('PROGRAMDATA'),"ODM")
+ if sys.platform == "win32":
+ base_dir = os.path.join(os.getenv("PROGRAMDATA"), "ODM")
base_dir = os.path.join(os.path.abspath(base_dir), "storage", "models")
-
+
namespace_dir = os.path.join(base_dir, namespace)
versioned_dir = os.path.join(namespace_dir, version)
if not os.path.isdir(versioned_dir):
os.makedirs(versioned_dir, exist_ok=True)
-
+
# Check if we need to download it
model_file = os.path.join(versioned_dir, name)
if not os.path.isfile(model_file):
@@ -62,14 +63,16 @@ def get_model(namespace, url, version, name = "model.onnx"):
if os.path.basename(downloaded_file).lower().endswith(".zip"):
log.ODM_INFO("Extracting %s ..." % downloaded_file)
- with zipfile.ZipFile(downloaded_file, 'r') as z:
+ with zipfile.ZipFile(downloaded_file, "r") as z:
z.extractall(versioned_dir)
os.remove(downloaded_file)
-
+
if not os.path.isfile(model_file):
- log.ODM_WARNING("Cannot find %s (is the URL to the AI model correct?)" % model_file)
+ log.ODM_WARNING(
+ "Cannot find %s (is the URL to the AI model correct?)" % model_file
+ )
return None
else:
return model_file
else:
- return model_file
\ No newline at end of file
+ return model_file
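
A sketch of the versioned cache path that get_model above resolves before deciding whether to download; namespace, version and filename here are illustrative, and on Windows base_dir would live under %PROGRAMDATA%\ODM instead.

import os

namespace, version, name = "some_model_namespace", "1.0.0", "model.onnx"
base_dir = os.path.join("storage", "models")  # relative to the ODM root in this sketch
model_file = os.path.join(base_dir, namespace, version.replace(".", "_"), name)
print(model_file)  # storage/models/some_model_namespace/1_0_0/model.onnx
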
diff --git a/opendm/align.py b/opendm/align.py
index a5643ced..bf7fd85f 100644
--- a/opendm/align.py
+++ b/opendm/align.py
@@ -13,42 +13,50 @@ from opendm import io
from opendm import system
from opendm.concurrency import get_max_memory
+
def get_point_cloud_crs(file):
- pipeline = pdal.Pipeline(json.dumps([ file ]))
+ pipeline = pdal.Pipeline(json.dumps([file]))
metadata = pipeline.quickinfo
reader_metadata = [val for key, val in metadata.items() if "readers" in key]
crs = CRS.from_string(reader_metadata[0]["srs"]["horizontal"])
return str(crs)
+
def get_raster_crs(file):
- with rasterio.open(file, 'r') as f:
+ with rasterio.open(file, "r") as f:
return str(f.crs)
+
def reproject_point_cloud(file, out_srs):
out_file = io.related_file_path(file, postfix="_reprojected_tmp")
- pipeline = pdal.Pipeline(json.dumps([ file, {
- "type": "filters.reprojection",
- "out_srs": out_srs
- }, out_file]))
+ pipeline = pdal.Pipeline(
+ json.dumps(
+ [file, {"type": "filters.reprojection", "out_srs": out_srs}, out_file]
+ )
+ )
pipeline.execute()
return out_file
+
def reproject_raster(file, out_srs):
out_file = io.related_file_path(file, postfix="_reprojected_tmp")
kwargs = {
- 'input': double_quote(file),
- 'output': double_quote(out_file),
- 'out_srs': out_srs,
- 'max_memory': get_max_memory()
+ "input": double_quote(file),
+ "output": double_quote(out_file),
+ "out_srs": out_srs,
+ "max_memory": get_max_memory(),
}
- system.run('gdalwarp '
- '-t_srs {out_srs} '
- '{input} '
- '{output} '
- '--config GDAL_CACHEMAX {max_memory}% '.format(**kwargs))
+ system.run(
+ "gdalwarp "
+ "-t_srs {out_srs} "
+ "{input} "
+ "{output} "
+ "--config GDAL_CACHEMAX {max_memory}% ".format(**kwargs)
+ )
return out_file
+
def compute_alignment_matrix(input_laz, align_file, stats_dir):
if os.path.exists(stats_dir):
shutil.rmtree(stats_dir)
@@ -70,7 +78,7 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
else:
log.ODM_WARNING("Unsupported alignment file: %s" % align_file)
return
-
+
to_delete = []
try:
@@ -81,7 +89,9 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
align_file = repr_func(align_file, input_crs)
to_delete.append(align_file)
- conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir))
+ conf = dataclasses.asdict(
+ codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir)
+ )
fnd_obj, aoi_obj = codem.preprocess(conf)
fnd_obj.prep()
aoi_obj.prep()
@@ -102,46 +112,53 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
)
reg = app_reg.get_registration_transformation()
-
+
# Write JSON to stats folder
- with open(os.path.join(stats_dir, "registration.json"), 'w') as f:
- del dsm_reg.registration_parameters['matrix']
- del icp_reg.registration_parameters['matrix']
+ with open(os.path.join(stats_dir, "registration.json"), "w") as f:
+ del dsm_reg.registration_parameters["matrix"]
+ del icp_reg.registration_parameters["matrix"]
- f.write(json.dumps({
- 'coarse': dsm_reg.registration_parameters,
- 'fine': icp_reg.registration_parameters,
- }, indent=4))
+ f.write(
+ json.dumps(
+ {
+ "coarse": dsm_reg.registration_parameters,
+ "fine": icp_reg.registration_parameters,
+ },
+ indent=4,
+ )
+ )
- matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4))
+ matrix = np.fromstring(reg["matrix"], dtype=float, sep=" ").reshape((4, 4))
return matrix
finally:
for f in to_delete:
if os.path.isfile(f):
os.unlink(f)
+
def transform_point_cloud(input_laz, a_matrix, output_laz):
pipe = [
input_laz,
{
- 'type': 'filters.transformation',
- 'matrix': " ".join(list(map(str, a_matrix.flatten()))),
+ "type": "filters.transformation",
+ "matrix": " ".join(list(map(str, a_matrix.flatten()))),
},
output_laz,
]
p = pdal.Pipeline(json.dumps(pipe))
p.execute()
+
def transform_obj(input_obj, a_matrix, geo_offset, output_obj):
g_off = np.array([geo_offset[0], geo_offset[1], 0, 0])
- with open(input_obj, 'r') as fin:
- with open(output_obj, 'w') as fout:
+ with open(input_obj, "r") as fin:
+ with open(output_obj, "w") as fout:
lines = fin.readlines()
for line in lines:
if line.startswith("v "):
- v = np.fromstring(line.strip()[2:] + " 1", sep=' ', dtype=float)
+ v = np.fromstring(line.strip()[2:] + " 1", sep=" ", dtype=float)
vt = (a_matrix.dot((v + g_off)) - g_off)[:3]
- fout.write("v " + " ".join(map(str, list(vt))) + '\n')
+ fout.write("v " + " ".join(map(str, list(vt))) + "\n")
else:
- fout.write(line)
\ No newline at end of file
+ fout.write(line)
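
A minimal sketch (outside the patch) of the vertex transform applied by transform_obj above: vertices are shifted by the georeferencing offset, multiplied by the 4x4 alignment matrix, then shifted back. The matrix and offset values are made up.

import numpy as np

a_matrix = np.eye(4)
a_matrix[:3, 3] = [1.0, -2.0, 0.5]  # pure translation, for illustration
geo_offset = (250000.0, 4200000.0)
g_off = np.array([geo_offset[0], geo_offset[1], 0, 0])

v = np.array([10.0, 20.0, 30.0, 1.0])       # OBJ vertex in homogeneous coordinates
vt = (a_matrix.dot(v + g_off) - g_off)[:3]  # same expression as in transform_obj
print(vt)  # [11.  18.  30.5]
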
diff --git a/opendm/arghelpers.py b/opendm/arghelpers.py
index d3587601..1677ceb4 100644
--- a/opendm/arghelpers.py
+++ b/opendm/arghelpers.py
@@ -3,6 +3,7 @@ from shlex import _find_unsafe
import json
import os
+
def double_quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
@@ -12,7 +13,8 @@ def double_quote(s):
# use double quotes, and prefix double quotes with a \
# the string $"b is then quoted as "$\"b"
- return '"' + s.replace('"', '\\\"') + '"'
+ return '"' + s.replace('"', '\\"') + '"'
+
def args_to_dict(args):
args_dict = vars(args)
@@ -23,20 +25,22 @@ def args_to_dict(args):
continue
# Don't leak token
- if k == 'sm_cluster' and args_dict[k] is not None:
+ if k == "sm_cluster" and args_dict[k] is not None:
result[k] = True
else:
result[k] = args_dict[k]
-
+
return result
+
def save_opts(opts_json, args):
try:
- with open(opts_json, "w", encoding='utf-8') as f:
+ with open(opts_json, "w", encoding="utf-8") as f:
f.write(json.dumps(args_to_dict(args)))
except Exception as e:
log.ODM_WARNING("Cannot save options to %s: %s" % (opts_json, str(e)))
+
def compare_args(opts_json, args, rerun_stages):
if not os.path.isfile(opts_json):
return {}
@@ -55,22 +59,31 @@ def compare_args(opts_json, args, rerun_stages):
if stage is not None and cur_value != prev_value:
diff[opt] = prev_value
-
+
return diff
except:
return {}
+
def find_rerun_stage(opts_json, args, rerun_stages, processopts):
# Find the proper rerun stage if one is not explicitly set
- if not ('rerun_is_set' in args or 'rerun_from_is_set' in args or 'rerun_all_is_set' in args):
+ if not (
+ "rerun_is_set" in args
+ or "rerun_from_is_set" in args
+ or "rerun_all_is_set" in args
+ ):
args_diff = compare_args(opts_json, args, rerun_stages)
if args_diff:
- if 'split_is_set' in args:
- return processopts[processopts.index('dataset'):], args_diff
+ if "split_is_set" in args:
+ return processopts[processopts.index("dataset") :], args_diff
try:
- stage_idxs = [processopts.index(rerun_stages[opt]) for opt in args_diff.keys() if rerun_stages[opt] is not None]
- return processopts[min(stage_idxs):], args_diff
+ stage_idxs = [
+ processopts.index(rerun_stages[opt])
+ for opt in args_diff.keys()
+ if rerun_stages[opt] is not None
+ ]
+ return processopts[min(stage_idxs) :], args_diff
except ValueError as e:
print(str(e))
- return None, {}
\ No newline at end of file
+ return None, {}
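
A toy illustration of the stage lookup in find_rerun_stage above: each changed option maps to a pipeline stage, and the rerun starts at the earliest of those stages. The option names and stage list are trimmed for brevity.

processopts = ["dataset", "opensfm", "odm_dem", "odm_report"]
rerun_stages = {"min_num_features": "opensfm", "dsm": "odm_dem"}
args_diff = {"dsm": True, "min_num_features": 8000}  # options that differ from the previous run

stage_idxs = [processopts.index(rerun_stages[opt]) for opt in args_diff]
print(processopts[min(stage_idxs):])  # ['opensfm', 'odm_dem', 'odm_report']
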
diff --git a/opendm/bgfilter.py b/opendm/bgfilter.py
index 5285535c..617043a6 100644
--- a/opendm/bgfilter.py
+++ b/opendm/bgfilter.py
@@ -1,4 +1,3 @@
-
import time
import numpy as np
import cv2
@@ -13,18 +12,22 @@ mutex = Lock()
# Implementation based on https://github.com/danielgatis/rembg by Daniel Gatis
# Use GPU if it is available, otherwise CPU
-provider = "CUDAExecutionProvider" if "CUDAExecutionProvider" in ort.get_available_providers() else "CPUExecutionProvider"
+provider = (
+ "CUDAExecutionProvider"
+ if "CUDAExecutionProvider" in ort.get_available_providers()
+ else "CPUExecutionProvider"
+)
-class BgFilter():
+
+class BgFilter:
def __init__(self, model):
self.model = model
- log.ODM_INFO(' ?> Using provider %s' % provider)
+ log.ODM_INFO(" ?> Using provider %s" % provider)
self.load_model()
-
def load_model(self):
- log.ODM_INFO(' -> Loading the model')
+ log.ODM_INFO(" -> Loading the model")
self.session = ort.InferenceSession(self.model, providers=[provider])
@@ -53,7 +56,10 @@ class BgFilter():
ort_outs = self.session.run(
None,
self.normalize(
- img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), (320, 320) # <-- image size
+ img,
+ (0.485, 0.456, 0.406),
+ (0.229, 0.224, 0.225),
+ (320, 320), # <-- image size
),
)
@@ -75,13 +81,13 @@ class BgFilter():
def run_img(self, img_path, dest):
img = read_image(img_path)
- mask = self.get_mask(img)
-
+ mask = self.get_mask(img)
+
img_name = os.path.basename(img_path)
fpath = os.path.join(dest, img_name)
fname, _ = os.path.splitext(fpath)
- mask_name = fname + '_mask.png'
+ mask_name = fname + "_mask.png"
cv2.imwrite(mask_name, mask)
return mask_name
diff --git a/opendm/boundary.py b/opendm/boundary.py
index 86ea5ae2..3f0f3982 100644
--- a/opendm/boundary.py
+++ b/opendm/boundary.py
@@ -10,7 +10,10 @@ from opendm.utils import double_quote
from osgeo import ogr
from opendm.shots import get_origin
-def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_offset=(0, 0)):
+
+def compute_boundary_from_shots(
+ reconstruction_json, buffer=0, reconstruction_offset=(0, 0)
+):
if not os.path.isfile(reconstruction_json):
raise IOError(reconstruction_json + " does not exist.")
@@ -20,15 +23,18 @@ def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_of
mp = ogr.Geometry(ogr.wkbMultiPoint)
- for shot_image in reconstruction['shots']:
- shot = reconstruction['shots'][shot_image]
- if shot.get('gps_dop', 999999) < 999999:
- camera = reconstruction['cameras'][shot['camera']]
+ for shot_image in reconstruction["shots"]:
+ shot = reconstruction["shots"][shot_image]
+ if shot.get("gps_dop", 999999) < 999999:
+ camera = reconstruction["cameras"][shot["camera"]]
p = ogr.Geometry(ogr.wkbPoint)
origin = get_origin(shot)
- p.AddPoint_2D(origin[0] + reconstruction_offset[0], origin[1] + reconstruction_offset[1])
+ p.AddPoint_2D(
+ origin[0] + reconstruction_offset[0],
+ origin[1] + reconstruction_offset[1],
+ )
mp.AddGeometry(p)
if mp.GetGeometryCount() < 3:
@@ -39,24 +45,27 @@ def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_of
return load_boundary(boundary.ExportToJson())
+
def load_boundary(boundary_json, reproject_to_proj4=None):
if not isinstance(boundary_json, str):
boundary_json = json.dumps(boundary_json)
- with fiona.open(io.BytesIO(boundary_json.encode('utf-8')), 'r') as src:
+ with fiona.open(io.BytesIO(boundary_json.encode("utf-8")), "r") as src:
if len(src) != 1:
raise IOError("Boundary must have a single polygon (found: %s)" % len(src))
-
- geom = src[0]['geometry']
- if geom['type'] != 'Polygon':
- raise IOError("Boundary must have a polygon feature (found: %s)" % geom['type'])
+ geom = src[0]["geometry"]
- rings = geom['coordinates']
+ if geom["type"] != "Polygon":
+ raise IOError(
+ "Boundary must have a polygon feature (found: %s)" % geom["type"]
+ )
+
+ rings = geom["coordinates"]
if len(rings) == 0:
raise IOError("Boundary geometry has no rings")
-
+
coords = rings[0]
if len(coords) == 0:
raise IOError("Boundary geometry has no coordinates")
@@ -64,58 +73,75 @@ def load_boundary(boundary_json, reproject_to_proj4=None):
dimensions = len(coords[0])
if reproject_to_proj4 is not None:
- t = transformer(CRS.from_proj4(fiona.crs.to_string(src.crs)),
- CRS.from_proj4(reproject_to_proj4))
+ t = transformer(
+ CRS.from_proj4(fiona.crs.to_string(src.crs)),
+ CRS.from_proj4(reproject_to_proj4),
+ )
coords = [t.TransformPoint(*c)[:dimensions] for c in coords]
-
+
return coords
+
def boundary_offset(boundary, reconstruction_offset):
if boundary is None or reconstruction_offset is None:
return boundary
-
+
res = []
dims = len(boundary[0])
for c in boundary:
if dims == 2:
- res.append((c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1]))
+ res.append(
+ (c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1])
+ )
else:
- res.append((c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1], c[2]))
-
+ res.append(
+ (c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1], c[2])
+ )
+
return res
+
def as_polygon(boundary):
if boundary is None:
return None
return "POLYGON((" + ", ".join([" ".join(map(str, c)) for c in boundary]) + "))"
+
def as_geojson(boundary):
- return '{"type":"FeatureCollection","features":[{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[%s]}}]}' % str(list(map(list, boundary)))
+ return (
+ '{"type":"FeatureCollection","features":[{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[%s]}}]}'
+ % str(list(map(list, boundary)))
+ )
+
def export_to_bounds_files(boundary, proj4, bounds_json_file, bounds_gpkg_file):
with open(bounds_json_file, "w") as f:
- f.write(json.dumps({
- "type": "FeatureCollection",
- "name": "bounds",
- "features": [{
- "type": "Feature",
- "properties": {},
- "geometry": {
- "type": "Polygon",
- "coordinates": [boundary]
+ f.write(
+ json.dumps(
+ {
+ "type": "FeatureCollection",
+ "name": "bounds",
+ "features": [
+ {
+ "type": "Feature",
+ "properties": {},
+ "geometry": {"type": "Polygon", "coordinates": [boundary]},
+ }
+ ],
}
- }]
- }))
-
+ )
+ )
+
if os.path.isfile(bounds_gpkg_file):
os.remove(bounds_gpkg_file)
-
+
kwargs = {
- 'proj4': proj4,
- 'input': double_quote(bounds_json_file),
- 'output': double_quote(bounds_gpkg_file)
+ "proj4": proj4,
+ "input": double_quote(bounds_json_file),
+ "output": double_quote(bounds_gpkg_file),
}
- system.run('ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs))
-
+ system.run(
+ 'ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs)
+ )
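
A quick check (outside the patch) of the WKT string produced by as_polygon above, using a made-up closed ring.

boundary = [(0.0, 0.0), (10.0, 0.0), (10.0, 5.0), (0.0, 0.0)]
wkt = "POLYGON((" + ", ".join([" ".join(map(str, c)) for c in boundary]) + "))"
print(wkt)  # POLYGON((0.0 0.0, 10.0 0.0, 10.0 5.0, 0.0 0.0))
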
diff --git a/opendm/camera.py b/opendm/camera.py
index ee78caff..1050799f 100644
--- a/opendm/camera.py
+++ b/opendm/camera.py
@@ -1,29 +1,30 @@
import os, json
from opendm import log
+
def get_cameras_from_opensfm(reconstruction_file):
"""
Extract the cameras from OpenSfM's reconstruction.json
"""
if os.path.exists(reconstruction_file):
- with open(reconstruction_file, 'r') as fin:
+ with open(reconstruction_file, "r") as fin:
reconstructions = json.loads(fin.read())
-
+
result = {}
for recon in reconstructions:
- if 'cameras' in recon:
- for camera_id in recon['cameras']:
+ if "cameras" in recon:
+ for camera_id in recon["cameras"]:
# Strip "v2" from OpenSfM camera IDs
new_camera_id = camera_id
if new_camera_id.startswith("v2 "):
new_camera_id = new_camera_id[3:]
- result[new_camera_id] = recon['cameras'][camera_id]
-
+ result[new_camera_id] = recon["cameras"][camera_id]
+
# Remove "_prior" keys
keys = list(result[new_camera_id].keys())
for k in keys:
- if k.endswith('_prior'):
+ if k.endswith("_prior"):
result[new_camera_id].pop(k)
return result
else:
@@ -47,11 +48,26 @@ def get_opensfm_camera_models(cameras):
osfm_camera_id = "v2 " + camera_id
else:
osfm_camera_id = camera_id
-
+
# Add "_prior" keys
camera = cameras[camera_id]
- prior_fields = ["focal","focal_x","focal_y","c_x","c_y","k1","k2","p1","p2","k3"]
- valid_fields = ["id","width","height","projection_type"] + prior_fields + [f + "_prior" for f in prior_fields]
+ prior_fields = [
+ "focal",
+ "focal_x",
+ "focal_y",
+ "c_x",
+ "c_y",
+ "k1",
+ "k2",
+ "p1",
+ "p2",
+ "k3",
+ ]
+ valid_fields = (
+ ["id", "width", "height", "projection_type"]
+ + prior_fields
+ + [f + "_prior" for f in prior_fields]
+ )
keys = list(camera.keys())
for param in keys:
diff --git a/opendm/cogeo.py b/opendm/cogeo.py
index 289562e6..3c7cbc12 100644
--- a/opendm/cogeo.py
+++ b/opendm/cogeo.py
@@ -5,6 +5,7 @@ from opendm.concurrency import get_max_memory
from opendm import io
from opendm import log
+
def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLATE"):
"""
Guarantee that the .tif passed as an argument is a Cloud Optimized GeoTIFF (cogeo)
@@ -20,43 +21,44 @@ def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLAT
log.ODM_INFO("Optimizing %s as Cloud Optimized GeoTIFF" % src_path)
-
- tmpfile = io.related_file_path(src_path, postfix='_cogeo')
- swapfile = io.related_file_path(src_path, postfix='_cogeo_swap')
+ tmpfile = io.related_file_path(src_path, postfix="_cogeo")
+ swapfile = io.related_file_path(src_path, postfix="_cogeo_swap")
kwargs = {
- 'threads': max_workers if max_workers else 'ALL_CPUS',
- 'blocksize': blocksize,
- 'max_memory': get_max_memory(),
- 'src_path': src_path,
- 'tmpfile': tmpfile,
- 'compress': compression,
- 'predictor': '2' if compression in ['LZW', 'DEFLATE'] else '1',
+ "threads": max_workers if max_workers else "ALL_CPUS",
+ "blocksize": blocksize,
+ "max_memory": get_max_memory(),
+ "src_path": src_path,
+ "tmpfile": tmpfile,
+ "compress": compression,
+ "predictor": "2" if compression in ["LZW", "DEFLATE"] else "1",
}
try:
- system.run("gdal_translate "
- "-of COG "
- "-co NUM_THREADS={threads} "
- "-co BLOCKSIZE={blocksize} "
- "-co COMPRESS={compress} "
- "-co PREDICTOR={predictor} "
- "-co BIGTIFF=IF_SAFER "
- "-co RESAMPLING=NEAREST "
- "--config GDAL_CACHEMAX {max_memory}% "
- "--config GDAL_NUM_THREADS {threads} "
- "\"{src_path}\" \"{tmpfile}\" ".format(**kwargs))
+ system.run(
+ "gdal_translate "
+ "-of COG "
+ "-co NUM_THREADS={threads} "
+ "-co BLOCKSIZE={blocksize} "
+ "-co COMPRESS={compress} "
+ "-co PREDICTOR={predictor} "
+ "-co BIGTIFF=IF_SAFER "
+ "-co RESAMPLING=NEAREST "
+ "--config GDAL_CACHEMAX {max_memory}% "
+ "--config GDAL_NUM_THREADS {threads} "
+ '"{src_path}" "{tmpfile}" '.format(**kwargs)
+ )
except Exception as e:
log.ODM_WARNING("Cannot create Cloud Optimized GeoTIFF: %s" % str(e))
if os.path.isfile(tmpfile):
- shutil.move(src_path, swapfile) # Move to swap location
+ shutil.move(src_path, swapfile) # Move to swap location
try:
shutil.move(tmpfile, src_path)
except IOError as e:
log.ODM_WARNING("Cannot move %s to %s: %s" % (tmpfile, src_path, str(e)))
- shutil.move(swapfile, src_path) # Attempt to restore
+ shutil.move(swapfile, src_path) # Attempt to restore
if os.path.isfile(swapfile):
os.remove(swapfile)
diff --git a/opendm/concurrency.py b/opendm/concurrency.py
index dfcf58f4..973c3f77 100644
--- a/opendm/concurrency.py
+++ b/opendm/concurrency.py
@@ -1,6 +1,7 @@
from vmem import virtual_memory
import os
import sys
+
try:
import Queue as queue
except:
@@ -9,7 +10,8 @@ import threading
import time
from opendm import log
-def get_max_memory(minimum = 5, use_at_most = 0.5):
+
+def get_max_memory(minimum=5, use_at_most=0.5):
"""
:param minimum minimum value to return (return value will never be lower than this)
:param use_at_most use at most this fraction of the available memory. 0.5 = use at most 50% of available memory
@@ -17,7 +19,8 @@ def get_max_memory(minimum = 5, use_at_most = 0.5):
"""
return max(minimum, (100 - virtual_memory().percent) * use_at_most)
-def get_max_memory_mb(minimum = 100, use_at_most = 0.5):
+
+def get_max_memory_mb(minimum=100, use_at_most=0.5):
"""
:param minimum minimum value to return (return value will never be lower than this)
:param use_at_most use at most this fraction of the available memory. 0.5 = use at most 50% of available memory
@@ -25,13 +28,15 @@ def get_max_memory_mb(minimum = 100, use_at_most = 0.5):
"""
return max(minimum, (virtual_memory().available / 1024 / 1024) * use_at_most)
+
def get_total_memory():
return virtual_memory().total
+
def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
"""
Our own implementation for parallel processing
- which handles gracefully CTRL+C and reverts to
+    which gracefully handles CTRL+C and reverts to
single thread processing in case of errors
:param items list of objects
:param func function to execute on each object
@@ -92,7 +97,9 @@ def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
if error is not None and single_thread_fallback:
# Try to reprocess using a single thread
# in case this was a memory error
- log.ODM_WARNING("Failed to run process in parallel, retrying with a single thread...")
+ log.ODM_WARNING(
+ "Failed to run process in parallel, retrying with a single thread..."
+ )
use_single_thread = True
else:
use_single_thread = True
@@ -100,4 +107,4 @@ def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
if use_single_thread:
# Boring, single thread processing
for q in items:
- process_one(q)
\ No newline at end of file
+ process_one(q)
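
A sketch of the get_max_memory formula above, with the memory-usage percentage passed in explicitly instead of read from virtual_memory(); the 60% figure is made up.

def available_percent_cap(percent_used, minimum=5, use_at_most=0.5):
    # Same formula as get_max_memory: never below `minimum`, never more than
    # `use_at_most` of the currently free share of memory
    return max(minimum, (100 - percent_used) * use_at_most)

print(available_percent_cap(60))  # 20.0
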
diff --git a/opendm/config.py b/opendm/config.py
index b1d195e9..7131f0e0 100755
--- a/opendm/config.py
+++ b/opendm/config.py
@@ -9,105 +9,117 @@ import os
import sys
# parse arguments
-processopts = ['dataset', 'split', 'merge', 'opensfm', 'openmvs', 'odm_filterpoints',
- 'odm_meshing', 'mvs_texturing', 'odm_georeferencing',
- 'odm_dem', 'odm_orthophoto', 'odm_report', 'odm_postprocess']
+processopts = [
+ "dataset",
+ "split",
+ "merge",
+ "opensfm",
+ "openmvs",
+ "odm_filterpoints",
+ "odm_meshing",
+ "mvs_texturing",
+ "odm_georeferencing",
+ "odm_dem",
+ "odm_orthophoto",
+ "odm_report",
+ "odm_postprocess",
+]
rerun_stages = {
- '3d_tiles': 'odm_postprocess',
- 'align': 'odm_georeferencing',
- 'auto_boundary': 'odm_filterpoints',
- 'auto_boundary_distance': 'odm_filterpoints',
- 'bg_removal': 'dataset',
- 'boundary': 'odm_filterpoints',
- 'build_overviews': 'odm_orthophoto',
- 'camera_lens': 'dataset',
- 'cameras': 'dataset',
- 'cog': 'odm_dem',
- 'copy_to': 'odm_postprocess',
- 'crop': 'odm_georeferencing',
- 'dem_decimation': 'odm_dem',
- 'dem_euclidean_map': 'odm_dem',
- 'dem_gapfill_steps': 'odm_dem',
- 'dem_resolution': 'odm_dem',
- 'dsm': 'odm_dem',
- 'dtm': 'odm_dem',
- 'end_with': None,
- 'fast_orthophoto': 'odm_filterpoints',
- 'feature_quality': 'opensfm',
- 'feature_type': 'opensfm',
- 'force_gps': 'opensfm',
- 'gcp': 'dataset',
- 'geo': 'dataset',
- 'gltf': 'mvs_texturing',
- 'gps_accuracy': 'dataset',
- 'help': None,
- 'ignore_gsd': 'opensfm',
- 'matcher_neighbors': 'opensfm',
- 'matcher_order': 'opensfm',
- 'matcher_type': 'opensfm',
- 'max_concurrency': None,
- 'merge': 'Merge',
- 'mesh_octree_depth': 'odm_meshing',
- 'mesh_size': 'odm_meshing',
- 'min_num_features': 'opensfm',
- 'name': None,
- 'no_gpu': None,
- 'optimize_disk_space': None,
- 'orthophoto_compression': 'odm_orthophoto',
- 'orthophoto_cutline': 'odm_orthophoto',
- 'orthophoto_kmz': 'odm_orthophoto',
- 'orthophoto_no_tiled': 'odm_orthophoto',
- 'orthophoto_png': 'odm_orthophoto',
- 'orthophoto_resolution': 'odm_orthophoto',
- 'pc_classify': 'odm_georeferencing',
- 'pc_copc': 'odm_georeferencing',
- 'pc_csv': 'odm_georeferencing',
- 'pc_ept': 'odm_georeferencing',
- 'pc_filter': 'openmvs',
- 'pc_las': 'odm_georeferencing',
- 'pc_quality': 'opensfm',
- 'pc_rectify': 'odm_georeferencing',
- 'pc_sample': 'odm_filterpoints',
- 'pc_skip_geometric': 'openmvs',
- 'primary_band': 'dataset',
- 'project_path': None,
- 'radiometric_calibration': 'opensfm',
- 'rerun': None,
- 'rerun_all': None,
- 'rerun_from': None,
- 'rolling_shutter': 'opensfm',
- 'rolling_shutter_readout': 'opensfm',
- 'sfm_algorithm': 'opensfm',
- 'sfm_no_partial': 'opensfm',
- 'skip_3dmodel': 'odm_meshing',
- 'skip_band_alignment': 'opensfm',
- 'skip_orthophoto': 'odm_orthophoto',
- 'skip_report': 'odm_report',
- 'sky_removal': 'dataset',
- 'sm_cluster': 'split',
- 'sm_no_align': 'split',
- 'smrf_scalar': 'odm_dem',
- 'smrf_slope': 'odm_dem',
- 'smrf_threshold': 'odm_dem',
- 'smrf_window': 'odm_dem',
- 'split': 'split',
- 'split_image_groups': 'split',
- 'split_overlap': 'split',
- 'texturing_keep_unseen_faces': 'mvs_texturing',
- 'texturing_single_material': 'mvs_texturing',
- 'texturing_skip_global_seam_leveling': 'mvs_texturing',
- 'tiles': 'odm_dem',
- 'use_3dmesh': 'mvs_texturing',
- 'use_exif': 'dataset',
- 'use_fixed_camera_params': 'opensfm',
- 'use_hybrid_bundle_adjustment': 'opensfm',
- 'version': None,
- 'video_limit': 'dataset',
- 'video_resolution': 'dataset',
+ "3d_tiles": "odm_postprocess",
+ "align": "odm_georeferencing",
+ "auto_boundary": "odm_filterpoints",
+ "auto_boundary_distance": "odm_filterpoints",
+ "bg_removal": "dataset",
+ "boundary": "odm_filterpoints",
+ "build_overviews": "odm_orthophoto",
+ "camera_lens": "dataset",
+ "cameras": "dataset",
+ "cog": "odm_dem",
+ "copy_to": "odm_postprocess",
+ "crop": "odm_georeferencing",
+ "dem_decimation": "odm_dem",
+ "dem_euclidean_map": "odm_dem",
+ "dem_gapfill_steps": "odm_dem",
+ "dem_resolution": "odm_dem",
+ "dsm": "odm_dem",
+ "dtm": "odm_dem",
+ "end_with": None,
+ "fast_orthophoto": "odm_filterpoints",
+ "feature_quality": "opensfm",
+ "feature_type": "opensfm",
+ "force_gps": "opensfm",
+ "gcp": "dataset",
+ "geo": "dataset",
+ "gltf": "mvs_texturing",
+ "gps_accuracy": "dataset",
+ "help": None,
+ "ignore_gsd": "opensfm",
+ "matcher_neighbors": "opensfm",
+ "matcher_order": "opensfm",
+ "matcher_type": "opensfm",
+ "max_concurrency": None,
+ "merge": "Merge",
+ "mesh_octree_depth": "odm_meshing",
+ "mesh_size": "odm_meshing",
+ "min_num_features": "opensfm",
+ "name": None,
+ "no_gpu": None,
+ "optimize_disk_space": None,
+ "orthophoto_compression": "odm_orthophoto",
+ "orthophoto_cutline": "odm_orthophoto",
+ "orthophoto_kmz": "odm_orthophoto",
+ "orthophoto_no_tiled": "odm_orthophoto",
+ "orthophoto_png": "odm_orthophoto",
+ "orthophoto_resolution": "odm_orthophoto",
+ "pc_classify": "odm_georeferencing",
+ "pc_copc": "odm_georeferencing",
+ "pc_csv": "odm_georeferencing",
+ "pc_ept": "odm_georeferencing",
+ "pc_filter": "openmvs",
+ "pc_las": "odm_georeferencing",
+ "pc_quality": "opensfm",
+ "pc_rectify": "odm_georeferencing",
+ "pc_sample": "odm_filterpoints",
+ "pc_skip_geometric": "openmvs",
+ "primary_band": "dataset",
+ "project_path": None,
+ "radiometric_calibration": "opensfm",
+ "rerun": None,
+ "rerun_all": None,
+ "rerun_from": None,
+ "rolling_shutter": "opensfm",
+ "rolling_shutter_readout": "opensfm",
+ "sfm_algorithm": "opensfm",
+ "sfm_no_partial": "opensfm",
+ "skip_3dmodel": "odm_meshing",
+ "skip_band_alignment": "opensfm",
+ "skip_orthophoto": "odm_orthophoto",
+ "skip_report": "odm_report",
+ "sky_removal": "dataset",
+ "sm_cluster": "split",
+ "sm_no_align": "split",
+ "smrf_scalar": "odm_dem",
+ "smrf_slope": "odm_dem",
+ "smrf_threshold": "odm_dem",
+ "smrf_window": "odm_dem",
+ "split": "split",
+ "split_image_groups": "split",
+ "split_overlap": "split",
+ "texturing_keep_unseen_faces": "mvs_texturing",
+ "texturing_single_material": "mvs_texturing",
+ "texturing_skip_global_seam_leveling": "mvs_texturing",
+ "tiles": "odm_dem",
+ "use_3dmesh": "mvs_texturing",
+ "use_exif": "dataset",
+ "use_fixed_camera_params": "opensfm",
+ "use_hybrid_bundle_adjustment": "opensfm",
+ "version": None,
+ "video_limit": "dataset",
+ "video_resolution": "dataset",
}
-with open(os.path.join(context.root_path, 'VERSION')) as version_file:
+with open(os.path.join(context.root_path, "VERSION")) as version_file:
__version__ = version_file.read().strip()
@@ -117,770 +129,1043 @@ def path_or_json_string(string):
except ValueError as e:
raise argparse.ArgumentTypeError("{0}".format(str(e)))
+
# Django URL validation regex
def url_string(string):
import re
+
regex = re.compile(
- r'^(?:http|ftp)s?://' # http:// or https://
- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
- r'localhost|' #localhost...
- r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
- r'(?::\d+)?' # optional port
- r'(?:/?|[/?]\S+)$', re.IGNORECASE)
-
+ r"^(?:http|ftp)s?://" # http:// or https://
+ r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain...
+ r"localhost|" # localhost...
+ r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
+ r"(?::\d+)?" # optional port
+ r"(?:/?|[/?]\S+)$",
+ re.IGNORECASE,
+ )
+
if re.match(regex, string) is None:
- raise argparse.ArgumentTypeError("%s is not a valid URL. The URL must be in the format: http(s)://host[:port]/[?token=]" % string)
+ raise argparse.ArgumentTypeError(
+ "%s is not a valid URL. The URL must be in the format: http(s)://host[:port]/[?token=]"
+ % string
+ )
return string
+
class RerunFrom(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
- setattr(namespace, self.dest, processopts[processopts.index(values):])
- setattr(namespace, self.dest + '_is_set', True)
+ setattr(namespace, self.dest, processopts[processopts.index(values) :])
+ setattr(namespace, self.dest + "_is_set", True)
+
class StoreTrue(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, True)
- setattr(namespace, self.dest + '_is_set', True)
+ setattr(namespace, self.dest + "_is_set", True)
+
class StoreValue(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
- setattr(namespace, self.dest + '_is_set', True)
+ setattr(namespace, self.dest + "_is_set", True)
+
args = None
+
def config(argv=None, parser=None):
global args
if args is not None and argv is None:
return args
- if sys.platform == 'win32':
- usage_bin = 'run'
+ if sys.platform == "win32":
+ usage_bin = "run"
else:
- usage_bin = 'run.sh'
+ usage_bin = "run.sh"
if parser is None:
- parser = SettingsParser(description='ODM is a command line toolkit to generate maps, point clouds, 3D models and DEMs from drone, balloon or kite images.',
- usage='%s [options] ' % usage_bin,
- yaml_file=open(context.settings_path))
-
- parser.add_argument('--project-path',
- metavar='',
- action=StoreValue,
- help='Path to the project folder. Your project folder should contain subfolders for each dataset. Each dataset should have an "images" folder.')
- parser.add_argument('name',
- metavar='',
- action=StoreValue,
- type=str,
- default='code',
- nargs='?',
- help='Name of dataset (i.e subfolder name within project folder). Default: %(default)s')
+ parser = SettingsParser(
+ description="ODM is a command line toolkit to generate maps, point clouds, 3D models and DEMs from drone, balloon or kite images.",
+ usage="%s [options] " % usage_bin,
+ yaml_file=open(context.settings_path),
+ )
- parser.add_argument('--end-with', '-e',
- metavar='',
- action=StoreValue,
- default='odm_postprocess',
- choices=processopts,
- help='End processing at this stage. Can be one of: %(choices)s. Default: %(default)s')
+ parser.add_argument(
+ "--project-path",
+ metavar="",
+ action=StoreValue,
+ help='Path to the project folder. Your project folder should contain subfolders for each dataset. Each dataset should have an "images" folder.',
+ )
+ parser.add_argument(
+ "name",
+ metavar="",
+ action=StoreValue,
+ type=str,
+ default="code",
+ nargs="?",
+ help="Name of dataset (i.e subfolder name within project folder). Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--end-with",
+ "-e",
+ metavar="",
+ action=StoreValue,
+ default="odm_postprocess",
+ choices=processopts,
+ help="End processing at this stage. Can be one of: %(choices)s. Default: %(default)s",
+ )
rerun = parser.add_mutually_exclusive_group()
- rerun.add_argument('--rerun', '-r',
- metavar='',
- action=StoreValue,
- choices=processopts,
- help=('Rerun this stage only and stop. Can be one of: %(choices)s. Default: %(default)s'))
+ rerun.add_argument(
+ "--rerun",
+ "-r",
+ metavar="",
+ action=StoreValue,
+ choices=processopts,
+ help=(
+ "Rerun this stage only and stop. Can be one of: %(choices)s. Default: %(default)s"
+ ),
+ )
- rerun.add_argument('--rerun-all',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Permanently delete all previous results and rerun the processing pipeline.')
+ rerun.add_argument(
+ "--rerun-all",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Permanently delete all previous results and rerun the processing pipeline.",
+ )
- rerun.add_argument('--rerun-from',
- action=RerunFrom,
- metavar='',
- choices=processopts,
- help=('Rerun processing from this stage. Can be one of: %(choices)s. Default: %(default)s'))
+ rerun.add_argument(
+ "--rerun-from",
+ action=RerunFrom,
+ metavar="",
+ choices=processopts,
+ help=(
+ "Rerun processing from this stage. Can be one of: %(choices)s. Default: %(default)s"
+ ),
+ )
- parser.add_argument('--min-num-features',
- metavar='',
- action=StoreValue,
- default=10000,
- type=int,
- help=('Minimum number of features to extract per image. '
- 'More features can be useful for finding more matches between images, '
- 'potentially allowing the reconstruction of areas with little overlap or insufficient features. '
- 'More features also slow down processing. Default: %(default)s'))
-
- parser.add_argument('--feature-type',
- metavar='',
- action=StoreValue,
- default='dspsift',
- choices=['akaze', 'dspsift', 'hahog', 'orb', 'sift'],
- help=('Choose the algorithm for extracting keypoints and computing descriptors. '
- 'Can be one of: %(choices)s. Default: '
- '%(default)s'))
-
- parser.add_argument('--feature-quality',
- metavar='',
- action=StoreValue,
- default='high',
- choices=['ultra', 'high', 'medium', 'low', 'lowest'],
- help=('Set feature extraction quality. Higher quality generates better features, but requires more memory and takes longer. '
- 'Can be one of: %(choices)s. Default: '
- '%(default)s'))
-
- parser.add_argument('--matcher-type',
- metavar='',
- action=StoreValue,
- default='flann',
- choices=['bow', 'bruteforce', 'flann'],
- help=('Matcher algorithm, Fast Library for Approximate Nearest Neighbors or Bag of Words. FLANN is slower, but more stable. BOW is faster, but can sometimes miss valid matches. BRUTEFORCE is very slow but robust.'
- 'Can be one of: %(choices)s. Default: '
- '%(default)s'))
+ parser.add_argument(
+ "--min-num-features",
+ metavar="",
+ action=StoreValue,
+ default=10000,
+ type=int,
+ help=(
+ "Minimum number of features to extract per image. "
+ "More features can be useful for finding more matches between images, "
+ "potentially allowing the reconstruction of areas with little overlap or insufficient features. "
+ "More features also slow down processing. Default: %(default)s"
+ ),
+ )
- parser.add_argument('--matcher-neighbors',
- metavar='',
- action=StoreValue,
- default=0,
- type=int,
- help='Perform image matching with the nearest images based on GPS exif data. Set to 0 to match by triangulation. Default: %(default)s')
-
- parser.add_argument('--matcher-order',
- metavar='',
- action=StoreValue,
- default=0,
- type=int,
- help='Perform image matching with the nearest N images based on image filename order. Can speed up processing of sequential images, such as those extracted from video. It is applied only on non-georeferenced datasets. Set to 0 to disable. Default: %(default)s')
+ parser.add_argument(
+ "--feature-type",
+ metavar="",
+ action=StoreValue,
+ default="dspsift",
+ choices=["akaze", "dspsift", "hahog", "orb", "sift"],
+ help=(
+ "Choose the algorithm for extracting keypoints and computing descriptors. "
+ "Can be one of: %(choices)s. Default: "
+ "%(default)s"
+ ),
+ )
- parser.add_argument('--use-fixed-camera-params',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Turn off camera parameter optimization during bundle adjustment. This can be sometimes useful for improving results that exhibit doming/bowling or when images are taken with a rolling shutter camera. Default: %(default)s')
+ parser.add_argument(
+ "--feature-quality",
+ metavar="",
+ action=StoreValue,
+ default="high",
+ choices=["ultra", "high", "medium", "low", "lowest"],
+ help=(
+ "Set feature extraction quality. Higher quality generates better features, but requires more memory and takes longer. "
+ "Can be one of: %(choices)s. Default: "
+ "%(default)s"
+ ),
+ )
- parser.add_argument('--cameras',
- default='',
- metavar='',
- action=StoreValue,
- type=path_or_json_string,
- help='Use the camera parameters computed from '
- 'another dataset instead of calculating them. '
- 'Can be specified either as path to a cameras.json file or as a '
- 'JSON string representing the contents of a '
- 'cameras.json file. Default: %(default)s')
+ parser.add_argument(
+ "--matcher-type",
+ metavar="",
+ action=StoreValue,
+ default="flann",
+ choices=["bow", "bruteforce", "flann"],
+ help=(
+ "Matcher algorithm, Fast Library for Approximate Nearest Neighbors or Bag of Words. FLANN is slower, but more stable. BOW is faster, but can sometimes miss valid matches. BRUTEFORCE is very slow but robust."
+ "Can be one of: %(choices)s. Default: "
+ "%(default)s"
+ ),
+ )
- parser.add_argument('--camera-lens',
- metavar='',
- action=StoreValue,
- default='auto',
- choices=['auto', 'perspective', 'brown', 'fisheye', 'fisheye_opencv', 'spherical', 'equirectangular', 'dual'],
- help=('Set a camera projection type. Manually setting a value '
- 'can help improve geometric undistortion. By default the application '
- 'tries to determine a lens type from the images metadata. Can be one of: %(choices)s. Default: '
- '%(default)s'))
+ parser.add_argument(
+ "--matcher-neighbors",
+ metavar="",
+ action=StoreValue,
+ default=0,
+ type=int,
+ help="Perform image matching with the nearest images based on GPS exif data. Set to 0 to match by triangulation. Default: %(default)s",
+ )
- parser.add_argument('--radiometric-calibration',
- metavar='',
- action=StoreValue,
- default='none',
- choices=['none', 'camera', 'camera+sun'],
- help=('Set the radiometric calibration to perform on images. '
- 'When processing multispectral and thermal images you should set this option '
- 'to obtain reflectance/temperature values (otherwise you will get digital number values). '
- '[camera] applies black level, vignetting, row gradient gain/exposure compensation (if appropriate EXIF tags are found) and computes absolute temperature values. '
- '[camera+sun] is experimental, applies all the corrections of [camera], plus compensates for spectral radiance registered via a downwelling light sensor (DLS) taking in consideration the angle of the sun. '
- 'Can be one of: %(choices)s. Default: '
- '%(default)s'))
+ parser.add_argument(
+ "--matcher-order",
+ metavar="",
+ action=StoreValue,
+ default=0,
+ type=int,
+ help="Perform image matching with the nearest N images based on image filename order. Can speed up processing of sequential images, such as those extracted from video. It is applied only on non-georeferenced datasets. Set to 0 to disable. Default: %(default)s",
+ )
- parser.add_argument('--max-concurrency',
- metavar='',
- action=StoreValue,
- default=context.num_cores,
- type=int,
- help=('The maximum number of processes to use in various '
- 'processes. Peak memory requirement is ~1GB per '
- 'thread and 2 megapixel image resolution. Default: %(default)s'))
+ parser.add_argument(
+ "--use-fixed-camera-params",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Turn off camera parameter optimization during bundle adjustment. This can be sometimes useful for improving results that exhibit doming/bowling or when images are taken with a rolling shutter camera. Default: %(default)s",
+ )
- parser.add_argument('--use-hybrid-bundle-adjustment',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Run local bundle adjustment for every image added to the reconstruction and a global '
- 'adjustment every 100 images. Speeds up reconstruction for very large datasets. Default: %(default)s')
+ parser.add_argument(
+ "--cameras",
+ default="",
+ metavar="",
+ action=StoreValue,
+ type=path_or_json_string,
+ help="Use the camera parameters computed from "
+ "another dataset instead of calculating them. "
+ "Can be specified either as path to a cameras.json file or as a "
+ "JSON string representing the contents of a "
+ "cameras.json file. Default: %(default)s",
+ )
- parser.add_argument('--sfm-algorithm',
- metavar='',
- action=StoreValue,
- default='incremental',
- choices=['incremental', 'triangulation', 'planar'],
- help=('Choose the structure from motion algorithm. For aerial datasets, if camera GPS positions and angles are available, triangulation can generate better results. For planar scenes captured at fixed altitude with nadir-only images, planar can be much faster. '
- 'Can be one of: %(choices)s. Default: '
- '%(default)s'))
+ parser.add_argument(
+ "--camera-lens",
+ metavar="",
+ action=StoreValue,
+ default="auto",
+ choices=[
+ "auto",
+ "perspective",
+ "brown",
+ "fisheye",
+ "fisheye_opencv",
+ "spherical",
+ "equirectangular",
+ "dual",
+ ],
+ help=(
+ "Set a camera projection type. Manually setting a value "
+ "can help improve geometric undistortion. By default the application "
+ "tries to determine a lens type from the images metadata. Can be one of: %(choices)s. Default: "
+ "%(default)s"
+ ),
+ )
- parser.add_argument('--sfm-no-partial',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Do not attempt to merge partial reconstructions. This can happen when images do not have sufficient overlap or are isolated. Default: %(default)s')
+ parser.add_argument(
+ "--radiometric-calibration",
+ metavar="",
+ action=StoreValue,
+ default="none",
+ choices=["none", "camera", "camera+sun"],
+ help=(
+ "Set the radiometric calibration to perform on images. "
+ "When processing multispectral and thermal images you should set this option "
+ "to obtain reflectance/temperature values (otherwise you will get digital number values). "
+ "[camera] applies black level, vignetting, row gradient gain/exposure compensation (if appropriate EXIF tags are found) and computes absolute temperature values. "
+ "[camera+sun] is experimental, applies all the corrections of [camera], plus compensates for spectral radiance registered via a downwelling light sensor (DLS) taking in consideration the angle of the sun. "
+ "Can be one of: %(choices)s. Default: "
+ "%(default)s"
+ ),
+ )
- parser.add_argument('--sky-removal',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Automatically compute image masks using AI to remove the sky. Experimental. Default: %(default)s')
-
- parser.add_argument('--bg-removal',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Automatically compute image masks using AI to remove the background. Experimental. Default: %(default)s')
+ parser.add_argument(
+ "--max-concurrency",
+ metavar="",
+ action=StoreValue,
+ default=context.num_cores,
+ type=int,
+ help=(
+ "The maximum number of processes to use in various "
+ "processes. Peak memory requirement is ~1GB per "
+ "thread and 2 megapixel image resolution. Default: %(default)s"
+ ),
+ )
- parser.add_argument('--use-3dmesh',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Use a full 3D mesh to compute the orthophoto instead of a 2.5D mesh. This option is a bit faster and provides similar results in planar areas. Default: %(default)s')
+ parser.add_argument(
+ "--use-hybrid-bundle-adjustment",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Run local bundle adjustment for every image added to the reconstruction and a global "
+ "adjustment every 100 images. Speeds up reconstruction for very large datasets. Default: %(default)s",
+ )
- parser.add_argument('--skip-3dmodel',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Skip generation of a full 3D model. This can save time if you only need 2D results such as orthophotos and DEMs. Default: %(default)s')
-
- parser.add_argument('--skip-report',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Skip generation of PDF report. This can save time if you don\'t need a report. Default: %(default)s')
-
- parser.add_argument('--skip-orthophoto',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Skip generation of the orthophoto. This can save time if you only need 3D results or DEMs. Default: %(default)s')
-
- parser.add_argument('--ignore-gsd',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Ignore Ground Sampling Distance (GSD).'
- 'A memory and processor hungry change relative to the default behavior if set to true. '
- 'Ordinarily, GSD estimates are used to cap the maximum resolution of image outputs and resizes images when necessary, resulting in faster processing and lower memory usage. '
- 'Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. '
- 'Never set --ignore-gsd to true unless you are positive you need it, and even then: do not use it. Default: %(default)s')
-
- parser.add_argument('--no-gpu',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Do not use GPU acceleration, even if it\'s available. Default: %(default)s')
-
- parser.add_argument('--mesh-size',
- metavar='',
- action=StoreValue,
- default=200000,
- type=int,
- help=('The maximum vertex count of the output mesh. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--sfm-algorithm",
+ metavar="",
+ action=StoreValue,
+ default="incremental",
+ choices=["incremental", "triangulation", "planar"],
+ help=(
+ "Choose the structure from motion algorithm. For aerial datasets, if camera GPS positions and angles are available, triangulation can generate better results. For planar scenes captured at fixed altitude with nadir-only images, planar can be much faster. "
+ "Can be one of: %(choices)s. Default: "
+ "%(default)s"
+ ),
+ )
- parser.add_argument('--mesh-octree-depth',
- metavar='',
- action=StoreValue,
- default=11,
- type=int,
- help=('Octree depth used in the mesh reconstruction, '
- 'increase to get more vertices, recommended '
- 'values are 8-12. Default: %(default)s'))
+ parser.add_argument(
+ "--sfm-no-partial",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Do not attempt to merge partial reconstructions. This can happen when images do not have sufficient overlap or are isolated. Default: %(default)s",
+ )
- parser.add_argument('--fast-orthophoto',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Skips dense reconstruction and 3D model generation. '
- 'It generates an orthophoto directly from the sparse reconstruction. '
- 'If you just need an orthophoto and do not need a full 3D model, turn on this option. Default: %(default)s')
+ parser.add_argument(
+ "--sky-removal",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Automatically compute image masks using AI to remove the sky. Experimental. Default: %(default)s",
+ )
- parser.add_argument('--crop',
- metavar='',
- action=StoreValue,
- default=3,
- type=float,
- help=('Automatically crop image outputs by creating a smooth buffer '
- 'around the dataset boundaries, shrunk by N meters. '
- 'Use 0 to disable cropping. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--bg-removal",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Automatically compute image masks using AI to remove the background. Experimental. Default: %(default)s",
+ )
- parser.add_argument('--boundary',
- default='',
- metavar='',
- action=StoreValue,
- type=path_or_json_string,
- help='GeoJSON polygon limiting the area of the reconstruction. '
- 'Can be specified either as path to a GeoJSON file or as a '
- 'JSON string representing the contents of a '
- 'GeoJSON file. Default: %(default)s')
+ parser.add_argument(
+ "--use-3dmesh",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Use a full 3D mesh to compute the orthophoto instead of a 2.5D mesh. This option is a bit faster and provides similar results in planar areas. Default: %(default)s",
+ )
- parser.add_argument('--auto-boundary',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Automatically set a boundary using camera shot locations to limit the area of the reconstruction. '
- 'This can help remove far away background artifacts (sky, background landscapes, etc.). See also --boundary. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--skip-3dmodel",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Skip generation of a full 3D model. This can save time if you only need 2D results such as orthophotos and DEMs. Default: %(default)s",
+ )
- parser.add_argument('--auto-boundary-distance',
- metavar='',
- action=StoreValue,
- type=float,
- default=0,
- help='Specify the distance between camera shot locations and the outer edge of the boundary when computing the boundary with --auto-boundary. Set to 0 to automatically choose a value. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--skip-report",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Skip generation of PDF report. This can save time if you don't need a report. Default: %(default)s",
+ )
- parser.add_argument('--pc-quality',
- metavar='',
- action=StoreValue,
- default='medium',
- choices=['ultra', 'high', 'medium', 'low', 'lowest'],
- help=('Set point cloud quality. Higher quality generates better, denser point clouds, but requires more memory and takes longer. Each step up in quality increases processing time roughly by a factor of 4x.'
- 'Can be one of: %(choices)s. Default: '
- '%(default)s'))
+ parser.add_argument(
+ "--skip-orthophoto",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Skip generation of the orthophoto. This can save time if you only need 3D results or DEMs. Default: %(default)s",
+ )
- parser.add_argument('--pc-classify',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Classify the point cloud outputs. '
- 'You can control the behavior of this option by tweaking the --dem-* parameters. '
- 'Default: '
- '%(default)s')
+ parser.add_argument(
+ "--ignore-gsd",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Ignore Ground Sampling Distance (GSD)."
+ "A memory and processor hungry change relative to the default behavior if set to true. "
+ "Ordinarily, GSD estimates are used to cap the maximum resolution of image outputs and resizes images when necessary, resulting in faster processing and lower memory usage. "
+ "Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. "
+ "Never set --ignore-gsd to true unless you are positive you need it, and even then: do not use it. Default: %(default)s",
+ )
- parser.add_argument('--pc-csv',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Export the georeferenced point cloud in CSV format. Default: %(default)s')
-
- parser.add_argument('--pc-las',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Export the georeferenced point cloud in LAS format. Default: %(default)s')
+ parser.add_argument(
+ "--no-gpu",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Do not use GPU acceleration, even if it's available. Default: %(default)s",
+ )
- parser.add_argument('--pc-ept',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Export the georeferenced point cloud in Entwine Point Tile (EPT) format. Default: %(default)s')
+ parser.add_argument(
+ "--mesh-size",
+ metavar="",
+ action=StoreValue,
+ default=200000,
+ type=int,
+ help=("The maximum vertex count of the output mesh. " "Default: %(default)s"),
+ )
- parser.add_argument('--pc-copc',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Save the georeferenced point cloud in Cloud Optimized Point Cloud (COPC) format. Default: %(default)s')
+ parser.add_argument(
+ "--mesh-octree-depth",
+ metavar="",
+ action=StoreValue,
+ default=11,
+ type=int,
+ help=(
+ "Octree depth used in the mesh reconstruction, "
+ "increase to get more vertices, recommended "
+ "values are 8-12. Default: %(default)s"
+ ),
+ )
- parser.add_argument('--pc-filter',
- metavar='',
- action=StoreValue,
- type=float,
- default=5,
- help='Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering. '
- 'Default: %(default)s')
-
- parser.add_argument('--pc-sample',
- metavar='',
- action=StoreValue,
- type=float,
- default=0,
- help='Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--fast-orthophoto",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Skips dense reconstruction and 3D model generation. "
+ "It generates an orthophoto directly from the sparse reconstruction. "
+ "If you just need an orthophoto and do not need a full 3D model, turn on this option. Default: %(default)s",
+ )
- parser.add_argument('--pc-skip-geometric',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Geometric estimates improve the accuracy of the point cloud by computing geometrically consistent depthmaps but may not be usable in larger datasets. This flag disables geometric estimates. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--crop",
+ metavar="",
+ action=StoreValue,
+ default=3,
+ type=float,
+ help=(
+ "Automatically crop image outputs by creating a smooth buffer "
+ "around the dataset boundaries, shrunk by N meters. "
+ "Use 0 to disable cropping. "
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--smrf-scalar',
- metavar='',
- action=StoreValue,
- type=float,
- default=1.25,
- help='Simple Morphological Filter elevation scalar parameter. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--boundary",
+ default="",
+ metavar="",
+ action=StoreValue,
+ type=path_or_json_string,
+ help="GeoJSON polygon limiting the area of the reconstruction. "
+ "Can be specified either as path to a GeoJSON file or as a "
+ "JSON string representing the contents of a "
+ "GeoJSON file. Default: %(default)s",
+ )
- parser.add_argument('--smrf-slope',
- metavar='',
+ parser.add_argument(
+ "--auto-boundary",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Automatically set a boundary using camera shot locations to limit the area of the reconstruction. "
+ "This can help remove far away background artifacts (sky, background landscapes, etc.). See also --boundary. "
+ "Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--auto-boundary-distance",
+ metavar="",
+ action=StoreValue,
+ type=float,
+ default=0,
+ help="Specify the distance between camera shot locations and the outer edge of the boundary when computing the boundary with --auto-boundary. Set to 0 to automatically choose a value. "
+ "Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--pc-quality",
+ metavar="",
+ action=StoreValue,
+ default="medium",
+ choices=["ultra", "high", "medium", "low", "lowest"],
+ help=(
+ "Set point cloud quality. Higher quality generates better, denser point clouds, but requires more memory and takes longer. Each step up in quality increases processing time roughly by a factor of 4x."
+ "Can be one of: %(choices)s. Default: "
+ "%(default)s"
+ ),
+ )
+
+ parser.add_argument(
+ "--pc-classify",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Classify the point cloud outputs. "
+ "You can control the behavior of this option by tweaking the --dem-* parameters. "
+ "Default: "
+ "%(default)s",
+ )
+
+ parser.add_argument(
+ "--pc-csv",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Export the georeferenced point cloud in CSV format. Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--pc-las",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Export the georeferenced point cloud in LAS format. Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--pc-ept",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Export the georeferenced point cloud in Entwine Point Tile (EPT) format. Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--pc-copc",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Save the georeferenced point cloud in Cloud Optimized Point Cloud (COPC) format. Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--pc-filter",
+ metavar="",
+ action=StoreValue,
+ type=float,
+ default=5,
+ help="Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering. "
+ "Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--pc-sample",
+ metavar="",
+ action=StoreValue,
+ type=float,
+ default=0,
+ help="Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. "
+ "Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--pc-skip-geometric",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Geometric estimates improve the accuracy of the point cloud by computing geometrically consistent depthmaps but may not be usable in larger datasets. This flag disables geometric estimates. "
+ "Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--smrf-scalar",
+ metavar="",
+ action=StoreValue,
+ type=float,
+ default=1.25,
+ help="Simple Morphological Filter elevation scalar parameter. "
+ "Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--smrf-slope",
+ metavar="",
action=StoreValue,
type=float,
default=0.15,
- help='Simple Morphological Filter slope parameter (rise over run). '
- 'Default: %(default)s')
-
- parser.add_argument('--smrf-threshold',
- metavar='',
+ help="Simple Morphological Filter slope parameter (rise over run). "
+ "Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--smrf-threshold",
+ metavar="",
action=StoreValue,
type=float,
default=0.5,
- help='Simple Morphological Filter elevation threshold parameter (meters). '
- 'Default: %(default)s')
-
- parser.add_argument('--smrf-window',
- metavar='',
+ help="Simple Morphological Filter elevation threshold parameter (meters). "
+ "Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--smrf-window",
+ metavar="",
action=StoreValue,
type=float,
default=18.0,
- help='Simple Morphological Filter window radius parameter (meters). '
- 'Default: %(default)s')
+ help="Simple Morphological Filter window radius parameter (meters). "
+ "Default: %(default)s",
+ )
- parser.add_argument('--texturing-skip-global-seam-leveling',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('Skip normalization of colors across all images. Useful when processing radiometric data. Default: %(default)s'))
+ parser.add_argument(
+ "--texturing-skip-global-seam-leveling",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "Skip normalization of colors across all images. Useful when processing radiometric data. Default: %(default)s"
+ ),
+ )
- parser.add_argument('--texturing-keep-unseen-faces',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('Keep faces in the mesh that are not seen in any camera. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--texturing-keep-unseen-faces",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "Keep faces in the mesh that are not seen in any camera. "
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--texturing-single-material',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('Generate OBJs that have a single material and a single texture file instead of multiple ones. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--texturing-single-material",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "Generate OBJs that have a single material and a single texture file instead of multiple ones. "
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--gltf',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('Generate single file Binary glTF (GLB) textured models. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--gltf",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "Generate single file Binary glTF (GLB) textured models. "
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--gcp',
- metavar='',
- action=StoreValue,
- default=None,
- help=('Path to the file containing the ground control '
- 'points used for georeferencing. '
- 'The file needs to '
- 'use the following format: \n'
- 'EPSG:<code> or <+proj definition>\n'
- 'geo_x geo_y geo_z im_x im_y image_name [gcp_name] [extra1] [extra2]\n'
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--gcp",
+ metavar="",
+ action=StoreValue,
+ default=None,
+ help=(
+ "Path to the file containing the ground control "
+ "points used for georeferencing. "
+ "The file needs to "
+ "use the following format: \n"
+ "EPSG: or <+proj definition>\n"
+ "geo_x geo_y geo_z im_x im_y image_name [gcp_name] [extra1] [extra2]\n"
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--geo',
- metavar='',
- action=StoreValue,
- default=None,
- help=('Path to the image geolocation file containing the camera center coordinates used for georeferencing. '
- 'If you don\'t have values for yaw/pitch/roll you can set them to 0. '
- 'The file needs to '
- 'use the following format: \n'
- 'EPSG:<code> or <+proj definition>\n'
- 'image_name geo_x geo_y geo_z [yaw (degrees)] [pitch (degrees)] [roll (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n'
- 'Default: %(default)s'))
-
- parser.add_argument('--align',
- metavar='',
- action=StoreValue,
- default=None,
- help=('Path to a GeoTIFF DEM or a LAS/LAZ point cloud '
- 'that the reconstruction outputs should be automatically aligned to. Experimental. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--geo",
+ metavar="",
+ action=StoreValue,
+ default=None,
+ help=(
+ "Path to the image geolocation file containing the camera center coordinates used for georeferencing. "
+ "If you don't have values for yaw/pitch/roll you can set them to 0. "
+ "The file needs to "
+ "use the following format: \n"
+ "EPSG: or <+proj definition>\n"
+ "image_name geo_x geo_y geo_z [yaw (degrees)] [pitch (degrees)] [roll (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n"
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--use-exif',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('Use this tag if you have a GCP File but '
- 'want to use the EXIF information for georeferencing instead. Default: %(default)s'))
+ parser.add_argument(
+ "--align",
+ metavar="",
+ action=StoreValue,
+ default=None,
+ help=(
+ "Path to a GeoTIFF DEM or a LAS/LAZ point cloud "
+ "that the reconstruction outputs should be automatically aligned to. Experimental. "
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--dtm',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Use this tag to build a DTM (Digital Terrain Model, ground only) using a simple '
- 'morphological filter. Check the --dem* and --smrf* parameters for finer tuning. Default: %(default)s')
+ parser.add_argument(
+ "--use-exif",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "Use this tag if you have a GCP File but "
+ "want to use the EXIF information for georeferencing instead. Default: %(default)s"
+ ),
+ )
- parser.add_argument('--dsm',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Use this tag to build a DSM (Digital Surface Model, ground + objects) using a progressive '
- 'morphological filter. Check the --dem* parameters for finer tuning. Default: %(default)s')
+ parser.add_argument(
+ "--dtm",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Use this tag to build a DTM (Digital Terrain Model, ground only) using a simple "
+ "morphological filter. Check the --dem* and --smrf* parameters for finer tuning. Default: %(default)s",
+ )
- parser.add_argument('--dem-gapfill-steps',
- metavar='',
- action=StoreValue,
- default=3,
- type=int,
- help='Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. '
- 'Starting with a radius equal to the output resolution, N different DEMs are generated with '
- 'progressively bigger radius using the inverse distance weighted (IDW) algorithm '
- 'and merged together. Remaining gaps are then merged using nearest neighbor interpolation. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--dsm",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Use this tag to build a DSM (Digital Surface Model, ground + objects) using a progressive "
+ "morphological filter. Check the --dem* parameters for finer tuning. Default: %(default)s",
+ )
- parser.add_argument('--dem-resolution',
- metavar='',
- action=StoreValue,
- type=float,
- default=5,
- help='DSM/DTM resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate.'
- ' Default: %(default)s')
+ parser.add_argument(
+ "--dem-gapfill-steps",
+ metavar="",
+ action=StoreValue,
+ default=3,
+ type=int,
+ help="Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. "
+ "Starting with a radius equal to the output resolution, N different DEMs are generated with "
+ "progressively bigger radius using the inverse distance weighted (IDW) algorithm "
+ "and merged together. Remaining gaps are then merged using nearest neighbor interpolation. "
+ "Default: %(default)s",
+ )
- parser.add_argument('--dem-decimation',
- metavar='',
- action=StoreValue,
- default=1,
- type=int,
- help='Decimate the points before generating the DEM. 1 is no decimation (full quality). '
- '100 decimates ~99%% of the points. Useful for speeding up generation of DEM results in very large datasets. Default: %(default)s')
-
- parser.add_argument('--dem-euclidean-map',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Computes an euclidean raster map for each DEM. '
- 'The map reports the distance from each cell to the nearest '
- 'NODATA value (before any hole filling takes place). '
- 'This can be useful to isolate the areas that have been filled. '
- 'Default: '
- '%(default)s')
+ parser.add_argument(
+ "--dem-resolution",
+ metavar="",
+ action=StoreValue,
+ type=float,
+ default=5,
+ help="DSM/DTM resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate."
+ " Default: %(default)s",
+ )
- parser.add_argument('--orthophoto-resolution',
- metavar='<float > 0.0>',
- action=StoreValue,
- default=5,
- type=float,
- help=('Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate.'
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--dem-decimation",
+ metavar="",
+ action=StoreValue,
+ default=1,
+ type=int,
+ help="Decimate the points before generating the DEM. 1 is no decimation (full quality). "
+ "100 decimates ~99%% of the points. Useful for speeding up generation of DEM results in very large datasets. Default: %(default)s",
+ )
- parser.add_argument('--orthophoto-no-tiled',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Set this parameter if you want a striped GeoTIFF. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--dem-euclidean-map",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Computes an euclidean raster map for each DEM. "
+ "The map reports the distance from each cell to the nearest "
+ "NODATA value (before any hole filling takes place). "
+ "This can be useful to isolate the areas that have been filled. "
+ "Default: "
+ "%(default)s",
+ )
- parser.add_argument('--orthophoto-png',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Set this parameter if you want to generate a PNG rendering of the orthophoto. '
- 'Default: %(default)s')
-
- parser.add_argument('--orthophoto-kmz',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--orthophoto-resolution",
+ metavar=" 0.0>",
+ action=StoreValue,
+ default=5,
+ type=float,
+ help=(
+ "Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate."
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--orthophoto-compression',
- metavar='',
- action=StoreValue,
- type=str,
- choices=['JPEG', 'LZW', 'PACKBITS', 'DEFLATE', 'LZMA', 'NONE'],
- default='DEFLATE',
- help='Set the compression to use for orthophotos. Can be one of: %(choices)s. Default: %(default)s')
-
- parser.add_argument('--orthophoto-cutline',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Generates a polygon around the cropping area '
- 'that cuts the orthophoto around the edges of features. This polygon '
- 'can be useful for stitching seamless mosaics with multiple overlapping orthophotos. '
- 'Default: '
- '%(default)s')
+ parser.add_argument(
+ "--orthophoto-no-tiled",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Set this parameter if you want a striped GeoTIFF. "
+ "Default: %(default)s",
+ )
- parser.add_argument('--tiles',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Generate static tiles for orthophotos and DEMs that are '
- 'suitable for viewers like Leaflet or OpenLayers. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--orthophoto-png",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Set this parameter if you want to generate a PNG rendering of the orthophoto. "
+ "Default: %(default)s",
+ )
- parser.add_argument('--3d-tiles',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Generate OGC 3D Tiles outputs. Default: %(default)s')
+ parser.add_argument(
+ "--orthophoto-kmz",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. "
+ "Default: %(default)s",
+ )
- parser.add_argument('--rolling-shutter',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Turn on rolling shutter correction. If the camera '
- 'has a rolling shutter and the images were taken in motion, you can turn on this option '
- 'to improve the accuracy of the results. See also --rolling-shutter-readout. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--orthophoto-compression",
+ metavar="",
+ action=StoreValue,
+ type=str,
+ choices=["JPEG", "LZW", "PACKBITS", "DEFLATE", "LZMA", "NONE"],
+ default="DEFLATE",
+ help="Set the compression to use for orthophotos. Can be one of: %(choices)s. Default: %(default)s",
+ )
- parser.add_argument('--rolling-shutter-readout',
- type=float,
- action=StoreValue,
- metavar='',
- default=0,
- help='Override the rolling shutter readout time for your camera sensor (in milliseconds), instead of using the rolling shutter readout database. '
- 'Note that not all cameras are present in the database. Set to 0 to use the database value. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--orthophoto-cutline",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Generates a polygon around the cropping area "
+ "that cuts the orthophoto around the edges of features. This polygon "
+ "can be useful for stitching seamless mosaics with multiple overlapping orthophotos. "
+ "Default: "
+ "%(default)s",
+ )
- parser.add_argument('--build-overviews',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Build orthophoto overviews for faster display in programs such as QGIS. Default: %(default)s')
+ parser.add_argument(
+ "--tiles",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Generate static tiles for orthophotos and DEMs that are "
+ "suitable for viewers like Leaflet or OpenLayers. "
+ "Default: %(default)s",
+ )
- parser.add_argument('--cog',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Create Cloud-Optimized GeoTIFFs instead of normal GeoTIFFs. Default: %(default)s')
+ parser.add_argument(
+ "--3d-tiles",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Generate OGC 3D Tiles outputs. Default: %(default)s",
+ )
- parser.add_argument('--copy-to',
- metavar='',
- action=StoreValue,
- help='Copy output results to this folder after processing.')
+ parser.add_argument(
+ "--rolling-shutter",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Turn on rolling shutter correction. If the camera "
+ "has a rolling shutter and the images were taken in motion, you can turn on this option "
+ "to improve the accuracy of the results. See also --rolling-shutter-readout. "
+ "Default: %(default)s",
+ )
- parser.add_argument('--version',
- action='version',
- version='ODM {0}'.format(__version__),
- help='Displays version number and exits. ')
+ parser.add_argument(
+ "--rolling-shutter-readout",
+ type=float,
+ action=StoreValue,
+ metavar="",
+ default=0,
+ help="Override the rolling shutter readout time for your camera sensor (in milliseconds), instead of using the rolling shutter readout database. "
+ "Note that not all cameras are present in the database. Set to 0 to use the database value. "
+ "Default: %(default)s",
+ )
- parser.add_argument('--video-limit',
- type=int,
- action=StoreValue,
- default=500,
- metavar='',
- help='Maximum number of frames to extract from video files for processing. Set to 0 for no limit. Default: %(default)s')
+ parser.add_argument(
+ "--build-overviews",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Build orthophoto overviews for faster display in programs such as QGIS. Default: %(default)s",
+ )
- parser.add_argument('--video-resolution',
- type=int,
- action=StoreValue,
- default=4000,
- metavar='',
- help='The maximum output resolution of extracted video frames in pixels. Default: %(default)s')
-
- parser.add_argument('--split',
- type=int,
- action=StoreValue,
- default=999999,
- metavar='',
- help='Average number of images per submodel. When '
- 'splitting a large dataset into smaller '
- 'submodels, images are grouped into clusters. '
- 'This value regulates the number of images that '
- 'each cluster should have on average. Default: %(default)s')
+ parser.add_argument(
+ "--cog",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Create Cloud-Optimized GeoTIFFs instead of normal GeoTIFFs. Default: %(default)s",
+ )
- parser.add_argument('--split-overlap',
- type=float,
- action=StoreValue,
- metavar='',
- default=150,
- help='Radius of the overlap between submodels. '
- 'After grouping images into clusters, images '
- 'that are closer than this radius to a cluster '
- 'are added to the cluster. This is done to ensure '
- 'that neighboring submodels overlap. Default: %(default)s')
+ parser.add_argument(
+ "--copy-to",
+ metavar="",
+ action=StoreValue,
+ help="Copy output results to this folder after processing.",
+ )
- parser.add_argument('--split-image-groups',
- metavar='',
- action=StoreValue,
- default=None,
- help=('Path to the image groups file that controls how images should be split into groups. '
- 'The file needs to use the following format: \n'
- 'image_name group_name\n'
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--version",
+ action="version",
+ version="ODM {0}".format(__version__),
+ help="Displays version number and exits. ",
+ )
- parser.add_argument('--sm-no-align',
- action=StoreTrue,
- nargs=0,
- default=False,
- help='Skip alignment of submodels in split-merge. Useful if GPS is good enough on very large datasets. Default: %(default)s')
+ parser.add_argument(
+ "--video-limit",
+ type=int,
+ action=StoreValue,
+ default=500,
+ metavar="",
+ help="Maximum number of frames to extract from video files for processing. Set to 0 for no limit. Default: %(default)s",
+ )
- parser.add_argument('--sm-cluster',
- metavar='',
- action=StoreValue,
- type=url_string,
- default=None,
- help='URL to a ClusterODM instance '
- 'for distributing a split-merge workflow on '
- 'multiple nodes in parallel. '
- 'Default: %(default)s')
+ parser.add_argument(
+ "--video-resolution",
+ type=int,
+ action=StoreValue,
+ default=4000,
+ metavar="",
+ help="The maximum output resolution of extracted video frames in pixels. Default: %(default)s",
+ )
- parser.add_argument('--merge',
- metavar='',
- action=StoreValue,
- default='all',
- choices=['all', 'pointcloud', 'orthophoto', 'dem'],
- help=('Choose what to merge in the merge step in a split dataset. '
- 'By default all available outputs are merged. '
- 'Options: %(choices)s. Default: '
- '%(default)s'))
+ parser.add_argument(
+ "--split",
+ type=int,
+ action=StoreValue,
+ default=999999,
+ metavar="",
+ help="Average number of images per submodel. When "
+ "splitting a large dataset into smaller "
+ "submodels, images are grouped into clusters. "
+ "This value regulates the number of images that "
+ "each cluster should have on average. Default: %(default)s",
+ )
- parser.add_argument('--force-gps',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('Use images\' GPS exif data for reconstruction, even if there are GCPs present.'
- 'This flag is useful if you have high precision GPS measurements. '
- 'If there are no GCPs, this flag does nothing. Default: %(default)s'))
-
- parser.add_argument('--gps-accuracy',
- type=float,
- action=StoreValue,
- metavar='',
- default=3,
- help='Set a value in meters for the GPS Dilution of Precision (DOP) '
- 'information for all images. If your images are tagged '
- 'with high precision GPS information (RTK), this value will be automatically '
- 'set accordingly. You can use this option to manually set it in case the reconstruction '
- 'fails. Lowering this option can sometimes help control bowling-effects over large areas. Default: %(default)s')
+ parser.add_argument(
+ "--split-overlap",
+ type=float,
+ action=StoreValue,
+ metavar="",
+ default=150,
+ help="Radius of the overlap between submodels. "
+ "After grouping images into clusters, images "
+ "that are closer than this radius to a cluster "
+ "are added to the cluster. This is done to ensure "
+ "that neighboring submodels overlap. Default: %(default)s",
+ )
- parser.add_argument('--optimize-disk-space',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('Delete heavy intermediate files to optimize disk space usage. This '
- 'affects the ability to restart the pipeline from an intermediate stage, '
- 'but allows datasets to be processed on machines that don\'t have sufficient '
- 'disk space available. Default: %(default)s'))
+ parser.add_argument(
+ "--split-image-groups",
+ metavar="",
+ action=StoreValue,
+ default=None,
+ help=(
+ "Path to the image groups file that controls how images should be split into groups. "
+ "The file needs to use the following format: \n"
+ "image_name group_name\n"
+ "Default: %(default)s"
+ ),
+ )
- parser.add_argument('--pc-rectify',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('Perform ground rectification on the point cloud. This means that wrongly classified ground '
- 'points will be re-classified and gaps will be filled. Useful for generating DTMs. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--sm-no-align",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help="Skip alignment of submodels in split-merge. Useful if GPS is good enough on very large datasets. Default: %(default)s",
+ )
- parser.add_argument('--primary-band',
- metavar='',
- action=StoreValue,
- default="auto",
- type=str,
- help=('When processing multispectral datasets, you can specify the name of the primary band that will be used for reconstruction. '
- 'It\'s recommended to choose a band which has sharp details and is in focus. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--sm-cluster",
+ metavar="",
+ action=StoreValue,
+ type=url_string,
+ default=None,
+ help="URL to a ClusterODM instance "
+ "for distributing a split-merge workflow on "
+ "multiple nodes in parallel. "
+ "Default: %(default)s",
+ )
- parser.add_argument('--skip-band-alignment',
- action=StoreTrue,
- nargs=0,
- default=False,
- help=('When processing multispectral datasets, ODM will automatically align the images for each band. '
- 'If the images have been postprocessed and are already aligned, use this option. '
- 'Default: %(default)s'))
+ parser.add_argument(
+ "--merge",
+ metavar="",
+ action=StoreValue,
+ default="all",
+ choices=["all", "pointcloud", "orthophoto", "dem"],
+ help=(
+ "Choose what to merge in the merge step in a split dataset. "
+ "By default all available outputs are merged. "
+ "Options: %(choices)s. Default: "
+ "%(default)s"
+ ),
+ )
+
+ parser.add_argument(
+ "--force-gps",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "Use images' GPS exif data for reconstruction, even if there are GCPs present."
+ "This flag is useful if you have high precision GPS measurements. "
+ "If there are no GCPs, this flag does nothing. Default: %(default)s"
+ ),
+ )
+
+ parser.add_argument(
+ "--gps-accuracy",
+ type=float,
+ action=StoreValue,
+ metavar="",
+ default=3,
+ help="Set a value in meters for the GPS Dilution of Precision (DOP) "
+ "information for all images. If your images are tagged "
+ "with high precision GPS information (RTK), this value will be automatically "
+ "set accordingly. You can use this option to manually set it in case the reconstruction "
+ "fails. Lowering this option can sometimes help control bowling-effects over large areas. Default: %(default)s",
+ )
+
+ parser.add_argument(
+ "--optimize-disk-space",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "Delete heavy intermediate files to optimize disk space usage. This "
+ "affects the ability to restart the pipeline from an intermediate stage, "
+ "but allows datasets to be processed on machines that don't have sufficient "
+ "disk space available. Default: %(default)s"
+ ),
+ )
+
+ parser.add_argument(
+ "--pc-rectify",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "Perform ground rectification on the point cloud. This means that wrongly classified ground "
+ "points will be re-classified and gaps will be filled. Useful for generating DTMs. "
+ "Default: %(default)s"
+ ),
+ )
+
+ parser.add_argument(
+ "--primary-band",
+ metavar="",
+ action=StoreValue,
+ default="auto",
+ type=str,
+ help=(
+ "When processing multispectral datasets, you can specify the name of the primary band that will be used for reconstruction. "
+ "It's recommended to choose a band which has sharp details and is in focus. "
+ "Default: %(default)s"
+ ),
+ )
+
+ parser.add_argument(
+ "--skip-band-alignment",
+ action=StoreTrue,
+ nargs=0,
+ default=False,
+ help=(
+ "When processing multispectral datasets, ODM will automatically align the images for each band. "
+ "If the images have been postprocessed and are already aligned, use this option. "
+ "Default: %(default)s"
+ ),
+ )
args, unknown = parser.parse_known_args(argv)
- DEPRECATED = ["--verbose", "--debug", "--time", "--resize-to", "--depthmap-resolution", "--pc-geometric", "--texturing-data-term", "--texturing-outlier-removal-type", "--texturing-tone-mapping", "--texturing-skip-local-seam-leveling"]
+ DEPRECATED = [
+ "--verbose",
+ "--debug",
+ "--time",
+ "--resize-to",
+ "--depthmap-resolution",
+ "--pc-geometric",
+ "--texturing-data-term",
+ "--texturing-outlier-removal-type",
+ "--texturing-tone-mapping",
+ "--texturing-skip-local-seam-leveling",
+ ]
unknown_e = [p for p in unknown if p not in DEPRECATED]
if len(unknown_e) > 0:
raise parser.error("unrecognized arguments: %s" % " ".join(unknown_e))
@@ -891,37 +1176,49 @@ def config(argv=None, parser=None):
# check that the project path setting has been set properly
if not args.project_path:
- log.ODM_ERROR('You need to set the project path in the '
- 'settings.yaml file before you can run ODM, '
- 'or use `--project-path `. Run `python3 '
- 'run.py --help` for more information. ')
+ log.ODM_ERROR(
+ "You need to set the project path in the "
+ "settings.yaml file before you can run ODM, "
+ "or use `--project-path `. Run `python3 "
+ "run.py --help` for more information. "
+ )
sys.exit(1)
if args.fast_orthophoto:
- log.ODM_INFO('Fast orthophoto is turned on, automatically setting --skip-3dmodel')
- args.skip_3dmodel = True
+ log.ODM_INFO(
+ "Fast orthophoto is turned on, automatically setting --skip-3dmodel"
+ )
+ args.skip_3dmodel = True
if args.pc_rectify and not args.pc_classify:
- log.ODM_INFO("Ground rectify is turned on, automatically turning on point cloud classification")
- args.pc_classify = True
+ log.ODM_INFO(
+ "Ground rectify is turned on, automatically turning on point cloud classification"
+ )
+ args.pc_classify = True
if args.dtm and not args.pc_classify:
- log.ODM_INFO("DTM is turned on, automatically turning on point cloud classification")
- args.pc_classify = True
+ log.ODM_INFO(
+ "DTM is turned on, automatically turning on point cloud classification"
+ )
+ args.pc_classify = True
if args.skip_3dmodel and args.use_3dmesh:
- log.ODM_WARNING('--skip-3dmodel is set, but so is --use-3dmesh. --skip-3dmodel will be ignored.')
- args.skip_3dmodel = False
+ log.ODM_WARNING(
+ "--skip-3dmodel is set, but so is --use-3dmesh. --skip-3dmodel will be ignored."
+ )
+ args.skip_3dmodel = False
if args.orthophoto_cutline and not args.crop:
- log.ODM_WARNING("--orthophoto-cutline is set, but --crop is not. --crop will be set to 0.01")
- args.crop = 0.01
+ log.ODM_WARNING(
+ "--orthophoto-cutline is set, but --crop is not. --crop will be set to 0.01"
+ )
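+ # The cutline is traced along the crop boundary, so a minimal non-zero crop is forced here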
+ args.crop = 0.01
if args.sm_cluster:
try:
Node.from_url(args.sm_cluster).info()
except exceptions.NodeConnectionError as e:
- log.ODM_ERROR("Cluster node seems to be offline: %s" % str(e))
+ log.ODM_ERROR("Cluster node seems to be offline: %s" % str(e))
sys.exit(1)
-
+
return args
diff --git a/opendm/context.py b/opendm/context.py
index 72d8cc06..3ebdf7dc 100644
--- a/opendm/context.py
+++ b/opendm/context.py
@@ -6,16 +6,19 @@ import multiprocessing
current_path = os.path.abspath(os.path.dirname(__file__))
root_path, _ = os.path.split(current_path)
-superbuild_path = os.path.join(root_path, 'SuperBuild')
-superbuild_bin_path = os.path.join(superbuild_path, 'install', 'bin')
+superbuild_path = os.path.join(root_path, "SuperBuild")
+superbuild_bin_path = os.path.join(superbuild_path, "install", "bin")
# add opencv,opensfm to python path
-python_packages_paths = [os.path.join(superbuild_path, p) for p in [
- 'install/lib/python3.9/dist-packages',
- 'install/lib/python3.8/dist-packages',
- 'install/lib/python3/dist-packages',
- 'install/bin/opensfm',
-]]
+python_packages_paths = [
+ os.path.join(superbuild_path, p)
+ for p in [
+ "install/lib/python3.9/dist-packages",
+ "install/lib/python3.8/dist-packages",
+ "install/lib/python3/dist-packages",
+ "install/bin/opensfm",
+ ]
+]
for p in python_packages_paths:
sys.path.append(p)
@@ -23,25 +26,37 @@ for p in python_packages_paths:
# define opensfm path
opensfm_path = os.path.join(superbuild_bin_path, "opensfm")
-poisson_recon_path = os.path.join(superbuild_bin_path, 'PoissonRecon')
-dem2mesh_path = os.path.join(superbuild_bin_path, 'dem2mesh')
-dem2points_path = os.path.join(superbuild_bin_path, 'dem2points')
+poisson_recon_path = os.path.join(superbuild_bin_path, "PoissonRecon")
+dem2mesh_path = os.path.join(superbuild_bin_path, "dem2mesh")
+dem2points_path = os.path.join(superbuild_bin_path, "dem2points")
# define mvstex path
mvstex_path = os.path.join(superbuild_bin_path, "texrecon")
# openmvs paths
omvs_densify_path = os.path.join(superbuild_bin_path, "OpenMVS", "DensifyPointCloud")
-omvs_reconstructmesh_path = os.path.join(superbuild_bin_path, "OpenMVS", "ReconstructMesh")
+omvs_reconstructmesh_path = os.path.join(
+ superbuild_bin_path, "OpenMVS", "ReconstructMesh"
+)
fpcfilter_path = os.path.join(superbuild_bin_path, "FPCFilter")
odm_orthophoto_path = os.path.join(superbuild_bin_path, "odm_orthophoto")
-settings_path = os.path.join(root_path, 'settings.yaml')
+settings_path = os.path.join(root_path, "settings.yaml")
# Define supported image extensions
-supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp', '.raw', '.dng', '.nef'}
-supported_video_extensions = {'.mp4', '.mov', '.lrv', '.ts'}
+supported_extensions = {
+ ".jpg",
+ ".jpeg",
+ ".png",
+ ".tif",
+ ".tiff",
+ ".bmp",
+ ".raw",
+ ".dng",
+ ".nef",
+}
+supported_video_extensions = {".mp4", ".mov", ".lrv", ".ts"}
# Define the number of cores
num_cores = multiprocessing.cpu_count()
@@ -49,4 +64,4 @@ num_cores = multiprocessing.cpu_count()
# Print python paths if invoked as a script
if __name__ == "__main__":
- print("export PYTHONPATH=" + ":".join(python_packages_paths))
\ No newline at end of file
+ print("export PYTHONPATH=" + ":".join(python_packages_paths))
diff --git a/opendm/cropper.py b/opendm/cropper.py
index 54ccb9ba..27442fd4 100644
--- a/opendm/cropper.py
+++ b/opendm/cropper.py
@@ -7,8 +7,9 @@ import json, os
from opendm.concurrency import get_max_memory
from opendm.utils import double_quote
+
class Cropper:
- def __init__(self, storage_dir, files_prefix = "crop"):
+ def __init__(self, storage_dir, files_prefix="crop"):
self.storage_dir = storage_dir
self.files_prefix = files_prefix
@@ -16,19 +17,25 @@ class Cropper:
"""
@return a path relative to storage_dir and prefixed with files_prefix
"""
- return os.path.join(self.storage_dir, '{}.{}'.format(self.files_prefix, suffix))
+ return os.path.join(self.storage_dir, "{}.{}".format(self.files_prefix, suffix))
@staticmethod
- def crop(gpkg_path, geotiff_path, gdal_options, keep_original=True, warp_options=[]):
+ def crop(
+ gpkg_path, geotiff_path, gdal_options, keep_original=True, warp_options=[]
+ ):
if not os.path.exists(gpkg_path) or not os.path.exists(geotiff_path):
- log.ODM_WARNING("Either {} or {} does not exist, will skip cropping.".format(gpkg_path, geotiff_path))
+ log.ODM_WARNING(
+ "Either {} or {} does not exist, will skip cropping.".format(
+ gpkg_path, geotiff_path
+ )
+ )
return geotiff_path
log.ODM_INFO("Cropping %s" % geotiff_path)
# Rename original file
# path/to/odm_orthophoto.tif --> path/to/odm_orthophoto.original.tif
-
+
path, filename = os.path.split(geotiff_path)
# path = path/to
# filename = odm_orthophoto.tif
@@ -42,46 +49,50 @@ class Cropper:
try:
kwargs = {
- 'gpkg_path': double_quote(gpkg_path),
- 'geotiffInput': double_quote(original_geotiff),
- 'geotiffOutput': double_quote(geotiff_path),
- 'options': ' '.join(map(lambda k: '-co {}={}'.format(k, gdal_options[k]), gdal_options)),
- 'warpOptions': ' '.join(warp_options),
- 'max_memory': get_max_memory()
+ "gpkg_path": double_quote(gpkg_path),
+ "geotiffInput": double_quote(original_geotiff),
+ "geotiffOutput": double_quote(geotiff_path),
+ "options": " ".join(
+ map(lambda k: "-co {}={}".format(k, gdal_options[k]), gdal_options)
+ ),
+ "warpOptions": " ".join(warp_options),
+ "max_memory": get_max_memory(),
}
- run('gdalwarp -cutline {gpkg_path} '
- '-crop_to_cutline '
- '{options} '
- '{warpOptions} '
- '{geotiffInput} '
- '{geotiffOutput} '
- '--config GDAL_CACHEMAX {max_memory}%'.format(**kwargs))
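+ # gdalwarp masks the raster outside the cutline polygon and crops its extent to the cutline bounds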
+ run(
+ "gdalwarp -cutline {gpkg_path} "
+ "-crop_to_cutline "
+ "{options} "
+ "{warpOptions} "
+ "{geotiffInput} "
+ "{geotiffOutput} "
+ "--config GDAL_CACHEMAX {max_memory}%".format(**kwargs)
+ )
if not keep_original:
os.remove(original_geotiff)
except Exception as e:
- log.ODM_WARNING('Something went wrong while cropping: {}'.format(e))
-
+ log.ODM_WARNING("Something went wrong while cropping: {}".format(e))
+
# Revert rename
os.replace(original_geotiff, geotiff_path)
return geotiff_path
@staticmethod
- def merge_bounds(input_bound_files, output_bounds, buffer_distance = 0):
+ def merge_bounds(input_bound_files, output_bounds, buffer_distance=0):
"""
Merge multiple bound files into a single bound computed from the convex hull
of all bounds (minus a buffer distance in meters)
"""
geomcol = ogr.Geometry(ogr.wkbGeometryCollection)
- driver = ogr.GetDriverByName('GPKG')
+ driver = ogr.GetDriverByName("GPKG")
srs = None
for input_bound_file in input_bound_files:
- ds = driver.Open(input_bound_file, 0) # ready-only
+ ds = driver.Open(input_bound_file, 0)  # read-only
layer = ds.GetLayer()
srs = layer.GetSpatialRef()
@@ -89,7 +100,7 @@ class Cropper:
# Collect all Geometry
for feature in layer:
geomcol.AddGeometry(feature.GetGeometryRef())
-
+
ds = None
# Calculate convex hull
@@ -121,7 +132,9 @@ class Cropper:
# Save and close output data source
out_ds = None
- def create_bounds_geojson(self, pointcloud_path, buffer_distance = 0, decimation_step=40):
+ def create_bounds_geojson(
+ self, pointcloud_path, buffer_distance=0, decimation_step=40
+ ):
"""
Compute a buffered polygon around the data extents (not just a bounding box)
of the given point cloud.
@@ -129,51 +142,71 @@ class Cropper:
@return filename to GeoJSON containing the polygon
"""
if not os.path.exists(pointcloud_path):
- log.ODM_WARNING('Point cloud does not exist, cannot generate bounds {}'.format(pointcloud_path))
- return ''
+ log.ODM_WARNING(
+ "Point cloud does not exist, cannot generate bounds {}".format(
+ pointcloud_path
+ )
+ )
+ return ""
# Do decimation prior to extracting boundary information
- decimated_pointcloud_path = self.path('decimated.las')
+ decimated_pointcloud_path = self.path("decimated.las")
- run("pdal translate -i \"{}\" "
- "-o \"{}\" "
+ run(
+ 'pdal translate -i "{}" '
+ '-o "{}" '
"decimation "
- "--filters.decimation.step={} ".format(pointcloud_path, decimated_pointcloud_path, decimation_step))
+ "--filters.decimation.step={} ".format(
+ pointcloud_path, decimated_pointcloud_path, decimation_step
+ )
+ )
if not os.path.exists(decimated_pointcloud_path):
- log.ODM_WARNING('Could not decimate point cloud, thus cannot generate GPKG bounds {}'.format(decimated_pointcloud_path))
- return ''
+ log.ODM_WARNING(
+ "Could not decimate point cloud, thus cannot generate GPKG bounds {}".format(
+ decimated_pointcloud_path
+ )
+ )
+ return ""
# Use PDAL to dump boundary information
# then read the information back
- boundary_file_path = self.path('boundary.json')
+ boundary_file_path = self.path("boundary.json")
+
+ run(
+ 'pdal info --boundary --filters.hexbin.edge_size=1 --filters.hexbin.threshold=0 "{0}" > "{1}"'.format(
+ decimated_pointcloud_path, boundary_file_path
+ )
+ )
- run('pdal info --boundary --filters.hexbin.edge_size=1 --filters.hexbin.threshold=0 "{0}" > "{1}"'.format(decimated_pointcloud_path, boundary_file_path))
-
pc_geojson_boundary_feature = None
- with open(boundary_file_path, 'r') as f:
+ with open(boundary_file_path, "r") as f:
json_f = json.loads(f.read())
- pc_geojson_boundary_feature = json_f['boundary']['boundary_json']
+ pc_geojson_boundary_feature = json_f["boundary"]["boundary_json"]
- if pc_geojson_boundary_feature is None: raise RuntimeError("Could not determine point cloud boundaries")
+ if pc_geojson_boundary_feature is None:
+ raise RuntimeError("Could not determine point cloud boundaries")
# Write bounds to GeoJSON
- tmp_bounds_geojson_path = self.path('tmp-bounds.geojson')
+ tmp_bounds_geojson_path = self.path("tmp-bounds.geojson")
with open(tmp_bounds_geojson_path, "w") as f:
- f.write(json.dumps({
- "type": "FeatureCollection",
- "features": [{
- "type": "Feature",
- "geometry": pc_geojson_boundary_feature
- }]
- }))
+ f.write(
+ json.dumps(
+ {
+ "type": "FeatureCollection",
+ "features": [
+ {"type": "Feature", "geometry": pc_geojson_boundary_feature}
+ ],
+ }
+ )
+ )
# Create a convex hull around the boundary
- # as to encompass the entire area (no holes)
- driver = ogr.GetDriverByName('GeoJSON')
- ds = driver.Open(tmp_bounds_geojson_path, 0) # ready-only
+ # as to encompass the entire area (no holes)
+ driver = ogr.GetDriverByName("GeoJSON")
+ ds = driver.Open(tmp_bounds_geojson_path, 0)  # read-only
layer = ds.GetLayer()
# Collect all Geometry
@@ -191,7 +224,7 @@ class Cropper:
BUFFER_SMOOTH_DISTANCE = 3
if buffer_distance > 0:
- # For small areas, check that buffering doesn't obliterate
+ # For small areas, check that buffering doesn't obliterate
# our hull
tmp = convexhull.Buffer(-(buffer_distance + BUFFER_SMOOTH_DISTANCE))
tmp = tmp.Buffer(BUFFER_SMOOTH_DISTANCE)
@@ -201,7 +234,7 @@ class Cropper:
log.ODM_WARNING("Very small crop area detected, we will not smooth it.")
# Save to a new file
- bounds_geojson_path = self.path('bounds.geojson')
+ bounds_geojson_path = self.path("bounds.geojson")
if os.path.exists(bounds_geojson_path):
os.remove(bounds_geojson_path)
@@ -220,50 +253,65 @@ class Cropper:
# Remove decimated point cloud
if os.path.exists(decimated_pointcloud_path):
os.remove(decimated_pointcloud_path)
-
+
# Remove tmp bounds
if os.path.exists(tmp_bounds_geojson_path):
os.remove(tmp_bounds_geojson_path)
return bounds_geojson_path
-
- def create_bounds_gpkg(self, pointcloud_path, buffer_distance = 0, decimation_step=40):
+ def create_bounds_gpkg(
+ self, pointcloud_path, buffer_distance=0, decimation_step=40
+ ):
"""
Compute a buffered polygon around the data extents (not just a bounding box)
of the given point cloud.
-
+
@return filename to Geopackage containing the polygon
"""
if not os.path.exists(pointcloud_path):
- log.ODM_WARNING('Point cloud does not exist, cannot generate GPKG bounds {}'.format(pointcloud_path))
- return ''
+ log.ODM_WARNING(
+ "Point cloud does not exist, cannot generate GPKG bounds {}".format(
+ pointcloud_path
+ )
+ )
+ return ""
- bounds_geojson_path = self.create_bounds_geojson(pointcloud_path, buffer_distance, decimation_step)
+ bounds_geojson_path = self.create_bounds_geojson(
+ pointcloud_path, buffer_distance, decimation_step
+ )
- summary_file_path = os.path.join(self.storage_dir, '{}.summary.json'.format(self.files_prefix))
+ summary_file_path = os.path.join(
+ self.storage_dir, "{}.summary.json".format(self.files_prefix)
+ )
export_summary_json(pointcloud_path, summary_file_path)
-
+
pc_proj4 = None
- with open(summary_file_path, 'r') as f:
+ with open(summary_file_path, "r") as f:
json_f = json.loads(f.read())
- pc_proj4 = json_f['summary']['srs']['proj4']
+ pc_proj4 = json_f["summary"]["srs"]["proj4"]
- if pc_proj4 is None: raise RuntimeError("Could not determine point cloud proj4 declaration")
+ if pc_proj4 is None:
+ raise RuntimeError("Could not determine point cloud proj4 declaration")
- bounds_gpkg_path = os.path.join(self.storage_dir, '{}.bounds.gpkg'.format(self.files_prefix))
+ bounds_gpkg_path = os.path.join(
+ self.storage_dir, "{}.bounds.gpkg".format(self.files_prefix)
+ )
if os.path.isfile(bounds_gpkg_path):
os.remove(bounds_gpkg_path)
# Convert bounds to GPKG
kwargs = {
- 'input': double_quote(bounds_geojson_path),
- 'output': double_quote(bounds_gpkg_path),
- 'proj4': pc_proj4
+ "input": double_quote(bounds_geojson_path),
+ "output": double_quote(bounds_gpkg_path),
+ "proj4": pc_proj4,
}
- run('ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs))
+ run(
+ 'ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(
+ **kwargs
+ )
+ )
return bounds_gpkg_path
-
diff --git a/opendm/cutline.py b/opendm/cutline.py
index e3f8bf8b..d774ea73 100644
--- a/opendm/cutline.py
+++ b/opendm/cutline.py
@@ -7,7 +7,7 @@ import math
import sys
from opendm import log
from opendm import io
-from opendm import concurrency
+from opendm import concurrency
from opendm import get_image_size
from opendm import system
@@ -18,68 +18,79 @@ import shapely
from shapely.geometry import LineString, mapping, shape
from shapely.ops import polygonize, unary_union
-if sys.platform == 'win32':
- # Temporary fix for: ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer
+if sys.platform == "win32":
+ # Temporary fix for: ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer
# https://github.com/Toblerity/Shapely/issues/1005
shapely.speedups.disable()
+
def write_raster(data, file):
profile = {
- 'driver': 'GTiff',
- 'width': data.shape[1],
- 'height': data.shape[0],
- 'count': 1,
- 'dtype': 'float32',
- 'transform': None,
- 'nodata': None,
- 'crs': None
+ "driver": "GTiff",
+ "width": data.shape[1],
+ "height": data.shape[0],
+ "count": 1,
+ "dtype": "float32",
+ "transform": None,
+ "nodata": None,
+ "crs": None,
}
- with rasterio.open(file, 'w', BIGTIFF="IF_SAFER", **profile) as wout:
+ with rasterio.open(file, "w", BIGTIFF="IF_SAFER", **profile) as wout:
wout.write(data, 1)
-def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrency=1, scale=1):
+
+def compute_cutline(
+ orthophoto_file, crop_area_file, destination, max_concurrency=1, scale=1
+):
if io.file_exists(orthophoto_file) and io.file_exists(crop_area_file):
log.ODM_INFO("Computing cutline")
scale = max(0.0001, min(1, scale))
scaled_orthophoto = None
if scale < 1:
- log.ODM_INFO("Scaling orthophoto to %s%% to compute cutline" % (scale * 100))
+ log.ODM_INFO(
+ "Scaling orthophoto to %s%% to compute cutline" % (scale * 100)
+ )
scaled_orthophoto = io.related_file_path(orthophoto_file, postfix=".scaled")
# Scale orthophoto before computing cutline
- system.run("gdal_translate -outsize {}% 0 "
+ system.run(
+ "gdal_translate -outsize {}% 0 "
"-co NUM_THREADS={} "
"--config GDAL_CACHEMAX {}% "
'"{}" "{}"'.format(
- scale * 100,
- max_concurrency,
- concurrency.get_max_memory(),
- orthophoto_file,
- scaled_orthophoto
- ))
-
+ scale * 100,
+ max_concurrency,
+ concurrency.get_max_memory(),
+ orthophoto_file,
+ scaled_orthophoto,
+ )
+ )
+
orthophoto_file = scaled_orthophoto
-
+
# open raster
- f = rasterio.open(orthophoto_file)
- rast = f.read(1) # First band only
+ f = rasterio.open(orthophoto_file)
+ rast = f.read(1) # First band only
height, width = rast.shape
number_lines = int(max(8, math.ceil(min(width, height) / 256.0)))
line_hor_offset = int(width / number_lines)
line_ver_offset = int(height / number_lines)
if line_hor_offset <= 2 or line_ver_offset <= 2:
- log.ODM_WARNING("Cannot compute cutline, orthophoto is too small (%sx%spx)" % (width, height))
+ log.ODM_WARNING(
+ "Cannot compute cutline, orthophoto is too small (%sx%spx)"
+ % (width, height)
+ )
return
- crop_f = fiona.open(crop_area_file, 'r')
+ crop_f = fiona.open(crop_area_file, "r")
if len(crop_f) == 0:
log.ODM_WARNING("Crop area is empty, cannot compute cutline")
return
- crop_poly = shape(crop_f[1]['geometry'])
+ crop_poly = shape(crop_f[1]["geometry"])
crop_f.close()
linestrings = []
@@ -93,35 +104,51 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
cost_map = np.full((height, width), 1, dtype=np.float32)
# Write edges to cost map
- cost_map[edges==True] = 0 # Low cost
+ cost_map[edges == True] = 0 # Low cost
# Write "barrier, floor is lava" costs
- if direction == 'vertical':
- lines = [((i, 0), (i, height - 1)) for i in range(line_hor_offset, width - line_hor_offset, line_hor_offset)]
+ if direction == "vertical":
+ lines = [
+ ((i, 0), (i, height - 1))
+ for i in range(
+ line_hor_offset, width - line_hor_offset, line_hor_offset
+ )
+ ]
points = []
pad_x = int(line_hor_offset / 2.0)
for i in range(0, len(lines)):
- a,b = lines[i]
- points.append(((a[0] - pad_x , a[1]), (b[0] - pad_x, b[1])))
- a,b = lines[-1]
- points.append(((a[0] + pad_x , a[1]), (b[0] + pad_x, b[1])))
+ a, b = lines[i]
+ points.append(((a[0] - pad_x, a[1]), (b[0] - pad_x, b[1])))
+ a, b = lines[-1]
+ points.append(((a[0] + pad_x, a[1]), (b[0] + pad_x, b[1])))
else:
- lines = [((0, j), (width - 1, j)) for j in range(line_ver_offset, height - line_ver_offset, line_ver_offset)]
+ lines = [
+ ((0, j), (width - 1, j))
+ for j in range(
+ line_ver_offset, height - line_ver_offset, line_ver_offset
+ )
+ ]
points = []
pad_y = int(line_ver_offset / 2.0)
for i in range(0, len(lines)):
- a,b = lines[i]
- points.append(((a[0] , a[1] - pad_y), (b[0], b[1] - pad_y)))
- a,b = lines[-1]
- points.append(((a[0] , a[1] + pad_y), (b[0], b[1] + pad_y)))
-
+ a, b = lines[i]
+ points.append(((a[0], a[1] - pad_y), (b[0], b[1] - pad_y)))
+ a, b = lines[-1]
+ points.append(((a[0], a[1] + pad_y), (b[0], b[1] + pad_y)))
+
for a, b in lines:
- rr,cc = line(*a, *b)
- cost_map[cc, rr] = 9999 # Lava
-
+ rr, cc = line(*a, *b)
+ cost_map[cc, rr] = 9999 # Lava
+
# Calculate route
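+ # route_through_array finds a minimum-cost path between each pair of endpoints, hugging detected edges (cost 0) while avoiding the barrier lines (cost 9999)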
for a, b in points:
- line_coords, cost = route_through_array(cost_map, (a[1], a[0]), (b[1], b[0]), fully_connected=True, geometric=True)
+ line_coords, cost = route_through_array(
+ cost_map,
+ (a[1], a[0]),
+ (b[1], b[0]),
+ fully_connected=True,
+ geometric=True,
+ )
# Convert to geographic
geo_line_coords = [f.xy(*c) for c in line_coords]
@@ -129,11 +156,10 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
# Simplify
ls = LineString(geo_line_coords)
linestrings.append(ls.simplify(0.05, preserve_topology=False))
-
- compute_linestrings('vertical')
- compute_linestrings('horizontal')
-
+ compute_linestrings("vertical")
+ compute_linestrings("horizontal")
+
# Generate polygons and keep only those inside the crop area
log.ODM_INFO("Generating polygons... this could take a bit.")
polygons = []
@@ -148,7 +174,7 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
log.ODM_INFO("Merging polygons")
cutline_polygons = unary_union(polygons)
- if not hasattr(cutline_polygons, '__getitem__'):
+ if not hasattr(cutline_polygons, "__getitem__"):
cutline_polygons = [cutline_polygons]
largest_cutline = cutline_polygons[0]
@@ -157,27 +183,21 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
if p.area > max_area:
max_area = p.area
largest_cutline = p
-
+
log.ODM_INFO("Largest cutline found: %s m^2" % max_area)
meta = {
- 'crs': {'init': str(f.crs).lower() },
- 'driver': 'GPKG',
- 'schema': {
- 'properties': {},
- 'geometry': 'Polygon'
- }
+ "crs": {"init": str(f.crs).lower()},
+ "driver": "GPKG",
+ "schema": {"properties": {}, "geometry": "Polygon"},
}
# Remove previous
if os.path.exists(destination):
os.remove(destination)
-
- with fiona.open(destination, 'w', **meta) as sink:
- sink.write({
- 'geometry': mapping(largest_cutline),
- 'properties': {}
- })
+
+ with fiona.open(destination, "w", **meta) as sink:
+ sink.write({"geometry": mapping(largest_cutline), "properties": {}})
f.close()
log.ODM_INFO("Wrote %s" % destination)
@@ -185,4 +205,7 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
if scaled_orthophoto is not None and os.path.exists(scaled_orthophoto):
os.remove(scaled_orthophoto)
else:
- log.ODM_WARNING("We've been asked to compute cutline, but either %s or %s is missing. Skipping..." % (orthophoto_file, crop_area_file))
+ log.ODM_WARNING(
+ "We've been asked to compute cutline, but either %s or %s is missing. Skipping..."
+ % (orthophoto_file, crop_area_file)
+ )
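
The cutline search above marks the evenly spaced guide lines as high-cost "lava" cells and then routes between padded endpoints with skimage's route_through_array, so each cut bends around expensive pixels. A minimal sketch of that routing step on a toy cost map (the array size, costs and gap position are made up for illustration):

import numpy as np
from skimage.graph import route_through_array

# Toy 5x5 cost map: the middle column is "lava" (cost 9999) except for a
# single gap at row 2, so the cheapest left-to-right route must use it.
cost_map = np.ones((5, 5))
cost_map[:, 2] = 9999
cost_map[2, 2] = 1

# Start/end are (row, col); the function returns the path and its total cost.
path, total_cost = route_through_array(
    cost_map, (2, 0), (2, 4), fully_connected=True, geometric=True
)
print(path)        # list of (row, col) cells squeezing through the gap
print(total_cost)
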
diff --git a/opendm/dem/commands.py b/opendm/dem/commands.py
index 8113621e..0c2d29b9 100755
--- a/opendm/dem/commands.py
+++ b/opendm/dem/commands.py
@@ -30,6 +30,7 @@ except ModuleNotFoundError:
except:
pass
+
def classify(lasFile, scalar, slope, threshold, window):
start = datetime.now()
@@ -38,57 +39,83 @@ def classify(lasFile, scalar, slope, threshold, window):
except:
log.ODM_WARNING("Error creating classified file %s" % lasFile)
- log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
+ log.ODM_INFO("Created %s in %s" % (lasFile, datetime.now() - start))
return lasFile
+
def rectify(lasFile, reclassify_threshold=5, min_area=750, min_points=500):
start = datetime.now()
try:
- log.ODM_INFO("Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(lasFile, reclassify_threshold, min_area, min_points))
+ log.ODM_INFO(
+ "Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(
+ lasFile, reclassify_threshold, min_area, min_points
+ )
+ )
run_rectification(
- input=lasFile, output=lasFile, \
- reclassify_plan='median', reclassify_threshold=reclassify_threshold, \
- extend_plan='surrounding', extend_grid_distance=5, \
- min_area=min_area, min_points=min_points)
+ input=lasFile,
+ output=lasFile,
+ reclassify_plan="median",
+ reclassify_threshold=reclassify_threshold,
+ extend_plan="surrounding",
+ extend_grid_distance=5,
+ min_area=min_area,
+ min_points=min_points,
+ )
- log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
+ log.ODM_INFO("Created %s in %s" % (lasFile, datetime.now() - start))
except Exception as e:
log.ODM_WARNING("Error rectifying ground in file %s: %s" % (lasFile, str(e)))
return lasFile
+
error = None
-def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True,
- outdir='', resolution=0.1, max_workers=1, max_tile_size=4096,
- decimation=None, with_euclidean_map=False,
- apply_smoothing=True, max_tiles=None):
- """ Create DEM from multiple radii, and optionally gapfill """
-
+
+def create_dem(
+ input_point_cloud,
+ dem_type,
+ output_type="max",
+ radiuses=["0.56"],
+ gapfill=True,
+ outdir="",
+ resolution=0.1,
+ max_workers=1,
+ max_tile_size=4096,
+ decimation=None,
+ with_euclidean_map=False,
+ apply_smoothing=True,
+ max_tiles=None,
+):
+ """Create DEM from multiple radii, and optionally gapfill"""
+
start = datetime.now()
kwargs = {
- 'input': input_point_cloud,
- 'outdir': outdir,
- 'outputType': output_type,
- 'radiuses': ",".join(map(str, radiuses)),
- 'resolution': resolution,
- 'maxTiles': 0 if max_tiles is None else max_tiles,
- 'decimation': 1 if decimation is None else decimation,
- 'classification': 2 if dem_type == 'dtm' else -1,
- 'tileSize': max_tile_size
+ "input": input_point_cloud,
+ "outdir": outdir,
+ "outputType": output_type,
+ "radiuses": ",".join(map(str, radiuses)),
+ "resolution": resolution,
+ "maxTiles": 0 if max_tiles is None else max_tiles,
+ "decimation": 1 if decimation is None else decimation,
+ "classification": 2 if dem_type == "dtm" else -1,
+ "tileSize": max_tile_size,
}
- system.run('renderdem "{input}" '
- '--outdir "{outdir}" '
- '--output-type {outputType} '
- '--radiuses {radiuses} '
- '--resolution {resolution} '
- '--max-tiles {maxTiles} '
- '--decimation {decimation} '
- '--classification {classification} '
- '--tile-size {tileSize} '
- '--force '.format(**kwargs), env_vars={'OMP_NUM_THREADS': max_workers})
+ system.run(
+ 'renderdem "{input}" '
+ '--outdir "{outdir}" '
+ "--output-type {outputType} "
+ "--radiuses {radiuses} "
+ "--resolution {resolution} "
+ "--max-tiles {maxTiles} "
+ "--decimation {decimation} "
+ "--classification {classification} "
+ "--tile-size {tileSize} "
+ "--force ".format(**kwargs),
+ env_vars={"OMP_NUM_THREADS": max_workers},
+ )
output_file = "%s.tif" % dem_type
output_path = os.path.abspath(os.path.join(outdir, output_file))
@@ -99,7 +126,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
filename = os.path.basename(p)
m = re.match("^r([\d\.]+)_x\d+_y\d+\.tif", filename)
if m is not None:
- tiles.append({'filename': p, 'radius': float(m.group(1))})
+ tiles.append({"filename": p, "radius": float(m.group(1))})
if len(tiles) == 0:
raise system.ExitException("No DEM tiles were generated, something went wrong")
@@ -107,31 +134,33 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
log.ODM_INFO("Generated %s tiles" % len(tiles))
# Sort tiles by decreasing radius
- tiles.sort(key=lambda t: float(t['radius']), reverse=True)
+ tiles.sort(key=lambda t: float(t["radius"]), reverse=True)
# Create virtual raster
tiles_vrt_path = os.path.abspath(os.path.join(outdir, "tiles.vrt"))
tiles_file_list = os.path.abspath(os.path.join(outdir, "tiles_list.txt"))
- with open(tiles_file_list, 'w') as f:
+ with open(tiles_file_list, "w") as f:
for t in tiles:
- f.write(t['filename'] + '\n')
+ f.write(t["filename"] + "\n")
run('gdalbuildvrt -input_file_list "%s" "%s" ' % (tiles_file_list, tiles_vrt_path))
merged_vrt_path = os.path.abspath(os.path.join(outdir, "merged.vrt"))
- geotiff_small_path = os.path.abspath(os.path.join(outdir, 'tiles.small.tif'))
- geotiff_small_filled_path = os.path.abspath(os.path.join(outdir, 'tiles.small_filled.tif'))
- geotiff_path = os.path.abspath(os.path.join(outdir, 'tiles.tif'))
+ geotiff_small_path = os.path.abspath(os.path.join(outdir, "tiles.small.tif"))
+ geotiff_small_filled_path = os.path.abspath(
+ os.path.join(outdir, "tiles.small_filled.tif")
+ )
+ geotiff_path = os.path.abspath(os.path.join(outdir, "tiles.tif"))
# Build GeoTIFF
kwargs = {
- 'max_memory': get_max_memory(),
- 'threads': max_workers if max_workers else 'ALL_CPUS',
- 'tiles_vrt': tiles_vrt_path,
- 'merged_vrt': merged_vrt_path,
- 'geotiff': geotiff_path,
- 'geotiff_small': geotiff_small_path,
- 'geotiff_small_filled': geotiff_small_filled_path
+ "max_memory": get_max_memory(),
+ "threads": max_workers if max_workers else "ALL_CPUS",
+ "tiles_vrt": tiles_vrt_path,
+ "merged_vrt": merged_vrt_path,
+ "geotiff": geotiff_path,
+ "geotiff_small": geotiff_small_path,
+ "geotiff_small_filled": geotiff_small_filled_path,
}
if gapfill:
@@ -139,41 +168,62 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
# behaves strangely when reading data directly from a .VRT
# so we need to convert to GeoTIFF first.
# Scale to 10% size
- run('gdal_translate '
- '-co NUM_THREADS={threads} '
- '-co BIGTIFF=IF_SAFER '
- '-co COMPRESS=DEFLATE '
- '--config GDAL_CACHEMAX {max_memory}% '
- '-outsize 10% 0 '
- '"{tiles_vrt}" "{geotiff_small}"'.format(**kwargs))
+ run(
+ "gdal_translate "
+ "-co NUM_THREADS={threads} "
+ "-co BIGTIFF=IF_SAFER "
+ "-co COMPRESS=DEFLATE "
+ "--config GDAL_CACHEMAX {max_memory}% "
+ "-outsize 10% 0 "
+ '"{tiles_vrt}" "{geotiff_small}"'.format(**kwargs)
+ )
# Fill scaled
- gdal_fillnodata(['.',
- '-co', 'NUM_THREADS=%s' % kwargs['threads'],
- '-co', 'BIGTIFF=IF_SAFER',
- '-co', 'COMPRESS=DEFLATE',
- '--config', 'GDAL_CACHE_MAX', str(kwargs['max_memory']) + '%',
- '-b', '1',
- '-of', 'GTiff',
- kwargs['geotiff_small'], kwargs['geotiff_small_filled']])
-
+ gdal_fillnodata(
+ [
+ ".",
+ "-co",
+ "NUM_THREADS=%s" % kwargs["threads"],
+ "-co",
+ "BIGTIFF=IF_SAFER",
+ "-co",
+ "COMPRESS=DEFLATE",
+ "--config",
+ "GDAL_CACHE_MAX",
+ str(kwargs["max_memory"]) + "%",
+ "-b",
+ "1",
+ "-of",
+ "GTiff",
+ kwargs["geotiff_small"],
+ kwargs["geotiff_small_filled"],
+ ]
+ )
+
# Merge filled scaled DEM with unfilled DEM using bilinear interpolation
- run('gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"' % (merged_vrt_path, geotiff_small_filled_path, tiles_vrt_path))
- run('gdal_translate '
- '-co NUM_THREADS={threads} '
- '-co TILED=YES '
- '-co BIGTIFF=IF_SAFER '
- '-co COMPRESS=DEFLATE '
- '--config GDAL_CACHEMAX {max_memory}% '
- '"{merged_vrt}" "{geotiff}"'.format(**kwargs))
+ run(
+ 'gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"'
+ % (merged_vrt_path, geotiff_small_filled_path, tiles_vrt_path)
+ )
+ run(
+ "gdal_translate "
+ "-co NUM_THREADS={threads} "
+ "-co TILED=YES "
+ "-co BIGTIFF=IF_SAFER "
+ "-co COMPRESS=DEFLATE "
+ "--config GDAL_CACHEMAX {max_memory}% "
+ '"{merged_vrt}" "{geotiff}"'.format(**kwargs)
+ )
else:
- run('gdal_translate '
- '-co NUM_THREADS={threads} '
- '-co TILED=YES '
- '-co BIGTIFF=IF_SAFER '
- '-co COMPRESS=DEFLATE '
- '--config GDAL_CACHEMAX {max_memory}% '
- '"{tiles_vrt}" "{geotiff}"'.format(**kwargs))
+ run(
+ "gdal_translate "
+ "-co NUM_THREADS={threads} "
+ "-co TILED=YES "
+ "-co BIGTIFF=IF_SAFER "
+ "-co COMPRESS=DEFLATE "
+ "--config GDAL_CACHEMAX {max_memory}% "
+ '"{tiles_vrt}" "{geotiff}"'.format(**kwargs)
+ )
if apply_smoothing:
median_smoothing(geotiff_path, output_path, num_workers=max_workers)
@@ -185,19 +235,29 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
if with_euclidean_map:
emap_path = io.related_file_path(output_path, postfix=".euclideand")
compute_euclidean_map(tiles_vrt_path, emap_path, overwrite=True)
-
- for cleanup_file in [tiles_vrt_path, tiles_file_list, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
- if os.path.exists(cleanup_file): os.remove(cleanup_file)
+
+ for cleanup_file in [
+ tiles_vrt_path,
+ tiles_file_list,
+ merged_vrt_path,
+ geotiff_small_path,
+ geotiff_small_filled_path,
+ ]:
+ if os.path.exists(cleanup_file):
+ os.remove(cleanup_file)
for t in tiles:
- if os.path.exists(t['filename']): os.remove(t['filename'])
+ if os.path.exists(t["filename"]):
+ os.remove(t["filename"])
- log.ODM_INFO('Completed %s in %s' % (output_file, datetime.now() - start))
+ log.ODM_INFO("Completed %s in %s" % (output_file, datetime.now() - start))
def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
if not os.path.exists(geotiff_path):
- log.ODM_WARNING("Cannot compute euclidean map (file does not exist: %s)" % geotiff_path)
+ log.ODM_WARNING(
+ "Cannot compute euclidean map (file does not exist: %s)" % geotiff_path
+ )
return
nodata = -9999
@@ -212,55 +272,73 @@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
if gdal_proximity is not None:
try:
- gdal_proximity(['gdal_proximity.py',
- geotiff_path, output_path, '-values', str(nodata),
- '-co', 'TILED=YES',
- '-co', 'BIGTIFF=IF_SAFER',
- '-co', 'COMPRESS=DEFLATE',
- ])
+ gdal_proximity(
+ [
+ "gdal_proximity.py",
+ geotiff_path,
+ output_path,
+ "-values",
+ str(nodata),
+ "-co",
+ "TILED=YES",
+ "-co",
+ "BIGTIFF=IF_SAFER",
+ "-co",
+ "COMPRESS=DEFLATE",
+ ]
+ )
except Exception as e:
log.ODM_WARNING("Cannot compute euclidean distance: %s" % str(e))
if os.path.exists(output_path):
return output_path
else:
- log.ODM_WARNING("Cannot compute euclidean distance file: %s" % output_path)
+ log.ODM_WARNING(
+ "Cannot compute euclidean distance file: %s" % output_path
+ )
else:
log.ODM_WARNING("Cannot compute euclidean map, gdal_proximity is missing")
-
+
else:
log.ODM_INFO("Found a euclidean distance map: %s" % output_path)
return output_path
-def median_smoothing(geotiff_path, output_path, window_size=512, num_workers=1, radius=4):
- """ Apply median smoothing """
+def median_smoothing(
+ geotiff_path, output_path, window_size=512, num_workers=1, radius=4
+):
+ """Apply median smoothing"""
start = datetime.now()
if not os.path.exists(geotiff_path):
- raise Exception('File %s does not exist!' % geotiff_path)
+ raise Exception("File %s does not exist!" % geotiff_path)
kwargs = {
- 'input': geotiff_path,
- 'output': output_path,
- 'window': window_size,
- 'radius': radius,
+ "input": geotiff_path,
+ "output": output_path,
+ "window": window_size,
+ "radius": radius,
}
- system.run('fastrasterfilter "{input}" '
- '--output "{output}" '
- '--window-size {window} '
- '--radius {radius} '
- '--co TILED=YES '
- '--co BIGTIFF=IF_SAFER '
- '--co COMPRESS=DEFLATE '.format(**kwargs), env_vars={'OMP_NUM_THREADS': num_workers})
+ system.run(
+ 'fastrasterfilter "{input}" '
+ '--output "{output}" '
+ "--window-size {window} "
+ "--radius {radius} "
+ "--co TILED=YES "
+ "--co BIGTIFF=IF_SAFER "
+ "--co COMPRESS=DEFLATE ".format(**kwargs),
+ env_vars={"OMP_NUM_THREADS": num_workers},
+ )
- log.ODM_INFO('Completed smoothing to create %s in %s' % (output_path, datetime.now() - start))
+ log.ODM_INFO(
+ "Completed smoothing to create %s in %s" % (output_path, datetime.now() - start)
+ )
return output_path
-def get_dem_radius_steps(stats_file, steps, resolution, multiplier = 1.0):
+def get_dem_radius_steps(stats_file, steps, resolution, multiplier=1.0):
radius_steps = [point_cloud.get_spacing(stats_file, resolution) * multiplier]
for _ in range(steps - 1):
radius_steps.append(radius_steps[-1] * math.sqrt(2))
-
- return radius_steps
\ No newline at end of file
+
+ return radius_steps
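
get_dem_radius_steps above seeds the first gridding radius from the point spacing in the stats file and grows each subsequent radius by sqrt(2), so every step covers roughly twice the area of the previous one. A small self-contained sketch of the same progression, assuming a made-up base spacing instead of reading a stats file:

import math

def radius_steps_from_spacing(base_spacing, steps, multiplier=1.0):
    # Same progression as get_dem_radius_steps, seeded from a known spacing
    # value rather than point_cloud.get_spacing(stats_file, resolution).
    radius_steps = [base_spacing * multiplier]
    for _ in range(steps - 1):
        radius_steps.append(radius_steps[-1] * math.sqrt(2))
    return radius_steps

print(radius_steps_from_spacing(0.14, 3))
# [0.14, 0.197..., 0.279...] -- each radius covers about twice the area of the last
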
diff --git a/opendm/dem/ground_rectification/bounds/types.py b/opendm/dem/ground_rectification/bounds/types.py
index da6a79ab..e8771c30 100755
--- a/opendm/dem/ground_rectification/bounds/types.py
+++ b/opendm/dem/ground_rectification/bounds/types.py
@@ -4,6 +4,7 @@ from ..point_cloud import PointCloud
EPSILON = 0.00001
+
class PolyBounds(object):
def __init__(self, points):
self.__points = points
@@ -36,6 +37,7 @@ class PolyBounds(object):
def corners(self):
return self._corners
+
class BoxBounds(object):
def __init__(self, x_min, x_max, y_min, y_max):
self._corners = (x_min, x_max, y_min, y_max)
@@ -70,15 +72,15 @@ class BoxBounds(object):
def area(self):
(x_min, x_max, y_min, y_max) = self._corners
- return (x_max - x_min) * (y_max - y_min)
+ return (x_max - x_min) * (y_max - y_min)
def divide_by_point(self, point):
"""Divide the box into four boxes, marked by the point. It is assumed that the point is inside the box"""
[x_point, y_point] = point
(x_min, x_max, y_min, y_max) = self._corners
return [
- BoxBounds(x_min, x_point, y_min, y_point),
+ BoxBounds(x_min, x_point, y_min, y_point),
BoxBounds(x_point + EPSILON, x_max, y_min, y_point),
- BoxBounds(x_min, x_point, y_point + EPSILON, y_max),
- BoxBounds(x_point + EPSILON, x_max, y_point + EPSILON, y_max)
+ BoxBounds(x_min, x_point, y_point + EPSILON, y_max),
+ BoxBounds(x_point + EPSILON, x_max, y_point + EPSILON, y_max),
]
diff --git a/opendm/dem/ground_rectification/bounds/utils.py b/opendm/dem/ground_rectification/bounds/utils.py
index 78c7300b..8e8c5c6d 100755
--- a/opendm/dem/ground_rectification/bounds/utils.py
+++ b/opendm/dem/ground_rectification/bounds/utils.py
@@ -2,12 +2,20 @@ import numpy as np
from scipy.spatial import ConvexHull
from .types import BoxBounds, PolyBounds
+
def calculate_convex_hull_bounds(points):
hull = ConvexHull(points)
return PolyBounds(points[hull.vertices])
+
def box_from_point_and_size(center, width, height):
- return BoxBounds(center[0] - width / 2, center[0] + width / 2, center[1] - height / 2, center[1] + height / 2)
+ return BoxBounds(
+ center[0] - width / 2,
+ center[0] + width / 2,
+ center[1] - height / 2,
+ center[1] + height / 2,
+ )
+
def box_from_cloud(point_cloud):
xy = point_cloud.get_xy()
diff --git a/opendm/dem/ground_rectification/extra_dimensions/dimension.py b/opendm/dem/ground_rectification/extra_dimensions/dimension.py
index 261bc5c0..233d1ac0 100755
--- a/opendm/dem/ground_rectification/extra_dimensions/dimension.py
+++ b/opendm/dem/ground_rectification/extra_dimensions/dimension.py
@@ -1,6 +1,7 @@
import numpy as np
from abc import ABCMeta, abstractmethod
+
class Dimension(object):
__metaclass__ = ABCMeta
diff --git a/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py
index d2f72fdb..34322982 100755
--- a/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py
+++ b/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py
@@ -2,6 +2,7 @@ import numpy as np
from sklearn.linear_model import RANSACRegressor
from .dimension import Dimension
+
class DistanceDimension(Dimension):
"""Assign each point the distance to the estimated ground"""
@@ -32,14 +33,14 @@ class DistanceDimension(Dimension):
super(DistanceDimension, self)._set_values(point_cloud, diff)
def get_name(self):
- return 'distance_to_ground'
+ return "distance_to_ground"
def get_las_type(self):
- return 'float64'
+ return "float64"
def __calculate_angle(self, model):
"Calculate the angle between the estimated plane and the XY plane"
a = model.estimator_.coef_[0]
b = model.estimator_.coef_[1]
- angle = np.arccos(1 / np.sqrt(a ** 2 + b ** 2 + 1))
+ angle = np.arccos(1 / np.sqrt(a**2 + b**2 + 1))
return np.degrees(angle)
diff --git a/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py
index c371e83b..592524f8 100755
--- a/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py
+++ b/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py
@@ -1,6 +1,7 @@
import numpy as np
from .dimension import Dimension
+
class ExtendedDimension(Dimension):
"""Whether the point was added or was already on the original point cloud"""
@@ -17,7 +18,7 @@ class ExtendedDimension(Dimension):
super(ExtendedDimension, self)._set_values(point_cloud, added)
def get_name(self):
- return 'extended'
+ return "extended"
def get_las_type(self):
- return 'uint16'
+ return "uint16"
diff --git a/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py
index b7fed6b6..bf8a93db 100755
--- a/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py
+++ b/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py
@@ -1,6 +1,7 @@
import numpy as np
from .dimension import Dimension
+
class PartitionDimension(Dimension):
"""Group points by partition"""
@@ -15,11 +16,13 @@ class PartitionDimension(Dimension):
def assign(self, *point_clouds, **kwargs):
for point_cloud in point_clouds:
- super(PartitionDimension, self)._set_values(point_cloud, np.full(point_cloud.len(), self.counter))
+ super(PartitionDimension, self)._set_values(
+ point_cloud, np.full(point_cloud.len(), self.counter)
+ )
self.counter += 1
def get_name(self):
return self.name
def get_las_type(self):
- return 'uint32'
+ return "uint32"
diff --git a/opendm/dem/ground_rectification/extra_dimensions/userdata_dimension.py b/opendm/dem/ground_rectification/extra_dimensions/userdata_dimension.py
index 0d6dd517..ab3715e3 100755
--- a/opendm/dem/ground_rectification/extra_dimensions/userdata_dimension.py
+++ b/opendm/dem/ground_rectification/extra_dimensions/userdata_dimension.py
@@ -1,6 +1,7 @@
import numpy as np
from .dimension import Dimension
+
class UserDataDimension(Dimension):
"""A dimension that stores the user data of a point cloud."""
@@ -16,10 +17,12 @@ class UserDataDimension(Dimension):
# Simply copy the value of the UserData dimension from the original point cloud
# to the new point cloud
for point_cloud in point_clouds:
- super(UserDataDimension, self)._set_values(point_cloud, point_cloud.user_data)
+ super(UserDataDimension, self)._set_values(
+ point_cloud, point_cloud.user_data
+ )
def get_name(self):
- return 'UserData'
+ return "UserData"
def get_las_type(self):
- return 'uint8'
+ return "uint8"
diff --git a/opendm/dem/ground_rectification/grid/builder.py b/opendm/dem/ground_rectification/grid/builder.py
index be141a43..b863bc6e 100755
--- a/opendm/dem/ground_rectification/grid/builder.py
+++ b/opendm/dem/ground_rectification/grid/builder.py
@@ -3,9 +3,11 @@ from sklearn.neighbors import BallTree
EPSILON = 0.00001
+
def build_grid(bounds, point_cloud, distance):
"""First, a 2D grid is built with a distance of 'distance' between points, inside the given bounds.
- Then, only points that don't have a point cloud neighbour closer than 'distance' are left. The rest are filtered out."""
+ Then, only points that don't have a point cloud neighbour closer than 'distance' are left. The rest are filtered out.
+ """
# Generate a grid of 2D points inside the bounds, with a distance of 'distance' between them
grid = __build_grid(bounds, distance)
@@ -16,14 +18,20 @@ def build_grid(bounds, point_cloud, distance):
# Filter out the grid points that have a neighbor closer than 'distance' from the given point cloud
return __calculate_lonely_points(grid_inside, point_cloud, distance)
+
def __build_grid(bounds, distance):
x_min, x_max, y_min, y_max = bounds.corners()
- grid = [[x, y] for x in np.arange(x_min, x_max + distance, distance) for y in np.arange(y_min, y_max + distance, distance)]
+ grid = [
+ [x, y]
+ for x in np.arange(x_min, x_max + distance, distance)
+ for y in np.arange(y_min, y_max + distance, distance)
+ ]
return np.array(grid)
+
def __calculate_lonely_points(grid, point_cloud, distance):
# Generate BallTree for point cloud
- ball_tree = BallTree(point_cloud.get_xy(), metric='manhattan')
+ ball_tree = BallTree(point_cloud.get_xy(), metric="manhattan")
# Calculate for each of the points in the grid, the amount of neighbors in the original ground cloud
count = ball_tree.query_radius(grid, distance - EPSILON, count_only=True)
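
As the build_grid docstring says, a regular grid is laid over the bounds and only "lonely" grid points survive: those with no point-cloud neighbour within the given distance (Manhattan metric). A compact, self-contained version of that filter, with invented corner values and a single cloud point for the example:

import numpy as np
from sklearn.neighbors import BallTree

EPSILON = 0.00001

def lonely_grid_points(corners, cloud_xy, distance):
    # corners: (x_min, x_max, y_min, y_max); cloud_xy: (N, 2) ground points.
    x_min, x_max, y_min, y_max = corners
    grid = np.array([
        [x, y]
        for x in np.arange(x_min, x_max + distance, distance)
        for y in np.arange(y_min, y_max + distance, distance)
    ])
    tree = BallTree(cloud_xy, metric="manhattan")
    # Count existing neighbours around each grid point; keep the ones with none.
    counts = tree.query_radius(grid, distance - EPSILON, count_only=True)
    return grid[counts == 0]

print(lonely_grid_points((0, 10, 0, 10), np.array([[0.5, 0.5]]), 5.0))
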
diff --git a/opendm/dem/ground_rectification/io/las_io.py b/opendm/dem/ground_rectification/io/las_io.py
index 11afb59b..7e7d8800 100755
--- a/opendm/dem/ground_rectification/io/las_io.py
+++ b/opendm/dem/ground_rectification/io/las_io.py
@@ -1,5 +1,7 @@
import time
-from opendm.dem.ground_rectification.extra_dimensions.userdata_dimension import UserDataDimension
+from opendm.dem.ground_rectification.extra_dimensions.userdata_dimension import (
+ UserDataDimension,
+)
import pdal
import numpy as np
from opendm import log
@@ -7,8 +9,11 @@ from ..point_cloud import PointCloud
import pdb
import json
+
def read_cloud(point_cloud_path):
- pipeline = pdal.Pipeline('[{"type":"readers.las","filename":"%s"}]' % point_cloud_path)
+ pipeline = pdal.Pipeline(
+ '[{"type":"readers.las","filename":"%s"}]' % point_cloud_path
+ )
pipeline.execute()
arrays = pipeline.arrays[0]
@@ -43,33 +48,37 @@ def write_cloud(metadata, point_cloud, output_point_cloud_path):
red, green, blue = np.hsplit(point_cloud.rgb, 3)
- arrays = np.zeros(len(x),
- dtype=[('X', '= 1:
- db = DBSCAN(eps=distance + EPSILON, min_samples=1, metric='manhattan', n_jobs=-1).fit(grid)
+ db = DBSCAN(
+ eps=distance + EPSILON, min_samples=1, metric="manhattan", n_jobs=-1
+ ).fit(grid)
clusters = set(db.labels_)
for cluster in clusters:
cluster_members = grid[db.labels_ == cluster]
- point_cloud_neighbors, point_cloud_neighbors_mask = self.__find_cluster_neighbors(cluster_members, distance)
+ point_cloud_neighbors, point_cloud_neighbors_mask = (
+ self.__find_cluster_neighbors(cluster_members, distance)
+ )
if self.__is_cluster_surrounded(cluster_members, point_cloud_neighbors):
- result.add_cluster_partition(cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask)
+ result.add_cluster_partition(
+ cluster_members,
+ point_cloud_neighbors,
+ point_cloud_neighbors_mask,
+ )
else:
- point_cloud_neighbors, point_cloud_neighbors_mask, bounding_box = self.__find_points_for_non_surrounded_cluster(bounds, cluster_members, distance, min_area, min_points)
- result.add_zone_partition(cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask, bounding_box)
+ point_cloud_neighbors, point_cloud_neighbors_mask, bounding_box = (
+ self.__find_points_for_non_surrounded_cluster(
+ bounds, cluster_members, distance, min_area, min_points
+ )
+ )
+ result.add_zone_partition(
+ cluster_members,
+ point_cloud_neighbors,
+ point_cloud_neighbors_mask,
+ bounding_box,
+ )
return result.build_result(self.point_cloud)
- def __find_points_for_non_surrounded_cluster(self, bounds, cluster_members, distance, min_area, min_points):
+ def __find_points_for_non_surrounded_cluster(
+ self, bounds, cluster_members, distance, min_area, min_points
+ ):
(center_x, center_y) = bounds.center()
[x_min, y_min] = np.amin(cluster_members, axis=0)
@@ -84,22 +106,35 @@ class SurroundingPartitions(PartitionPlan):
return ratio > MIN_PERCENTAGE_OF_POINTS_IN_CONVEX_HULL
def __find_cluster_neighbors(self, cluster_members, distance):
- mask_per_point = self.manhattan_ball_tree.query_radius(cluster_members, distance * 3)
+ mask_per_point = self.manhattan_ball_tree.query_radius(
+ cluster_members, distance * 3
+ )
all_neighbor_mask = np.concatenate(mask_per_point)
point_cloud_neighbors = self.point_cloud[all_neighbor_mask]
return point_cloud_neighbors, all_neighbor_mask
+
class ExecutionResult:
def __init__(self, cloud_size):
- self.partitions = [ ]
+ self.partitions = []
self.marked_as_neighbors = np.zeros(cloud_size, dtype=bool)
- def add_cluster_partition(self, cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask):
- convex_hull = calculate_convex_hull_bounds(np.concatenate((point_cloud_neighbors.get_xy(), cluster_members)))
+ def add_cluster_partition(
+ self, cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask
+ ):
+ convex_hull = calculate_convex_hull_bounds(
+ np.concatenate((point_cloud_neighbors.get_xy(), cluster_members))
+ )
self.marked_as_neighbors[point_cloud_neighbors_mask] = True
self.partitions.append(Partition(point_cloud_neighbors, bounds=convex_hull))
- def add_zone_partition(self, cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask, bounding_box):
+ def add_zone_partition(
+ self,
+ cluster_members,
+ point_cloud_neighbors,
+ point_cloud_neighbors_mask,
+ bounding_box,
+ ):
self.marked_as_neighbors[point_cloud_neighbors_mask] = True
self.partitions.append(Partition(point_cloud_neighbors, bounds=bounding_box))
diff --git a/opendm/dem/ground_rectification/point_cloud.py b/opendm/dem/ground_rectification/point_cloud.py
index a39bdf76..686128d5 100755
--- a/opendm/dem/ground_rectification/point_cloud.py
+++ b/opendm/dem/ground_rectification/point_cloud.py
@@ -1,9 +1,20 @@
import numpy as np
from numpy.lib.recfunctions import append_fields
+
class PointCloud:
"""Representation of a 3D point cloud"""
- def __init__(self, xy, z, classification, rgb, indices, extra_dimensions, extra_dimensions_metadata):
+
+ def __init__(
+ self,
+ xy,
+ z,
+ classification,
+ rgb,
+ indices,
+ extra_dimensions,
+ extra_dimensions_metadata,
+ ):
self.xy = xy
self.z = z
self.classification = classification
@@ -17,17 +28,35 @@ class PointCloud:
xy = np.column_stack((x, y))
rgb = np.column_stack((red, green, blue))
indices = indices if indices is not None else np.arange(0, len(x))
- return PointCloud(xy, z, classification, rgb, indices, { }, { })
+ return PointCloud(xy, z, classification, rgb, indices, {}, {})
@staticmethod
def with_xy(xy):
[x, y] = np.hsplit(xy, 2)
empty = np.empty(xy.shape[0])
- return PointCloud.with_dimensions(x.ravel(), y.ravel(), empty, np.empty(xy.shape[0], dtype=np.uint8), empty, empty, empty)
+ return PointCloud.with_dimensions(
+ x.ravel(),
+ y.ravel(),
+ empty,
+ np.empty(xy.shape[0], dtype=np.uint8),
+ empty,
+ empty,
+ empty,
+ )
def __getitem__(self, mask):
- masked_dimensions = { name: values[mask] for name, values in self.extra_dimensions.items() }
- return PointCloud(self.xy[mask], self.z[mask], self.classification[mask], self.rgb[mask], self.indices[mask], masked_dimensions, self.extra_dimensions_metadata)
+ masked_dimensions = {
+ name: values[mask] for name, values in self.extra_dimensions.items()
+ }
+ return PointCloud(
+ self.xy[mask],
+ self.z[mask],
+ self.classification[mask],
+ self.rgb[mask],
+ self.indices[mask],
+ masked_dimensions,
+ self.extra_dimensions_metadata,
+ )
def concatenate(self, other_cloud):
for name, dimension in self.extra_dimensions_metadata.items():
@@ -36,13 +65,20 @@ class PointCloud:
for name, dimension in other_cloud.extra_dimensions_metadata.items():
if name not in self.extra_dimensions:
dimension.assign_default(self)
- new_indices = np.arange(len(self.indices), len(self.indices) + len(other_cloud.indices))
+ new_indices = np.arange(
+ len(self.indices), len(self.indices) + len(other_cloud.indices)
+ )
self.xy = np.concatenate((self.xy, other_cloud.xy))
self.z = np.concatenate((self.z, other_cloud.z))
- self.classification = np.concatenate((self.classification, other_cloud.classification))
+ self.classification = np.concatenate(
+ (self.classification, other_cloud.classification)
+ )
self.rgb = np.concatenate((self.rgb, other_cloud.rgb))
self.indices = np.concatenate((self.indices, new_indices))
- self.extra_dimensions = { name: np.concatenate((values, other_cloud.extra_dimensions[name])) for name, values in self.extra_dimensions.items() }
+ self.extra_dimensions = {
+ name: np.concatenate((values, other_cloud.extra_dimensions[name]))
+ for name, values in self.extra_dimensions.items()
+ }
def update(self, other_cloud):
for name, dimension in self.extra_dimensions_metadata.items():
diff --git a/opendm/dem/ground_rectification/rectify.py b/opendm/dem/ground_rectification/rectify.py
index c1073a22..9ec6b4d0 100755
--- a/opendm/dem/ground_rectification/rectify.py
+++ b/opendm/dem/ground_rectification/rectify.py
@@ -14,16 +14,30 @@ from .point_cloud import PointCloud
EPSILON = 0.00001
+
def run_rectification(**kwargs):
- header, point_cloud = read_cloud(kwargs['input'])
+ header, point_cloud = read_cloud(kwargs["input"])
- if 'reclassify_plan' in kwargs and kwargs['reclassify_plan'] is not None:
- point_cloud = reclassify_cloud(point_cloud, kwargs['reclassify_plan'], kwargs['reclassify_threshold'], kwargs['min_points'], kwargs['min_area'])
+ if "reclassify_plan" in kwargs and kwargs["reclassify_plan"] is not None:
+ point_cloud = reclassify_cloud(
+ point_cloud,
+ kwargs["reclassify_plan"],
+ kwargs["reclassify_threshold"],
+ kwargs["min_points"],
+ kwargs["min_area"],
+ )
- if 'extend_plan' in kwargs and kwargs['extend_plan'] is not None:
- point_cloud = extend_cloud(point_cloud, kwargs['extend_plan'], kwargs['extend_grid_distance'], kwargs['min_points'], kwargs['min_area'])
+ if "extend_plan" in kwargs and kwargs["extend_plan"] is not None:
+ point_cloud = extend_cloud(
+ point_cloud,
+ kwargs["extend_plan"],
+ kwargs["extend_grid_distance"],
+ kwargs["min_points"],
+ kwargs["min_area"],
+ )
+
+ write_cloud(header, point_cloud, kwargs["output"])
- write_cloud(header, point_cloud, kwargs['output'])
def reclassify_cloud(point_cloud, plan, threshold, min_points, min_area):
# Get only ground
@@ -33,10 +47,13 @@ def reclassify_cloud(point_cloud, plan, threshold, min_points, min_area):
partition_plan = select_partition_plan(plan, ground_cloud)
# Execute the partition plan, and get all the partitions
- partitions = [result for result in partition_plan.execute(min_points=min_points, min_area=min_area)]
+ partitions = [
+ result
+ for result in partition_plan.execute(min_points=min_points, min_area=min_area)
+ ]
# Add 'distance to ground' and 'partition number' dimensions to the cloud
- for dimension in [DistanceDimension(), PartitionDimension('reclassify_partition')]:
+ for dimension in [DistanceDimension(), PartitionDimension("reclassify_partition")]:
# Calculate new dimension for partition
for partition in partitions:
@@ -46,13 +63,14 @@ def reclassify_cloud(point_cloud, plan, threshold, min_points, min_area):
point_cloud.update(partition.point_cloud)
# Calculate the points that need to be reclassified
- mask = point_cloud.get_extra_dimension_values('distance_to_ground') > threshold
+ mask = point_cloud.get_extra_dimension_values("distance_to_ground") > threshold
# Reclassify them as 'unclassified'
point_cloud.classification[mask] = 1
return point_cloud
+
def extend_cloud(point_cloud, plan, distance, min_points, min_area):
# Get only ground
ground_cloud = point_cloud[point_cloud.classification == 2]
@@ -70,10 +88,12 @@ def extend_cloud(point_cloud, plan, distance, min_points, min_area):
partition_plan = select_partition_plan(plan, ground_cloud)
# Execute the partition plan, and get all the partitions
- partitions = partition_plan.execute(distance=distance, min_points=min_points, min_area=min_area, bounds=bounds)
+ partitions = partition_plan.execute(
+ distance=distance, min_points=min_points, min_area=min_area, bounds=bounds
+ )
# Create dimensions
- partition_dimension = PartitionDimension('extend_partition')
+ partition_dimension = PartitionDimension("extend_partition")
extended_dimension = ExtendedDimension()
for partition in partitions:
@@ -98,7 +118,6 @@ def extend_cloud(point_cloud, plan, distance, min_points, min_area):
# Update new information to the original point cloud
point_cloud.update(partition.point_cloud)
-
# Calculate the bounding box of the original cloud
bbox = point_cloud.get_bounding_box()
@@ -111,9 +130,12 @@ def extend_cloud(point_cloud, plan, distance, min_points, min_area):
# Add the new points to the original point cloud
return point_cloud
+
def __calculate_new_points(grid_points_inside, partition_point_cloud):
# Calculate RANSCAC model
- model = RANSACRegressor().fit(partition_point_cloud.get_xy(), partition_point_cloud.get_z())
+ model = RANSACRegressor().fit(
+ partition_point_cloud.get_xy(), partition_point_cloud.get_z()
+ )
# With the ransac model, calculate the altitude for each grid point
grid_points_altitude = model.predict(grid_points_inside.get_xy())
@@ -131,24 +153,78 @@ def __calculate_new_points(grid_points_inside, partition_point_cloud):
[x, y] = np.hsplit(grid_points_inside.get_xy(), 2)
# Return point cloud
- return PointCloud.with_dimensions(x.ravel(), y.ravel(), grid_points_altitude, classification, red, green, blue, grid_points_inside.indices)
+ return PointCloud.with_dimensions(
+ x.ravel(),
+ y.ravel(),
+ grid_points_altitude,
+ classification,
+ red,
+ green,
+ blue,
+ grid_points_inside.indices,
+ )
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='This script takes a pre-classified point cloud, and then it re-clasiffies wrongly classified ground point to non-ground points and finally adds ground points where needed.')
- parser.add_argument('input', type=str, help='The path where to find the pre-classified point cloud.')
- parser.add_argument('output', type=str, help='The path where to save the rectified point cloud.')
- parser.add_argument('--reclassify_plan', type=str, help='The partition plan to use reclasiffication. Must be one of(one, uniform, median, surrounding)')
- parser.add_argument('--reclassify_threshold', type=float, help='Every point with a distance to the estimated ground that is higher than the threshold will be reclassified as non ground', default=5)
- parser.add_argument('--extend_plan', type=str, help='The partition plan to use for extending the ground. Must be one of(one, uniform, median, surrounding)')
- parser.add_argument('--extend_grid_distance', type=float, help='The distance between points on the grid that will be added to the point cloud.', default=5)
- parser.add_argument('--min_area', type=int, help='Some partition plans need a minimum area as a stopping criteria.', default=750)
- parser.add_argument('--min_points', type=int, help='Some partition plans need a minimum number of points as a stopping criteria.', default=500)
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="This script takes a pre-classified point cloud, and then it re-clasiffies wrongly classified ground point to non-ground points and finally adds ground points where needed."
+ )
+ parser.add_argument(
+ "input", type=str, help="The path where to find the pre-classified point cloud."
+ )
+ parser.add_argument(
+ "output", type=str, help="The path where to save the rectified point cloud."
+ )
+ parser.add_argument(
+ "--reclassify_plan",
+ type=str,
+ help="The partition plan to use reclasiffication. Must be one of(one, uniform, median, surrounding)",
+ )
+ parser.add_argument(
+ "--reclassify_threshold",
+ type=float,
+ help="Every point with a distance to the estimated ground that is higher than the threshold will be reclassified as non ground",
+ default=5,
+ )
+ parser.add_argument(
+ "--extend_plan",
+ type=str,
+ help="The partition plan to use for extending the ground. Must be one of(one, uniform, median, surrounding)",
+ )
+ parser.add_argument(
+ "--extend_grid_distance",
+ type=float,
+ help="The distance between points on the grid that will be added to the point cloud.",
+ default=5,
+ )
+ parser.add_argument(
+ "--min_area",
+ type=int,
+ help="Some partition plans need a minimum area as a stopping criteria.",
+ default=750,
+ )
+ parser.add_argument(
+ "--min_points",
+ type=int,
+ help="Some partition plans need a minimum number of points as a stopping criteria.",
+ default=500,
+ )
args = parser.parse_args()
if args.reclassify_plan is None and args.extend_plan is None:
- raise Exception("Please set a reclassifying or extension plan. Otherwise there is nothing for me to do.")
+ raise Exception(
+ "Please set a reclassifying or extension plan. Otherwise there is nothing for me to do."
+ )
- run(input=args.input, reclassify_plan=args.reclassify_plan, reclassify_threshold=args.reclassify_threshold, \
- extend_plan=args.extend_plan, extend_grid_distance=args.extend_grid_distance, \
- output=args.output, min_points=args.min_points, min_area=args.min_area, debug=False)
+ run(
+ input=args.input,
+ reclassify_plan=args.reclassify_plan,
+ reclassify_threshold=args.reclassify_threshold,
+ extend_plan=args.extend_plan,
+ extend_grid_distance=args.extend_grid_distance,
+ output=args.output,
+ min_points=args.min_points,
+ min_area=args.min_area,
+ debug=False,
+ )
diff --git a/opendm/dem/merge.py b/opendm/dem/merge.py
index fdeef93a..c7164570 100644
--- a/opendm/dem/merge.py
+++ b/opendm/dem/merge.py
@@ -9,19 +9,22 @@ from opendm import log
from opendm import io
import os
-def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_map_source=None):
+
+def euclidean_merge_dems(
+ input_dems, output_dem, creation_options={}, euclidean_map_source=None
+):
"""
Based on https://github.com/mapbox/rio-merge-rgba
and ideas from Anna Petrasova
implementation by Piero Toffanin
- Computes a merged DEM by computing/using a euclidean
- distance to NODATA cells map for all DEMs and then blending all overlapping DEM cells
+ Computes a merged DEM by computing/using a euclidean
+ distance to NODATA cells map for all DEMs and then blending all overlapping DEM cells
by a weighted average based on such euclidean distance.
"""
inputs = []
- bounds=None
- precision=7
+ bounds = None
+ precision = 7
existing_dems = []
for dem in input_dems:
@@ -41,13 +44,19 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
profile = first.profile
for dem in existing_dems:
- eumap = compute_euclidean_map(dem, io.related_file_path(dem, postfix=".euclideand", replace_base=euclidean_map_source), overwrite=False)
+ eumap = compute_euclidean_map(
+ dem,
+ io.related_file_path(
+ dem, postfix=".euclideand", replace_base=euclidean_map_source
+ ),
+ overwrite=False,
+ )
if eumap and io.file_exists(eumap):
inputs.append((dem, eumap))
log.ODM_INFO("%s valid DEM rasters to merge" % len(inputs))
- sources = [(rasterio.open(d), rasterio.open(e)) for d,e in inputs]
+ sources = [(rasterio.open(d), rasterio.open(e)) for d, e in inputs]
# Extent from option or extent of all inputs.
if bounds:
@@ -82,10 +91,10 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
profile["transform"] = output_transform
profile["height"] = output_height
profile["width"] = output_width
- profile["tiled"] = creation_options.get('TILED', 'YES') == 'YES'
- profile["blockxsize"] = creation_options.get('BLOCKXSIZE', 512)
- profile["blockysize"] = creation_options.get('BLOCKYSIZE', 512)
- profile["compress"] = creation_options.get('COMPRESS', 'LZW')
+ profile["tiled"] = creation_options.get("TILED", "YES") == "YES"
+ profile["blockxsize"] = creation_options.get("BLOCKXSIZE", 512)
+ profile["blockysize"] = creation_options.get("BLOCKYSIZE", 512)
+ profile["compress"] = creation_options.get("COMPRESS", "LZW")
profile["nodata"] = src_nodata
# Creation opts
@@ -123,17 +132,35 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
nodata = src_d.nodatavals[0]
# Alternative, custom get_window using rounding
- src_window_d = tuple(zip(rowcol(
- src_d.transform, left, top, op=round, precision=precision
- ), rowcol(
- src_d.transform, right, bottom, op=round, precision=precision
- )))
+ src_window_d = tuple(
+ zip(
+ rowcol(
+ src_d.transform, left, top, op=round, precision=precision
+ ),
+ rowcol(
+ src_d.transform,
+ right,
+ bottom,
+ op=round,
+ precision=precision,
+ ),
+ )
+ )
- src_window_e = tuple(zip(rowcol(
- src_e.transform, left, top, op=round, precision=precision
- ), rowcol(
- src_e.transform, right, bottom, op=round, precision=precision
- )))
+ src_window_e = tuple(
+ zip(
+ rowcol(
+ src_e.transform, left, top, op=round, precision=precision
+ ),
+ rowcol(
+ src_e.transform,
+ right,
+ bottom,
+ op=round,
+ precision=precision,
+ ),
+ )
+ )
temp_d = np.zeros(dst_shape, dtype=dtype)
temp_d = src_d.read(
@@ -147,12 +174,12 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
# Set NODATA areas in the euclidean map to a very low value
# so that:
- # - Areas with overlap prioritize DEM layers' cells that
+ # - Areas with overlap prioritize DEM layers' cells that
# are far away from NODATA areas
# - Areas that have no overlap are included in the final result
# even if they are very close to a NODATA cell
- temp_e[temp_e==0] = small_distance
- temp_e[temp_d==nodata] = 0
+ temp_e[temp_e == 0] = small_distance
+ temp_e[temp_d == nodata] = 0
np.multiply(temp_d, temp_e, out=temp_d)
np.add(dstarr, temp_d, out=dstarr)
@@ -163,9 +190,11 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
# Perform nearest neighbor interpolation on areas where two or more rasters overlap
# but where both rasters have only interpolated data. This prevents the creation
# of artifacts that average areas of interpolation.
- indices = ndimage.distance_transform_edt(np.logical_and(distsum < 1, distsum > small_distance),
- return_distances=False,
- return_indices=True)
+ indices = ndimage.distance_transform_edt(
+ np.logical_and(distsum < 1, distsum > small_distance),
+ return_distances=False,
+ return_indices=True,
+ )
dstarr = dstarr[tuple(indices)]
dstarr[dstarr == 0.0] = src_nodata
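
euclidean_merge_dems weights every DEM cell by its euclidean distance to the nearest NODATA cell, so along seams the raster with the "deeper" valid data dominates the average. Stripped of the windowed rasterio I/O and the final interpolation pass, the per-cell blend comes down to roughly this (the array inputs and nodata value are illustrative):

import numpy as np

def blend_dems(dems, distance_maps, nodata=-9999, small_distance=0.001):
    # dems / distance_maps: equally shaped 2D arrays; each distance map holds
    # the euclidean distance to the nearest NODATA cell of its DEM.
    accum = np.zeros(dems[0].shape, dtype="float64")
    weight_sum = np.zeros_like(accum)
    for dem, dist in zip(dems, distance_maps):
        w = dist.astype("float64").copy()
        w[w == 0] = small_distance   # keep non-overlapping edges in the result
        w[dem == nodata] = 0         # NODATA cells contribute nothing
        accum += dem * w
        weight_sum += w
    out = np.full(accum.shape, float(nodata))
    valid = weight_sum > 0
    out[valid] = accum[valid] / weight_sum[valid]
    return out
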
diff --git a/opendm/dem/pdal.py b/opendm/dem/pdal.py
index 2c0b8e4e..fc4b9c0a 100644
--- a/opendm/dem/pdal.py
+++ b/opendm/dem/pdal.py
@@ -45,128 +45,118 @@ from datetime import datetime
def json_base():
- """ Create initial JSON for PDAL pipeline """
- return {'pipeline': []}
+ """Create initial JSON for PDAL pipeline"""
+ return {"pipeline": []}
def json_gdal_base(filename, output_type, radius, resolution=1, bounds=None):
- """ Create initial JSON for PDAL pipeline containing a Writer element """
+ """Create initial JSON for PDAL pipeline containing a Writer element"""
json = json_base()
d = {
- 'type': 'writers.gdal',
- 'resolution': resolution,
- 'radius': radius,
- 'filename': filename,
- 'output_type': output_type,
- 'data_type': 'float'
+ "type": "writers.gdal",
+ "resolution": resolution,
+ "radius": radius,
+ "filename": filename,
+ "output_type": output_type,
+ "data_type": "float",
}
if bounds is not None:
- d['bounds'] = "([%s,%s],[%s,%s])" % (bounds['minx'], bounds['maxx'], bounds['miny'], bounds['maxy'])
+ d["bounds"] = "([%s,%s],[%s,%s])" % (
+ bounds["minx"],
+ bounds["maxx"],
+ bounds["miny"],
+ bounds["maxy"],
+ )
- json['pipeline'].insert(0, d)
+ json["pipeline"].insert(0, d)
return json
def json_las_base(fout):
- """ Create initial JSON for writing to a LAS file """
+ """Create initial JSON for writing to a LAS file"""
json = json_base()
- json['pipeline'].insert(0, {
- 'type': 'writers.las',
- 'filename': fout
- })
+ json["pipeline"].insert(0, {"type": "writers.las", "filename": fout})
return json
def json_add_decimation_filter(json, step):
- """ Add decimation Filter element and return """
- json['pipeline'].insert(0, {
- 'type': 'filters.decimation',
- 'step': step
- })
+ """Add decimation Filter element and return"""
+ json["pipeline"].insert(0, {"type": "filters.decimation", "step": step})
return json
def json_add_classification_filter(json, classification, equality="equals"):
- """ Add classification Filter element and return """
- limits = 'Classification[{0}:{0}]'.format(classification)
- if equality == 'max':
- limits = 'Classification[:{0}]'.format(classification)
+ """Add classification Filter element and return"""
+ limits = "Classification[{0}:{0}]".format(classification)
+ if equality == "max":
+ limits = "Classification[:{0}]".format(classification)
- json['pipeline'].insert(0, {
- 'type': 'filters.range',
- 'limits': limits
- })
+ json["pipeline"].insert(0, {"type": "filters.range", "limits": limits})
return json
def is_ply_file(filename):
_, ext = os.path.splitext(filename)
- return ext.lower() == '.ply'
+ return ext.lower() == ".ply"
def json_add_reader(json, filename):
- """ Add Reader Element and return """
- reader_type = 'readers.las' # default
+ """Add Reader Element and return"""
+ reader_type = "readers.las" # default
if is_ply_file(filename):
- reader_type = 'readers.ply'
+ reader_type = "readers.ply"
- json['pipeline'].insert(0, {
- 'type': reader_type,
- 'filename': os.path.abspath(filename)
- })
+ json["pipeline"].insert(
+ 0, {"type": reader_type, "filename": os.path.abspath(filename)}
+ )
return json
def json_add_readers(json, filenames):
- """ Add merge Filter element and readers to a Writer element and return Filter element """
+ """Add merge Filter element and readers to a Writer element and return Filter element"""
for f in filenames:
json_add_reader(json, f)
if len(filenames) > 1:
- json['pipeline'].insert(0, {
- 'type': 'filters.merge'
- })
+ json["pipeline"].insert(0, {"type": "filters.merge"})
return json
""" Run PDAL commands """
+
def run_pipeline(json):
- """ Run PDAL Pipeline with provided JSON """
+ """Run PDAL Pipeline with provided JSON"""
# write to temp file
- f, jsonfile = tempfile.mkstemp(suffix='.json')
- os.write(f, jsonlib.dumps(json).encode('utf8'))
+ f, jsonfile = tempfile.mkstemp(suffix=".json")
+ os.write(f, jsonlib.dumps(json).encode("utf8"))
os.close(f)
- cmd = [
- 'pdal',
- 'pipeline',
- '-i %s' % double_quote(jsonfile)
- ]
- system.run(' '.join(cmd))
+ cmd = ["pdal", "pipeline", "-i %s" % double_quote(jsonfile)]
+ system.run(" ".join(cmd))
os.remove(jsonfile)
def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window):
- """ Run PDAL translate """
+ """Run PDAL translate"""
cmd = [
- 'pdal',
- 'translate',
- '-i %s' % fin,
- '-o %s' % fout,
- 'smrf',
- '--filters.smrf.scalar=%s' % scalar,
- '--filters.smrf.slope=%s' % slope,
- '--filters.smrf.threshold=%s' % threshold,
- '--filters.smrf.window=%s' % window,
+ "pdal",
+ "translate",
+ "-i %s" % fin,
+ "-o %s" % fout,
+ "smrf",
+ "--filters.smrf.scalar=%s" % scalar,
+ "--filters.smrf.slope=%s" % slope,
+ "--filters.smrf.threshold=%s" % threshold,
+ "--filters.smrf.window=%s" % window,
]
- system.run(' '.join(cmd))
+ system.run(" ".join(cmd))
def merge_point_clouds(input_files, output_file):
@@ -175,20 +165,20 @@ def merge_point_clouds(input_files, output_file):
return
cmd = [
- 'pdal',
- 'merge',
- ' '.join(map(double_quote, input_files + [output_file])),
+ "pdal",
+ "merge",
+ " ".join(map(double_quote, input_files + [output_file])),
]
- system.run(' '.join(cmd))
+ system.run(" ".join(cmd))
def translate(input, output):
cmd = [
- 'pdal',
- 'translate',
+ "pdal",
+ "translate",
'-i "%s"' % input,
'-o "%s"' % output,
]
- system.run(' '.join(cmd))
\ No newline at end of file
+ system.run(" ".join(cmd))
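
The helpers in this module assemble the PDAL pipeline as a plain dict, each stage inserted at index 0 so the reader ends up first and the writer last in execution order. For a single LAS input rendered to a max-value GeoTIFF, the assembled JSON looks roughly like this (filenames and numbers are placeholders):

import json

pipeline = {"pipeline": []}

# json_gdal_base: the writer goes in first...
pipeline["pipeline"].insert(0, {
    "type": "writers.gdal",
    "resolution": 0.1,
    "radius": 0.56,
    "filename": "dsm.tif",
    "output_type": "max",
    "data_type": "float",
})
# ...then the classification filter and the reader are each prepended,
# leaving reader -> filter -> writer once the list is read top to bottom.
pipeline["pipeline"].insert(0, {"type": "filters.range", "limits": "Classification[2:2]"})
pipeline["pipeline"].insert(0, {"type": "readers.las", "filename": "/data/point_cloud.las"})

print(json.dumps(pipeline, indent=2))
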
diff --git a/opendm/dem/utils.py b/opendm/dem/utils.py
index 9fb383a9..9a9cbce3 100644
--- a/opendm/dem/utils.py
+++ b/opendm/dem/utils.py
@@ -1,10 +1,9 @@
-
def get_dem_vars(args):
return {
- 'TILED': 'YES',
- 'COMPRESS': 'DEFLATE',
- 'BLOCKXSIZE': 512,
- 'BLOCKYSIZE': 512,
- 'BIGTIFF': 'IF_SAFER',
- 'NUM_THREADS': args.max_concurrency,
+ "TILED": "YES",
+ "COMPRESS": "DEFLATE",
+ "BLOCKXSIZE": 512,
+ "BLOCKYSIZE": 512,
+ "BIGTIFF": "IF_SAFER",
+ "NUM_THREADS": args.max_concurrency,
}
diff --git a/opendm/dls.py b/opendm/dls.py
index 7e0bf980..506bddc5 100644
--- a/opendm/dls.py
+++ b/opendm/dls.py
@@ -21,12 +21,12 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
-
import numpy as np
+
# for DLS correction, we need the sun position at the time the image was taken
# this can be computed using the pysolar package (ver 0.6)
# https://pypi.python.org/pypi/Pysolar/0.6
-# we import multiple times with checking here because the case of Pysolar is
+# we import multiple times with checking here because the case of Pysolar is
# different depending on the python version :(
import imp
@@ -34,52 +34,62 @@ havePysolar = False
try:
import pysolar.solar as pysolar
+
havePysolar = True
except ImportError:
try:
import Pysolar.solar as pysolar
+
havePysolar = True
except ImportError:
import pysolar.solar as pysolar
- havePysolar = True
-finally:
+
+ havePysolar = True
+finally:
if not havePysolar:
print("Unable to import pysolar")
+
def fresnel(phi):
- return __multilayer_transmission(phi, n=[1.000277,1.6,1.38])
+ return __multilayer_transmission(phi, n=[1.000277, 1.6, 1.38])
+
# define functions to compute the DLS-Sun angle:
-def __fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=[.5, .5]):
+def __fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=[0.5, 0.5]):
"""compute fresnel transmission between media with refractive indices n1 and n2"""
# computes the reflection and transmittance
# for incidence angles phi for transition from medium
# with refractive index n1 to n2
# teflon e.g. n2=1.38
- # polycarbonate n2=1.6
+ # polycarbonate n2=1.6
# polarization=[.5,.5] - unpolarized light
# polarization=[1.,0] - s-polarized light - perpendicular to plane of incidence
# polarization=[0,1.] - p-polarized light - parallel to plane of incidence
f1 = np.cos(phi)
- f2 = np.sqrt(1-(n1/n2*np.sin(phi))**2)
- Rs = ((n1*f1-n2*f2)/(n1*f1+n2*f2))**2
- Rp = ((n1*f2-n2*f1)/(n1*f2+n2*f1))**2
- T = 1.-polarization[0]*Rs-polarization[1]*Rp
- if T > 1: T= 0.
- if T < 0: T = 0.
- if np.isnan(T): T = 0.
+ f2 = np.sqrt(1 - (n1 / n2 * np.sin(phi)) ** 2)
+ Rs = ((n1 * f1 - n2 * f2) / (n1 * f1 + n2 * f2)) ** 2
+ Rp = ((n1 * f2 - n2 * f1) / (n1 * f2 + n2 * f1)) ** 2
+ T = 1.0 - polarization[0] * Rs - polarization[1] * Rp
+ if T > 1:
+ T = 0.0
+ if T < 0:
+ T = 0.0
+ if np.isnan(T):
+ T = 0.0
return T
-def __multilayer_transmission(phi, n, polarization=[.5, .5]):
+
+def __multilayer_transmission(phi, n, polarization=[0.5, 0.5]):
T = 1.0
phi_eff = np.copy(phi)
- for i in range(0,len(n)-1):
+ for i in range(0, len(n) - 1):
n1 = n[i]
- n2 = n[i+1]
- phi_eff = np.arcsin(np.sin(phi_eff)/n1)
+ n2 = n[i + 1]
+ phi_eff = np.arcsin(np.sin(phi_eff) / n1)
T *= __fresnel_transmission(phi_eff, n1, n2, polarization=polarization)
return T
+
# get the position of the sun in North-East-Down (NED) coordinate system
def ned_from_pysolar(sunAzimuth, sunAltitude):
"""Convert pysolar coordinates to NED coordinates."""
@@ -90,6 +100,7 @@ def ned_from_pysolar(sunAzimuth, sunAltitude):
)
return np.array(elements).transpose()
+
# get the sensor orientation in North-East-Down coordinates
# pose is a yaw/pitch/roll tuple of angles measured for the DLS
# ori is the 3D orientation vector of the DLS in body coordinates (typically [0,0,-1])
@@ -109,6 +120,7 @@ def get_orientation(pose, ori):
n = np.dot(R, ori)
return n
+
# from the current position (lat,lon,alt) tuple
# and time (UTC), as well as the sensor orientation (yaw,pitch,roll) tuple
# compute a sensor sun angle - this is needed as the actual sun irradiance
@@ -118,27 +130,31 @@ def get_orientation(pose, ori):
# For clear sky, I_direct/I_diffuse ~ 6 and we can simplify this to
# I_measured = I_direct * (cos (sun_sensor_angle) + 1/6)
+
def compute_sun_angle(
position,
pose,
utc_datetime,
sensor_orientation,
):
- """ compute the sun angle using pysolar functions"""
+ """compute the sun angle using pysolar functions"""
altitude = 0
azimuth = 0
import warnings
- with warnings.catch_warnings(): # Ignore pysolar leap seconds offset warning
+
+ with warnings.catch_warnings(): # Ignore pysolar leap seconds offset warning
warnings.simplefilter("ignore")
try:
altitude = pysolar.get_altitude(position[0], position[1], utc_datetime)
azimuth = pysolar.get_azimuth(position[0], position[1], utc_datetime)
- except AttributeError: # catch 0.6 version of pysolar required for python 2.7 support
+ except (
+ AttributeError
+ ): # catch 0.6 version of pysolar required for python 2.7 support
altitude = pysolar.GetAltitude(position[0], position[1], utc_datetime)
- azimuth = 180-pysolar.GetAzimuth(position[0], position[1], utc_datetime)
+ azimuth = 180 - pysolar.GetAzimuth(position[0], position[1], utc_datetime)
sunAltitude = np.radians(np.array(altitude))
sunAzimuth = np.radians(np.array(azimuth))
- sunAzimuth = sunAzimuth % (2 * np.pi ) #wrap range 0 to 2*pi
+ sunAzimuth = sunAzimuth % (2 * np.pi) # wrap range 0 to 2*pi
nSun = ned_from_pysolar(sunAzimuth, sunAltitude)
nSensor = np.array(get_orientation(pose, sensor_orientation))
angle = np.arccos(np.dot(nSun, nSensor))
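
The comments above use the clear-sky shorthand I_measured = I_direct * (cos(sun_sensor_angle) + 1/6) to relate the DLS reading to the direct irradiance. A tiny worked inversion of that relation, with a hypothetical reading and angle rather than values from any dataset:

import numpy as np

def estimate_direct_irradiance(i_measured, sun_sensor_angle_rad):
    # Invert I_measured = I_direct * (cos(angle) + 1/6), the clear-sky
    # approximation described in the comments above.
    return i_measured / (np.cos(sun_sensor_angle_rad) + 1.0 / 6.0)

# Hypothetical reading of 0.8 with the sun 30 degrees off the DLS axis.
print(estimate_direct_irradiance(0.8, np.radians(30.0)))  # ~0.775
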
diff --git a/opendm/entwine.py b/opendm/entwine.py
index fbfddbf9..e77fe151 100644
--- a/opendm/entwine.py
+++ b/opendm/entwine.py
@@ -20,47 +20,73 @@ def build(input_point_cloud_files, output_path, max_concurrency=8, rerun=False):
if io.dir_exists(output_path):
log.ODM_WARNING("Removing previous EPT directory: %s" % output_path)
shutil.rmtree(output_path)
-
+
if io.dir_exists(tmpdir):
log.ODM_WARNING("Removing previous EPT temp directory: %s" % tmpdir)
shutil.rmtree(tmpdir)
-
+
if rerun:
dir_cleanup()
# Attempt with entwine (faster, more memory hungry)
try:
- build_entwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=max_concurrency)
+ build_entwine(
+ input_point_cloud_files,
+ tmpdir,
+ output_path,
+ max_concurrency=max_concurrency,
+ )
except Exception as e:
- log.ODM_WARNING("Cannot build EPT using entwine (%s), attempting with untwine..." % str(e))
+ log.ODM_WARNING(
+ "Cannot build EPT using entwine (%s), attempting with untwine..." % str(e)
+ )
dir_cleanup()
- build_untwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=max_concurrency)
+ build_untwine(
+ input_point_cloud_files,
+ tmpdir,
+ output_path,
+ max_concurrency=max_concurrency,
+ )
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
-def build_entwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=8, reproject=None):
+def build_entwine(
+ input_point_cloud_files, tmpdir, output_path, max_concurrency=8, reproject=None
+):
kwargs = {
- 'threads': max_concurrency,
- 'tmpdir': tmpdir,
- 'all_inputs': "-i " + " ".join(map(double_quote, input_point_cloud_files)),
- 'outputdir': output_path,
- 'reproject': (" -r %s " % reproject) if reproject is not None else ""
+ "threads": max_concurrency,
+ "tmpdir": tmpdir,
+ "all_inputs": "-i " + " ".join(map(double_quote, input_point_cloud_files)),
+ "outputdir": output_path,
+ "reproject": (" -r %s " % reproject) if reproject is not None else "",
}
- system.run('entwine build --threads {threads} --tmp "{tmpdir}" {all_inputs} -o "{outputdir}" {reproject}'.format(**kwargs))
+ system.run(
+ 'entwine build --threads {threads} --tmp "{tmpdir}" {all_inputs} -o "{outputdir}" {reproject}'.format(
+ **kwargs
+ )
+ )
-def build_untwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=8, rerun=False):
+
+def build_untwine(
+ input_point_cloud_files, tmpdir, output_path, max_concurrency=8, rerun=False
+):
kwargs = {
# 'threads': max_concurrency,
- 'tmpdir': tmpdir,
- 'files': "--files " + " ".join(map(double_quote, input_point_cloud_files)),
- 'outputdir': output_path
+ "tmpdir": tmpdir,
+ "files": "--files " + " ".join(map(double_quote, input_point_cloud_files)),
+ "outputdir": output_path,
}
# Run untwine
- system.run('untwine --temp_dir "{tmpdir}" {files} --output_dir "{outputdir}"'.format(**kwargs))
+ system.run(
+ 'untwine --temp_dir "{tmpdir}" {files} --output_dir "{outputdir}"'.format(
+ **kwargs
+ )
+ )
+
def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False):
if len(input_point_cloud_files) == 0:
@@ -68,7 +94,7 @@ def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False):
return
base_path, ext = os.path.splitext(output_file)
-
+
tmpdir = io.related_file_path(base_path, postfix="-tmp")
if os.path.exists(tmpdir):
log.ODM_WARNING("Removing previous directory %s" % tmpdir)
@@ -91,28 +117,39 @@ def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False):
filename, ext = os.path.splitext(base)
out_16 = os.path.join(tmpdir16, "%s_16%s" % (filename, ext))
try:
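+ # Rescale 8-bit RGB (0-255) to 16-bit (0-65535) so the COPC output follows the official spec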
- system.run('pdal translate -i "{input}" -o "{output}" assign '
- '--filters.assign.value="Red = Red / 255 * 65535" '
- '--filters.assign.value="Green = Green / 255 * 65535" '
- '--filters.assign.value="Blue = Blue / 255 * 65535" '.format(input=f, output=out_16))
-
+ system.run(
+ 'pdal translate -i "{input}" -o "{output}" assign '
+ '--filters.assign.value="Red = Red / 255 * 65535" '
+ '--filters.assign.value="Green = Green / 255 * 65535" '
+ '--filters.assign.value="Blue = Blue / 255 * 65535" '.format(
+ input=f, output=out_16
+ )
+ )
+
converted.append(out_16)
except Exception as e:
- log.ODM_WARNING("Cannot convert point cloud to 16bit RGB, COPC is not going to follow the official spec: %s" % str(e))
+ log.ODM_WARNING(
+ "Cannot convert point cloud to 16bit RGB, COPC is not going to follow the official spec: %s"
+ % str(e)
+ )
ok = False
break
if ok:
input_point_cloud_files = converted
-
+
kwargs = {
- 'tmpdir': tmpdir,
- 'files': "--files " + " ".join(map(double_quote, input_point_cloud_files)),
- 'output': output_file
+ "tmpdir": tmpdir,
+ "files": "--files " + " ".join(map(double_quote, input_point_cloud_files)),
+ "output": output_file,
}
# Run untwine
- system.run('untwine --temp_dir "{tmpdir}" {files} -o "{output}" --single_file'.format(**kwargs))
+ system.run(
+ 'untwine --temp_dir "{tmpdir}" {files} -o "{output}" --single_file'.format(
+ **kwargs
+ )
+ )
for d in cleanup:
if os.path.exists(d):
- shutil.rmtree(d)
\ No newline at end of file
+ shutil.rmtree(d)
diff --git a/opendm/exiftool.py b/opendm/exiftool.py
index 7dfe087a..ecd3d423 100644
--- a/opendm/exiftool.py
+++ b/opendm/exiftool.py
@@ -7,22 +7,29 @@ from opendm.system import run
from opendm import log
from opendm.utils import double_quote
+
def extract_raw_thermal_image_data(image_path):
try:
- f, tmp_file_path = tempfile.mkstemp(suffix='.json')
+ f, tmp_file_path = tempfile.mkstemp(suffix=".json")
os.close(f)
try:
- output = run("exiftool -b -x ThumbnailImage -x PreviewImage -j \"%s\" > \"%s\"" % (image_path, tmp_file_path), quiet=True)
+ output = run(
+ 'exiftool -b -x ThumbnailImage -x PreviewImage -j "%s" > "%s"'
+ % (image_path, tmp_file_path),
+ quiet=True,
+ )
with open(tmp_file_path) as f:
j = json.loads(f.read())
if isinstance(j, list):
- j = j[0] # single file
-
+ j = j[0] # single file
+
if "RawThermalImage" in j:
- imageBytes = base64.b64decode(j["RawThermalImage"][len("base64:"):])
+ imageBytes = base64.b64decode(
+ j["RawThermalImage"][len("base64:") :]
+ )
with MemoryFile(imageBytes) as memfile:
with memfile.open() as dataset:
@@ -30,13 +37,15 @@ def extract_raw_thermal_image_data(image_path):
bands, h, w = img.shape
if bands != 1:
- raise Exception("Raw thermal image has more than one band? This is not supported")
+ raise Exception(
+ "Raw thermal image has more than one band? This is not supported"
+ )
# (1, 512, 640) --> (512, 640, 1)
- img = img[0][:,:,None]
+ img = img[0][:, :, None]
del j["RawThermalImage"]
-
+
return extract_temperature_params_from(j), img
else:
raise Exception("Invalid JSON (not a list)")
@@ -51,6 +60,7 @@ def extract_raw_thermal_image_data(image_path):
log.ODM_WARNING("Cannot create temporary file: %s" % str(e))
return {}, None
+
def unit(unit):
def _convert(v):
if isinstance(v, float):
@@ -64,8 +74,10 @@ def unit(unit):
return float(v)
else:
return float(v)
+
return _convert
+
def extract_temperature_params_from(tags):
# Defaults
meta = {
@@ -90,5 +102,5 @@ def extract_temperature_params_from(tags):
# All or nothing
raise Exception("Cannot find %s in tags" % m)
params[m] = (meta[m])(tags[m])
-
- return params
\ No newline at end of file
+
+ return params
diff --git a/opendm/gcp.py b/opendm/gcp.py
index 31f19316..7fc4fbae 100644
--- a/opendm/gcp.py
+++ b/opendm/gcp.py
@@ -4,6 +4,7 @@ from opendm import log
from opendm import location
from pyproj import CRS
+
class GCPFile:
def __init__(self, gcp_path):
self.gcp_path = gcp_path
@@ -11,18 +12,18 @@ class GCPFile:
self.raw_srs = ""
self.srs = None
self.read()
-
+
def read(self):
if self.exists():
- with open(self.gcp_path, 'r') as f:
+ with open(self.gcp_path, "r") as f:
contents = f.read().strip()
# Strip eventual BOM characters
- contents = contents.replace('\ufeff', '')
-
- lines = list(map(str.strip, contents.split('\n')))
+ contents = contents.replace("\ufeff", "")
+
+ lines = list(map(str.strip, contents.split("\n")))
if lines:
- self.raw_srs = lines[0] # SRS
+ self.raw_srs = lines[0] # SRS
self.srs = location.parse_srs_header(self.raw_srs)
for line in lines[1:]:
@@ -36,7 +37,7 @@ class GCPFile:
def iter_entries(self):
for entry in self.entries:
yield self.parse_entry(entry)
-
+
def check_entries(self):
coords = {}
gcps = {}
@@ -54,24 +55,36 @@ class GCPFile:
description = "insufficient" if coords[k] < 2 else "not ideal"
for entry in gcps[k]:
log.ODM_WARNING(str(entry))
- log.ODM_WARNING("The number of images where the GCP %s has been tagged are %s" % (k, description))
- log.ODM_WARNING("You should tag at least %s more images" % (3 - coords[k]))
+ log.ODM_WARNING(
+ "The number of images where the GCP %s has been tagged are %s"
+ % (k, description)
+ )
+ log.ODM_WARNING(
+ "You should tag at least %s more images" % (3 - coords[k])
+ )
log.ODM_WARNING("=====================================")
errors += 1
if len(coords) < 3:
- log.ODM_WARNING("Low number of GCPs detected (%s). For best results use at least 5." % (3 - len(coords)))
+ log.ODM_WARNING(
+ "Low number of GCPs detected (%s). For best results use at least 5."
+ % len(coords)
+ )
log.ODM_WARNING("=====================================")
errors += 1
if errors > 0:
- log.ODM_WARNING("Some issues detected with GCPs (but we're going to process this anyway)")
+ log.ODM_WARNING(
+ "Some issues detected with GCPs (but we're going to process this anyway)"
+ )
def parse_entry(self, entry):
if entry:
parts = entry.split()
x, y, z, px, py, filename = parts[:6]
extras = " ".join(parts[6:])
- return GCPEntry(float(x), float(y), float(z), float(px), float(py), filename, extras)
+ return GCPEntry(
+ float(x), float(y), float(z), float(px), float(py), filename, extras
+ )
def get_entry(self, n):
if n < self.entries_count():
@@ -79,7 +92,7 @@ class GCPFile:
def entries_count(self):
return len(self.entries)
-
+
def exists(self):
return bool(self.gcp_path and os.path.exists(self.gcp_path))
@@ -97,8 +110,8 @@ class GCPFile:
entry.py *= ratio
output.append(str(entry))
- with open(gcp_file_output, 'w') as f:
- f.write('\n'.join(output) + '\n')
+ with open(gcp_file_output, "w") as f:
+ f.write("\n".join(output) + "\n")
return gcp_file_output
@@ -114,11 +127,17 @@ class GCPFile:
utm_zone, hemisphere = location.get_utm_zone_and_hemisphere_from(lon, lat)
return "WGS84 UTM %s%s" % (utm_zone, hemisphere)
- def create_utm_copy(self, gcp_file_output, filenames=None, rejected_entries=None, include_extras=True):
+ def create_utm_copy(
+ self,
+ gcp_file_output,
+ filenames=None,
+ rejected_entries=None,
+ include_extras=True,
+ ):
"""
Creates a new GCP file from an existing GCP file
- by optionally including only filenames and reprojecting each point to
- a UTM CRS. Rejected entries can recorded by passing a list object to
+ by optionally including only filenames and reprojecting each point to
+ a UTM CRS. Rejected entries can be recorded by passing a list object to
rejected_entries.
"""
if os.path.exists(gcp_file_output):
@@ -130,15 +149,17 @@ class GCPFile:
for entry in self.iter_entries():
if filenames is None or entry.filename in filenames:
- entry.x, entry.y, entry.z = transformer.TransformPoint(entry.x, entry.y, entry.z)
+ entry.x, entry.y, entry.z = transformer.TransformPoint(
+ entry.x, entry.y, entry.z
+ )
if not include_extras:
- entry.extras = ''
+ entry.extras = ""
output.append(str(entry))
elif isinstance(rejected_entries, list):
rejected_entries.append(entry)
- with open(gcp_file_output, 'w') as f:
- f.write('\n'.join(output) + '\n')
+ with open(gcp_file_output, "w") as f:
+ f.write("\n".join(output) + "\n")
return gcp_file_output
@@ -151,7 +172,7 @@ class GCPFile:
"""
if not self.exists() or not os.path.exists(images_dir):
return None
-
+
if os.path.exists(gcp_file_output):
os.remove(gcp_file_output)
@@ -159,23 +180,23 @@ class GCPFile:
output = [self.raw_srs]
files_found = 0
-
+
for entry in self.iter_entries():
if entry.filename in files:
output.append(str(entry))
files_found += 1
if files_found >= min_images:
- with open(gcp_file_output, 'w') as f:
- f.write('\n'.join(output) + '\n')
+ with open(gcp_file_output, "w") as f:
+ f.write("\n".join(output) + "\n")
return gcp_file_output
- def make_micmac_copy(self, output_dir, precisionxy=1, precisionz=1, utm_zone = None):
+ def make_micmac_copy(self, output_dir, precisionxy=1, precisionz=1, utm_zone=None):
"""
Convert this GCP file in a format compatible with MicMac.
:param output_dir directory where to save the two MicMac GCP files. The directory must exist.
- :param utm_zone UTM zone to use for output coordinates (UTM string, PROJ4 or EPSG definition).
+ :param utm_zone UTM zone to use for output coordinates (UTM string, PROJ4 or EPSG definition).
If one is not specified, the nearest UTM zone will be selected.
:param precisionxy horizontal precision of GCP measurements in meters.
:param precisionz vertical precision of GCP measurements in meters.
@@ -187,8 +208,8 @@ class GCPFile:
if not isinstance(precisionz, float) and not isinstance(precisionz, int):
raise AssertionError("precisionz must be a number")
- gcp_3d_file = os.path.join(output_dir, '3d_gcp.txt')
- gcp_2d_file = os.path.join(output_dir, '2d_gcp.txt')
+ gcp_3d_file = os.path.join(output_dir, "3d_gcp.txt")
+ gcp_2d_file = os.path.join(output_dir, "2d_gcp.txt")
if os.path.exists(gcp_3d_file):
os.remove(gcp_3d_file)
@@ -209,21 +230,27 @@ class GCPFile:
gcps[k] = [entry]
else:
gcps[k].append(entry)
-
- with open(gcp_3d_file, 'w') as f3:
- with open(gcp_2d_file, 'w') as f2:
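+ # MicMac expects two files: 3D ground coordinates and matching per-image 2D measurements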
+ with open(gcp_3d_file, "w") as f3:
+ with open(gcp_2d_file, "w") as f2:
gcp_n = 1
for k in gcps:
- f3.write("GCP{} {} {} {}\n".format(gcp_n, k, precisionxy, precisionz))
+ f3.write(
+ "GCP{} {} {} {}\n".format(gcp_n, k, precisionxy, precisionz)
+ )
for entry in gcps[k]:
- f2.write("GCP{} {} {} {}\n".format(gcp_n, entry.filename, entry.px, entry.py))
-
+ f2.write(
+ "GCP{} {} {} {}\n".format(
+ gcp_n, entry.filename, entry.px, entry.py
+ )
+ )
+
gcp_n += 1
-
+
return (gcp_3d_file, gcp_2d_file)
+
class GCPEntry:
def __init__(self, x, y, z, px, py, filename, extras=""):
self.x = x
@@ -236,9 +263,8 @@ class GCPEntry:
def coords_key(self):
return "{} {} {}".format(self.x, self.y, self.z)
-
+
def __str__(self):
- return "{} {} {} {} {} {} {}".format(self.x, self.y, self.z,
- self.px, self.py,
- self.filename,
- self.extras).rstrip()
\ No newline at end of file
+ return "{} {} {} {} {} {} {}".format(
+ self.x, self.y, self.z, self.px, self.py, self.filename, self.extras
+ ).rstrip()
diff --git a/opendm/geo.py b/opendm/geo.py
index 7baa8c91..efac44e4 100644
--- a/opendm/geo.py
+++ b/opendm/geo.py
@@ -4,19 +4,20 @@ from opendm import log
from opendm import location
from pyproj import CRS
+
class GeoFile:
def __init__(self, geo_path):
self.geo_path = geo_path
self.entries = {}
self.srs = None
- with open(self.geo_path, 'r') as f:
+ with open(self.geo_path, "r") as f:
contents = f.read().strip()
-
- # Strip eventual BOM characters
- contents = contents.replace('\ufeff', '')
- lines = list(map(str.strip, contents.split('\n')))
+ # Strip eventual BOM characters
+ contents = contents.replace("\ufeff", "")
+
+ lines = list(map(str.strip, contents.split("\n")))
if lines:
self.raw_srs = lines[0] # SRS
self.srs = location.parse_srs_header(self.raw_srs)
@@ -47,23 +48,45 @@ class GeoFile:
horizontal_accuracy = vertical_accuracy = None
if len(parts) >= 9:
- horizontal_accuracy,vertical_accuracy = [float(p) for p in parts[7:9]]
+ horizontal_accuracy, vertical_accuracy = [
+ float(p) for p in parts[7:9]
+ ]
i = 9
extras = " ".join(parts[i:])
- self.entries[filename] = GeoEntry(filename, x, y, z,
- yaw, pitch, roll,
- horizontal_accuracy, vertical_accuracy,
- extras)
+ self.entries[filename] = GeoEntry(
+ filename,
+ x,
+ y,
+ z,
+ yaw,
+ pitch,
+ roll,
+ horizontal_accuracy,
+ vertical_accuracy,
+ extras,
+ )
else:
log.ODM_WARNING("Malformed geo line: %s" % line)
-
+
def get_entry(self, filename):
return self.entries.get(filename)
class GeoEntry:
- def __init__(self, filename, x, y, z, yaw=None, pitch=None, roll=None, horizontal_accuracy=None, vertical_accuracy=None, extras=None):
+ def __init__(
+ self,
+ filename,
+ x,
+ y,
+ z,
+ yaw=None,
+ pitch=None,
+ roll=None,
+ horizontal_accuracy=None,
+ vertical_accuracy=None,
+ extras=None,
+ ):
self.filename = filename
self.x = x
self.y = y
@@ -76,11 +99,18 @@ class GeoEntry:
self.extras = extras
def __str__(self):
- return "{} ({} {} {}) ({} {} {}) ({} {}) {}".format(self.filename,
- self.x, self.y, self.z,
- self.yaw, self.pitch, self.roll,
- self.horizontal_accuracy, self.vertical_accuracy,
- self.extras).rstrip()
-
+ return "{} ({} {} {}) ({} {} {}) ({} {}) {}".format(
+ self.filename,
+ self.x,
+ self.y,
+ self.z,
+ self.yaw,
+ self.pitch,
+ self.roll,
+ self.horizontal_accuracy,
+ self.vertical_accuracy,
+ self.extras,
+ ).rstrip()
+
def position_string(self):
return "{} {} {}".format(self.x, self.y, self.z)
diff --git a/opendm/get_image_size.py b/opendm/get_image_size.py
index 2bc44276..de8c582e 100644
--- a/opendm/get_image_size.py
+++ b/opendm/get_image_size.py
@@ -5,11 +5,12 @@ from opendm import log
Image.MAX_IMAGE_PIXELS = None
+
def get_image_size(file_path, fallback_on_error=True):
"""
Return (width, height) for a given img file
"""
-
+
try:
if file_path[-4:].lower() in [".dng", ".raw", ".nef"]:
with rawpy.imread(file_path) as img:
@@ -20,11 +21,14 @@ def get_image_size(file_path, fallback_on_error=True):
width, height = img.size
except Exception as e:
if fallback_on_error:
- log.ODM_WARNING("Cannot read %s with image library, fallback to cv2: %s" % (file_path, str(e)))
+ log.ODM_WARNING(
+ "Cannot read %s with image library, fallback to cv2: %s"
+ % (file_path, str(e))
+ )
img = cv2.imread(file_path)
width = img.shape[1]
height = img.shape[0]
else:
raise e
- return (width, height)
\ No newline at end of file
+ return (width, height)
diff --git a/opendm/gltf.py b/opendm/gltf.py
index dfbd4f11..565e4e49 100644
--- a/opendm/gltf.py
+++ b/opendm/gltf.py
@@ -17,7 +17,7 @@ def load_obj(obj_path, _info=print):
obj_base_path = os.path.dirname(os.path.abspath(obj_path))
obj = {
- 'materials': {},
+ "materials": {},
}
vertices = []
uvs = []
@@ -33,7 +33,7 @@ def load_obj(obj_path, _info=print):
if line.startswith("mtllib "):
# Materials
mtl_file = "".join(line.split()[1:]).strip()
- obj['materials'].update(load_mtl(mtl_file, obj_base_path, _info=_info))
+ obj["materials"].update(load_mtl(mtl_file, obj_base_path, _info=_info))
elif line.startswith("v "):
# Vertices
vertices.append(list(map(float, line.split()[1:4])))
@@ -44,37 +44,52 @@ def load_obj(obj_path, _info=print):
normals.append(list(map(float, line.split()[1:4])))
elif line.startswith("usemtl "):
mtl_name = "".join(line.split()[1:]).strip()
- if not mtl_name in obj['materials']:
+ if mtl_name not in obj["materials"]:
raise Exception("%s material is missing" % mtl_name)
current_material = mtl_name
elif line.startswith("f "):
if current_material not in faces:
faces[current_material] = []
-
- a,b,c = line.split()[1:]
+
+ a, b, c = line.split()[1:]
if a.count("/") == 2:
av, at, an = map(int, a.split("/")[0:3])
bv, bt, bn = map(int, b.split("/")[0:3])
cv, ct, cn = map(int, c.split("/")[0:3])
- faces[current_material].append((av - 1, bv - 1, cv - 1, at - 1, bt - 1, ct - 1, an - 1, bn - 1, cn - 1))
+ faces[current_material].append(
+ (
+ av - 1,
+ bv - 1,
+ cv - 1,
+ at - 1,
+ bt - 1,
+ ct - 1,
+ an - 1,
+ bn - 1,
+ cn - 1,
+ )
+ )
else:
av, at = map(int, a.split("/")[0:2])
bv, bt = map(int, b.split("/")[0:2])
cv, ct = map(int, c.split("/")[0:2])
- faces[current_material].append((av - 1, bv - 1, cv - 1, at - 1, bt - 1, ct - 1))
+ faces[current_material].append(
+ (av - 1, bv - 1, cv - 1, at - 1, bt - 1, ct - 1)
+ )
- obj['vertices'] = np.array(vertices, dtype=np.float32)
- obj['uvs'] = np.array(uvs, dtype=np.float32)
- obj['normals'] = np.array(normals, dtype=np.float32)
- obj['faces'] = faces
+ obj["vertices"] = np.array(vertices, dtype=np.float32)
+ obj["uvs"] = np.array(uvs, dtype=np.float32)
+ obj["normals"] = np.array(normals, dtype=np.float32)
+ obj["faces"] = faces
- obj['materials'] = convert_materials_to_jpeg(obj['materials'])
+ obj["materials"] = convert_materials_to_jpeg(obj["materials"])
return obj
+
def convert_materials_to_jpeg(materials):
min_value = 0
@@ -93,14 +108,16 @@ def convert_materials_to_jpeg(materials):
try:
data_range = np.iinfo(image.dtype)
min_value = min(min_value, 0)
- value_range = max(value_range, float(data_range.max) - float(data_range.min))
+ value_range = max(
+ value_range, float(data_range.max) - float(data_range.min)
+ )
except ValueError:
# For floats use the actual range of the image values
min_value = min(min_value, float(image.min()))
value_range = max(value_range, float(image.max()) - min_value)
-
+
if value_range == 0:
- value_range = 255 # Should never happen
+ value_range = 255 # Should never happen
for mat in materials:
image = materials[mat]
@@ -117,7 +134,14 @@ def convert_materials_to_jpeg(materials):
with MemoryFile() as memfile:
bands, h, w = image.shape
bands = min(3, bands)
- with memfile.open(driver='JPEG', jpeg_quality=90, count=bands, width=w, height=h, dtype=rasterio.dtypes.uint8) as dst:
+ with memfile.open(
+ driver="JPEG",
+ jpeg_quality=90,
+ count=bands,
+ width=w,
+ height=h,
+ dtype=rasterio.dtypes.uint8,
+ ) as dst:
for b in range(1, min(3, bands) + 1):
dst.write(image[b - 1], b)
memfile.seek(0)
@@ -125,12 +149,13 @@ def convert_materials_to_jpeg(materials):
return materials
+
def load_mtl(mtl_file, obj_base_path, _info=print):
mtl_file = os.path.join(obj_base_path, mtl_file)
if not os.path.isfile(mtl_file):
raise IOError("Cannot open %s" % mtl_file)
-
+
mats = {}
current_mtl = ""
@@ -143,30 +168,34 @@ def load_mtl(mtl_file, obj_base_path, _info=print):
map_kd = os.path.join(obj_base_path, map_kd_filename)
if not os.path.isfile(map_kd):
raise IOError("Cannot open %s" % map_kd)
-
+
_info("Loading %s" % map_kd_filename)
- with rasterio.open(map_kd, 'r') as src:
+ with rasterio.open(map_kd, "r") as src:
mats[current_mtl] = src.read()
return mats
+
def paddedBuffer(buf, boundary):
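+ # Pad the buffer with zero bytes up to the next multiple of boundary (texture blobs are embedded with 4-byte alignment)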
r = len(buf) % boundary
- if r == 0:
- return buf
+ if r == 0:
+ return buf
pad = boundary - r
- return buf + b'\x00' * pad
+ return buf + b"\x00" * pad
-def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _info=print):
+
+def obj2glb(
+ input_obj, output_glb, rtc=(None, None), draco_compression=True, _info=print
+):
_info("Converting %s --> %s" % (input_obj, output_glb))
obj = load_obj(input_obj, _info=_info)
- vertices = obj['vertices']
- uvs = obj['uvs']
+ vertices = obj["vertices"]
+ uvs = obj["uvs"]
# Flip Y
uvs = (([0, 1] - (uvs * [0, 1])) + uvs * [1, 0]).astype(np.float32)
- normals = obj['normals']
+ normals = obj["normals"]
- binary = b''
+ binary = b""
accessors = []
bufferViews = []
primitives = []
@@ -175,26 +204,29 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
images = []
bufOffset = 0
+
def addBufferView(buf, target=None):
nonlocal bufferViews, bufOffset
- bufferViews += [pygltflib.BufferView(
- buffer=0,
- byteOffset=bufOffset,
- byteLength=len(buf),
- target=target,
- )]
+ bufferViews += [
+ pygltflib.BufferView(
+ buffer=0,
+ byteOffset=bufOffset,
+ byteLength=len(buf),
+ target=target,
+ )
+ ]
bufOffset += len(buf)
return len(bufferViews) - 1
- for material in obj['faces'].keys():
- faces = obj['faces'][material]
+ for material in obj["faces"].keys():
+ faces = obj["faces"][material]
faces = np.array(faces, dtype=np.uint32)
- prim_vertices = vertices[faces[:,0:3].flatten()]
- prim_uvs = uvs[faces[:,3:6].flatten()]
+ prim_vertices = vertices[faces[:, 0:3].flatten()]
+ prim_uvs = uvs[faces[:, 3:6].flatten()]
if faces.shape[1] == 9:
- prim_normals = normals[faces[:,6:9].flatten()]
+ prim_normals = normals[faces[:, 6:9].flatten()]
normals_blob = prim_normals.tobytes()
else:
prim_normals = None
@@ -206,13 +238,13 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
binary += vertices_blob + uvs_blob
if normals_blob is not None:
binary += normals_blob
-
+
verticesBufferView = addBufferView(vertices_blob, pygltflib.ARRAY_BUFFER)
uvsBufferView = addBufferView(uvs_blob, pygltflib.ARRAY_BUFFER)
normalsBufferView = None
if normals_blob is not None:
normalsBufferView = addBufferView(normals_blob, pygltflib.ARRAY_BUFFER)
-
+
accessors += [
pygltflib.Accessor(
bufferView=verticesBufferView,
@@ -244,50 +276,59 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
)
]
- primitives += [pygltflib.Primitive(
- attributes=pygltflib.Attributes(POSITION=verticesBufferView, TEXCOORD_0=uvsBufferView, NORMAL=normalsBufferView), material=len(primitives)
- )]
+ primitives += [
+ pygltflib.Primitive(
+ attributes=pygltflib.Attributes(
+ POSITION=verticesBufferView,
+ TEXCOORD_0=uvsBufferView,
+ NORMAL=normalsBufferView,
+ ),
+ material=len(primitives),
+ )
+ ]
- for material in obj['faces'].keys():
- texture_blob = paddedBuffer(obj['materials'][material], 4)
+ for material in obj["faces"].keys():
+ texture_blob = paddedBuffer(obj["materials"][material], 4)
binary += texture_blob
textureBufferView = addBufferView(texture_blob)
images += [pygltflib.Image(bufferView=textureBufferView, mimeType="image/jpeg")]
textures += [pygltflib.Texture(source=len(images) - 1, sampler=0)]
- mat = pygltflib.Material(pbrMetallicRoughness=pygltflib.PbrMetallicRoughness(baseColorTexture=pygltflib.TextureInfo(index=len(textures) - 1), metallicFactor=0, roughnessFactor=1),
- alphaMode=pygltflib.OPAQUE)
- mat.extensions = {
- 'KHR_materials_unlit': {}
- }
+ mat = pygltflib.Material(
+ pbrMetallicRoughness=pygltflib.PbrMetallicRoughness(
+ baseColorTexture=pygltflib.TextureInfo(index=len(textures) - 1),
+ metallicFactor=0,
+ roughnessFactor=1,
+ ),
+ alphaMode=pygltflib.OPAQUE,
+ )
+ mat.extensions = {"KHR_materials_unlit": {}}
materials += [mat]
gltf = pygltflib.GLTF2(
scene=0,
scenes=[pygltflib.Scene(nodes=[0])],
nodes=[pygltflib.Node(mesh=0)],
- meshes=[pygltflib.Mesh(
- primitives=primitives
- )],
+ meshes=[pygltflib.Mesh(primitives=primitives)],
materials=materials,
textures=textures,
- samplers=[pygltflib.Sampler(magFilter=pygltflib.LINEAR, minFilter=pygltflib.LINEAR)],
+ samplers=[
+ pygltflib.Sampler(magFilter=pygltflib.LINEAR, minFilter=pygltflib.LINEAR)
+ ],
images=images,
accessors=accessors,
bufferViews=bufferViews,
buffers=[pygltflib.Buffer(byteLength=len(binary))],
)
- gltf.extensionsRequired = ['KHR_materials_unlit']
- gltf.extensionsUsed = ['KHR_materials_unlit']
+ gltf.extensionsRequired = ["KHR_materials_unlit"]
+ gltf.extensionsUsed = ["KHR_materials_unlit"]
if rtc != (None, None) and len(rtc) >= 2:
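+ # CESIUM_RTC stores a relative-to-center origin so viewers keep precision with large georeferenced coordinates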
- gltf.extensionsUsed.append('CESIUM_RTC')
+ gltf.extensionsUsed.append("CESIUM_RTC")
gltf.extensions = {
- 'CESIUM_RTC': {
- 'center': [float(rtc[0]), float(rtc[1]), 0.0]
- }
+ "CESIUM_RTC": {"center": [float(rtc[0]), float(rtc[1]), 0.0]}
}
gltf.set_binary_blob(binary)
@@ -300,11 +341,13 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
_info("Compressing with draco")
try:
compressed_glb = io.related_file_path(output_glb, postfix="_compressed")
- system.run('draco_transcoder -i "{}" -o "{}" -qt 16 -qp 16'.format(output_glb, compressed_glb))
+ system.run(
+ 'draco_transcoder -i "{}" -o "{}" -qt 16 -qp 16'.format(
+ output_glb, compressed_glb
+ )
+ )
if os.path.isfile(compressed_glb) and os.path.isfile(output_glb):
os.remove(output_glb)
os.rename(compressed_glb, output_glb)
except Exception as e:
log.ODM_WARNING("Cannot compress GLB with draco: %s" % str(e))
-
-
diff --git a/opendm/gpu.py b/opendm/gpu.py
index 2b423614..db6dc049 100644
--- a/opendm/gpu.py
+++ b/opendm/gpu.py
@@ -5,8 +5,10 @@ import ctypes
from opendm import log
from repoze.lru import lru_cache
+
def gpu_disabled_by_user_env():
- return bool(os.environ.get('ODM_NO_GPU'))
+ return bool(os.environ.get("ODM_NO_GPU"))
+
@lru_cache(maxsize=None)
def has_popsift_and_can_handle_texsize(width, height):
@@ -16,7 +18,10 @@ def has_popsift_and_can_handle_texsize(width, height):
compute_major, compute_minor = get_cuda_compute_version(0)
if compute_major < 3 or (compute_major == 3 and compute_minor < 5):
# Not supported
- log.ODM_INFO("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor))
+ log.ODM_INFO(
+ "CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)"
+ % (compute_major, compute_minor)
+ )
return False
except Exception as e:
log.ODM_INFO("Using CPU for feature extraction: %s" % str(e))
@@ -24,6 +29,7 @@ def has_popsift_and_can_handle_texsize(width, height):
try:
from opensfm import pypopsift
+
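+ # Check that the GPU can handle the texture size, with a ~2% margin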
return pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
except (ModuleNotFoundError, ImportError):
return False
@@ -31,25 +37,26 @@ def has_popsift_and_can_handle_texsize(width, height):
log.ODM_WARNING(str(e))
return False
+
@lru_cache(maxsize=None)
-def get_cuda_compute_version(device_id = 0):
+def get_cuda_compute_version(device_id=0):
cuda_lib = "libcuda.so"
- if sys.platform == 'win32':
- cuda_lib = os.path.join(os.environ.get('SYSTEMROOT'), 'system32', 'nvcuda.dll')
+ if sys.platform == "win32":
+ cuda_lib = os.path.join(os.environ.get("SYSTEMROOT"), "system32", "nvcuda.dll")
if not os.path.isfile(cuda_lib):
cuda_lib = "nvcuda.dll"
nvcuda = ctypes.cdll.LoadLibrary(cuda_lib)
- nvcuda.cuInit.argtypes = (ctypes.c_uint32, )
- nvcuda.cuInit.restypes = (ctypes.c_int32)
+ nvcuda.cuInit.argtypes = (ctypes.c_uint32,)
+ nvcuda.cuInit.restypes = ctypes.c_int32
if nvcuda.cuInit(0) != 0:
raise Exception("Cannot initialize CUDA")
- nvcuda.cuDeviceGetCount.argtypes = (ctypes.POINTER(ctypes.c_int32), )
- nvcuda.cuDeviceGetCount.restypes = (ctypes.c_int32)
-
+ nvcuda.cuDeviceGetCount.argtypes = (ctypes.POINTER(ctypes.c_int32),)
+ nvcuda.cuDeviceGetCount.restypes = ctypes.c_int32
+
device_count = ctypes.c_int32()
if nvcuda.cuDeviceGetCount(ctypes.byref(device_count)) != 0:
raise Exception("Cannot get device count")
@@ -57,16 +64,26 @@ def get_cuda_compute_version(device_id = 0):
if device_count.value == 0:
raise Exception("No devices")
- nvcuda.cuDeviceComputeCapability.argtypes = (ctypes.POINTER(ctypes.c_int32), ctypes.POINTER(ctypes.c_int32), ctypes.c_int32)
- nvcuda.cuDeviceComputeCapability.restypes = (ctypes.c_int32)
+ nvcuda.cuDeviceComputeCapability.argtypes = (
+ ctypes.POINTER(ctypes.c_int32),
+ ctypes.POINTER(ctypes.c_int32),
+ ctypes.c_int32,
+ )
+ nvcuda.cuDeviceComputeCapability.restypes = ctypes.c_int32
compute_major = ctypes.c_int32()
compute_minor = ctypes.c_int32()
- if nvcuda.cuDeviceComputeCapability(ctypes.byref(compute_major), ctypes.byref(compute_minor), device_id) != 0:
+ if (
+ nvcuda.cuDeviceComputeCapability(
+ ctypes.byref(compute_major), ctypes.byref(compute_minor), device_id
+ )
+ != 0
+ ):
raise Exception("Cannot get CUDA compute version")
return (compute_major.value, compute_minor.value)
+
def has_gpu(args):
if gpu_disabled_by_user_env():
log.ODM_INFO("Disabling GPU features (ODM_NO_GPU is set)")
@@ -75,8 +92,10 @@ def has_gpu(args):
log.ODM_INFO("Disabling GPU features (--no-gpu is set)")
return False
- if sys.platform == 'win32':
- nvcuda_path = os.path.join(os.environ.get('SYSTEMROOT'), 'system32', 'nvcuda.dll')
+ if sys.platform == "win32":
+ nvcuda_path = os.path.join(
+ os.environ.get("SYSTEMROOT"), "system32", "nvcuda.dll"
+ )
if os.path.isfile(nvcuda_path):
log.ODM_INFO("CUDA drivers detected")
return True
@@ -84,7 +103,7 @@ def has_gpu(args):
log.ODM_INFO("No CUDA drivers detected, using CPU")
return False
else:
- if shutil.which('nvidia-smi') is not None:
+ if shutil.which("nvidia-smi") is not None:
log.ODM_INFO("nvidia-smi detected")
return True
else:
diff --git a/opendm/gsd.py b/opendm/gsd.py
index ebfc99ef..e088e874 100644
--- a/opendm/gsd.py
+++ b/opendm/gsd.py
@@ -6,6 +6,7 @@ from repoze.lru import lru_cache
from opendm import log
from opendm.shots import get_origin
+
def rounded_gsd(reconstruction_json, default_value=None, ndigits=0, ignore_gsd=False):
"""
:param reconstruction_json path to OpenSfM's reconstruction.json
@@ -22,7 +23,14 @@ def rounded_gsd(reconstruction_json, default_value=None, ndigits=0, ignore_gsd=F
return default_value
-def image_max_size(photos, target_resolution, reconstruction_json, gsd_error_estimate = 0.5, ignore_gsd=False, has_gcp=False):
+def image_max_size(
+ photos,
+ target_resolution,
+ reconstruction_json,
+ gsd_error_estimate=0.5,
+ ignore_gsd=False,
+ has_gcp=False,
+):
"""
:param photos images database
:param target_resolution resolution the user wants have in cm / pixel
@@ -37,7 +45,9 @@ def image_max_size(photos, target_resolution, reconstruction_json, gsd_error_est
if ignore_gsd:
isf = 1.0
else:
- isf = image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimate, has_gcp=has_gcp)
+ isf = image_scale_factor(
+ target_resolution, reconstruction_json, gsd_error_estimate, has_gcp=has_gcp
+ )
for p in photos:
max_width = max(p.width, max_width)
@@ -45,7 +55,10 @@ def image_max_size(photos, target_resolution, reconstruction_json, gsd_error_est
return int(math.ceil(max(max_width, max_height) * isf))
-def image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimate = 0.5, has_gcp=False):
+
+def image_scale_factor(
+ target_resolution, reconstruction_json, gsd_error_estimate=0.5, has_gcp=False
+):
"""
:param target_resolution resolution the user wants have in cm / pixel
:param reconstruction_json path to OpenSfM's reconstruction.json
@@ -66,8 +79,15 @@ def image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimat
return 1.0
-def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, gsd_scaling = 1.0, ignore_gsd=False,
- ignore_resolution=False, has_gcp=False):
+def cap_resolution(
+ resolution,
+ reconstruction_json,
+ gsd_error_estimate=0.1,
+ gsd_scaling=1.0,
+ ignore_gsd=False,
+ ignore_resolution=False,
+ has_gcp=False,
+):
"""
:param resolution resolution in cm / pixel
:param reconstruction_json path to OpenSfM's reconstruction.json
@@ -81,19 +101,28 @@ def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, gs
if ignore_gsd:
return resolution
- gsd = opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=has_gcp or ignore_resolution)
+ gsd = opensfm_reconstruction_average_gsd(
+ reconstruction_json, use_all_shots=has_gcp or ignore_resolution
+ )
if gsd is not None:
gsd = gsd * (1 - gsd_error_estimate) * gsd_scaling
if gsd > resolution or ignore_resolution:
- log.ODM_WARNING('Maximum resolution set to {} * (GSD - {}%) '
- '({:.2f} cm / pixel, requested resolution was {:.2f} cm / pixel)'
- .format(gsd_scaling, gsd_error_estimate * 100, gsd, resolution))
+ log.ODM_WARNING(
+ "Maximum resolution set to {} * (GSD - {}%) "
+ "({:.2f} cm / pixel, requested resolution was {:.2f} cm / pixel)".format(
+ gsd_scaling, gsd_error_estimate * 100, gsd, resolution
+ )
+ )
return gsd
else:
return resolution
else:
- log.ODM_WARNING('Cannot calculate GSD, using requested resolution of {:.2f}'.format(resolution))
+ log.ODM_WARNING(
+ "Cannot calculate GSD, using requested resolution of {:.2f}".format(
+ resolution
+ )
+ )
return resolution
@@ -102,7 +131,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
"""
Computes the average Ground Sampling Distance of an OpenSfM reconstruction.
:param reconstruction_json path to OpenSfM's reconstruction.json
- :return Ground Sampling Distance value (cm / pixel) or None if
+ :return Ground Sampling Distance value (cm / pixel) or None if
a GSD estimate cannot be compute
"""
if not os.path.isfile(reconstruction_json):
@@ -115,34 +144,41 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
reconstruction = data[0]
point_heights = []
- for pointId in reconstruction['points']:
- point = reconstruction['points'][pointId]
- point_heights.append(point['coordinates'][2])
+ for pointId in reconstruction["points"]:
+ point = reconstruction["points"][pointId]
+ point_heights.append(point["coordinates"][2])
ground_height = np.median(point_heights)
gsds = []
- for shotImage in reconstruction['shots']:
- shot = reconstruction['shots'][shotImage]
- if use_all_shots or shot.get('gps_dop', 999999) < 999999:
- camera = reconstruction['cameras'][shot['camera']]
+ for shotImage in reconstruction["shots"]:
+ shot = reconstruction["shots"][shotImage]
+ if use_all_shots or shot.get("gps_dop", 999999) < 999999:
+ camera = reconstruction["cameras"][shot["camera"]]
shot_origin = get_origin(shot)
shot_height = shot_origin[2]
- focal_ratio = camera.get('focal', camera.get('focal_x'))
+ focal_ratio = camera.get("focal", camera.get("focal_x"))
if not focal_ratio:
- log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json)
+ log.ODM_WARNING(
+ "Cannot parse focal values from %s. This is likely an unsupported camera model."
+ % reconstruction_json
+ )
return None
-
- gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
- shot_height - ground_height,
- camera['width']))
-
+
+ gsds.append(
+ calculate_gsd_from_focal_ratio(
+ focal_ratio, shot_height - ground_height, camera["width"]
+ )
+ )
+
if len(gsds) > 0:
mean = np.mean(gsds)
if mean < 0:
- log.ODM_WARNING("Negative GSD estimated, this might indicate a flipped Z-axis.")
+ log.ODM_WARNING(
+ "Negative GSD estimated, this might indicate a flipped Z-axis."
+ )
return abs(mean)
-
+
return None
@@ -160,9 +196,9 @@ def calculate_gsd(sensor_width, flight_height, focal_length, image_width):
>>> calculate_gsd(13.2, 100, 8.8, 0)
"""
if sensor_width != 0:
- return calculate_gsd_from_focal_ratio(focal_length / sensor_width,
- flight_height,
- image_width)
+ return calculate_gsd_from_focal_ratio(
+ focal_length / sensor_width, flight_height, image_width
+ )
else:
return None
@@ -176,5 +212,5 @@ def calculate_gsd_from_focal_ratio(focal_ratio, flight_height, image_width):
"""
if focal_ratio == 0 or image_width == 0:
return None
-
- return ((flight_height * 100) / image_width) / focal_ratio
\ No newline at end of file
+
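+ # GSD (cm/pixel) = (flight height in cm / image width in px) / focal_ratio, where focal_ratio = focal length / sensor width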
+ return ((flight_height * 100) / image_width) / focal_ratio
diff --git a/opendm/io.py b/opendm/io.py
index fc878edb..b37a7644 100644
--- a/opendm/io.py
+++ b/opendm/io.py
@@ -2,6 +2,7 @@ import os
import shutil, errno
import json
+
def absolute_path_file(path_file):
return os.path.abspath(path_file)
@@ -30,7 +31,9 @@ def copy(src, dst):
except OSError as e:
if e.errno == errno.ENOTDIR:
shutil.copy(src, dst)
- else: raise
+ else:
+ raise
+
def rename_file(src, dst):
try:
@@ -46,7 +49,7 @@ def rename_file(src, dst):
# find a file in the root directory
def find(filename, folder):
for root, dirs, files in os.walk(folder):
- return '/'.join((root, filename)) if filename in files else None
+ return "/".join((root, filename)) if filename in files else None
def related_file_path(input_file_path, prefix="", postfix="", replace_base=None):
@@ -68,6 +71,7 @@ def related_file_path(input_file_path, prefix="", postfix="", replace_base=None)
return os.path.join(path, "{}{}{}{}".format(prefix, basename, postfix, ext))
+
def path_or_json_string_to_dict(string):
if string == "":
return {}
@@ -79,13 +83,14 @@ def path_or_json_string_to_dict(string):
raise ValueError("{0} is not a valid JSON string.".format(string))
elif file_exists(string):
try:
- with open(string, 'r') as f:
+ with open(string, "r") as f:
return json.loads(f.read())
except:
raise ValueError("{0} is not a valid JSON file.".format(string))
else:
raise ValueError("{0} is not a valid JSON file or string.".format(string))
-
+
+
def touch(file):
- with open(file, 'w') as fout:
- fout.write("Done!\n")
\ No newline at end of file
+ with open(file, "w") as fout:
+ fout.write("Done!\n")
diff --git a/opendm/location.py b/opendm/location.py
index bf78da6b..1d733fcd 100644
--- a/opendm/location.py
+++ b/opendm/location.py
@@ -3,9 +3,10 @@ from opendm import log
from pyproj import Proj, Transformer, CRS
from osgeo import osr
+
def extract_utm_coords(photos, images_path, output_coords_file):
"""
- Create a coordinate file containing the GPS positions of all cameras
+ Create a coordinate file containing the GPS positions of all cameras
to be used later in the ODM toolchain for automatic georeferecing
:param photos ([ODM_Photo]) list of photos
:param images_path (str) path to dataset images
@@ -13,8 +14,10 @@ def extract_utm_coords(photos, images_path, output_coords_file):
:return None
"""
if len(photos) == 0:
- raise Exception("No input images, cannot create coordinates file of GPS positions")
-
+ raise Exception(
+ "No input images, cannot create coordinates file of GPS positions"
+ )
+
utm_zone = None
hemisphere = None
coords = []
@@ -23,21 +26,27 @@ def extract_utm_coords(photos, images_path, output_coords_file):
if photo.latitude is None or photo.longitude is None:
log.ODM_WARNING("GPS position not available for %s" % photo.filename)
continue
-
+
if utm_zone is None:
- utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(photo.longitude, photo.latitude)
+ utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(
+ photo.longitude, photo.latitude
+ )
try:
alt = photo.altitude if photo.altitude is not None else 0
- coord = convert_to_utm(photo.longitude, photo.latitude, alt, utm_zone, hemisphere)
+ coord = convert_to_utm(
+ photo.longitude, photo.latitude, alt, utm_zone, hemisphere
+ )
except:
- raise Exception("Failed to convert GPS position to UTM for %s" % photo.filename)
-
+ raise Exception(
+ "Failed to convert GPS position to UTM for %s" % photo.filename
+ )
+
coords.append(coord)
if utm_zone is None:
raise Exception("No images seem to have GPS information")
-
+
# Calculate average
dx = 0.0
dy = 0.0
@@ -55,13 +64,16 @@ def extract_utm_coords(photos, images_path, output_coords_file):
f.write("%s %s\n" % (dx, dy))
for coord in coords:
f.write("%s %s %s\n" % (coord[0] - dx, coord[1] - dy, coord[2]))
-
+
+
def transform2(from_srs, to_srs, x, y):
return transformer(from_srs, to_srs).TransformPoint(x, y, 0)[:2]
+
def transform3(from_srs, to_srs, x, y, z):
return transformer(from_srs, to_srs).TransformPoint(x, y, z)
+
def proj_srs_convert(srs):
"""
Convert a Proj SRS object to osr SRS object
@@ -74,16 +86,18 @@ def proj_srs_convert(srs):
else:
proj4 = srs.to_proj4()
res.ImportFromProj4(proj4)
-
+
res.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return res
+
def transformer(from_srs, to_srs):
src = proj_srs_convert(from_srs)
tgt = proj_srs_convert(to_srs)
return osr.CoordinateTransformation(src, tgt)
-
+
+
def get_utm_zone_and_hemisphere_from(lon, lat):
"""
Calculate the UTM zone and hemisphere that a longitude/latitude pair falls on
@@ -91,10 +105,11 @@ def get_utm_zone_and_hemisphere_from(lon, lat):
:param lat latitude
:return [utm_zone, hemisphere]
"""
- utm_zone = (int(math.floor((lon + 180.0)/6.0)) % 60) + 1
- hemisphere = 'S' if lat < 0 else 'N'
+ utm_zone = (int(math.floor((lon + 180.0) / 6.0)) % 60) + 1
+ hemisphere = "S" if lat < 0 else "N"
return [utm_zone, hemisphere]
+
def convert_to_utm(lon, lat, alt, utm_zone, hemisphere):
"""
Convert longitude, latitude and elevation values to UTM
@@ -105,14 +120,17 @@ def convert_to_utm(lon, lat, alt, utm_zone, hemisphere):
:param hemisphere one of 'N' or 'S'
:return [x,y,z] UTM coordinates
"""
- if hemisphere == 'N':
- p = Proj(proj='utm',zone=utm_zone,ellps='WGS84', preserve_units=True)
+ if hemisphere == "N":
+ p = Proj(proj="utm", zone=utm_zone, ellps="WGS84", preserve_units=True)
else:
- p = Proj(proj='utm',zone=utm_zone,ellps='WGS84', preserve_units=True, south=True)
-
- x,y = p(lon, lat)
+ p = Proj(
+ proj="utm", zone=utm_zone, ellps="WGS84", preserve_units=True, south=True
+ )
+
+ x, y = p(lon, lat)
return [x, y, alt]
+
def parse_srs_header(header):
"""
Parse a header coming from GCP or coordinate file
@@ -120,48 +138,51 @@ def parse_srs_header(header):
:return Proj object
"""
header = header.strip()
- ref = header.split(' ')
+ ref = header.split(" ")
try:
- if ref[0] == 'WGS84' and ref[1] == 'UTM':
+ if ref[0] == "WGS84" and ref[1] == "UTM":
datum = ref[0]
utm_pole = (ref[2][len(ref[2]) - 1]).upper()
- utm_zone = int(ref[2][:len(ref[2]) - 1])
-
- proj_args = {
- 'zone': utm_zone,
- 'datum': datum
- }
+ utm_zone = int(ref[2][: len(ref[2]) - 1])
- proj4 = '+proj=utm +zone={zone} +datum={datum} +units=m +no_defs=True'
- if utm_pole == 'S':
- proj4 += ' +south=True'
+ proj_args = {"zone": utm_zone, "datum": datum}
+
+ proj4 = "+proj=utm +zone={zone} +datum={datum} +units=m +no_defs=True"
+ if utm_pole == "S":
+ proj4 += " +south=True"
srs = CRS.from_proj4(proj4.format(**proj_args))
- elif '+proj' in header:
- srs = CRS.from_proj4(header.strip('\''))
+ elif "+proj" in header:
+ srs = CRS.from_proj4(header.strip("'"))
elif header.lower().startswith("epsg:"):
srs = CRS.from_epsg(header.lower()[5:])
else:
- raise RuntimeError('Could not parse coordinates. Bad SRS supplied: %s' % header)
+ raise RuntimeError(
+ "Could not parse coordinates. Bad SRS supplied: %s" % header
+ )
except RuntimeError as e:
- log.ODM_ERROR('Uh oh! There seems to be a problem with your coordinates/GCP file.\n\n'
- 'The line: %s\n\n'
- 'Is not valid. Projections that are valid include:\n'
- ' - EPSG:*****\n'
- ' - WGS84 UTM **(N|S)\n'
- ' - Any valid proj4 string (for example, +proj=utm +zone=32 +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs)\n\n'
- ' Some valid EPSG codes are not yet available in OpenDroneMap and need substituted with valid proj4 strings\n'
- ' Try searching for equivalent proj4 strings at spatialreference.org or epsg.io.\n'
- 'Modify your input and try again.' % header)
+ log.ODM_ERROR(
+ "Uh oh! There seems to be a problem with your coordinates/GCP file.\n\n"
+ "The line: %s\n\n"
+ "Is not valid. Projections that are valid include:\n"
+ " - EPSG:*****\n"
+ " - WGS84 UTM **(N|S)\n"
+ " - Any valid proj4 string (for example, +proj=utm +zone=32 +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs)\n\n"
+ " Some valid EPSG codes are not yet available in OpenDroneMap and need substituted with valid proj4 strings\n"
+ " Try searching for equivalent proj4 strings at spatialreference.org or epsg.io.\n"
+ "Modify your input and try again." % header
+ )
raise RuntimeError(e)
-
+
return srs
+
def utm_srs_from_ll(lon, lat):
utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(lon, lat)
return parse_srs_header("WGS84 UTM %s%s" % (utm_zone, hemisphere))
+
def utm_transformers_from_ll(lon, lat):
source_srs = CRS.from_epsg(4326)
target_srs = utm_srs_from_ll(lon, lat)
diff --git a/opendm/log.py b/opendm/log.py
index fbcbfed5..31955e5e 100644
--- a/opendm/log.py
+++ b/opendm/log.py
@@ -11,38 +11,41 @@ from repoze.lru import lru_cache
from opendm.arghelpers import double_quote, args_to_dict
from vmem import virtual_memory
-if sys.platform == 'win32' or os.getenv('no_ansiesc'):
- # No colors on Windows (sorry !) or existing no_ansiesc env variable
- HEADER = ''
- OKBLUE = ''
- OKGREEN = ''
- DEFAULT = ''
- WARNING = ''
- FAIL = ''
- ENDC = ''
+if sys.platform == "win32" or os.getenv("no_ansiesc"):
+ # No colors on Windows (sorry !) or existing no_ansiesc env variable
+ HEADER = ""
+ OKBLUE = ""
+ OKGREEN = ""
+ DEFAULT = ""
+ WARNING = ""
+ FAIL = ""
+ ENDC = ""
else:
- HEADER = '\033[95m'
- OKBLUE = '\033[94m'
- OKGREEN = '\033[92m'
- DEFAULT = '\033[39m'
- WARNING = '\033[93m'
- FAIL = '\033[91m'
- ENDC = '\033[0m'
+ HEADER = "\033[95m"
+ OKBLUE = "\033[94m"
+ OKGREEN = "\033[92m"
+ DEFAULT = "\033[39m"
+ WARNING = "\033[93m"
+ FAIL = "\033[91m"
+ ENDC = "\033[0m"
lock = threading.Lock()
+
@lru_cache(maxsize=None)
def odm_version():
with open(os.path.join(os.path.dirname(__file__), "..", "VERSION")) as f:
return f.read().split("\n")[0].strip()
+
def memory():
mem = virtual_memory()
return {
- 'total': round(mem.total / 1024 / 1024),
- 'available': round(mem.available / 1024 / 1024)
+ "total": round(mem.total / 1024 / 1024),
+ "available": round(mem.available / 1024 / 1024),
}
+
class ODMLogger:
def __init__(self):
self.json = None
@@ -55,74 +58,76 @@ class ODMLogger:
print("%s%s %s%s" % (startc, level, msg, ENDC))
sys.stdout.flush()
if self.json is not None:
- self.json['stages'][-1]['messages'].append({
- 'message': msg,
- 'type': level_name.lower()
- })
-
+ self.json["stages"][-1]["messages"].append(
+ {"message": msg, "type": level_name.lower()}
+ )
+
def init_json_output(self, output_files, args):
self.json_output_files = output_files
self.json_output_file = output_files[0]
self.json = {}
- self.json['odmVersion'] = odm_version()
- self.json['memory'] = memory()
- self.json['cpus'] = multiprocessing.cpu_count()
- self.json['images'] = -1
- self.json['options'] = args_to_dict(args)
- self.json['startTime'] = self.start_time.isoformat()
- self.json['stages'] = []
- self.json['processes'] = []
- self.json['success'] = False
+ self.json["odmVersion"] = odm_version()
+ self.json["memory"] = memory()
+ self.json["cpus"] = multiprocessing.cpu_count()
+ self.json["images"] = -1
+ self.json["options"] = args_to_dict(args)
+ self.json["startTime"] = self.start_time.isoformat()
+ self.json["stages"] = []
+ self.json["processes"] = []
+ self.json["success"] = False
def log_json_stage_run(self, name, start_time):
if self.json is not None:
- self.json['stages'].append({
- 'name': name,
- 'startTime': start_time.isoformat(),
- 'messages': [],
- })
-
+ self.json["stages"].append(
+ {
+ "name": name,
+ "startTime": start_time.isoformat(),
+ "messages": [],
+ }
+ )
+
def log_json_images(self, count):
if self.json is not None:
- self.json['images'] = count
-
- def log_json_stage_error(self, error, exit_code, stack_trace = ""):
+ self.json["images"] = count
+
+ def log_json_stage_error(self, error, exit_code, stack_trace=""):
if self.json is not None:
- self.json['error'] = {
- 'code': exit_code,
- 'message': error
- }
- self.json['stackTrace'] = list(map(str.strip, stack_trace.split("\n")))
+ self.json["error"] = {"code": exit_code, "message": error}
+ self.json["stackTrace"] = list(map(str.strip, stack_trace.split("\n")))
self._log_json_end_time()
def log_json_success(self):
if self.json is not None:
- self.json['success'] = True
+ self.json["success"] = True
self._log_json_end_time()
-
- def log_json_process(self, cmd, exit_code, output = []):
+
+ def log_json_process(self, cmd, exit_code, output=[]):
if self.json is not None:
d = {
- 'command': cmd,
- 'exitCode': exit_code,
+ "command": cmd,
+ "exitCode": exit_code,
}
if output:
- d['output'] = output
+ d["output"] = output
- self.json['processes'].append(d)
+ self.json["processes"].append(d)
def _log_json_end_time(self):
if self.json is not None:
end_time = datetime.datetime.now()
- self.json['endTime'] = end_time.isoformat()
- self.json['totalTime'] = round((end_time - self.start_time).total_seconds(), 2)
+ self.json["endTime"] = end_time.isoformat()
+ self.json["totalTime"] = round(
+ (end_time - self.start_time).total_seconds(), 2
+ )
+
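+ # Also close out the last stage with its own end time and duration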
+ if self.json["stages"]:
+ last_stage = self.json["stages"][-1]
+ last_stage["endTime"] = end_time.isoformat()
+ start_time = dateutil.parser.isoparse(last_stage["startTime"])
+ last_stage["totalTime"] = round(
+ (end_time - start_time).total_seconds(), 2
+ )
- if self.json['stages']:
- last_stage = self.json['stages'][-1]
- last_stage['endTime'] = end_time.isoformat()
- start_time = dateutil.parser.isoparse(last_stage['startTime'])
- last_stage['totalTime'] = round((end_time - start_time).total_seconds(), 2)
-
def info(self, msg):
self.log(DEFAULT, msg, "INFO")
@@ -138,13 +143,14 @@ class ODMLogger:
def close(self):
if self.json is not None and self.json_output_file is not None:
try:
- with open(self.json_output_file, 'w') as f:
+ with open(self.json_output_file, "w") as f:
f.write(json.dumps(self.json, indent=4))
for f in self.json_output_files[1:]:
shutil.copy(self.json_output_file, f)
except Exception as e:
print("Cannot write log.json: %s" % str(e))
+
logger = ODMLogger()
ODM_INFO = logger.info
diff --git a/opendm/mesh.py b/opendm/mesh.py
index 584458bf..1e24baed 100644
--- a/opendm/mesh.py
+++ b/opendm/mesh.py
@@ -9,42 +9,69 @@ from opendm import point_cloud
from scipy import signal
import numpy as np
-def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, available_cores=None, method='gridded', smooth_dsm=True, max_tiles=None):
+
+def create_25dmesh(
+ inPointCloud,
+ outMesh,
+ radius_steps=["0.05"],
+ dsm_resolution=0.05,
+ depth=8,
+ samples=1,
+ maxVertexCount=100000,
+ available_cores=None,
+ method="gridded",
+ smooth_dsm=True,
+ max_tiles=None,
+):
# Create DSM from point cloud
# Create temporary directory
mesh_directory = os.path.dirname(outMesh)
- tmp_directory = os.path.join(mesh_directory, 'tmp')
+ tmp_directory = os.path.join(mesh_directory, "tmp")
if os.path.exists(tmp_directory):
shutil.rmtree(tmp_directory)
os.mkdir(tmp_directory)
- log.ODM_INFO('Created temporary directory: %s' % tmp_directory)
+ log.ODM_INFO("Created temporary directory: %s" % tmp_directory)
- log.ODM_INFO('Creating DSM for 2.5D mesh')
+ log.ODM_INFO("Creating DSM for 2.5D mesh")
commands.create_dem(
- inPointCloud,
- 'mesh_dsm',
- output_type='max',
- radiuses=radius_steps,
- gapfill=True,
- outdir=tmp_directory,
- resolution=dsm_resolution,
- max_workers=available_cores,
- apply_smoothing=smooth_dsm,
- max_tiles=max_tiles
- )
+ inPointCloud,
+ "mesh_dsm",
+ output_type="max",
+ radiuses=radius_steps,
+ gapfill=True,
+ outdir=tmp_directory,
+ resolution=dsm_resolution,
+ max_workers=available_cores,
+ apply_smoothing=smooth_dsm,
+ max_tiles=max_tiles,
+ )
- if method == 'gridded':
- mesh = dem_to_mesh_gridded(os.path.join(tmp_directory, 'mesh_dsm.tif'), outMesh, maxVertexCount, maxConcurrency=max(1, available_cores))
- elif method == 'poisson':
- dsm_points = dem_to_points(os.path.join(tmp_directory, 'mesh_dsm.tif'), os.path.join(tmp_directory, 'dsm_points.ply'))
- mesh = screened_poisson_reconstruction(dsm_points, outMesh, depth=depth,
- samples=samples,
- maxVertexCount=maxVertexCount,
- threads=max(1, available_cores - 1)), # poissonrecon can get stuck on some machines if --threads == all cores
+ if method == "gridded":
+ mesh = dem_to_mesh_gridded(
+ os.path.join(tmp_directory, "mesh_dsm.tif"),
+ outMesh,
+ maxVertexCount,
+ maxConcurrency=max(1, available_cores),
+ )
+ elif method == "poisson":
+ dsm_points = dem_to_points(
+ os.path.join(tmp_directory, "mesh_dsm.tif"),
+ os.path.join(tmp_directory, "dsm_points.ply"),
+ )
+ mesh = screened_poisson_reconstruction(
+ dsm_points,
+ outMesh,
+ depth=depth,
+ samples=samples,
+ maxVertexCount=maxVertexCount,
+ threads=max(1, available_cores - 1),
+ ) # poissonrecon can get stuck on some machines if --threads == all cores
else:
- raise 'Not a valid method: ' + method
+ raise "Not a valid method: " + method
# Cleanup tmp
if os.path.exists(tmp_directory):
@@ -54,26 +81,28 @@ def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=
def dem_to_points(inGeotiff, outPointCloud):
- log.ODM_INFO('Sampling points from DSM: %s' % inGeotiff)
+ log.ODM_INFO("Sampling points from DSM: %s" % inGeotiff)
kwargs = {
- 'bin': context.dem2points_path,
- 'outfile': outPointCloud,
- 'infile': inGeotiff
+ "bin": context.dem2points_path,
+ "outfile": outPointCloud,
+ "infile": inGeotiff,
}
- system.run('"{bin}" -inputFile "{infile}" '
- '-outputFile "{outfile}" '
- '-skirtHeightThreshold 1.5 '
- '-skirtIncrements 0.2 '
- '-skirtHeightCap 100 '
- '-verbose '.format(**kwargs))
+ system.run(
+ '"{bin}" -inputFile "{infile}" '
+ '-outputFile "{outfile}" '
+ "-skirtHeightThreshold 1.5 "
+ "-skirtIncrements 0.2 "
+ "-skirtHeightCap 100 "
+ "-verbose ".format(**kwargs)
+ )
return outPointCloud
def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
- log.ODM_INFO('Creating mesh from DSM: %s' % inGeotiff)
+ log.ODM_INFO("Creating mesh from DSM: %s" % inGeotiff)
mesh_path, mesh_filename = os.path.split(outMesh)
# mesh_path = path/to
@@ -85,47 +114,53 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
outMeshDirty = os.path.join(mesh_path, "{}.dirty{}".format(basename, ext))
- # This should work without issues most of the times,
+ # This should work without issues most of the time,
# but just in case we lower maxConcurrency if it fails.
while True:
try:
kwargs = {
- 'bin': context.dem2mesh_path,
- 'outfile': outMeshDirty,
- 'infile': inGeotiff,
- 'maxVertexCount': maxVertexCount,
- 'maxConcurrency': maxConcurrency
+ "bin": context.dem2mesh_path,
+ "outfile": outMeshDirty,
+ "infile": inGeotiff,
+ "maxVertexCount": maxVertexCount,
+ "maxConcurrency": maxConcurrency,
}
- system.run('"{bin}" -inputFile "{infile}" '
+ system.run(
+ '"{bin}" -inputFile "{infile}" '
'-outputFile "{outfile}" '
- '-maxTileLength 2000 '
- '-maxVertexCount {maxVertexCount} '
- '-maxConcurrency {maxConcurrency} '
- '-edgeSwapThreshold 0.15 '
- '-verbose '.format(**kwargs))
+ "-maxTileLength 2000 "
+ "-maxVertexCount {maxVertexCount} "
+ "-maxConcurrency {maxConcurrency} "
+ "-edgeSwapThreshold 0.15 "
+ "-verbose ".format(**kwargs)
+ )
break
except Exception as e:
maxConcurrency = math.floor(maxConcurrency / 2)
if maxConcurrency >= 1:
- log.ODM_WARNING("dem2mesh failed, retrying with lower concurrency (%s) in case this is a memory issue" % maxConcurrency)
+ log.ODM_WARNING(
+ "dem2mesh failed, retrying with lower concurrency (%s) in case this is a memory issue"
+ % maxConcurrency
+ )
else:
raise e
-
- # Cleanup and reduce vertex count if necessary
+ # Cleanup and reduce vertex count if necessary
# (as dem2mesh cannot guarantee that we'll have the target vertex count)
cleanupArgs = {
- 'reconstructmesh': context.omvs_reconstructmesh_path,
- 'outfile': outMesh,
- 'infile': outMeshDirty,
- 'max_faces': maxVertexCount * 2
+ "reconstructmesh": context.omvs_reconstructmesh_path,
+ "outfile": outMesh,
+ "infile": outMeshDirty,
+ "max_faces": maxVertexCount * 2,
}
- system.run('"{reconstructmesh}" -i "{infile}" '
- '-o "{outfile}" '
- '--archive-type 3 '
- '--remove-spikes 0 --remove-spurious 0 --smooth 0 '
- '--target-face-num {max_faces} -v 0'.format(**cleanupArgs))
+ system.run(
+ '"{reconstructmesh}" -i "{infile}" '
+ '-o "{outfile}" '
+ "--archive-type 3 "
+ "--remove-spikes 0 --remove-spurious 0 --smooth 0 "
+ "--target-face-num {max_faces} -v 0".format(**cleanupArgs)
+ )
# Delete intermediate results
os.remove(outMeshDirty)
@@ -133,7 +168,15 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
return outMesh
-def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = 1, maxVertexCount=100000, pointWeight=4, threads=context.num_cores):
+def screened_poisson_reconstruction(
+ inPointCloud,
+ outMesh,
+ depth=8,
+ samples=1,
+ maxVertexCount=100000,
+ pointWeight=4,
+ threads=context.num_cores,
+):
mesh_path, mesh_filename = os.path.split(outMesh)
# mesh_path = path/to
@@ -146,38 +189,42 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
outMeshDirty = os.path.join(mesh_path, "{}.dirty{}".format(basename, ext))
if os.path.isfile(outMeshDirty):
os.remove(outMeshDirty)
-
+
# Since PoissonRecon has some kind of a race condition on ppc64el, and this helps...
- if platform.machine() == 'ppc64le':
- log.ODM_WARNING("ppc64le platform detected, forcing single-threaded operation for PoissonRecon")
+ if platform.machine() == "ppc64le":
+ log.ODM_WARNING(
+ "ppc64le platform detected, forcing single-threaded operation for PoissonRecon"
+ )
threads = 1
while True:
poissonReconArgs = {
- 'bin': context.poisson_recon_path,
- 'outfile': outMeshDirty,
- 'infile': inPointCloud,
- 'depth': depth,
- 'samples': samples,
- 'pointWeight': pointWeight,
- 'threads': int(threads)
+ "bin": context.poisson_recon_path,
+ "outfile": outMeshDirty,
+ "infile": inPointCloud,
+ "depth": depth,
+ "samples": samples,
+ "pointWeight": pointWeight,
+ "threads": int(threads),
}
# Run PoissonRecon
try:
- system.run('"{bin}" --in "{infile}" '
- '--out "{outfile}" '
- '--depth {depth} '
- '--pointWeight {pointWeight} '
- '--samplesPerNode {samples} '
- '--threads {threads} '
- '--bType 2 '
- '--linearFit '.format(**poissonReconArgs))
+ system.run(
+ '"{bin}" --in "{infile}" '
+ '--out "{outfile}" '
+ "--depth {depth} "
+ "--pointWeight {pointWeight} "
+ "--samplesPerNode {samples} "
+ "--threads {threads} "
+ "--bType 2 "
+ "--linearFit ".format(**poissonReconArgs)
+ )
except Exception as e:
log.ODM_WARNING(str(e))
-
+
if os.path.isfile(outMeshDirty):
- break # Done!
+ break # Done!
else:
# PoissonRecon will sometimes fail due to race conditions
@@ -187,22 +234,26 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
if threads < 1:
break
else:
- log.ODM_WARNING("PoissonRecon failed with %s threads, let's retry with %s..." % (threads * 2, threads))
-
+ log.ODM_WARNING(
+ "PoissonRecon failed with %s threads, let's retry with %s..."
+ % (threads * 2, threads)
+ )
# Cleanup and reduce vertex count if necessary
cleanupArgs = {
- 'reconstructmesh': context.omvs_reconstructmesh_path,
- 'outfile': outMesh,
- 'infile':outMeshDirty,
- 'max_faces': maxVertexCount * 2
+ "reconstructmesh": context.omvs_reconstructmesh_path,
+ "outfile": outMesh,
+ "infile": outMeshDirty,
+ "max_faces": maxVertexCount * 2,
}
- system.run('"{reconstructmesh}" -i "{infile}" '
- '-o "{outfile}" '
- '--archive-type 3 '
- '--remove-spikes 0 --remove-spurious 20 --smooth 0 '
- '--target-face-num {max_faces} -v 0'.format(**cleanupArgs))
+ system.run(
+ '"{reconstructmesh}" -i "{infile}" '
+ '-o "{outfile}" '
+ "--archive-type 3 "
+ "--remove-spikes 0 --remove-spurious 20 --smooth 0 "
+ "--target-face-num {max_faces} -v 0".format(**cleanupArgs)
+ )
# Delete intermediate results
os.remove(outMeshDirty)
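
Both mesh functions above share the same recovery strategy: run the external tool and, on failure, halve the resource parameter (tile concurrency for dem2mesh, thread count for PoissonRecon) until it drops below 1. A minimal standalone sketch of that pattern, with a hypothetical run_cmd callable standing in for system.run:

    import math

    def run_with_reduced_concurrency(run_cmd, build_cmd, concurrency):
        # Try the command; on failure halve the concurrency and retry
        # until it drops below 1, at which point the error propagates.
        while True:
            try:
                run_cmd(build_cmd(concurrency))
                return concurrency
            except Exception as e:
                concurrency = math.floor(concurrency / 2)
                if concurrency < 1:
                    raise e
                print("Command failed, retrying with concurrency %s" % concurrency)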
diff --git a/opendm/multispectral.py b/opendm/multispectral.py
index 953bef6c..32ddcff4 100644
--- a/opendm/multispectral.py
+++ b/opendm/multispectral.py
@@ -14,6 +14,7 @@ from skimage.filters import rank, gaussian
# Loosely based on https://github.com/micasense/imageprocessing/blob/master/micasense/utils.py
+
def dn_to_radiance(photo, image):
"""
Convert Digital Number values to Radiance values
@@ -24,8 +25,10 @@ def dn_to_radiance(photo, image):
image = image.astype("float32")
if len(image.shape) != 3:
- raise ValueError("Image should have shape length of 3 (got: %s)" % len(image.shape))
-
+ raise ValueError(
+ "Image should have shape length of 3 (got: %s)" % len(image.shape)
+ )
+
# Thermal (this should never happen, but just in case..)
if photo.is_thermal():
return image
@@ -50,8 +53,10 @@ def dn_to_radiance(photo, image):
if bit_depth_max:
image /= bit_depth_max
else:
- log.ODM_WARNING("Cannot normalize DN for %s, bit depth is missing" % photo.filename)
-
+ log.ODM_WARNING(
+ "Cannot normalize DN for %s, bit depth is missing" % photo.filename
+ )
+
if V is not None:
# vignette correction
V = np.repeat(V[:, :, np.newaxis], image.shape[2], axis=2)
@@ -62,17 +67,17 @@ def dn_to_radiance(photo, image):
R = 1.0 / (1.0 + a2 * y / exposure_time - a3 * y)
R = np.repeat(R[:, :, np.newaxis], image.shape[2], axis=2)
image *= R
-
+
# Floor any negative radiances to zero (can happen due to noise around blackLevel)
if dark_level is not None:
image[image < 0] = 0
-
+
# apply the radiometric calibration - i.e. scale by the gain-exposure product and
# multiply with the radiometric calibration coefficient
if gain is not None and exposure_time is not None:
- image /= (gain * exposure_time)
-
+ image /= gain * exposure_time
+
if a1 is not None:
# multiply with the radiometric calibration coefficient
image *= a1
@@ -82,6 +87,7 @@ def dn_to_radiance(photo, image):
return image
+
def vignette_map(photo):
x_vc, y_vc = photo.get_vignetting_center()
polynomial = photo.get_vignetting_polynomial()
@@ -111,9 +117,10 @@ def vignette_map(photo):
vignette = 1.0 / vignette
return vignette, x, y
-
+
return None, None, None
+
def dn_to_reflectance(photo, image, use_sun_sensor=True):
radiance = dn_to_radiance(photo, image)
irradiance = compute_irradiance(photo, use_sun_sensor=use_sun_sensor)
@@ -122,6 +129,7 @@ def dn_to_reflectance(photo, image, use_sun_sensor=True):
reflectance[reflectance > 1.0] = 1.0
return reflectance
+
def compute_irradiance(photo, use_sun_sensor=True):
# Thermal (this should never happen, but just in case..)
if photo.is_thermal():
@@ -136,70 +144,85 @@ def compute_irradiance(photo, use_sun_sensor=True):
if use_sun_sensor and photo.get_sun_sensor():
# Estimate it
- dls_orientation_vector = np.array([0,0,-1])
- sun_vector_ned, sensor_vector_ned, sun_sensor_angle, \
- solar_elevation, solar_azimuth = dls.compute_sun_angle([photo.latitude, photo.longitude],
- photo.get_dls_pose(),
- photo.get_utc_time(),
- dls_orientation_vector)
+ dls_orientation_vector = np.array([0, 0, -1])
+ (
+ sun_vector_ned,
+ sensor_vector_ned,
+ sun_sensor_angle,
+ solar_elevation,
+ solar_azimuth,
+ ) = dls.compute_sun_angle(
+ [photo.latitude, photo.longitude],
+ photo.get_dls_pose(),
+ photo.get_utc_time(),
+ dls_orientation_vector,
+ )
angular_correction = dls.fresnel(sun_sensor_angle)
# TODO: support for direct and scattered irradiance
- direct_to_diffuse_ratio = 6.0 # Assumption, clear skies
+ direct_to_diffuse_ratio = 6.0 # Assumption, clear skies
spectral_irradiance = photo.get_sun_sensor()
percent_diffuse = 1.0 / direct_to_diffuse_ratio
sensor_irradiance = spectral_irradiance / angular_correction
# Find direct irradiance in the plane normal to the sun
- untilted_direct_irr = sensor_irradiance / (percent_diffuse + np.cos(sun_sensor_angle))
+ untilted_direct_irr = sensor_irradiance / (
+ percent_diffuse + np.cos(sun_sensor_angle)
+ )
direct_irradiance = untilted_direct_irr
scattered_irradiance = untilted_direct_irr * percent_diffuse
# compute irradiance on the ground using the solar altitude angle
- horizontal_irradiance = direct_irradiance * np.sin(solar_elevation) + scattered_irradiance
+ horizontal_irradiance = (
+ direct_irradiance * np.sin(solar_elevation) + scattered_irradiance
+ )
return horizontal_irradiance
elif use_sun_sensor:
log.ODM_WARNING("No sun sensor values found for %s" % photo.filename)
-
+
return 1.0
+
def get_photos_by_band(multi_camera, user_band_name):
band_name = get_primary_band_name(multi_camera, user_band_name)
for band in multi_camera:
- if band['name'] == band_name:
- return band['photos']
+ if band["name"] == band_name:
+ return band["photos"]
def get_primary_band_name(multi_camera, user_band_name):
if len(multi_camera) < 1:
raise Exception("Invalid multi_camera list")
-
+
# Pick RGB, or Green, or Blue, in this order, if available, otherwise first band
if user_band_name == "auto":
- for aliases in [['rgb', 'redgreenblue'], ['green', 'g'], ['blue', 'b']]:
+ for aliases in [["rgb", "redgreenblue"], ["green", "g"], ["blue", "b"]]:
for band in multi_camera:
- if band['name'].lower() in aliases:
- return band['name']
-
- return multi_camera[0]['name']
+ if band["name"].lower() in aliases:
+ return band["name"]
+
+ return multi_camera[0]["name"]
for band in multi_camera:
- if band['name'].lower() == user_band_name.lower():
- return band['name']
-
-    band_name_fallback = multi_camera[0]['name']
-    log.ODM_WARNING("Cannot find band name \"%s\", will use \"%s\" instead" % (user_band_name, band_name_fallback))
+        if band["name"].lower() == user_band_name.lower():
+            return band["name"]
+ band_name_fallback = multi_camera[0]["name"]
+
+ log.ODM_WARNING(
+ 'Cannot find band name "%s", will use "%s" instead'
+ % (user_band_name, band_name_fallback)
+ )
return band_name_fallback
def compute_band_maps(multi_camera, primary_band):
"""
- Computes maps of:
+ Computes maps of:
- { photo filename --> associated primary band photo } (s2p)
- { primary band filename --> list of associated secondary band photos } (p2s)
by looking at capture UUID, capture time or filenames as a fallback
@@ -207,10 +230,10 @@ def compute_band_maps(multi_camera, primary_band):
band_name = get_primary_band_name(multi_camera, primary_band)
primary_band_photos = None
for band in multi_camera:
- if band['name'] == band_name:
- primary_band_photos = band['photos']
+ if band["name"] == band_name:
+ primary_band_photos = band["photos"]
break
-
+
# Try using capture time as the grouping factor
try:
unique_id_map = {}
@@ -220,29 +243,36 @@ def compute_band_maps(multi_camera, primary_band):
for p in primary_band_photos:
uuid = p.get_capture_id()
if uuid is None:
- raise Exception("Cannot use capture time (no information in %s)" % p.filename)
-
+ raise Exception(
+ "Cannot use capture time (no information in %s)" % p.filename
+ )
+
# Should be unique across primary band
if unique_id_map.get(uuid) is not None:
raise Exception("Unreliable UUID/capture time detected (duplicate)")
unique_id_map[uuid] = p
-
+
for band in multi_camera:
- photos = band['photos']
+ photos = band["photos"]
for p in photos:
uuid = p.get_capture_id()
if uuid is None:
- raise Exception("Cannot use UUID/capture time (no information in %s)" % p.filename)
-
+ raise Exception(
+ "Cannot use UUID/capture time (no information in %s)"
+ % p.filename
+ )
+
# Should match the primary band
if unique_id_map.get(uuid) is None:
- raise Exception("Unreliable UUID/capture time detected (no primary band match)")
+ raise Exception(
+ "Unreliable UUID/capture time detected (no primary band match)"
+ )
s2p[p.filename] = unique_id_map[uuid]
- if band['name'] != band_name:
+ if band["name"] != band_name:
p2s.setdefault(unique_id_map[uuid].filename, []).append(p)
return s2p, p2s
@@ -260,38 +290,58 @@ def compute_band_maps(multi_camera, primary_band):
# Quick check
if filename_without_band == p.filename:
- raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly." % p.filename)
+ raise Exception(
+ "Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly."
+ % p.filename
+ )
filename_map[filename_without_band] = p
for band in multi_camera:
- photos = band['photos']
+ photos = band["photos"]
for p in photos:
filename_without_band = re.sub(file_regex, "\\1\\2", p.filename)
# Quick check
if filename_without_band == p.filename:
- raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly." % p.filename)
-
+ raise Exception(
+ "Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly."
+ % p.filename
+ )
+
if not filename_without_band in filename_map:
- raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly, check that your images have the appropriate CaptureUUID XMP tag and that no images are missing." % p.filename)
-
+ raise Exception(
+ "Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly, check that your images have the appropriate CaptureUUID XMP tag and that no images are missing."
+ % p.filename
+ )
+
s2p[p.filename] = filename_map[filename_without_band]
- if band['name'] != band_name:
- p2s.setdefault(filename_map[filename_without_band].filename, []).append(p)
+ if band["name"] != band_name:
+ p2s.setdefault(
+ filename_map[filename_without_band].filename, []
+ ).append(p)
return s2p, p2s
-def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p, p2s, max_concurrency=1, max_samples=30):
+
+def compute_alignment_matrices(
+ multi_camera,
+ primary_band_name,
+ images_path,
+ s2p,
+ p2s,
+ max_concurrency=1,
+ max_samples=30,
+):
log.ODM_INFO("Computing band alignment")
alignment_info = {}
# For each secondary band
for band in multi_camera:
- if band['name'] != primary_band_name:
+ if band["name"] != primary_band_name:
matrices = []
def parallel_compute_homography(p):
@@ -301,53 +351,80 @@ def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p
return
# Find good matrix candidates for alignment
-
- primary_band_photo = s2p.get(p['filename'])
+
+ primary_band_photo = s2p.get(p["filename"])
if primary_band_photo is None:
- log.ODM_WARNING("Cannot find primary band photo for %s" % p['filename'])
+ log.ODM_WARNING(
+ "Cannot find primary band photo for %s" % p["filename"]
+ )
return
- warp_matrix, dimension, algo = compute_homography(os.path.join(images_path, p['filename']),
- os.path.join(images_path, primary_band_photo.filename))
-
+ warp_matrix, dimension, algo = compute_homography(
+ os.path.join(images_path, p["filename"]),
+ os.path.join(images_path, primary_band_photo.filename),
+ )
+
if warp_matrix is not None:
- log.ODM_INFO("%s --> %s good match" % (p['filename'], primary_band_photo.filename))
+ log.ODM_INFO(
+ "%s --> %s good match"
+ % (p["filename"], primary_band_photo.filename)
+ )
- matrices.append({
- 'warp_matrix': warp_matrix,
- 'eigvals': np.linalg.eigvals(warp_matrix),
- 'dimension': dimension,
- 'algo': algo
- })
+ matrices.append(
+ {
+ "warp_matrix": warp_matrix,
+ "eigvals": np.linalg.eigvals(warp_matrix),
+ "dimension": dimension,
+ "algo": algo,
+ }
+ )
else:
- log.ODM_INFO("%s --> %s cannot be matched" % (p['filename'], primary_band_photo.filename))
+ log.ODM_INFO(
+ "%s --> %s cannot be matched"
+ % (p["filename"], primary_band_photo.filename)
+ )
except Exception as e:
- log.ODM_WARNING("Failed to compute homography for %s: %s" % (p['filename'], str(e)))
+ log.ODM_WARNING(
+ "Failed to compute homography for %s: %s"
+ % (p["filename"], str(e))
+ )
- parallel_map(parallel_compute_homography, [{'filename': p.filename} for p in band['photos']], max_concurrency, single_thread_fallback=False)
+ parallel_map(
+ parallel_compute_homography,
+ [{"filename": p.filename} for p in band["photos"]],
+ max_concurrency,
+ single_thread_fallback=False,
+ )
# Find the matrix that has the most common eigvals
# among all matrices. That should be the "best" alignment.
for m1 in matrices:
- acc = np.array([0.0,0.0,0.0])
- e = m1['eigvals']
+ acc = np.array([0.0, 0.0, 0.0])
+ e = m1["eigvals"]
for m2 in matrices:
-                    acc += abs(e - m2['eigvals'])
-                m1['score'] = acc.sum()
-
+                    acc += abs(e - m2["eigvals"])
+
+                m1["score"] = acc.sum()
# Sort
- matrices.sort(key=lambda x: x['score'], reverse=False)
-
+ matrices.sort(key=lambda x: x["score"], reverse=False)
+
if len(matrices) > 0:
- alignment_info[band['name']] = matrices[0]
- log.ODM_INFO("%s band will be aligned using warp matrix %s (score: %s)" % (band['name'], matrices[0]['warp_matrix'], matrices[0]['score']))
+ alignment_info[band["name"]] = matrices[0]
+ log.ODM_INFO(
+ "%s band will be aligned using warp matrix %s (score: %s)"
+ % (band["name"], matrices[0]["warp_matrix"], matrices[0]["score"])
+ )
else:
- log.ODM_WARNING("Cannot find alignment matrix for band %s, The band might end up misaligned!" % band['name'])
+ log.ODM_WARNING(
+ "Cannot find alignment matrix for band %s, The band might end up misaligned!"
+ % band["name"]
+ )
return alignment_info
+
def compute_homography(image_filename, align_image_filename):
try:
# Convert images to grayscale if needed
@@ -355,17 +432,20 @@ def compute_homography(image_filename, align_image_filename):
if image.shape[2] == 3:
image_gray = to_8bit(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
else:
- image_gray = to_8bit(image[:,:,0])
+ image_gray = to_8bit(image[:, :, 0])
max_dim = max(image_gray.shape)
if max_dim <= 320:
- log.ODM_WARNING("Small image for band alignment (%sx%s), this might be tough to compute." % (image_gray.shape[1], image_gray.shape[0]))
+ log.ODM_WARNING(
+ "Small image for band alignment (%sx%s), this might be tough to compute."
+ % (image_gray.shape[1], image_gray.shape[0])
+ )
align_image = imread(align_image_filename, unchanged=True, anydepth=True)
if align_image.shape[2] == 3:
align_image_gray = to_8bit(cv2.cvtColor(align_image, cv2.COLOR_BGR2GRAY))
else:
- align_image_gray = to_8bit(align_image[:,:,0])
+ align_image_gray = to_8bit(align_image[:, :, 0])
def compute_using(algorithm):
try:
@@ -378,7 +458,7 @@ def compute_homography(image_filename, align_image_filename):
return None, (None, None)
det = np.linalg.det(h)
-
+
# Check #1 homography's determinant will not be close to zero
if abs(det) < 0.25:
return None, (None, None)
@@ -387,35 +467,37 @@ def compute_homography(image_filename, align_image_filename):
svd = np.linalg.svd(h, compute_uv=False)
if svd[-1] == 0:
return None, (None, None)
-
+
ratio = svd[0] / svd[-1]
if ratio > 100000:
return None, (None, None)
return h, (align_image_gray.shape[1], align_image_gray.shape[0])
-
+
warp_matrix = None
dimension = None
algo = None
if max_dim > 320:
- algo = 'feat'
+ algo = "feat"
result = compute_using(find_features_homography)
-
+
if result[0] is None:
- algo = 'ecc'
- log.ODM_INFO("Can't use features matching, will use ECC (this might take a bit)")
+ algo = "ecc"
+ log.ODM_INFO(
+ "Can't use features matching, will use ECC (this might take a bit)"
+ )
result = compute_using(find_ecc_homography)
if result[0] is None:
algo = None
- else: # ECC only for low resolution images
- algo = 'ecc'
+ else: # ECC only for low resolution images
+ algo = "ecc"
log.ODM_INFO("Using ECC (this might take a bit)")
result = compute_using(find_ecc_homography)
if result[0] is None:
algo = None
-
+
warp_matrix, dimension = result
return warp_matrix, dimension, algo
@@ -423,9 +505,16 @@ def compute_homography(image_filename, align_image_filename):
log.ODM_WARNING("Compute homography: %s" % str(e))
return None, (None, None), None
-def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, termination_eps=1e-8, start_eps=1e-4):
+
+def find_ecc_homography(
+ image_gray,
+ align_image_gray,
+ number_of_iterations=1000,
+ termination_eps=1e-8,
+ start_eps=1e-4,
+):
pyramid_levels = 0
- h,w = image_gray.shape
+ h, w = image_gray.shape
max_dim = max(h, w)
downscale = 0
@@ -435,29 +524,36 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
if downscale > 0:
f = 1 / (2**downscale)
- image_gray = cv2.resize(image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA)
- h,w = image_gray.shape
+ image_gray = cv2.resize(
+ image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA
+ )
+ h, w = image_gray.shape
min_dim = min(h, w)
while min_dim > 300:
min_dim /= 2.0
pyramid_levels += 1
-
+
log.ODM_INFO("Pyramid levels: %s" % pyramid_levels)
-
+
# Quick check on size
if align_image_gray.shape[0] != image_gray.shape[0]:
align_image_gray = to_8bit(align_image_gray)
image_gray = to_8bit(image_gray)
- fx = image_gray.shape[1]/align_image_gray.shape[1]
- fy = image_gray.shape[0]/align_image_gray.shape[0]
+ fx = image_gray.shape[1] / align_image_gray.shape[1]
+ fy = image_gray.shape[0] / align_image_gray.shape[0]
- align_image_gray = cv2.resize(align_image_gray, None,
- fx=fx,
- fy=fy,
- interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4))
+ align_image_gray = cv2.resize(
+ align_image_gray,
+ None,
+ fx=fx,
+ fy=fy,
+ interpolation=(
+ cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4
+ ),
+ )
# Build pyramids
image_gray_pyr = [image_gray]
@@ -465,16 +561,32 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
for level in range(pyramid_levels):
image_gray_pyr[0] = to_8bit(image_gray_pyr[0], force_normalize=True)
- image_gray_pyr.insert(0, cv2.resize(image_gray_pyr[0], None, fx=1/2, fy=1/2,
- interpolation=cv2.INTER_AREA))
+ image_gray_pyr.insert(
+ 0,
+ cv2.resize(
+ image_gray_pyr[0],
+ None,
+ fx=1 / 2,
+ fy=1 / 2,
+ interpolation=cv2.INTER_AREA,
+ ),
+ )
align_image_pyr[0] = to_8bit(align_image_pyr[0], force_normalize=True)
- align_image_pyr.insert(0, cv2.resize(align_image_pyr[0], None, fx=1/2, fy=1/2,
- interpolation=cv2.INTER_AREA))
+ align_image_pyr.insert(
+ 0,
+ cv2.resize(
+ align_image_pyr[0],
+ None,
+ fx=1 / 2,
+ fy=1 / 2,
+ interpolation=cv2.INTER_AREA,
+ ),
+ )
# Define the motion model, scale the initial warp matrix to smallest level
warp_matrix = np.eye(3, 3, dtype=np.float32)
- for level in range(pyramid_levels+1):
+ for level in range(pyramid_levels + 1):
ig = gradient(gaussian(image_gray_pyr[level]))
aig = gradient(gaussian(align_image_pyr[level]))
@@ -482,56 +594,84 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
eps = termination_eps
else:
eps = start_eps - ((start_eps - termination_eps) / (pyramid_levels)) * level
-
+
# Define termination criteria
- criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
- number_of_iterations, eps)
+ criteria = (
+ cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
+ number_of_iterations,
+ eps,
+ )
try:
log.ODM_INFO("Computing ECC pyramid level %s" % level)
- _, warp_matrix = cv2.findTransformECC(ig, aig, warp_matrix, cv2.MOTION_HOMOGRAPHY, criteria, inputMask=None, gaussFiltSize=9)
+ _, warp_matrix = cv2.findTransformECC(
+ ig,
+ aig,
+ warp_matrix,
+ cv2.MOTION_HOMOGRAPHY,
+ criteria,
+ inputMask=None,
+ gaussFiltSize=9,
+ )
except Exception as e:
if level != pyramid_levels:
- log.ODM_INFO("Could not compute ECC warp_matrix at pyramid level %s, resetting matrix" % level)
+ log.ODM_INFO(
+ "Could not compute ECC warp_matrix at pyramid level %s, resetting matrix"
+ % level
+ )
warp_matrix = np.eye(3, 3, dtype=np.float32)
else:
raise e
- if level != pyramid_levels:
- warp_matrix = warp_matrix * np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32)
+ if level != pyramid_levels:
+ warp_matrix = warp_matrix * np.array(
+ [[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32
+ )
if downscale > 0:
- return warp_matrix * (np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32) ** downscale)
+ return warp_matrix * (
+ np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32)
+ ** downscale
+ )
else:
return warp_matrix
-def find_features_homography(image_gray, align_image_gray, feature_retention=0.7, min_match_count=10):
+def find_features_homography(
+ image_gray, align_image_gray, feature_retention=0.7, min_match_count=10
+):
# Detect SIFT features and compute descriptors.
detector = cv2.SIFT_create(edgeThreshold=10, contrastThreshold=0.1)
- h,w = image_gray.shape
+ h, w = image_gray.shape
max_dim = max(h, w)
downscale = 0
max_size = 4096
while max_dim / (2**downscale) > max_size:
downscale += 1
-
+
if downscale > 0:
f = 1 / (2**downscale)
- image_gray = cv2.resize(image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA)
- h,w = image_gray.shape
+ image_gray = cv2.resize(
+ image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA
+ )
+ h, w = image_gray.shape
if align_image_gray.shape[0] != image_gray.shape[0]:
- fx = image_gray.shape[1]/align_image_gray.shape[1]
- fy = image_gray.shape[0]/align_image_gray.shape[0]
+ fx = image_gray.shape[1] / align_image_gray.shape[1]
+ fy = image_gray.shape[0] / align_image_gray.shape[0]
- align_image_gray = cv2.resize(align_image_gray, None,
- fx=fx,
- fy=fy,
- interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4))
+ align_image_gray = cv2.resize(
+ align_image_gray,
+ None,
+ fx=fx,
+ fy=fy,
+ interpolation=(
+ cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4
+ ),
+ )
kp_image, desc_image = detector.detectAndCompute(image_gray, None)
kp_align_image, desc_align_image = detector.detectAndCompute(align_image_gray, None)
@@ -574,22 +714,27 @@ def find_features_homography(image_gray, align_image_gray, feature_retention=0.7
h, _ = cv2.findHomography(points_image, points_align_image, cv2.RANSAC)
if h is None:
return None
-
+
if downscale > 0:
- return h * (np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32) ** downscale)
+ return h * (
+ np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32)
+ ** downscale
+ )
else:
return h
+
def gradient(im, ksize=5):
im = local_normalize(im)
- grad_x = cv2.Sobel(im,cv2.CV_32F,1,0,ksize=ksize)
- grad_y = cv2.Sobel(im,cv2.CV_32F,0,1,ksize=ksize)
+ grad_x = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=ksize)
+ grad_y = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=ksize)
grad = cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)
return grad
+
def local_normalize(im):
width, _ = im.shape
- disksize = int(width/5)
+ disksize = int(width / 5)
if disksize % 2 == 0:
disksize = disksize + 1
selem = disk(disksize)
@@ -636,11 +781,16 @@ def resize_match(image, dimension):
mw, mh = dimension
if w != mw or h != mh:
- fx = mw/w
- fy = mh/h
- image = cv2.resize(image, None,
- fx=fx,
- fy=fx,
- interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4))
+ fx = mw / w
+ fy = mh / h
+ image = cv2.resize(
+ image,
+ None,
+ fx=fx,
+        fy=fy,
+ interpolation=(
+ cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4
+ ),
+ )
return image
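
Both find_ecc_homography and find_features_homography estimate the transform on images downscaled by powers of two, then scale it back by multiplying element-wise with [[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]] raised to the downscale count. A minimal sketch of that rescaling step in isolation:

    import numpy as np

    def upscale_homography(h, downscale):
        # Element-wise rescaling as used above: doubles the translation terms
        # and halves the perspective terms once per downscale level.
        if downscale <= 0:
            return h
        scale = np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32)
        return h * (scale**downscale)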
diff --git a/opendm/net.py b/opendm/net.py
index f2bb5b3e..4594f48e 100644
--- a/opendm/net.py
+++ b/opendm/net.py
@@ -2,6 +2,7 @@ import requests
import math
import os
import time
+
try:
import queue
except ImportError:
@@ -11,7 +12,15 @@ from pyodm.utils import AtomicCounter
from pyodm.exceptions import RangeNotAvailableError, OdmError
from urllib3.exceptions import ReadTimeoutError
-def download(url, destination, progress_callback=None, parallel_downloads=16, parallel_chunks_size=10, timeout=30):
+
+def download(
+ url,
+ destination,
+ progress_callback=None,
+ parallel_downloads=16,
+ parallel_chunks_size=10,
+ timeout=30,
+):
"""Download files in parallel (download accelerator)
Args:
@@ -31,19 +40,25 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
download_stream = requests.get(url, timeout=timeout, stream=True)
headers = download_stream.headers
-
+
output_path = os.path.join(destination, os.path.basename(url))
# Keep track of download progress (if possible)
- content_length = download_stream.headers.get('content-length')
+ content_length = download_stream.headers.get("content-length")
total_length = int(content_length) if content_length is not None else None
downloaded = 0
chunk_size = int(parallel_chunks_size * 1024 * 1024)
use_fallback = False
- accept_ranges = headers.get('accept-ranges')
+ accept_ranges = headers.get("accept-ranges")
# Can we do parallel downloads?
- if accept_ranges is not None and accept_ranges.lower() == 'bytes' and total_length is not None and total_length > chunk_size and parallel_downloads > 1:
+ if (
+ accept_ranges is not None
+ and accept_ranges.lower() == "bytes"
+ and total_length is not None
+ and total_length > chunk_size
+ and parallel_downloads > 1
+ ):
num_chunks = int(math.ceil(total_length / float(chunk_size)))
num_workers = parallel_downloads
@@ -63,7 +78,7 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
out_file.write(fd.read())
os.unlink(chunk_file)
-
+
current_chunk += 1
else:
time.sleep(0.1)
@@ -78,17 +93,29 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
try:
# Download chunk
- res = requests.get(url, stream=True, timeout=timeout, headers={'Range': 'bytes=%s-%s' % bytes_range})
+ res = requests.get(
+ url,
+ stream=True,
+ timeout=timeout,
+ headers={"Range": "bytes=%s-%s" % bytes_range},
+ )
if res.status_code == 206:
- with open("%s.part%s" % (output_path, part_num), 'wb') as fd:
+ with open(
+ "%s.part%s" % (output_path, part_num), "wb"
+ ) as fd:
bytes_written = 0
try:
for chunk in res.iter_content(4096):
bytes_written += fd.write(chunk)
- except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
+ except (
+ requests.exceptions.Timeout,
+ requests.exceptions.ConnectionError,
+ ) as e:
raise OdmError(str(e))
-
- if bytes_written != (bytes_range[1] - bytes_range[0] + 1):
+
+ if bytes_written != (
+ bytes_range[1] - bytes_range[0] + 1
+ ):
# Process again
q.put((part_num, bytes_range))
return
@@ -97,8 +124,12 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
nonloc.completed_chunks.value += 1
if progress_callback is not None:
- progress_callback(100.0 * nonloc.completed_chunks.value / num_chunks)
-
+ progress_callback(
+ 100.0
+ * nonloc.completed_chunks.value
+ / num_chunks
+ )
+
nonloc.merge_chunks[part_num] = True
else:
nonloc.error = RangeNotAvailableError()
@@ -136,7 +167,7 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
q.put((-1, None))
for t in threads:
t.join()
-
+
merge_thread.join()
if nonloc.error is not None:
@@ -149,7 +180,7 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
if use_fallback:
# Single connection, boring download
- with open(output_path, 'wb') as fd:
+ with open(output_path, "wb") as fd:
for chunk in download_stream.iter_content(4096):
downloaded += len(chunk)
@@ -157,8 +188,12 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
progress_callback((100.0 * float(downloaded) / total_length))
fd.write(chunk)
-
- except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, ReadTimeoutError) as e:
+
+ except (
+ requests.exceptions.Timeout,
+ requests.exceptions.ConnectionError,
+ ReadTimeoutError,
+ ) as e:
raise OdmError(e)
- return output_path
\ No newline at end of file
+ return output_path
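
download() above splits the file into fixed-size chunks and fetches them with HTTP Range requests; the exact range computation is not shown in this hunk, so the sketch below is only one plausible way to derive inclusive byte ranges from the content length, consistent with the bytes_range[1] - bytes_range[0] + 1 size check:

    import math

    def chunk_ranges(total_length, chunk_size):
        # Yield (part_num, (start, end)) pairs with inclusive byte offsets.
        num_chunks = int(math.ceil(total_length / float(chunk_size)))
        for part_num in range(num_chunks):
            start = part_num * chunk_size
            end = min(start + chunk_size - 1, total_length - 1)
            yield part_num, (start, end)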
diff --git a/opendm/nvm.py b/opendm/nvm.py
index d3c07d99..275fd6cf 100644
--- a/opendm/nvm.py
+++ b/opendm/nvm.py
@@ -1,6 +1,7 @@
import os
from opendm import log
+
def replace_nvm_images(src_nvm_file, img_map, dst_nvm_file):
"""
Create a new NVM file from an existing NVM file
@@ -11,15 +12,15 @@ def replace_nvm_images(src_nvm_file, img_map, dst_nvm_file):
with open(src_nvm_file) as f:
lines = list(map(str.strip, f.read().split("\n")))
-
+
# Quick check
if len(lines) < 3 or lines[0] != "NVM_V3" or lines[1].strip() != "":
raise Exception("%s does not seem to be a valid NVM file" % src_nvm_file)
-
+
num_images = int(lines[2])
entries = []
- for l in lines[3:3+num_images]:
+ for l in lines[3 : 3 + num_images]:
image_path, *p = l.split(" ")
dir_name = os.path.dirname(image_path)
@@ -27,15 +28,20 @@ def replace_nvm_images(src_nvm_file, img_map, dst_nvm_file):
new_filename = img_map.get(file_name)
if new_filename is not None:
- entries.append("%s %s" % (os.path.join(dir_name, new_filename), " ".join(p)))
+ entries.append(
+ "%s %s" % (os.path.join(dir_name, new_filename), " ".join(p))
+ )
else:
- log.ODM_WARNING("Cannot find %s in image map for %s" % (file_name, dst_nvm_file))
-
+ log.ODM_WARNING(
+ "Cannot find %s in image map for %s" % (file_name, dst_nvm_file)
+ )
+
if num_images != len(entries):
- raise Exception("Cannot write %s, not all band images have been matched" % dst_nvm_file)
+ raise Exception(
+ "Cannot write %s, not all band images have been matched" % dst_nvm_file
+ )
with open(dst_nvm_file, "w") as f:
f.write("NVM_V3\n\n%s\n" % len(entries))
f.write("\n".join(entries))
f.write("\n\n0\n0\n\n0")
-
\ No newline at end of file
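
A hypothetical call to replace_nvm_images; the filenames below are placeholders, and img_map simply maps each image referenced in the source NVM to the filename that should replace it:

    from opendm.nvm import replace_nvm_images

    img_map = {"DJI_0001.JPG": "DJI_0001_NIR.TIF"}
    replace_nvm_images(
        "opensfm/reconstruction.nvm", img_map, "opensfm/reconstruction_nir.nvm"
    )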
diff --git a/opendm/objpacker/__init__.py b/opendm/objpacker/__init__.py
index f3bbb024..02d72f62 100644
--- a/opendm/objpacker/__init__.py
+++ b/opendm/objpacker/__init__.py
@@ -1 +1 @@
-from .objpacker import obj_pack
\ No newline at end of file
+from .objpacker import obj_pack
diff --git a/opendm/objpacker/imagepacker/imagepacker.py b/opendm/objpacker/imagepacker/imagepacker.py
index 2090d73f..f974611f 100644
--- a/opendm/objpacker/imagepacker/imagepacker.py
+++ b/opendm/objpacker/imagepacker/imagepacker.py
@@ -29,8 +29,10 @@ import math
# Based off of the great writeup, demo and code at:
# http://codeincomplete.com/posts/2011/5/7/bin_packing/
-class Block():
+
+class Block:
"""A rectangular block, to be packed"""
+
def __init__(self, w, h, data=None, padding=0):
self.w = w
self.h = h
@@ -38,15 +40,17 @@ class Block():
self.y = None
self.fit = None
self.data = data
- self.padding = padding # not implemented yet
+ self.padding = padding # not implemented yet
def __str__(self):
return "({x},{y}) ({w}x{h}): {data}".format(
- x=self.x,y=self.y, w=self.w,h=self.h, data=self.data)
+ x=self.x, y=self.y, w=self.w, h=self.h, data=self.data
+ )
-class _BlockNode():
+class _BlockNode:
"""A BlockPacker node"""
+
def __init__(self, x, y, w, h, used=False, right=None, down=None):
self.x = x
self.y = y
@@ -57,20 +61,21 @@ class _BlockNode():
self.down = down
def __repr__(self):
- return "({x},{y}) ({w}x{h})".format(x=self.x,y=self.y,w=self.w,h=self.h)
+ return "({x},{y}) ({w}x{h})".format(x=self.x, y=self.y, w=self.w, h=self.h)
-class BlockPacker():
+class BlockPacker:
"""Packs blocks of varying sizes into a single, larger block"""
+
def __init__(self):
self.root = None
def fit(self, blocks):
nblocks = len(blocks)
- w = blocks[0].w# if nblocks > 0 else 0
- h = blocks[0].h# if nblocks > 0 else 0
+ w = blocks[0].w # if nblocks > 0 else 0
+ h = blocks[0].h # if nblocks > 0 else 0
- self.root = _BlockNode(0,0, w,h)
+ self.root = _BlockNode(0, 0, w, h)
for block in blocks:
node = self.find_node(self.root, block.w, block.h)
@@ -99,14 +104,8 @@ class BlockPacker():
def split_node(self, node, w, h):
node.used = True
- node.down = _BlockNode(
- node.x, node.y + h,
- node.w, node.h - h
- )
- node.right = _BlockNode(
- node.x + w, node.y,
- node.w - w, h
- )
+ node.down = _BlockNode(node.x, node.y + h, node.w, node.h - h)
+ node.right = _BlockNode(node.x + w, node.y, node.w - w, h)
return node
def grow_node(self, w, h):
@@ -131,11 +130,13 @@ class BlockPacker():
def grow_right(self, w, h):
old_root = self.root
self.root = _BlockNode(
- 0, 0,
- old_root.w + w, old_root.h,
+ 0,
+ 0,
+ old_root.w + w,
+ old_root.h,
down=old_root,
right=_BlockNode(self.root.w, 0, w, self.root.h),
- used=True
+ used=True,
)
node = self.find_node(self.root, w, h)
@@ -147,11 +148,13 @@ class BlockPacker():
def grow_down(self, w, h):
old_root = self.root
self.root = _BlockNode(
- 0, 0,
- old_root.w, old_root.h + h,
+ 0,
+ 0,
+ old_root.w,
+ old_root.h + h,
down=_BlockNode(0, self.root.h, self.root.w, h),
right=old_root,
- used=True
+ used=True,
)
node = self.find_node(self.root, w, h)
@@ -162,14 +165,14 @@ class BlockPacker():
def crop_by_extents(image, extent):
- if min(extent.min_x,extent.min_y) < 0 or max(extent.max_x,extent.max_y) > 1:
+ if min(extent.min_x, extent.min_y) < 0 or max(extent.max_x, extent.max_y) > 1:
print("\tWARNING! UV Coordinates lying outside of [0:1] space!")
-
+
_, h, w = image.shape
- minx = max(math.floor(extent.min_x*w), 0)
- miny = max(math.floor(extent.min_y*h), 0)
- maxx = min(math.ceil(extent.max_x*w), w)
- maxy = min(math.ceil(extent.max_y*h), h)
+ minx = max(math.floor(extent.min_x * w), 0)
+ miny = max(math.floor(extent.min_y * h), 0)
+ maxx = min(math.ceil(extent.max_x * w), w)
+ maxy = min(math.ceil(extent.max_y * h), h)
image = image[:, miny:maxy, minx:maxx]
delta_w = maxx - minx
@@ -180,15 +183,16 @@ def crop_by_extents(image, extent):
return (image, changes)
-def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
+
+def pack(obj, background=(0, 0, 0, 0), format="PNG", extents=None):
blocks = []
image_name_map = {}
profile = None
- for mat in obj['materials']:
- filename = obj['materials'][mat]
+ for mat in obj["materials"]:
+ filename = obj["materials"][mat]
- with rasterio.open(filename, 'r') as f:
+ with rasterio.open(filename, "r") as f:
profile = f.profile
image = f.read()
@@ -197,7 +201,7 @@ def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
changes = None
if extents and extents[mat]:
image, changes = crop_by_extents(image, extents[mat])
-
+
image_name_map[filename] = image
_, h, w = image.shape
@@ -211,7 +215,9 @@ def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
packer.fit(blocks)
# output_image = Image.new("RGBA", (packer.root.w, packer.root.h))
- output_image = np.zeros((profile['count'], packer.root.h, packer.root.w), dtype=profile['dtype'])
+ output_image = np.zeros(
+ (profile["count"], packer.root.h, packer.root.w), dtype=profile["dtype"]
+ )
uv_changes = {}
for block in blocks:
@@ -222,18 +228,17 @@ def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
uv_changes[mat] = {
"offset": (
# should be in [0, 1] range
- (block.x - (changes[0] if changes else 0))/output_image.shape[2],
+ (block.x - (changes[0] if changes else 0)) / output_image.shape[2],
# UV origin is bottom left, PIL assumes top left!
- (block.y - (changes[1] if changes else 0))/output_image.shape[1]
+ (block.y - (changes[1] if changes else 0)) / output_image.shape[1],
),
-
"aspect": (
- ((1/changes[2]) if changes else 1) * (im_w/output_image.shape[2]),
- ((1/changes[3]) if changes else 1) * (im_h/output_image.shape[1])
+ ((1 / changes[2]) if changes else 1) * (im_w / output_image.shape[2]),
+ ((1 / changes[3]) if changes else 1) * (im_h / output_image.shape[1]),
),
}
- output_image[:, block.y:block.y + im_h, block.x:block.x + im_w] = image
+ output_image[:, block.y : block.y + im_h, block.x : block.x + im_w] = image
output_image = np.flip(output_image, axis=1)
return output_image, uv_changes, profile
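
A minimal sketch of driving BlockPacker directly, assuming the package is importable as opendm.objpacker; sorting largest-first is an assumption that generally improves packing, not a requirement of the API:

    from opendm.objpacker.imagepacker.imagepacker import Block, BlockPacker

    blocks = [Block(256, 256, data="a"), Block(128, 64, data="b"), Block(64, 64, data="c")]
    blocks.sort(key=lambda b: max(b.w, b.h), reverse=True)
    packer = BlockPacker()
    packer.fit(blocks)
    print(packer.root.w, packer.root.h)  # size of the grown output canvas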
diff --git a/opendm/objpacker/imagepacker/utils.py b/opendm/objpacker/imagepacker/utils.py
index 8124648c..93442d20 100644
--- a/opendm/objpacker/imagepacker/utils.py
+++ b/opendm/objpacker/imagepacker/utils.py
@@ -22,14 +22,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-class AABB():
+
+class AABB:
def __init__(self, min_x=None, min_y=None, max_x=None, max_y=None):
self.min_x = min_x
self.min_y = min_y
self.max_x = max_x
self.max_y = max_y
- def add(self, x,y):
+ def add(self, x, y):
self.min_x = min(self.min_x, x) if self.min_x is not None else x
self.min_y = min(self.min_y, y) if self.min_y is not None else y
self.max_x = max(self.max_x, x) if self.max_x is not None else x
@@ -45,9 +46,4 @@ class AABB():
return None
def __repr__(self):
- return "({},{}) ({},{})".format(
- self.min_x,
- self.min_y,
- self.max_x,
- self.max_y
- )
\ No newline at end of file
+ return "({},{}) ({},{})".format(self.min_x, self.min_y, self.max_x, self.max_y)
diff --git a/opendm/objpacker/objpacker.py b/opendm/objpacker/objpacker.py
index 75ddf863..8e408d8b 100644
--- a/opendm/objpacker/objpacker.py
+++ b/opendm/objpacker/objpacker.py
@@ -2,6 +2,7 @@ import os
import rasterio
import warnings
import numpy as np
+
try:
from .imagepacker.utils import AABB
from .imagepacker import pack
@@ -11,16 +12,17 @@ except ImportError:
warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)
+
def load_obj(obj_path, _info=print):
if not os.path.isfile(obj_path):
raise IOError("Cannot open %s" % obj_path)
obj_base_path = os.path.dirname(os.path.abspath(obj_path))
obj = {
- 'filename': os.path.basename(obj_path),
- 'root_dir': os.path.dirname(os.path.abspath(obj_path)),
- 'mtl_filenames': [],
- 'materials': {},
+ "filename": os.path.basename(obj_path),
+ "root_dir": os.path.dirname(os.path.abspath(obj_path)),
+ "mtl_filenames": [],
+ "materials": {},
}
uvs = []
@@ -34,8 +36,8 @@ def load_obj(obj_path, _info=print):
if line.startswith("mtllib "):
# Materials
mtl_file = "".join(line.split()[1:]).strip()
- obj['materials'].update(load_mtl(mtl_file, obj_base_path, _info=_info))
- obj['mtl_filenames'].append(mtl_file)
+ obj["materials"].update(load_mtl(mtl_file, obj_base_path, _info=_info))
+ obj["mtl_filenames"].append(mtl_file)
# elif line.startswith("v "):
# # Vertices
# vertices.append(list(map(float, line.split()[1:4])))
@@ -46,7 +48,7 @@ def load_obj(obj_path, _info=print):
# normals.append(list(map(float, line.split()[1:4])))
elif line.startswith("usemtl "):
mtl_name = "".join(line.split()[1:]).strip()
- if not mtl_name in obj['materials']:
+ if not mtl_name in obj["materials"]:
raise Exception("%s material is missing" % mtl_name)
current_material = mtl_name
@@ -54,17 +56,18 @@ def load_obj(obj_path, _info=print):
if current_material not in faces:
faces[current_material] = []
- a,b,c = line.split()[1:]
+ a, b, c = line.split()[1:]
at = int(a.split("/")[1])
bt = int(b.split("/")[1])
ct = int(c.split("/")[1])
- faces[current_material].append((at - 1, bt - 1, ct - 1))
+ faces[current_material].append((at - 1, bt - 1, ct - 1))
- obj['uvs'] = np.array(uvs, dtype=np.float32)
- obj['faces'] = faces
+ obj["uvs"] = np.array(uvs, dtype=np.float32)
+ obj["faces"] = faces
return obj
+
def load_mtl(mtl_file, obj_base_path, _info=print):
mtl_file = os.path.join(obj_base_path, mtl_file)
@@ -88,10 +91,12 @@ def load_mtl(mtl_file, obj_base_path, _info=print):
return mats
-def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=print):
+def write_obj_changes(
+ obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=print
+):
with open(obj_file) as f:
obj_lines = f.readlines()
-
+
out_lines = []
uv_lines = []
current_material = None
@@ -122,7 +127,7 @@ def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _i
for v in line[2:].split():
parts = v.split("/")
if len(parts) >= 2 and parts[1]:
- uv_idx = int(parts[1]) - 1 # uv indexes start from 1
+ uv_idx = int(parts[1]) - 1 # uv indexes start from 1
uv_line_idx = uv_lines[uv_idx]
uv_line = obj_lines[uv_line_idx][3:]
uv = [float(uv.strip()) for uv in uv_line.split()]
@@ -139,28 +144,30 @@ def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _i
out_file = os.path.join(output_dir, os.path.basename(obj_file))
_info("Writing %s" % out_file)
- with open(out_file, 'w') as f:
+ with open(out_file, "w") as f:
f.writelines(out_lines)
+
def write_output_tex(img, profile, path, _info=print):
_, w, h = img.shape
- profile['width'] = w
- profile['height'] = h
+ profile["width"] = w
+ profile["height"] = h
- if 'tiled' in profile:
- profile['tiled'] = False
+ if "tiled" in profile:
+ profile["tiled"] = False
_info("Writing %s (%sx%s pixels)" % (path, w, h))
- with rasterio.open(path, 'w', **profile) as dst:
+ with rasterio.open(path, "w", **profile) as dst:
for b in range(1, img.shape[0] + 1):
dst.write(img[b - 1], b)
- sidecar = path + '.aux.xml'
+ sidecar = path + ".aux.xml"
if os.path.isfile(sidecar):
os.unlink(sidecar)
+
def write_output_mtl(src_mtl, mat_file, dst_mtl):
- with open(src_mtl, 'r') as src:
+ with open(src_mtl, "r") as src:
lines = src.readlines()
out = []
@@ -176,8 +183,8 @@ def write_output_mtl(src_mtl, mat_file, dst_mtl):
break
else:
out.append(l)
-
- with open(dst_mtl, 'w') as dst:
+
+ with open(dst_mtl, "w") as dst:
dst.write("".join(out))
if single_mat is None:
@@ -185,51 +192,68 @@ def write_output_mtl(src_mtl, mat_file, dst_mtl):
return single_mat
+
def obj_pack(obj_file, output_dir=None, _info=print):
if not output_dir:
output_dir = os.path.join(os.path.dirname(os.path.abspath(obj_file)), "packed")
-
+
obj = load_obj(obj_file, _info=_info)
- if not obj['mtl_filenames']:
+ if not obj["mtl_filenames"]:
raise Exception("No MTL files found, nothing to do")
- if os.path.abspath(obj_file) == os.path.abspath(os.path.join(output_dir, os.path.basename(obj_file))):
- raise Exception("This will overwrite %s. Choose a different output directory" % obj_file)
-
- if len(obj['mtl_filenames']) <= 1 and len(obj['materials']) <= 1:
+ if os.path.abspath(obj_file) == os.path.abspath(
+ os.path.join(output_dir, os.path.basename(obj_file))
+ ):
+ raise Exception(
+ "This will overwrite %s. Choose a different output directory" % obj_file
+ )
+
+ if len(obj["mtl_filenames"]) <= 1 and len(obj["materials"]) <= 1:
raise Exception("File already has a single material, nothing to do")
-
+
# Compute AABB for UVs
_info("Computing texture bounds")
extents = {}
- for material in obj['materials']:
+ for material in obj["materials"]:
bounds = AABB()
- faces = obj['faces'][material]
+ faces = obj["faces"][material]
for f in faces:
for uv_idx in f:
- uv = obj['uvs'][uv_idx]
+ uv = obj["uvs"][uv_idx]
bounds.add(uv[0], uv[1])
extents[material] = bounds
-
+
_info("Binary packing...")
output_image, uv_changes, profile = pack(obj, extents=extents)
- mtl_file = obj['mtl_filenames'][0]
- mat_file = os.path.basename(obj['materials'][next(iter(obj['materials']))])
-
+ mtl_file = obj["mtl_filenames"][0]
+ mat_file = os.path.basename(obj["materials"][next(iter(obj["materials"]))])
+
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
-
- write_output_tex(output_image, profile, os.path.join(output_dir, mat_file), _info=_info)
- single_mat = write_output_mtl(os.path.join(obj['root_dir'], mtl_file), mat_file, os.path.join(output_dir, mtl_file))
- write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=_info)
-if __name__ == '__main__':
+ write_output_tex(
+ output_image, profile, os.path.join(output_dir, mat_file), _info=_info
+ )
+ single_mat = write_output_mtl(
+ os.path.join(obj["root_dir"], mtl_file),
+ mat_file,
+ os.path.join(output_dir, mtl_file),
+ )
+ write_obj_changes(
+ obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=_info
+ )
+
+
+if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description="Packs textured .OBJ Wavefront files into a single materials")
+
+ parser = argparse.ArgumentParser(
+ description="Packs textured .OBJ Wavefront files into a single materials"
+ )
parser.add_argument("obj", help="Path to the .OBJ file")
- parser.add_argument("-o","--output-dir", help="Output directory")
+ parser.add_argument("-o", "--output-dir", help="Output directory")
args = parser.parse_args()
- obj_pack(args.obj, args.output_dir)
\ No newline at end of file
+ obj_pack(args.obj, args.output_dir)
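
A hypothetical call to the packer's public entry point as exported by the package __init__ above (paths are placeholders):

    from opendm.objpacker import obj_pack

    obj_pack("odm_texturing/odm_textured_model_geo.obj", output_dir="odm_texturing/packed")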
diff --git a/opendm/ogctiles.py b/opendm/ogctiles.py
index 1f4b690e..81a54c2f 100644
--- a/opendm/ogctiles.py
+++ b/opendm/ogctiles.py
@@ -11,7 +11,10 @@ from opendm.entwine import build_entwine
import fiona
from shapely.geometry import shape
-def build_textured_model(input_obj, output_path, reference_lla = None, model_bounds_file=None, rerun=False):
+
+def build_textured_model(
+ input_obj, output_path, reference_lla=None, model_bounds_file=None, rerun=False
+):
if not os.path.isfile(input_obj):
log.ODM_WARNING("No input OBJ file to process")
return
@@ -22,27 +25,27 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bou
log.ODM_INFO("Generating OGC 3D Tiles textured model")
lat = lon = alt = 0
-
+
# Read reference_lla.json (if provided)
if reference_lla is not None and os.path.isfile(reference_lla):
try:
with open(reference_lla) as f:
reference_lla = json.loads(f.read())
- lat = reference_lla['latitude']
- lon = reference_lla['longitude']
- alt = reference_lla['altitude']
+ lat = reference_lla["latitude"]
+ lon = reference_lla["longitude"]
+ alt = reference_lla["altitude"]
except Exception as e:
log.ODM_WARNING("Cannot read %s: %s" % (reference_lla, str(e)))
# Read model bounds (if provided)
- divisions = 1 # default
- DIV_THRESHOLD = 10000 # m^2 (this is somewhat arbitrary)
+ divisions = 1 # default
+ DIV_THRESHOLD = 10000 # m^2 (this is somewhat arbitrary)
if model_bounds_file is not None and os.path.isfile(model_bounds_file):
try:
- with fiona.open(model_bounds_file, 'r') as f:
+ with fiona.open(model_bounds_file, "r") as f:
if len(f) == 1:
- poly = shape(f[1]['geometry'])
+ poly = shape(f[1]["geometry"])
area = poly.area
log.ODM_INFO("Approximate area: %s m^2" % round(area, 2))
@@ -57,18 +60,23 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bou
try:
kwargs = {
- 'input': input_obj,
- 'output': output_path,
- 'divisions': divisions,
- 'lat': lat,
- 'lon': lon,
- 'alt': alt,
+ "input": input_obj,
+ "output": output_path,
+ "divisions": divisions,
+ "lat": lat,
+ "lon": lon,
+ "alt": alt,
}
- system.run('Obj2Tiles "{input}" "{output}" --divisions {divisions} --lat {lat} --lon {lon} --alt {alt} '.format(**kwargs))
+ system.run(
+ 'Obj2Tiles "{input}" "{output}" --divisions {divisions} --lat {lat} --lon {lon} --alt {alt} '.format(
+ **kwargs
+ )
+ )
except Exception as e:
log.ODM_WARNING("Cannot build 3D tiles textured model: %s" % str(e))
+
def build_pointcloud(input_pointcloud, output_path, max_concurrency, rerun=False):
if not os.path.isfile(input_pointcloud):
log.ODM_WARNING("No input point cloud file to process")
@@ -79,19 +87,21 @@ def build_pointcloud(input_pointcloud, output_path, max_concurrency, rerun=False
shutil.rmtree(output_path)
log.ODM_INFO("Generating OGC 3D Tiles point cloud")
-
+
try:
if not os.path.isdir(output_path):
os.mkdir(output_path)
tmpdir = os.path.join(output_path, "tmp")
entwine_output = os.path.join(output_path, "entwine")
-
- build_entwine([input_pointcloud], tmpdir, entwine_output, max_concurrency, "EPSG:4978")
-
+
+ build_entwine(
+ [input_pointcloud], tmpdir, entwine_output, max_concurrency, "EPSG:4978"
+ )
+
kwargs = {
- 'input': entwine_output,
- 'output': output_path,
+ "input": entwine_output,
+ "output": output_path,
}
system.run('entwine convert -i "{input}" -o "{output}"'.format(**kwargs))
@@ -109,27 +119,36 @@ def build_3dtiles(args, tree, reconstruction, rerun=False):
if rerun and os.path.exists(tiles_output_path):
shutil.rmtree(tiles_output_path)
-
+
if not os.path.isdir(tiles_output_path):
os.mkdir(tiles_output_path)
- # Model
+ # Model
if not os.path.isdir(model_output_path) or rerun:
reference_lla = os.path.join(tree.opensfm, "reference_lla.json")
- model_bounds_file = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
+ model_bounds_file = os.path.join(
+ tree.odm_georeferencing, "odm_georeferenced_model.bounds.gpkg"
+ )
input_obj = os.path.join(tree.odm_texturing, tree.odm_textured_model_obj)
if not os.path.isfile(input_obj):
input_obj = os.path.join(tree.odm_25dtexturing, tree.odm_textured_model_obj)
- build_textured_model(input_obj, model_output_path, reference_lla, model_bounds_file, rerun)
+ build_textured_model(
+ input_obj, model_output_path, reference_lla, model_bounds_file, rerun
+ )
else:
log.ODM_WARNING("OGC 3D Tiles model %s already generated" % model_output_path)
# Point cloud
-
+
if not os.path.isdir(pointcloud_output_path) or rerun:
- build_pointcloud(tree.odm_georeferencing_model_laz, pointcloud_output_path, args.max_concurrency, rerun)
+ build_pointcloud(
+ tree.odm_georeferencing_model_laz,
+ pointcloud_output_path,
+ args.max_concurrency,
+ rerun,
+ )
else:
- log.ODM_WARNING("OGC 3D Tiles model %s already generated" % model_output_path)
\ No newline at end of file
+ log.ODM_WARNING("OGC 3D Tiles model %s already generated" % model_output_path)
diff --git a/opendm/opc.py b/opendm/opc.py
index b5fdd19e..e612a524 100644
--- a/opendm/opc.py
+++ b/opendm/opc.py
@@ -4,28 +4,36 @@ from opendm import log
from opendm.system import run
from opendm import io
+
def classify(point_cloud, max_threads=8):
tmp_output = io.related_file_path(point_cloud, postfix=".classified")
if os.path.isfile(tmp_output):
os.remove(tmp_output)
try:
- model = get_model("openpointclass",
- "https://github.com/uav4geo/OpenPointClass/releases/download/v1.1.3/vehicles-vegetation-buildings.zip",
+ model = get_model(
+ "openpointclass",
+ "https://github.com/uav4geo/OpenPointClass/releases/download/v1.1.3/vehicles-vegetation-buildings.zip",
"v1.0.0",
- name="model.bin")
+ name="model.bin",
+ )
if model is not None:
- run('pcclassify "%s" "%s" "%s" -u -s 2,64' % (point_cloud, tmp_output, model), env_vars={'OMP_NUM_THREADS': max_threads})
-
+ run(
+ 'pcclassify "%s" "%s" "%s" -u -s 2,64'
+ % (point_cloud, tmp_output, model),
+ env_vars={"OMP_NUM_THREADS": max_threads},
+ )
+
if os.path.isfile(tmp_output):
os.remove(point_cloud)
os.rename(tmp_output, point_cloud)
else:
- log.ODM_WARNING("Cannot classify using OpenPointClass (no output generated)")
+ log.ODM_WARNING(
+ "Cannot classify using OpenPointClass (no output generated)"
+ )
else:
log.ODM_WARNING("Cannot download/access model from %s" % (model_url))
except Exception as e:
log.ODM_WARNING("Cannot classify using OpenPointClass: %s" % str(e))
-
diff --git a/opendm/orthophoto.py b/opendm/orthophoto.py
index 0db4729c..d67c7ce4 100644
--- a/opendm/orthophoto.py
+++ b/opendm/orthophoto.py
@@ -19,39 +19,43 @@ from osgeo import ogr
def get_orthophoto_vars(args):
return {
- 'TILED': 'NO' if args.orthophoto_no_tiled else 'YES',
- 'COMPRESS': args.orthophoto_compression,
- 'PREDICTOR': '2' if args.orthophoto_compression in ['LZW', 'DEFLATE'] else '1',
- 'BIGTIFF': 'IF_SAFER',
- 'BLOCKXSIZE': 512,
- 'BLOCKYSIZE': 512,
- 'NUM_THREADS': args.max_concurrency
+ "TILED": "NO" if args.orthophoto_no_tiled else "YES",
+ "COMPRESS": args.orthophoto_compression,
+ "PREDICTOR": "2" if args.orthophoto_compression in ["LZW", "DEFLATE"] else "1",
+ "BIGTIFF": "IF_SAFER",
+ "BLOCKXSIZE": 512,
+ "BLOCKYSIZE": 512,
+ "NUM_THREADS": args.max_concurrency,
}
+
def build_overviews(orthophoto_file):
log.ODM_INFO("Building Overviews")
- kwargs = {'orthophoto': orthophoto_file}
-
+ kwargs = {"orthophoto": orthophoto_file}
+
# Run gdaladdo
- system.run('gdaladdo -r average '
- '--config BIGTIFF_OVERVIEW IF_SAFER '
- '--config COMPRESS_OVERVIEW JPEG '
- '{orthophoto} 2 4 8 16'.format(**kwargs))
+ system.run(
+ "gdaladdo -r average "
+ "--config BIGTIFF_OVERVIEW IF_SAFER "
+ "--config COMPRESS_OVERVIEW JPEG "
+ "{orthophoto} 2 4 8 16".format(**kwargs)
+ )
+
def generate_png(orthophoto_file, output_file=None, outsize=None):
if output_file is None:
base, ext = os.path.splitext(orthophoto_file)
- output_file = base + '.png'
-
+ output_file = base + ".png"
+
# See if we need to select top three bands
params = []
try:
gtif = gdal.Open(orthophoto_file)
bands = []
- for idx in range(1, gtif.RasterCount+1):
+ for idx in range(1, gtif.RasterCount + 1):
bands.append(gtif.GetRasterBand(idx).GetColorInterpretation())
- bands = dict(zip(bands, range(1, len(bands)+1)))
+ bands = dict(zip(bands, range(1, len(bands) + 1)))
if gtif.RasterCount >= 3:
red = bands.get(gdal.GCI_RedBand)
@@ -60,10 +64,10 @@ def generate_png(orthophoto_file, output_file=None, outsize=None):
if red is None or green is None or blue is None:
params.append("-b 1 -b 2 -b 3")
else:
- params.append("-b %s -b %s -b %s" % (red, green, blue))
+ params.append("-b %s -b %s -b %s" % (red, green, blue))
elif gtif.RasterCount <= 2:
params.append("-b 1")
-
+
alpha = bands.get(gdal.GCI_AlphaBand)
if alpha is not None:
params.append("-b %s" % alpha)
@@ -77,31 +81,41 @@ def generate_png(orthophoto_file, output_file=None, outsize=None):
params.append("-scale_1 -scale_2 -scale_3")
elif gtif.RasterCount <= 2:
params.append("-scale_1")
-
+
gtif = None
except Exception as e:
- log.ODM_WARNING("Cannot read orthophoto information for PNG generation: %s" % str(e))
+ log.ODM_WARNING(
+ "Cannot read orthophoto information for PNG generation: %s" % str(e)
+ )
if outsize is not None:
params.append("-outsize %s 0" % outsize)
- system.run('gdal_translate -of png "%s" "%s" %s '
- '-co WORLDFILE=YES '
- '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, " ".join(params), get_max_memory()))
+ system.run(
+ 'gdal_translate -of png "%s" "%s" %s '
+ "-co WORLDFILE=YES "
+ "--config GDAL_CACHEMAX %s%% "
+ % (orthophoto_file, output_file, " ".join(params), get_max_memory())
+ )
+
def generate_kmz(orthophoto_file, output_file=None, outsize=None):
if output_file is None:
base, ext = os.path.splitext(orthophoto_file)
- output_file = base + '.kmz'
-
+ output_file = base + ".kmz"
+
# See if we need to select top three bands
bandparam = ""
gtif = gdal.Open(orthophoto_file)
if gtif.RasterCount > 4:
bandparam = "-b 1 -b 2 -b 3 -a_nodata 0"
- system.run('gdal_translate -of KMLSUPEROVERLAY -co FORMAT=PNG "%s" "%s" %s '
- '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, bandparam, get_max_memory()))
+ system.run(
+ 'gdal_translate -of KMLSUPEROVERLAY -co FORMAT=PNG "%s" "%s" %s '
+ "--config GDAL_CACHEMAX %s%% "
+ % (orthophoto_file, output_file, bandparam, get_max_memory())
+ )
+
def generate_extent_polygon(orthophoto_file):
"""Function to return the orthophoto extent as a polygon into a gpkg file
@@ -110,11 +124,11 @@ def generate_extent_polygon(orthophoto_file):
orthophoto_file (str): the path to orthophoto file
"""
base, ext = os.path.splitext(orthophoto_file)
- output_file = base + '_extent.dxf'
+ output_file = base + "_extent.dxf"
try:
gtif = gdal.Open(orthophoto_file)
- srs = gtif.GetSpatialRef()
+ srs = gtif.GetSpatialRef()
geoTransform = gtif.GetGeoTransform()
# calculate the coordinates
@@ -122,10 +136,21 @@ def generate_extent_polygon(orthophoto_file):
maxy = geoTransform[3]
maxx = minx + geoTransform[1] * gtif.RasterXSize
miny = maxy + geoTransform[5] * gtif.RasterYSize
-
+
# create polygon in wkt format
- poly_wkt = "POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
-
+ poly_wkt = "POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % (
+ minx,
+ miny,
+ minx,
+ maxy,
+ maxx,
+ maxy,
+ maxx,
+ miny,
+ minx,
+ miny,
+ )
+
# create vector file
# just the DXF to support AutoCAD users
# to load the geotiff raster correctly.
@@ -147,59 +172,84 @@ def generate_extent_polygon(orthophoto_file):
gtif = None
log.ODM_INFO("Wrote %s" % output_file)
except Exception as e:
- log.ODM_WARNING("Cannot create extent layer for %s: %s" % (orthophoto_file, str(e)))
+ log.ODM_WARNING(
+ "Cannot create extent layer for %s: %s" % (orthophoto_file, str(e))
+ )
def generate_tfw(orthophoto_file):
base, ext = os.path.splitext(orthophoto_file)
- tfw_file = base + '.tfw'
+ tfw_file = base + ".tfw"
try:
with rasterio.open(orthophoto_file) as ds:
t = ds.transform
- with open(tfw_file, 'w') as f:
+ with open(tfw_file, "w") as f:
# rasterio affine values taken by
# https://mharty3.github.io/til/GIS/raster-affine-transforms/
- f.write("\n".join([str(v) for v in [t.a, t.d, t.b, t.e, t.c, t.f]]) + "\n")
+ f.write(
+ "\n".join([str(v) for v in [t.a, t.d, t.b, t.e, t.c, t.f]]) + "\n"
+ )
log.ODM_INFO("Wrote %s" % tfw_file)
except Exception as e:
log.ODM_WARNING("Cannot create .tfw for %s: %s" % (orthophoto_file, str(e)))
-def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution):
+def post_orthophoto_steps(
+ args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution
+):
if args.crop > 0 or args.boundary:
- Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha'])
+ Cropper.crop(
+ bounds_file_path,
+ orthophoto_file,
+ get_orthophoto_vars(args),
+ keep_original=not args.optimize_disk_space,
+ warp_options=["-dstalpha"],
+ )
if args.build_overviews and not args.cog:
build_overviews(orthophoto_file)
if args.orthophoto_png:
generate_png(orthophoto_file)
-
+
if args.orthophoto_kmz:
generate_kmz(orthophoto_file)
if args.tiles:
- generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency, resolution)
+ generate_orthophoto_tiles(
+ orthophoto_file, orthophoto_tiles_dir, args.max_concurrency, resolution
+ )
if args.cog:
- convert_to_cogeo(orthophoto_file, max_workers=args.max_concurrency, compression=args.orthophoto_compression)
+ convert_to_cogeo(
+ orthophoto_file,
+ max_workers=args.max_concurrency,
+ compression=args.orthophoto_compression,
+ )
generate_extent_polygon(orthophoto_file)
generate_tfw(orthophoto_file)
-def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance=20, only_max_coords_feature=False):
+
+def compute_mask_raster(
+ input_raster,
+ vector_mask,
+ output_raster,
+ blend_distance=20,
+ only_max_coords_feature=False,
+):
if not os.path.exists(input_raster):
log.ODM_WARNING("Cannot mask raster, %s does not exist" % input_raster)
return
-
+
if not os.path.exists(vector_mask):
log.ODM_WARNING("Cannot mask raster, %s does not exist" % vector_mask)
return
log.ODM_INFO("Computing mask raster: %s" % output_raster)
- with rasterio.open(input_raster, 'r') as rast:
+ with rasterio.open(input_raster, "r") as rast:
with fiona.open(vector_mask) as src:
burn_features = src
@@ -209,12 +259,17 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance
for feature in src:
if feature is not None:
# No complex shapes
- if len(feature['geometry']['coordinates'][0]) > max_coords_count:
- max_coords_count = len(feature['geometry']['coordinates'][0])
+ if (
+ len(feature["geometry"]["coordinates"][0])
+ > max_coords_count
+ ):
+ max_coords_count = len(
+ feature["geometry"]["coordinates"][0]
+ )
max_coords_feature = feature
if max_coords_feature is not None:
burn_features = [max_coords_feature]
-
+
shapes = [feature["geometry"] for feature in burn_features]
out_image, out_transform = mask(rast, shapes, nodata=0)
@@ -227,22 +282,28 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance
dist_t[dist_t > blend_distance] = 1
np.multiply(alpha_band, dist_t, out=alpha_band, casting="unsafe")
else:
- log.ODM_WARNING("%s does not have an alpha band, cannot blend cutline!" % input_raster)
+ log.ODM_WARNING(
+ "%s does not have an alpha band, cannot blend cutline!"
+ % input_raster
+ )
- with rasterio.open(output_raster, 'w', BIGTIFF="IF_SAFER", **rast.profile) as dst:
+ with rasterio.open(
+ output_raster, "w", BIGTIFF="IF_SAFER", **rast.profile
+ ) as dst:
dst.colorinterp = rast.colorinterp
dst.write(out_image)
return output_raster
+
def feather_raster(input_raster, output_raster, blend_distance=20):
if not os.path.exists(input_raster):
log.ODM_WARNING("Cannot feather raster, %s does not exist" % input_raster)
return
log.ODM_INFO("Computing feather raster: %s" % output_raster)
-
- with rasterio.open(input_raster, 'r') as rast:
+
+ with rasterio.open(input_raster, "r") as rast:
out_image = rast.read()
if blend_distance > 0:
if out_image.shape[0] >= 4:
@@ -252,22 +313,28 @@ def feather_raster(input_raster, output_raster, blend_distance=20):
dist_t[dist_t > blend_distance] = 1
np.multiply(alpha_band, dist_t, out=alpha_band, casting="unsafe")
else:
- log.ODM_WARNING("%s does not have an alpha band, cannot feather raster!" % input_raster)
+ log.ODM_WARNING(
+ "%s does not have an alpha band, cannot feather raster!"
+ % input_raster
+ )
- with rasterio.open(output_raster, 'w', BIGTIFF="IF_SAFER", **rast.profile) as dst:
+ with rasterio.open(
+ output_raster, "w", BIGTIFF="IF_SAFER", **rast.profile
+ ) as dst:
dst.colorinterp = rast.colorinterp
dst.write(out_image)
return output_raster
+
def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
"""
Based on https://github.com/mapbox/rio-merge-rgba/
Merge orthophotos around cutlines using a blend buffer.
"""
inputs = []
- bounds=None
- precision=7
+ bounds = None
+ precision = 7
for o, c in input_ortho_and_ortho_cuts:
if not io.file_exists(o):
@@ -286,11 +353,11 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
res = first.res
dtype = first.dtypes[0]
profile = first.profile
- num_bands = first.meta['count'] - 1 # minus alpha
+ num_bands = first.meta["count"] - 1 # minus alpha
colorinterp = first.colorinterp
log.ODM_INFO("%s valid orthophoto rasters to merge" % len(inputs))
- sources = [(rasterio.open(o), rasterio.open(c)) for o,c in inputs]
+ sources = [(rasterio.open(o), rasterio.open(c)) for o, c in inputs]
# scan input files.
# while we're at it, validate assumptions about inputs
@@ -321,12 +388,12 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
profile["transform"] = output_transform
profile["height"] = output_height
profile["width"] = output_width
- profile["tiled"] = orthophoto_vars.get('TILED', 'YES') == 'YES'
- profile["blockxsize"] = orthophoto_vars.get('BLOCKXSIZE', 512)
- profile["blockysize"] = orthophoto_vars.get('BLOCKYSIZE', 512)
- profile["compress"] = orthophoto_vars.get('COMPRESS', 'LZW')
- profile["predictor"] = orthophoto_vars.get('PREDICTOR', '2')
- profile["bigtiff"] = orthophoto_vars.get('BIGTIFF', 'IF_SAFER')
+ profile["tiled"] = orthophoto_vars.get("TILED", "YES") == "YES"
+ profile["blockxsize"] = orthophoto_vars.get("BLOCKXSIZE", 512)
+ profile["blockysize"] = orthophoto_vars.get("BLOCKYSIZE", 512)
+ profile["compress"] = orthophoto_vars.get("COMPRESS", "LZW")
+ profile["predictor"] = orthophoto_vars.get("PREDICTOR", "2")
+ profile["bigtiff"] = orthophoto_vars.get("BIGTIFF", "IF_SAFER")
profile.update()
# create destination file
@@ -346,11 +413,14 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
# First pass, write all rasters naively without blending
for src, _ in sources:
- src_window = tuple(zip(rowcol(
- src.transform, left, top, op=round, precision=precision
- ), rowcol(
- src.transform, right, bottom, op=round, precision=precision
- )))
+ src_window = tuple(
+ zip(
+ rowcol(src.transform, left, top, op=round, precision=precision),
+ rowcol(
+ src.transform, right, bottom, op=round, precision=precision
+ ),
+ )
+ )
temp = np.zeros(dst_shape, dtype=dtype)
temp = src.read(
@@ -370,11 +440,14 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
# Second pass, write all feathered rasters
# blending the edges
for src, _ in sources:
- src_window = tuple(zip(rowcol(
- src.transform, left, top, op=round, precision=precision
- ), rowcol(
- src.transform, right, bottom, op=round, precision=precision
- )))
+ src_window = tuple(
+ zip(
+ rowcol(src.transform, left, top, op=round, precision=precision),
+ rowcol(
+ src.transform, right, bottom, op=round, precision=precision
+ ),
+ )
+ )
temp = np.zeros(dst_shape, dtype=dtype)
temp = src.read(
@@ -383,10 +456,12 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
where = temp[-1] != 0
for b in range(0, num_bands):
- blended = temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b]
- np.copyto(dstarr[b], blended, casting='unsafe', where=where)
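+ # alpha-weighted blend: temp[-1] is the source alpha band (0-255); weight the source band by alpha/255 and the existing destination value by the remainder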
+ blended = (
+ temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b]
+ )
+ np.copyto(dstarr[b], blended, casting="unsafe", where=where)
dstarr[-1][where] = 255.0
-
+
# check if dest has any nodata pixels available
if np.count_nonzero(dstarr[-1]) == blocksize:
break
@@ -394,11 +469,14 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
# Third pass, write cut rasters
# blending the cutlines
for _, cut in sources:
- src_window = tuple(zip(rowcol(
- cut.transform, left, top, op=round, precision=precision
- ), rowcol(
- cut.transform, right, bottom, op=round, precision=precision
- )))
+ src_window = tuple(
+ zip(
+ rowcol(cut.transform, left, top, op=round, precision=precision),
+ rowcol(
+ cut.transform, right, bottom, op=round, precision=precision
+ ),
+ )
+ )
temp = np.zeros(dst_shape, dtype=dtype)
temp = cut.read(
@@ -408,8 +486,10 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
# For each band, average alpha values between
# destination raster and cut raster
for b in range(0, num_bands):
- blended = temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b]
- np.copyto(dstarr[b], blended, casting='unsafe', where=temp[-1]!=0)
+ blended = (
+ temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b]
+ )
+ np.copyto(dstarr[b], blended, casting="unsafe", where=temp[-1] != 0)
dstrast.write(dstarr, window=dst_window)
diff --git a/opendm/osfm.py b/opendm/osfm.py
index 0d49e1e4..632ed493 100644
--- a/opendm/osfm.py
+++ b/opendm/osfm.py
@@ -25,58 +25,72 @@ from opendm.gpu import has_popsift_and_can_handle_texsize, has_gpu
from opensfm import multiview, exif
from opensfm.actions.export_geocoords import _transform
+
class OSFMContext:
def __init__(self, opensfm_project_path):
self.opensfm_project_path = opensfm_project_path
-
+
def run(self, command):
- osfm_bin = os.path.join(context.opensfm_path, 'bin', 'opensfm')
- system.run('"%s" %s "%s"' %
- (osfm_bin, command, self.opensfm_project_path))
+ osfm_bin = os.path.join(context.opensfm_path, "bin", "opensfm")
+ system.run('"%s" %s "%s"' % (osfm_bin, command, self.opensfm_project_path))
def is_reconstruction_done(self):
- tracks_file = os.path.join(self.opensfm_project_path, 'tracks.csv')
- reconstruction_file = os.path.join(self.opensfm_project_path, 'reconstruction.json')
+ tracks_file = os.path.join(self.opensfm_project_path, "tracks.csv")
+ reconstruction_file = os.path.join(
+ self.opensfm_project_path, "reconstruction.json"
+ )
return io.file_exists(tracks_file) and io.file_exists(reconstruction_file)
def create_tracks(self, rerun=False):
- tracks_file = os.path.join(self.opensfm_project_path, 'tracks.csv')
- rs_file = self.path('rs_done.txt')
+ tracks_file = os.path.join(self.opensfm_project_path, "tracks.csv")
+ rs_file = self.path("rs_done.txt")
if not io.file_exists(tracks_file) or rerun:
- self.run('create_tracks')
+ self.run("create_tracks")
else:
- log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' % tracks_file)
+ log.ODM_WARNING("Found a valid OpenSfM tracks file in: %s" % tracks_file)
- def reconstruct(self, rolling_shutter_correct=False, merge_partial=False, rerun=False):
- reconstruction_file = os.path.join(self.opensfm_project_path, 'reconstruction.json')
+ def reconstruct(
+ self, rolling_shutter_correct=False, merge_partial=False, rerun=False
+ ):
+ reconstruction_file = os.path.join(
+ self.opensfm_project_path, "reconstruction.json"
+ )
if not io.file_exists(reconstruction_file) or rerun:
- self.run('reconstruct')
+ self.run("reconstruct")
if merge_partial:
self.check_merge_partial_reconstructions()
else:
- log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' % reconstruction_file)
+ log.ODM_WARNING(
+ "Found a valid OpenSfM reconstruction file in: %s" % reconstruction_file
+ )
# Check that a reconstruction file has been created
if not self.reconstructed():
- raise system.ExitException("The program could not process this dataset using the current settings. "
- "Check that the images have enough overlap, "
- "that there are enough recognizable features "
- "and that the images are in focus. "
- "The program will now exit.")
+ raise system.ExitException(
+ "The program could not process this dataset using the current settings. "
+ "Check that the images have enough overlap, "
+ "that there are enough recognizable features "
+ "and that the images are in focus. "
+ "The program will now exit."
+ )
if rolling_shutter_correct:
- rs_file = self.path('rs_done.txt')
+ rs_file = self.path("rs_done.txt")
if not io.file_exists(rs_file) or rerun:
- self.run('rs_correct')
+ self.run("rs_correct")
log.ODM_INFO("Re-running the reconstruction pipeline")
self.match_features(True)
self.create_tracks(True)
- self.reconstruct(rolling_shutter_correct=False, merge_partial=merge_partial, rerun=True)
+ self.reconstruct(
+ rolling_shutter_correct=False,
+ merge_partial=merge_partial,
+ rerun=True,
+ )
self.touch(rs_file)
else:
@@ -89,7 +103,10 @@ class OSFMContext:
tracks_manager = data.load_tracks_manager()
if len(reconstructions) > 1:
- log.ODM_WARNING("Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap" % len(reconstructions))
+ log.ODM_WARNING(
+ "Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap"
+ % len(reconstructions)
+ )
log.ODM_INFO("Attempting merge")
merged = Reconstruction()
@@ -110,7 +127,9 @@ class OSFMContext:
new_point = merged.create_point(point.id, point.coordinates)
new_point.color = point.color
except RuntimeError as e:
- log.ODM_WARNING("Cannot merge shot id %s (%s)" % (shot.id, str(e)))
+ log.ODM_WARNING(
+ "Cannot merge shot id %s (%s)" % (shot.id, str(e))
+ )
continue
for shot in rec.shots.values():
@@ -118,7 +137,9 @@ class OSFMContext:
try:
obsdict = tracks_manager.get_shot_observations(shot.id)
except RuntimeError:
- log.ODM_WARNING("Shot id %s missing from tracks_manager!" % shot.id)
+ log.ODM_WARNING(
+ "Shot id %s missing from tracks_manager!" % shot.id
+ )
continue
for track_id, obs in obsdict.items():
if track_id in merged.points:
@@ -126,7 +147,7 @@ class OSFMContext:
data.save_reconstruction([merged])
- def setup(self, args, images_path, reconstruction, append_config = [], rerun=False):
+ def setup(self, args, images_path, reconstruction, append_config=[], rerun=False):
"""
Setup a OpenSfM project
"""
@@ -136,14 +157,22 @@ class OSFMContext:
if not io.dir_exists(self.opensfm_project_path):
system.mkdir_p(self.opensfm_project_path)
- list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
+ list_path = os.path.join(self.opensfm_project_path, "image_list.txt")
if not io.file_exists(list_path) or rerun:
if reconstruction.multi_camera:
- photos = get_photos_by_band(reconstruction.multi_camera, args.primary_band)
+ photos = get_photos_by_band(
+ reconstruction.multi_camera, args.primary_band
+ )
if len(photos) < 1:
- raise Exception("Not enough images in selected band %s" % args.primary_band.lower())
- log.ODM_INFO("Reconstruction will use %s images from %s band" % (len(photos), args.primary_band.lower()))
+ raise Exception(
+ "Not enough images in selected band %s"
+ % args.primary_band.lower()
+ )
+ log.ODM_INFO(
+ "Reconstruction will use %s images from %s band"
+ % (len(photos), args.primary_band.lower())
+ )
else:
photos = reconstruction.photos
@@ -151,7 +180,7 @@ class OSFMContext:
num_zero_alt = 0
has_alt = True
has_gps = False
- with open(list_path, 'w') as fout:
+ with open(list_path, "w") as fout:
for photo in photos:
if photo.altitude is None:
has_alt = False
@@ -160,54 +189,69 @@ class OSFMContext:
if photo.latitude is not None and photo.longitude is not None:
has_gps = True
- fout.write('%s\n' % os.path.join(images_path, photo.filename))
-
+ fout.write("%s\n" % os.path.join(images_path, photo.filename))
+
# check 0 altitude images percentage when has_alt is True
if has_alt and num_zero_alt / len(photos) > 0.05:
- log.ODM_WARNING("More than 5% of images have zero altitude, this might be an indicator that the images have no altitude information")
+ log.ODM_WARNING(
+ "More than 5% of images have zero altitude, this might be an indicator that the images have no altitude information"
+ )
has_alt = False
# check for image_groups.txt (split-merge)
image_groups_file = os.path.join(args.project_path, "image_groups.txt")
- if 'split_image_groups_is_set' in args:
+ if "split_image_groups_is_set" in args:
image_groups_file = os.path.abspath(args.split_image_groups)
if io.file_exists(image_groups_file):
- dst_groups_file = os.path.join(self.opensfm_project_path, "image_groups.txt")
+ dst_groups_file = os.path.join(
+ self.opensfm_project_path, "image_groups.txt"
+ )
io.copy(image_groups_file, dst_groups_file)
log.ODM_INFO("Copied %s to %s" % (image_groups_file, dst_groups_file))
-
+
# check for cameras
if args.cameras:
try:
camera_overrides = camera.get_opensfm_camera_models(args.cameras)
- with open(os.path.join(self.opensfm_project_path, "camera_models_overrides.json"), 'w') as f:
+ with open(
+ os.path.join(
+ self.opensfm_project_path, "camera_models_overrides.json"
+ ),
+ "w",
+ ) as f:
f.write(json.dumps(camera_overrides))
- log.ODM_INFO("Wrote camera_models_overrides.json to OpenSfM directory")
+ log.ODM_INFO(
+ "Wrote camera_models_overrides.json to OpenSfM directory"
+ )
except Exception as e:
- log.ODM_WARNING("Cannot set camera_models_overrides.json: %s" % str(e))
+ log.ODM_WARNING(
+ "Cannot set camera_models_overrides.json: %s" % str(e)
+ )
# Check image masks
masks = []
for p in photos:
if p.mask is not None:
masks.append((p.filename, os.path.join(images_path, p.mask)))
-
+
if masks:
log.ODM_INFO("Found %s image masks" % len(masks))
- with open(os.path.join(self.opensfm_project_path, "mask_list.txt"), 'w') as f:
+ with open(
+ os.path.join(self.opensfm_project_path, "mask_list.txt"), "w"
+ ) as f:
for fname, mask in masks:
f.write("{} {}\n".format(fname, mask))
-
+
# Compute feature_process_size
- feature_process_size = 2048 # default
+ feature_process_size = 2048 # default
feature_quality_scale = {
- 'ultra': 1,
- 'high': 0.5,
- 'medium': 0.25,
- 'low': 0.125,
- 'lowest': 0.0675,
+ "ultra": 1,
+ "high": 0.5,
+ "medium": 0.25,
+ "low": 0.125,
+ "lowest": 0.0675,
}
max_dims = find_largest_photo_dims(photos)
@@ -221,17 +265,26 @@ class OSFMContext:
upper_limit = 4480
megapixels = (w * h) / 1e6
multiplier = 1
-
+
if megapixels < 2:
multiplier = 2
elif megapixels > 42:
multiplier = 0.5
-
- factor = min(1, feature_quality_scale[args.feature_quality] * multiplier)
- feature_process_size = min(upper_limit, max(lower_limit, int(max_dim * factor)))
- log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size)
+
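+ # scale the largest photo dimension by the quality factor, clamped to [lower_limit, upper_limit]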
+ factor = min(
+ 1, feature_quality_scale[args.feature_quality] * multiplier
+ )
+ feature_process_size = min(
+ upper_limit, max(lower_limit, int(max_dim * factor))
+ )
+ log.ODM_INFO(
+ "Photo dimensions for feature extraction: %ipx"
+ % feature_process_size
+ )
else:
- log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")
+ log.ODM_WARNING(
+ "Cannot compute max image dimensions, going with defaults"
+ )
# create config file for OpenSfM
if args.matcher_neighbors > 0:
@@ -240,7 +293,7 @@ class OSFMContext:
else:
matcher_graph_rounds = 50
matcher_neighbors = 0
-
+
# Always use matcher-neighbors if less than 4 pictures
if len(photos) <= 3:
matcher_graph_rounds = 0
@@ -248,14 +301,15 @@ class OSFMContext:
config = [
"use_exif_size: no",
- "flann_algorithm: KDTREE", # more stable, faster than KMEANS
+ "flann_algorithm: KDTREE", # more stable, faster than KMEANS
"feature_process_size: %s" % feature_process_size,
"feature_min_frames: %s" % args.min_num_features,
"processes: %s" % args.max_concurrency,
"matching_gps_neighbors: %s" % matcher_neighbors,
"matching_gps_distance: 0",
"matching_graph_rounds: %s" % matcher_graph_rounds,
- "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params else 'yes'),
+ "optimize_camera_parameters: %s"
+ % ("no" if args.use_fixed_camera_params else "yes"),
"reconstruction_algorithm: %s" % (args.sfm_algorithm),
"undistorted_image_format: tif",
"bundle_outlier_filtering_type: AUTO",
@@ -264,14 +318,16 @@ class OSFMContext:
"triangulation_type: ROBUST",
"retriangulation_ratio: 2",
]
-
+
if args.matcher_order > 0:
if not reconstruction.is_georeferenced():
config.append("matching_order_neighbors: %s" % args.matcher_order)
else:
- log.ODM_WARNING("Georeferenced reconstruction, ignoring --matcher-order")
+ log.ODM_WARNING(
+ "Georeferenced reconstruction, ignoring --matcher-order"
+ )
- if args.camera_lens != 'auto':
+ if args.camera_lens != "auto":
config.append("camera_projection_type: %s" % args.camera_lens.upper())
matcher_type = args.matcher_type
@@ -280,19 +336,23 @@ class OSFMContext:
osfm_matchers = {
"bow": "WORDS",
"flann": "FLANN",
- "bruteforce": "BRUTEFORCE"
+ "bruteforce": "BRUTEFORCE",
}
- if not has_gps and not 'matcher_type_is_set' in args:
- log.ODM_INFO("No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)")
+ if not has_gps and not "matcher_type_is_set" in args:
+ log.ODM_INFO(
+ "No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)"
+ )
matcher_type = "bow"
if matcher_type == "bow":
# Cannot use anything other than HAHOG with BOW
if feature_type != "HAHOG":
- log.ODM_WARNING("Using BOW matching, will use HAHOG feature type, not SIFT")
+ log.ODM_WARNING(
+ "Using BOW matching, will use HAHOG feature type, not SIFT"
+ )
feature_type = "HAHOG"
-
+
config.append("matcher_type: %s" % osfm_matchers[matcher_type])
# GPU acceleration?
@@ -309,7 +369,7 @@ class OSFMContext:
log.ODM_INFO("Using GPU for extracting SIFT features")
feature_type = "SIFT_GPU"
self.gpu_sift_feature_extraction = True
-
+
config.append("feature_type: %s" % feature_type)
if has_alt:
@@ -321,71 +381,87 @@ class OSFMContext:
config.append("align_method: auto")
else:
config.append("align_method: orientation_prior")
-
+
if args.use_hybrid_bundle_adjustment:
log.ODM_INFO("Enabling hybrid bundle adjustment")
- config.append("bundle_interval: 100") # Bundle after adding 'bundle_interval' cameras
- config.append("bundle_new_points_ratio: 1.2") # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
- config.append("local_bundle_radius: 1") # Max image graph distance for images to be included in local bundle adjustment
+ config.append(
+ "bundle_interval: 100"
+ ) # Bundle after adding 'bundle_interval' cameras
+ config.append(
+ "bundle_new_points_ratio: 1.2"
+ ) # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
+ config.append(
+ "local_bundle_radius: 1"
+ ) # Max image graph distance for images to be included in local bundle adjustment
else:
config.append("local_bundle_radius: 0")
-
+
if gcp_path:
config.append("bundle_use_gcp: yes")
if not args.force_gps:
config.append("bundle_use_gps: no")
else:
config.append("bundle_compensate_gps_bias: yes")
-
+
io.copy(gcp_path, self.path("gcp_list.txt"))
-
+
config = config + append_config
# write config file
log.ODM_INFO(config)
config_filename = self.get_config_file_path()
- with open(config_filename, 'w') as fout:
+ with open(config_filename, "w") as fout:
fout.write("\n".join(config))
-
+
# We impose our own reference_lla
if reconstruction.is_georeferenced():
- self.write_reference_lla(reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset, reconstruction.georef.proj4())
+ self.write_reference_lla(
+ reconstruction.georef.utm_east_offset,
+ reconstruction.georef.utm_north_offset,
+ reconstruction.georef.proj4(),
+ )
else:
- log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
+ log.ODM_WARNING(
+ "%s already exists, not rerunning OpenSfM setup" % list_path
+ )
def get_config_file_path(self):
- return os.path.join(self.opensfm_project_path, 'config.yaml')
+ return os.path.join(self.opensfm_project_path, "config.yaml")
def reconstructed(self):
if not io.file_exists(self.path("reconstruction.json")):
return False
-
- with open(self.path("reconstruction.json"), 'r') as f:
+
+ with open(self.path("reconstruction.json"), "r") as f:
return f.readline().strip() != "[]"
def extract_metadata(self, rerun=False):
metadata_dir = self.path("exif")
if not io.dir_exists(metadata_dir) or rerun:
- self.run('extract_metadata')
-
- def photos_to_metadata(self, photos, rolling_shutter, rolling_shutter_readout, rerun=False):
+ self.run("extract_metadata")
+
+ def photos_to_metadata(
+ self, photos, rolling_shutter, rolling_shutter_readout, rerun=False
+ ):
metadata_dir = self.path("exif")
if io.dir_exists(metadata_dir) and not rerun:
- log.ODM_WARNING("%s already exists, not rerunning photo to metadata" % metadata_dir)
+ log.ODM_WARNING(
+ "%s already exists, not rerunning photo to metadata" % metadata_dir
+ )
return
-
+
if io.dir_exists(metadata_dir):
shutil.rmtree(metadata_dir)
-
+
os.makedirs(metadata_dir, exist_ok=True)
-
+
camera_models = {}
data = DataSet(self.opensfm_project_path)
for p in photos:
d = p.to_opensfm_exif(rolling_shutter, rolling_shutter_readout)
- with open(os.path.join(metadata_dir, "%s.exif" % p.filename), 'w') as f:
+ with open(os.path.join(metadata_dir, "%s.exif" % p.filename), "w") as f:
f.write(json.dumps(d, indent=4))
camera_id = p.camera_id()
@@ -413,51 +489,55 @@ class OSFMContext:
def feature_matching(self, rerun=False):
features_dir = self.path("features")
-
+
if not io.dir_exists(features_dir) or rerun:
try:
- self.run('detect_features')
+ self.run("detect_features")
except system.SubprocessException as e:
# Sometimes feature extraction by GPU can fail
# for various reasons, so before giving up
# we try to fallback to CPU
- if hasattr(self, 'gpu_sift_feature_extraction'):
- log.ODM_WARNING("GPU SIFT extraction failed, maybe the graphics card is not supported? Attempting fallback to CPU")
- self.update_config({'feature_type': "SIFT"})
+ if hasattr(self, "gpu_sift_feature_extraction"):
+ log.ODM_WARNING(
+ "GPU SIFT extraction failed, maybe the graphics card is not supported? Attempting fallback to CPU"
+ )
+ self.update_config({"feature_type": "SIFT"})
if os.path.exists(features_dir):
shutil.rmtree(features_dir)
- self.run('detect_features')
+ self.run("detect_features")
else:
raise e
else:
- log.ODM_WARNING('Detect features already done: %s exists' % features_dir)
+ log.ODM_WARNING("Detect features already done: %s exists" % features_dir)
self.match_features(rerun)
def match_features(self, rerun=False):
matches_dir = self.path("matches")
if not io.dir_exists(matches_dir) or rerun:
- self.run('match_features')
+ self.run("match_features")
else:
- log.ODM_WARNING('Match features already done: %s exists' % matches_dir)
+ log.ODM_WARNING("Match features already done: %s exists" % matches_dir)
def align_reconstructions(self, rerun):
- alignment_file = self.path('alignment_done.txt')
+ alignment_file = self.path("alignment_done.txt")
if not io.file_exists(alignment_file) or rerun:
log.ODM_INFO("Aligning submodels...")
meta_data = metadataset.MetaDataSet(self.opensfm_project_path)
reconstruction_shots = tools.load_reconstruction_shots(meta_data)
- transformations = tools.align_reconstructions(reconstruction_shots,
- tools.partial_reconstruction_name,
- False)
+ transformations = tools.align_reconstructions(
+ reconstruction_shots, tools.partial_reconstruction_name, False
+ )
tools.apply_transformations(transformations)
self.touch(alignment_file)
else:
- log.ODM_WARNING('Found a alignment done progress file in: %s' % alignment_file)
+ log.ODM_WARNING(
+ "Found a alignment done progress file in: %s" % alignment_file
+ )
def touch(self, file):
- with open(file, 'w') as fout:
+ with open(file, "w") as fout:
fout.write("Done!\n")
def path(self, *paths):
@@ -467,14 +547,21 @@ class OSFMContext:
if not os.path.exists(output) or rerun:
try:
reconstruction_file = self.path("reconstruction.json")
- with open(output, 'w') as fout:
- fout.write(json.dumps(camera.get_cameras_from_opensfm(reconstruction_file), indent=4))
+ with open(output, "w") as fout:
+ fout.write(
+ json.dumps(
+ camera.get_cameras_from_opensfm(reconstruction_file),
+ indent=4,
+ )
+ )
except Exception as e:
log.ODM_WARNING("Cannot export cameras to %s. %s." % (output, str(e)))
else:
log.ODM_INFO("Already extracted cameras")
-
- def convert_and_undistort(self, rerun=False, imageFilter=None, image_list=None, runId="nominal"):
+
+ def convert_and_undistort(
+ self, rerun=False, imageFilter=None, image_list=None, runId="nominal"
+ ):
log.ODM_INFO("Undistorting %s ..." % self.opensfm_project_path)
done_flag_file = self.path("undistorted", "%s_done.txt" % runId)
@@ -484,9 +571,10 @@ class OSFMContext:
if image_list is not None:
ds._set_image_list(image_list)
- undistort.run_dataset(ds, "reconstruction.json",
- 0, None, "undistorted", imageFilter)
-
+ undistort.run_dataset(
+ ds, "reconstruction.json", 0, None, "undistorted", imageFilter
+ )
+
self.touch(done_flag_file)
else:
log.ODM_WARNING("Already undistorted (%s)" % runId)
@@ -503,13 +591,13 @@ class OSFMContext:
def backup_reconstruction(self):
if os.path.exists(self.recon_backup_file()):
os.remove(self.recon_backup_file())
-
+
log.ODM_INFO("Backing up reconstruction")
shutil.copyfile(self.recon_file(), self.recon_backup_file())
def recon_backup_file(self):
return self.path("reconstruction.backup.json")
-
+
def recon_file(self):
return self.path("reconstruction.json")
@@ -519,9 +607,9 @@ class OSFMContext:
# Augment reconstruction.json
for recon in reconstruction:
- shots = recon['shots']
+ shots = recon["shots"]
sids = list(shots)
-
+
for shot_id in sids:
secondary_photos = p2s.get(shot_id)
if secondary_photos is None:
@@ -531,10 +619,9 @@ class OSFMContext:
for p in secondary_photos:
shots[p.filename] = shots[shot_id]
- with open(self.recon_file(), 'w') as f:
+ with open(self.recon_file(), "w") as f:
f.write(json.dumps(reconstruction))
-
def update_config(self, cfg_dict):
cfg_file = self.get_config_file_path()
log.ODM_INFO("Updating %s" % cfg_file)
@@ -545,12 +632,16 @@ class OSFMContext:
for k, v in cfg_dict.items():
cfg[k] = v
log.ODM_INFO("%s: %s" % (k, v))
- with open(cfg_file, 'w') as fout:
+ with open(cfg_file, "w") as fout:
fout.write(yaml.dump(cfg, default_flow_style=False))
except Exception as e:
- log.ODM_WARNING("Cannot update configuration file %s: %s" % (cfg_file, str(e)))
+ log.ODM_WARNING(
+ "Cannot update configuration file %s: %s" % (cfg_file, str(e))
+ )
else:
- log.ODM_WARNING("Tried to update configuration, but %s does not exist." % cfg_file)
+ log.ODM_WARNING(
+ "Tried to update configuration, but %s does not exist." % cfg_file
+ )
def export_stats(self, rerun=False):
log.ODM_INFO("Export reconstruction stats")
@@ -569,7 +660,7 @@ class OSFMContext:
pdf_report = report.Report(data, odm_stats)
pdf_report.generate_report()
pdf_report.save_report("report.pdf")
-
+
if os.path.exists(osfm_report_path):
if os.path.exists(report_path):
os.unlink(report_path)
@@ -578,20 +669,22 @@ class OSFMContext:
log.ODM_WARNING("Report could not be generated")
else:
log.ODM_WARNING("Report %s already exported" % report_path)
-
+
def write_reference_lla(self, offset_x, offset_y, proj4):
reference_lla = self.path("reference_lla.json")
longlat = CRS.from_epsg("4326")
- lon, lat = location.transform2(CRS.from_proj4(proj4), longlat, offset_x, offset_y)
+ lon, lat = location.transform2(
+ CRS.from_proj4(proj4), longlat, offset_x, offset_y
+ )
+
+ with open(reference_lla, "w") as f:
+ f.write(
+ json.dumps(
+ {"latitude": lat, "longitude": lon, "altitude": 0.0}, indent=4
+ )
+ )
- with open(reference_lla, 'w') as f:
- f.write(json.dumps({
- 'latitude': lat,
- 'longitude': lon,
- 'altitude': 0.0
- }, indent=4))
-
log.ODM_INFO("Wrote reference_lla.json")
def ground_control_points(self, proj4):
@@ -602,7 +695,7 @@ class OSFMContext:
if not io.file_exists(gcp_stats_file):
return []
-
+
gcps_stats = {}
try:
with open(gcp_stats_file) as f:
@@ -612,35 +705,37 @@ class OSFMContext:
if not gcps_stats:
return []
-
+
ds = DataSet(self.opensfm_project_path)
reference = ds.load_reference()
projection = pyproj.Proj(proj4)
result = []
for gcp in gcps_stats:
- geocoords = _transform(gcp['coordinates'], reference, projection)
- result.append({
- 'id': gcp['id'],
- 'observations': gcp['observations'],
- 'coordinates': geocoords,
- 'error': gcp['error']
- })
+ geocoords = _transform(gcp["coordinates"], reference, projection)
+ result.append(
+ {
+ "id": gcp["id"],
+ "observations": gcp["observations"],
+ "coordinates": geocoords,
+ "error": gcp["error"],
+ }
+ )
return result
-
def name(self):
return os.path.basename(os.path.abspath(self.path("..")))
-def get_submodel_argv(args, submodels_path = None, submodel_name = None):
+
+def get_submodel_argv(args, submodels_path=None, submodel_name=None):
"""
Gets argv for a submodel starting from the args passed to the application startup.
Additionally, if project_name, submodels_path and submodel_name are passed, the function
handles the value and --project-path detection / override.
When all arguments are set to None, --project-path and project name are always removed.
- :return the same as argv, but removing references to --split,
+ :return the same as argv, but removing references to --split,
setting/replacing --project-path and name
removing --rerun-from, --rerun, --rerun-all, --sm-cluster
removing --pc-las, --pc-csv, --pc-ept, --tiles flags (processing these is wasteful)
@@ -652,9 +747,29 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
reading the contents of --cameras
reading the contents of --boundary
"""
- assure_always = ['orthophoto_cutline', 'dem_euclidean_map', 'skip_3dmodel', 'skip_report']
- remove_always = ['split', 'split_overlap', 'rerun_from', 'rerun', 'gcp', 'end_with', 'sm_cluster', 'rerun_all', 'pc_csv', 'pc_las', 'pc_ept', 'tiles', 'copy-to', 'cog']
- read_json_always = ['cameras', 'boundary']
+ assure_always = [
+ "orthophoto_cutline",
+ "dem_euclidean_map",
+ "skip_3dmodel",
+ "skip_report",
+ ]
+ remove_always = [
+ "split",
+ "split_overlap",
+ "rerun_from",
+ "rerun",
+ "gcp",
+ "end_with",
+ "sm_cluster",
+ "rerun_all",
+ "pc_csv",
+ "pc_las",
+ "pc_ept",
+ "tiles",
+ "copy-to",
+ "cog",
+ ]
+ read_json_always = ["cameras", "boundary"]
argv = sys.argv
@@ -662,14 +777,14 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
startup_script = argv[0]
# On Windows, make sure we always invoke the "run.bat" file
- if sys.platform == 'win32':
+ if sys.platform == "win32":
startup_script_dir = os.path.dirname(startup_script)
startup_script = os.path.join(startup_script_dir, "run")
- result = [startup_script]
+ result = [startup_script]
args_dict = vars(args).copy()
- set_keys = [k[:-len("_is_set")] for k in args_dict.keys() if k.endswith("_is_set")]
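+ # options explicitly set by the user are tracked via companion "<name>_is_set" keys; strip the suffix to recover those option names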
+ set_keys = [k[: -len("_is_set")] for k in args_dict.keys() if k.endswith("_is_set")]
# Handle project name and project path (special case)
if "name" in set_keys:
@@ -688,7 +803,7 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
if not k in set_keys:
set_keys.append(k)
args_dict[k] = True
-
+
# Read JSON always
for k in read_json_always:
if k in set_keys:
@@ -710,13 +825,13 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
# Populate result
for k in set_keys:
result.append("--%s" % k.replace("_", "-"))
-
+
# No second value for booleans
if isinstance(args_dict[k], bool) and args_dict[k] == True:
continue
-
+
result.append(str(args_dict[k]))
-
+
if submodels_path:
result.append("--project-path")
result.append(submodels_path)
@@ -726,6 +841,7 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
return result
+
def get_submodel_args_dict(args):
submodel_argv = get_submodel_argv(args)
result = {}
@@ -757,8 +873,8 @@ def get_submodel_paths(submodels_path, *paths):
return result
for f in os.listdir(submodels_path):
- if f.startswith('submodel'):
- p = os.path.join(submodels_path, f, *paths)
+ if f.startswith("submodel"):
+ p = os.path.join(submodels_path, f, *paths)
if os.path.exists(p):
result.append(p)
else:
@@ -766,6 +882,7 @@ def get_submodel_paths(submodels_path, *paths):
return result
+
def get_all_submodel_paths(submodels_path, *all_paths):
"""
:return Existing, multiple paths for all submodels as a nested list (all or nothing for each submodel)
@@ -780,11 +897,11 @@ def get_all_submodel_paths(submodels_path, *all_paths):
return result
for f in os.listdir(submodels_path):
- if f.startswith('submodel'):
+ if f.startswith("submodel"):
all_found = True
for ap in all_paths:
- p = os.path.join(submodels_path, f, ap)
+ p = os.path.join(submodels_path, f, ap)
if not os.path.exists(p):
log.ODM_WARNING("Missing %s from submodel %s" % (p, f))
all_found = False
@@ -794,11 +911,16 @@ def get_all_submodel_paths(submodels_path, *all_paths):
return result
+
def is_submodel(opensfm_root):
# A bit hackish, but works without introducing additional markers / flags
# Look at the path of the opensfm directory and see if "submodel_" is part of it
parts = os.path.abspath(opensfm_root).split(os.path.sep)
- return (len(parts) >= 2 and parts[-2][:9] == "submodel_") or \
- os.path.isfile(os.path.join(opensfm_root, "split_merge_stop_at_reconstruction.txt")) or \
- os.path.isfile(os.path.join(opensfm_root, "features", "empty"))
\ No newline at end of file
+ return (
+ (len(parts) >= 2 and parts[-2][:9] == "submodel_")
+ or os.path.isfile(
+ os.path.join(opensfm_root, "split_merge_stop_at_reconstruction.txt")
+ )
+ or os.path.isfile(os.path.join(opensfm_root, "features", "empty"))
+ )
diff --git a/opendm/photo.py b/opendm/photo.py
index 7549458f..f99e0505 100644
--- a/opendm/photo.py
+++ b/opendm/photo.py
@@ -19,16 +19,25 @@ from xml.parsers.expat import ExpatError
from opensfm.sensors import sensor_data
from opensfm.geo import ecef_from_lla
-projections = ['perspective', 'fisheye', 'fisheye_opencv', 'brown', 'dual', 'equirectangular', 'spherical']
+projections = [
+ "perspective",
+ "fisheye",
+ "fisheye_opencv",
+ "brown",
+ "dual",
+ "equirectangular",
+ "spherical",
+]
+
def find_mean_utc_time(photos):
utc_times = []
- for p in photos:
+ for p in photos:
if p.utc_time is not None:
utc_times.append(p.utc_time / 1000.0)
if len(utc_times) == 0:
return None
-
+
return np.mean(utc_times)
@@ -43,18 +52,20 @@ def find_largest_photo_dims(photos):
if mp > max_mp:
max_mp = mp
max_dims = (p.width, p.height)
-
+
return max_dims
+
def find_largest_photo_dim(photos):
max_dim = 0
for p in photos:
if p.width is None:
continue
max_dim = max(max_dim, max(p.width, p.height))
-
+
return max_dim
+
def find_largest_photo(photos):
max_p = None
max_area = 0
@@ -69,6 +80,7 @@ def find_largest_photo(photos):
return max_p
+
def get_mm_per_unit(resolution_unit):
"""Length of a resolution unit in millimeters.
@@ -87,12 +99,16 @@ def get_mm_per_unit(resolution_unit):
elif resolution_unit == 5: # um
return 0.001
else:
- log.ODM_WARNING("Unknown EXIF resolution unit value: {}".format(resolution_unit))
+ log.ODM_WARNING(
+ "Unknown EXIF resolution unit value: {}".format(resolution_unit)
+ )
return None
+
class PhotoCorruptedException(Exception):
pass
+
class GPSRefMock:
def __init__(self, ref):
self.values = [ref]
@@ -104,12 +120,12 @@ class ODM_Photo:
def __init__(self, path_file):
self.filename = os.path.basename(path_file)
self.mask = None
-
+
# Standard tags (virtually all photos have these)
self.width = None
self.height = None
- self.camera_make = ''
- self.camera_model = ''
+ self.camera_make = ""
+ self.camera_model = ""
self.orientation = 1
# Geo tags
@@ -118,7 +134,7 @@ class ODM_Photo:
self.altitude = None
# Multi-band fields
- self.band_name = 'RGB'
+ self.band_name = "RGB"
self.band_index = 0
self.capture_uuid = None
@@ -167,20 +183,29 @@ class ODM_Photo:
# self.bandwidth = None
# RTK
- self.gps_xy_stddev = None # Dilution of Precision X/Y
- self.gps_z_stddev = None # Dilution of Precision Z
+ self.gps_xy_stddev = None # Dilution of Precision X/Y
+ self.gps_z_stddev = None # Dilution of Precision Z
# Misc SFM
- self.camera_projection = 'brown'
+ self.camera_projection = "brown"
self.focal_ratio = 0.85
# parse values from metadata
self.parse_exif_values(path_file)
def __str__(self):
- return '{} | camera: {} {} | dimensions: {} x {} | lat: {} | lon: {} | alt: {} | band: {} ({})'.format(
- self.filename, self.camera_make, self.camera_model, self.width, self.height,
- self.latitude, self.longitude, self.altitude, self.band_name, self.band_index)
+ return "{} | camera: {} {} | dimensions: {} x {} | lat: {} | lon: {} | alt: {} | band: {} ({})".format(
+ self.filename,
+ self.camera_make,
+ self.camera_model,
+ self.width,
+ self.height,
+ self.latitude,
+ self.longitude,
+ self.altitude,
+ self.band_name,
+ self.band_index,
+ )
def set_mask(self, mask):
self.mask = mask
@@ -189,8 +214,12 @@ class ODM_Photo:
self.latitude = geo_entry.y
self.longitude = geo_entry.x
self.altitude = geo_entry.z
- if geo_entry.yaw is not None and geo_entry.pitch is not None and geo_entry.roll is not None:
- self.yaw = geo_entry.yaw
+ if (
+ geo_entry.yaw is not None
+ and geo_entry.pitch is not None
+ and geo_entry.roll is not None
+ ):
+ self.yaw = geo_entry.yaw
self.pitch = geo_entry.pitch
self.roll = geo_entry.roll
self.dls_yaw = geo_entry.yaw
@@ -201,7 +230,7 @@ class ODM_Photo:
def parse_exif_values(self, _path_file):
# Disable exifread log
- logging.getLogger('exifread').setLevel(logging.CRITICAL)
+ logging.getLogger("exifread").setLevel(logging.CRITICAL)
try:
self.width, self.height = get_image_size.get_image_size(_path_file)
@@ -211,104 +240,137 @@ class ODM_Photo:
tags = {}
xtags = {}
- with open(_path_file, 'rb') as f:
+ with open(_path_file, "rb") as f:
tags = exifread.process_file(f, details=True, extract_thumbnail=False)
try:
- if 'Image Make' in tags:
+ if "Image Make" in tags:
try:
- self.camera_make = tags['Image Make'].values
+ self.camera_make = tags["Image Make"].values
self.camera_make = self.camera_make.strip()
except UnicodeDecodeError:
log.ODM_WARNING("EXIF Image Make might be corrupted")
self.camera_make = "unknown"
- if 'Image Model' in tags:
+ if "Image Model" in tags:
try:
- self.camera_model = tags['Image Model'].values
+ self.camera_model = tags["Image Model"].values
self.camera_model = self.camera_model.strip()
except UnicodeDecodeError:
log.ODM_WARNING("EXIF Image Model might be corrupted")
self.camera_model = "unknown"
- if 'GPS GPSAltitude' in tags:
- self.altitude = self.float_value(tags['GPS GPSAltitude'])
- if 'GPS GPSAltitudeRef' in tags and self.int_value(tags['GPS GPSAltitudeRef']) is not None and self.int_value(tags['GPS GPSAltitudeRef']) > 0:
+ if "GPS GPSAltitude" in tags:
+ self.altitude = self.float_value(tags["GPS GPSAltitude"])
+ if (
+ "GPS GPSAltitudeRef" in tags
+ and self.int_value(tags["GPS GPSAltitudeRef"]) is not None
+ and self.int_value(tags["GPS GPSAltitudeRef"]) > 0
+ ):
self.altitude *= -1
- if 'GPS GPSLatitude' in tags and 'GPS GPSLatitudeRef' in tags:
- self.latitude = self.dms_to_decimal(tags['GPS GPSLatitude'], tags['GPS GPSLatitudeRef'])
- elif 'GPS GPSLatitude' in tags:
- log.ODM_WARNING("GPS position for %s might be incorrect, GPSLatitudeRef tag is missing (assuming N)" % self.filename)
- self.latitude = self.dms_to_decimal(tags['GPS GPSLatitude'], GPSRefMock('N'))
- if 'GPS GPSLongitude' in tags and 'GPS GPSLongitudeRef' in tags:
- self.longitude = self.dms_to_decimal(tags['GPS GPSLongitude'], tags['GPS GPSLongitudeRef'])
- elif 'GPS GPSLongitude' in tags:
- log.ODM_WARNING("GPS position for %s might be incorrect, GPSLongitudeRef tag is missing (assuming E)" % self.filename)
- self.longitude = self.dms_to_decimal(tags['GPS GPSLongitude'], GPSRefMock('E'))
- if 'Image Orientation' in tags:
- self.orientation = self.int_value(tags['Image Orientation'])
+ if "GPS GPSLatitude" in tags and "GPS GPSLatitudeRef" in tags:
+ self.latitude = self.dms_to_decimal(
+ tags["GPS GPSLatitude"], tags["GPS GPSLatitudeRef"]
+ )
+ elif "GPS GPSLatitude" in tags:
+ log.ODM_WARNING(
+ "GPS position for %s might be incorrect, GPSLatitudeRef tag is missing (assuming N)"
+ % self.filename
+ )
+ self.latitude = self.dms_to_decimal(
+ tags["GPS GPSLatitude"], GPSRefMock("N")
+ )
+ if "GPS GPSLongitude" in tags and "GPS GPSLongitudeRef" in tags:
+ self.longitude = self.dms_to_decimal(
+ tags["GPS GPSLongitude"], tags["GPS GPSLongitudeRef"]
+ )
+ elif "GPS GPSLongitude" in tags:
+ log.ODM_WARNING(
+ "GPS position for %s might be incorrect, GPSLongitudeRef tag is missing (assuming E)"
+ % self.filename
+ )
+ self.longitude = self.dms_to_decimal(
+ tags["GPS GPSLongitude"], GPSRefMock("E")
+ )
+ if "Image Orientation" in tags:
+ self.orientation = self.int_value(tags["Image Orientation"])
except (IndexError, ValueError) as e:
- log.ODM_WARNING("Cannot read basic EXIF tags for %s: %s" % (self.filename, str(e)))
+ log.ODM_WARNING(
+ "Cannot read basic EXIF tags for %s: %s" % (self.filename, str(e))
+ )
try:
- if 'Image Tag 0xC61A' in tags:
- self.black_level = self.list_values(tags['Image Tag 0xC61A'])
- elif 'BlackLevel' in tags:
- self.black_level = self.list_values(tags['BlackLevel'])
- elif 'Image BlackLevel' in tags:
- self.black_level = self.list_values(tags['Image BlackLevel'])
+ if "Image Tag 0xC61A" in tags:
+ self.black_level = self.list_values(tags["Image Tag 0xC61A"])
+ elif "BlackLevel" in tags:
+ self.black_level = self.list_values(tags["BlackLevel"])
+ elif "Image BlackLevel" in tags:
+ self.black_level = self.list_values(tags["Image BlackLevel"])
- if 'EXIF ExposureTime' in tags:
- self.exposure_time = self.float_value(tags['EXIF ExposureTime'])
+ if "EXIF ExposureTime" in tags:
+ self.exposure_time = self.float_value(tags["EXIF ExposureTime"])
- if 'EXIF FNumber' in tags:
- self.fnumber = self.float_value(tags['EXIF FNumber'])
-
- if 'EXIF ISOSpeed' in tags:
- self.iso_speed = self.int_value(tags['EXIF ISOSpeed'])
- elif 'EXIF PhotographicSensitivity' in tags:
- self.iso_speed = self.int_value(tags['EXIF PhotographicSensitivity'])
- elif 'EXIF ISOSpeedRatings' in tags:
- self.iso_speed = self.int_value(tags['EXIF ISOSpeedRatings'])
-
- if 'Image BitsPerSample' in tags:
- self.bits_per_sample = self.int_value(tags['Image BitsPerSample'])
+ if "EXIF FNumber" in tags:
+ self.fnumber = self.float_value(tags["EXIF FNumber"])
- if 'EXIF DateTimeOriginal' in tags:
- str_time = tags['EXIF DateTimeOriginal'].values
+ if "EXIF ISOSpeed" in tags:
+ self.iso_speed = self.int_value(tags["EXIF ISOSpeed"])
+ elif "EXIF PhotographicSensitivity" in tags:
+ self.iso_speed = self.int_value(
+ tags["EXIF PhotographicSensitivity"]
+ )
+ elif "EXIF ISOSpeedRatings" in tags:
+ self.iso_speed = self.int_value(tags["EXIF ISOSpeedRatings"])
+
+ if "Image BitsPerSample" in tags:
+ self.bits_per_sample = self.int_value(tags["Image BitsPerSample"])
+
+ if "EXIF DateTimeOriginal" in tags:
+ str_time = tags["EXIF DateTimeOriginal"].values
utc_time = datetime.strptime(str_time, "%Y:%m:%d %H:%M:%S")
subsec = 0
- if 'EXIF SubSecTime' in tags:
- subsec = self.int_value(tags['EXIF SubSecTime'])
+ if "EXIF SubSecTime" in tags:
+ subsec = self.int_value(tags["EXIF SubSecTime"])
negative = 1.0
if subsec < 0:
negative = -1.0
subsec *= -1.0
- subsec = float('0.{}'.format(int(subsec)))
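+ # EXIF SubSecTime stores the fractional-second digits (e.g. 123 -> 0.123 s); the sign is restored below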
+ subsec = float("0.{}".format(int(subsec)))
subsec *= negative
ms = subsec * 1e3
- utc_time += timedelta(milliseconds = ms)
- timezone = pytz.timezone('UTC')
+ utc_time += timedelta(milliseconds=ms)
+ timezone = pytz.timezone("UTC")
epoch = timezone.localize(datetime.utcfromtimestamp(0))
- self.utc_time = (timezone.localize(utc_time) - epoch).total_seconds() * 1000.0
-
- if 'MakerNote SpeedX' in tags and \
- 'MakerNote SpeedY' in tags and \
- 'MakerNote SpeedZ' in tags:
- self.speed_x = self.float_value(tags['MakerNote SpeedX'])
- self.speed_y = self.float_value(tags['MakerNote SpeedY'])
- self.speed_z = self.float_value(tags['MakerNote SpeedZ'])
+ self.utc_time = (
+ timezone.localize(utc_time) - epoch
+ ).total_seconds() * 1000.0
+
+ if (
+ "MakerNote SpeedX" in tags
+ and "MakerNote SpeedY" in tags
+ and "MakerNote SpeedZ" in tags
+ ):
+ self.speed_x = self.float_value(tags["MakerNote SpeedX"])
+ self.speed_y = self.float_value(tags["MakerNote SpeedY"])
+ self.speed_z = self.float_value(tags["MakerNote SpeedZ"])
+
+ if "EXIF ExifImageWidth" in tags and "EXIF ExifImageLength" in tags:
+ self.exif_width = self.int_value(tags["EXIF ExifImageWidth"])
+ self.exif_height = self.int_value(tags["EXIF ExifImageLength"])
- if 'EXIF ExifImageWidth' in tags and \
- 'EXIF ExifImageLength' in tags:
- self.exif_width = self.int_value(tags['EXIF ExifImageWidth'])
- self.exif_height = self.int_value(tags['EXIF ExifImageLength'])
-
except Exception as e:
- log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (self.filename, str(e)))
+ log.ODM_WARNING(
+ "Cannot read extended EXIF tags for %s: %s"
+ % (self.filename, str(e))
+ )
# Warn if GPS coordinates are suspiciously wrong
- if self.latitude is not None and self.latitude == 0 and \
- self.longitude is not None and self.longitude == 0:
- log.ODM_WARNING("%s has GPS position (0,0), possibly corrupted" % self.filename)
-
+ if (
+ self.latitude is not None
+ and self.latitude == 0
+ and self.longitude is not None
+ and self.longitude == 0
+ ):
+ log.ODM_WARNING(
+ "%s has GPS position (0,0), possibly corrupted" % self.filename
+ )
# Extract XMP tags
f.seek(0)
@@ -316,138 +378,201 @@ class ODM_Photo:
for xtags in xmp:
try:
- band_name = self.get_xmp_tag(xtags, ['Camera:BandName', '@Camera:BandName', 'FLIR:BandName'])
+ band_name = self.get_xmp_tag(
+ xtags, ["Camera:BandName", "@Camera:BandName", "FLIR:BandName"]
+ )
if band_name is not None:
self.band_name = band_name.replace(" ", "")
- self.set_attr_from_xmp_tag('band_index', xtags, [
- 'DLS:SensorId', # Micasense RedEdge
- '@Camera:RigCameraIndex', # Parrot Sequoia, Sentera 21244-00_3.2MP-GS-0001
- 'Camera:RigCameraIndex', # MicaSense Altum
- ])
+ self.set_attr_from_xmp_tag(
+ "band_index",
+ xtags,
+ [
+ "DLS:SensorId", # Micasense RedEdge
+ "@Camera:RigCameraIndex", # Parrot Sequoia, Sentera 21244-00_3.2MP-GS-0001
+ "Camera:RigCameraIndex", # MicaSense Altum
+ ],
+ )
- self.set_attr_from_xmp_tag('radiometric_calibration', xtags, [
- 'MicaSense:RadiometricCalibration',
- ])
+ self.set_attr_from_xmp_tag(
+ "radiometric_calibration",
+ xtags,
+ [
+ "MicaSense:RadiometricCalibration",
+ ],
+ )
- self.set_attr_from_xmp_tag('vignetting_center', xtags, [
- 'Camera:VignettingCenter',
- 'Sentera:VignettingCenter',
- ])
+ self.set_attr_from_xmp_tag(
+ "vignetting_center",
+ xtags,
+ [
+ "Camera:VignettingCenter",
+ "Sentera:VignettingCenter",
+ ],
+ )
- self.set_attr_from_xmp_tag('vignetting_polynomial', xtags, [
- 'Camera:VignettingPolynomial',
- 'Sentera:VignettingPolynomial',
- ])
-
- self.set_attr_from_xmp_tag('horizontal_irradiance', xtags, [
- 'Camera:HorizontalIrradiance'
- ], float)
+ self.set_attr_from_xmp_tag(
+ "vignetting_polynomial",
+ xtags,
+ [
+ "Camera:VignettingPolynomial",
+ "Sentera:VignettingPolynomial",
+ ],
+ )
- self.set_attr_from_xmp_tag('irradiance_scale_to_si', xtags, [
- 'Camera:IrradianceScaleToSIUnits'
- ], float)
+ self.set_attr_from_xmp_tag(
+ "horizontal_irradiance",
+ xtags,
+ ["Camera:HorizontalIrradiance"],
+ float,
+ )
- self.set_attr_from_xmp_tag('sun_sensor', xtags, [
- 'Camera:SunSensor',
- ], float)
+ self.set_attr_from_xmp_tag(
+ "irradiance_scale_to_si",
+ xtags,
+ ["Camera:IrradianceScaleToSIUnits"],
+ float,
+ )
- self.set_attr_from_xmp_tag('spectral_irradiance', xtags, [
- 'Camera:SpectralIrradiance',
- 'Camera:Irradiance',
- ], float)
+ self.set_attr_from_xmp_tag(
+ "sun_sensor",
+ xtags,
+ [
+ "Camera:SunSensor",
+ ],
+ float,
+ )
- self.set_attr_from_xmp_tag('capture_uuid', xtags, [
- '@drone-dji:CaptureUUID', # DJI
- 'MicaSense:CaptureId', # MicaSense Altum
- '@Camera:ImageUniqueID', # sentera 6x
- '@Camera:CaptureUUID', # Parrot Sequoia
- ])
+ self.set_attr_from_xmp_tag(
+ "spectral_irradiance",
+ xtags,
+ [
+ "Camera:SpectralIrradiance",
+ "Camera:Irradiance",
+ ],
+ float,
+ )
- self.set_attr_from_xmp_tag('gain', xtags, [
- '@drone-dji:SensorGain'
- ], float)
+ self.set_attr_from_xmp_tag(
+ "capture_uuid",
+ xtags,
+ [
+ "@drone-dji:CaptureUUID", # DJI
+ "MicaSense:CaptureId", # MicaSense Altum
+ "@Camera:ImageUniqueID", # sentera 6x
+ "@Camera:CaptureUUID", # Parrot Sequoia
+ ],
+ )
- self.set_attr_from_xmp_tag('gain_adjustment', xtags, [
- '@drone-dji:SensorGainAdjustment'
- ], float)
+ self.set_attr_from_xmp_tag(
+ "gain", xtags, ["@drone-dji:SensorGain"], float
+ )
+
+ self.set_attr_from_xmp_tag(
+ "gain_adjustment",
+ xtags,
+ ["@drone-dji:SensorGainAdjustment"],
+ float,
+ )
# Camera make / model for some cameras is stored in the XMP
- if self.camera_make == '':
- self.set_attr_from_xmp_tag('camera_make', xtags, [
- '@tiff:Make'
- ])
- if self.camera_model == '':
- self.set_attr_from_xmp_tag('camera_model', xtags, [
- '@tiff:Model'
- ])
+ if self.camera_make == "":
+ self.set_attr_from_xmp_tag("camera_make", xtags, ["@tiff:Make"])
+ if self.camera_model == "":
+ self.set_attr_from_xmp_tag(
+ "camera_model", xtags, ["@tiff:Model"]
+ )
# DJI GPS tags
- self.set_attr_from_xmp_tag('longitude', xtags, [
- '@drone-dji:Longitude'
- ], float)
- self.set_attr_from_xmp_tag('latitude', xtags, [
- '@drone-dji:Latitude'
- ], float)
- self.set_attr_from_xmp_tag('altitude', xtags, [
- '@drone-dji:AbsoluteAltitude'
- ], float)
+ self.set_attr_from_xmp_tag(
+ "longitude", xtags, ["@drone-dji:Longitude"], float
+ )
+ self.set_attr_from_xmp_tag(
+ "latitude", xtags, ["@drone-dji:Latitude"], float
+ )
+ self.set_attr_from_xmp_tag(
+ "altitude", xtags, ["@drone-dji:AbsoluteAltitude"], float
+ )
# Phantom 4 RTK
- if '@drone-dji:RtkStdLon' in xtags:
- y = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdLon'))
- x = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdLat'))
+ if "@drone-dji:RtkStdLon" in xtags:
+ y = float(self.get_xmp_tag(xtags, "@drone-dji:RtkStdLon"))
+ x = float(self.get_xmp_tag(xtags, "@drone-dji:RtkStdLat"))
self.gps_xy_stddev = max(x, y)
-
- if '@drone-dji:RtkStdHgt' in xtags:
- self.gps_z_stddev = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdHgt'))
+
+ if "@drone-dji:RtkStdHgt" in xtags:
+ self.gps_z_stddev = float(
+ self.get_xmp_tag(xtags, "@drone-dji:RtkStdHgt")
+ )
else:
- self.set_attr_from_xmp_tag('gps_xy_stddev', xtags, [
- '@Camera:GPSXYAccuracy',
- 'GPSXYAccuracy'
- ], float)
- self.set_attr_from_xmp_tag('gps_z_stddev', xtags, [
- '@Camera:GPSZAccuracy',
- 'GPSZAccuracy'
- ], float)
-
+ self.set_attr_from_xmp_tag(
+ "gps_xy_stddev",
+ xtags,
+ ["@Camera:GPSXYAccuracy", "GPSXYAccuracy"],
+ float,
+ )
+ self.set_attr_from_xmp_tag(
+ "gps_z_stddev",
+ xtags,
+ ["@Camera:GPSZAccuracy", "GPSZAccuracy"],
+ float,
+ )
+
# DJI Speed tags
- if '@drone-dji:FlightXSpeed' in xtags and \
- '@drone-dji:FlightYSpeed' in xtags and \
- '@drone-dji:FlightZSpeed' in xtags:
- self.set_attr_from_xmp_tag('speed_x', xtags, [
- '@drone-dji:FlightXSpeed'
- ], float)
- self.set_attr_from_xmp_tag('speed_y', xtags, [
- '@drone-dji:FlightYSpeed',
- ], float)
- self.set_attr_from_xmp_tag('speed_z', xtags, [
- '@drone-dji:FlightZSpeed',
- ], float)
+ if (
+ "@drone-dji:FlightXSpeed" in xtags
+ and "@drone-dji:FlightYSpeed" in xtags
+ and "@drone-dji:FlightZSpeed" in xtags
+ ):
+ self.set_attr_from_xmp_tag(
+ "speed_x", xtags, ["@drone-dji:FlightXSpeed"], float
+ )
+ self.set_attr_from_xmp_tag(
+ "speed_y",
+ xtags,
+ [
+ "@drone-dji:FlightYSpeed",
+ ],
+ float,
+ )
+ self.set_attr_from_xmp_tag(
+ "speed_z",
+ xtags,
+ [
+ "@drone-dji:FlightZSpeed",
+ ],
+ float,
+ )
# DJI MS
- if self.black_level is None and 'Camera:BlackCurrent' in xtags:
- self.set_attr_from_xmp_tag('black_level', xtags, [
- 'Camera:BlackCurrent'
- ], str)
- if '@drone-dji:ExposureTime' in xtags:
- self.set_attr_from_xmp_tag('exposure_time', xtags, [
- '@drone-dji:ExposureTime'
- ], float)
- self.exposure_time /= 1e6 # is in microseconds
-
+ if self.black_level is None and "Camera:BlackCurrent" in xtags:
+ self.set_attr_from_xmp_tag(
+ "black_level", xtags, ["Camera:BlackCurrent"], str
+ )
+ if "@drone-dji:ExposureTime" in xtags:
+ self.set_attr_from_xmp_tag(
+ "exposure_time", xtags, ["@drone-dji:ExposureTime"], float
+ )
+ self.exposure_time /= 1e6 # is in microseconds
+
# Account for over-estimation
if self.gps_xy_stddev is not None:
self.gps_xy_stddev *= 2.0
if self.gps_z_stddev is not None:
self.gps_z_stddev *= 2.0
- if 'DLS:Yaw' in xtags:
- self.set_attr_from_xmp_tag('dls_yaw', xtags, ['DLS:Yaw'], float)
- self.set_attr_from_xmp_tag('dls_pitch', xtags, ['DLS:Pitch'], float)
- self.set_attr_from_xmp_tag('dls_roll', xtags, ['DLS:Roll'], float)
-
- camera_projection = self.get_xmp_tag(xtags, ['@Camera:ModelType', 'Camera:ModelType'])
+ if "DLS:Yaw" in xtags:
+ self.set_attr_from_xmp_tag("dls_yaw", xtags, ["DLS:Yaw"], float)
+ self.set_attr_from_xmp_tag(
+ "dls_pitch", xtags, ["DLS:Pitch"], float
+ )
+ self.set_attr_from_xmp_tag(
+ "dls_roll", xtags, ["DLS:Roll"], float
+ )
+
+ camera_projection = self.get_xmp_tag(
+ xtags, ["@Camera:ModelType", "Camera:ModelType"]
+ )
if camera_projection is not None:
camera_projection = camera_projection.lower()
@@ -460,9 +585,28 @@ class ODM_Photo:
self.camera_projection = camera_projection
# OPK
- self.set_attr_from_xmp_tag('yaw', xtags, ['@drone-dji:FlightYawDegree', '@Camera:Yaw', 'Camera:Yaw'], float)
- self.set_attr_from_xmp_tag('pitch', xtags, ['@drone-dji:GimbalPitchDegree', '@Camera:Pitch', 'Camera:Pitch'], float)
- self.set_attr_from_xmp_tag('roll', xtags, ['@drone-dji:GimbalRollDegree', '@Camera:Roll', 'Camera:Roll'], float)
+ self.set_attr_from_xmp_tag(
+ "yaw",
+ xtags,
+ ["@drone-dji:FlightYawDegree", "@Camera:Yaw", "Camera:Yaw"],
+ float,
+ )
+ self.set_attr_from_xmp_tag(
+ "pitch",
+ xtags,
+ [
+ "@drone-dji:GimbalPitchDegree",
+ "@Camera:Pitch",
+ "Camera:Pitch",
+ ],
+ float,
+ )
+ self.set_attr_from_xmp_tag(
+ "roll",
+ xtags,
+ ["@drone-dji:GimbalRollDegree", "@Camera:Roll", "Camera:Roll"],
+ float,
+ )
# Normalize YPR conventions (assuming nadir camera)
# Yaw: 0 --> top of image points north
@@ -472,14 +616,16 @@ class ODM_Photo:
# Pitch: 90 --> camera is looking forward
# Roll: 0 (assuming gimbal)
if self.has_ypr():
- if self.camera_make.lower() in ['dji', 'hasselblad']:
+ if self.camera_make.lower() in ["dji", "hasselblad"]:
self.pitch = 90 + self.pitch
-
- if self.camera_make.lower() == 'sensefly':
+
+ if self.camera_make.lower() == "sensefly":
self.roll *= -1
except Exception as e:
- log.ODM_WARNING("Cannot read XMP tags for %s: %s" % (self.filename, str(e)))
+ log.ODM_WARNING(
+ "Cannot read XMP tags for %s: %s" % (self.filename, str(e))
+ )
# self.set_attr_from_xmp_tag('center_wavelength', xtags, [
# 'Camera:CentralWavelength'
@@ -488,50 +634,61 @@ class ODM_Photo:
# self.set_attr_from_xmp_tag('bandwidth', xtags, [
# 'Camera:WavelengthFWHM'
# ], float)
-
+
# Special case band handling for AeroVironment Quantix images
# for some reason, they don't store band information in EXIFs
- if self.camera_make.lower() == 'aerovironment' and \
- self.camera_model.lower() == 'quantix':
+ if (
+ self.camera_make.lower() == "aerovironment"
+ and self.camera_model.lower() == "quantix"
+ ):
matches = re.match("IMG_(\d+)_(\w+)\.\w+", self.filename, re.IGNORECASE)
if matches:
band_aliases = {
- 'GRN': 'Green',
- 'NIR': 'Nir',
- 'RED': 'Red',
- 'RGB': 'RedGreenBlue',
+ "GRN": "Green",
+ "NIR": "Nir",
+ "RED": "Red",
+ "RGB": "RedGreenBlue",
}
self.capture_uuid = matches.group(1)
self.band_name = band_aliases.get(matches.group(2), matches.group(2))
# Sanitize band name since we use it in folder paths
- self.band_name = re.sub('[^A-Za-z0-9]+', '', self.band_name)
+ self.band_name = re.sub("[^A-Za-z0-9]+", "", self.band_name)
self.compute_focal(tags, xtags)
self.compute_opk()
def compute_focal(self, tags, xtags):
try:
- self.focal_ratio = self.extract_focal(self.camera_make, self.camera_model, tags, xtags)
+ self.focal_ratio = self.extract_focal(
+ self.camera_make, self.camera_model, tags, xtags
+ )
except (IndexError, ValueError) as e:
- log.ODM_WARNING("Cannot extract focal ratio for %s: %s" % (self.filename, str(e)))
+ log.ODM_WARNING(
+ "Cannot extract focal ratio for %s: %s" % (self.filename, str(e))
+ )
def extract_focal(self, make, model, tags, xtags):
if make != "unknown":
# remove duplicate 'make' information in 'model'
model = model.replace(make, "")
-
+
sensor_string = (make.strip() + " " + model.strip()).strip().lower()
sensor_width = None
- if ("EXIF FocalPlaneResolutionUnit" in tags and "EXIF FocalPlaneXResolution" in tags):
+ if (
+ "EXIF FocalPlaneResolutionUnit" in tags
+ and "EXIF FocalPlaneXResolution" in tags
+ ):
resolution_unit = self.float_value(tags["EXIF FocalPlaneResolutionUnit"])
mm_per_unit = get_mm_per_unit(resolution_unit)
if mm_per_unit:
pixels_per_unit = self.float_value(tags["EXIF FocalPlaneXResolution"])
if pixels_per_unit <= 0 and "EXIF FocalPlaneYResolution" in tags:
- pixels_per_unit = self.float_value(tags["EXIF FocalPlaneYResolution"])
-
+ pixels_per_unit = self.float_value(
+ tags["EXIF FocalPlaneYResolution"]
+ )
+
if pixels_per_unit > 0 and self.width is not None:
units_per_pixel = 1 / pixels_per_unit
sensor_width = self.width * units_per_pixel * mm_per_unit
@@ -544,7 +701,7 @@ class ODM_Photo:
focal = self.float_value(tags["EXIF FocalLength"])
if focal is None and "@aux:Lens" in xtags:
lens = self.get_xmp_tag(xtags, ["@aux:Lens"])
- matches = re.search('([\d\.]+)mm', str(lens))
+            matches = re.search(r"([\d\.]+)mm", str(lens))
if matches:
focal = float(matches.group(1))
@@ -570,11 +727,11 @@ class ODM_Photo:
if (cast == float or cast == int) and "/" in v:
v = self.try_parse_fraction(v)
setattr(self, attr, cast(v))
-
+
def get_xmp_tag(self, xmp_tags, tags):
if isinstance(tags, str):
tags = [tags]
-
+
for tag in tags:
if tag in xmp_tags:
t = xmp_tags[tag]
@@ -582,7 +739,7 @@ class ODM_Photo:
if isinstance(t, string_types):
return str(t)
elif isinstance(t, dict):
- items = t.get('rdf:Seq', {}).get('rdf:li', {})
+ items = t.get("rdf:Seq", {}).get("rdf:li", {})
if items:
if isinstance(items, string_types):
return items
@@ -590,25 +747,27 @@ class ODM_Photo:
elif isinstance(t, int) or isinstance(t, float):
return t
-
# From https://github.com/mapillary/OpenSfM/blob/master/opensfm/exif.py
def get_xmp(self, file):
img_bytes = file.read()
-        xmp_start = img_bytes.find(b'<x:xmpmeta')
+        xmp_start = img_bytes.find(b"<x:xmpmeta")
-        if self.fnumber is not None and self.exposure_time is not None and self.exposure_time > 0 and self.fnumber > 0:
+ if (
+ self.fnumber is not None
+ and self.exposure_time is not None
+ and self.exposure_time > 0
+ and self.fnumber > 0
+ ):
return self.exposure_time / (self.fnumber * self.fnumber)
def get_horizontal_irradiance(self):
if self.horizontal_irradiance is not None:
- scale = 1.0 # Assumed
+ scale = 1.0 # Assumed
if self.irradiance_scale_to_si is not None:
scale = self.irradiance_scale_to_si
-
+
return self.horizontal_irradiance * scale
elif self.camera_make == "DJI" and self.spectral_irradiance is not None:
# Phantom 4 Multispectral saves this value in @drone-dji:Irradiance
return self.spectral_irradiance
-
+
def get_sun_sensor(self):
if self.sun_sensor is not None:
# TODO: Presence of XMP:SunSensorExposureTime
# and XMP:SunSensorSensitivity might
- # require additional logic. If these two tags are present,
+ # require additional logic. If these two tags are present,
# then sun_sensor is not in physical units?
- return self.sun_sensor / 65535.0 # normalize uint16 (is this correct?)
+ return self.sun_sensor / 65535.0 # normalize uint16 (is this correct?)
elif self.spectral_irradiance is not None:
- scale = 1.0 # Assumed
+ scale = 1.0 # Assumed
if self.irradiance_scale_to_si is not None:
scale = self.irradiance_scale_to_si
-
+
return self.spectral_irradiance * scale
def get_dls_pose(self):
@@ -756,7 +924,7 @@ class ODM_Photo:
def get_bit_depth_max(self):
if self.bits_per_sample:
- return float(2 ** self.bits_per_sample)
+ return float(2**self.bits_per_sample)
else:
# If it's a JPEG, this must be 256
_, ext = os.path.splitext(self.filename)
@@ -791,50 +959,60 @@ class ODM_Photo:
self.camera_projection = camera_projection
def is_thermal(self):
- #Added for support M2EA camera sensor
- if(self.camera_make == "DJI" and self.camera_model == "MAVIC2-ENTERPRISE-ADVANCED" and self.width == 640 and self.height == 512):
+        # Added to support the M2EA camera sensor
+ if (
+ self.camera_make == "DJI"
+ and self.camera_model == "MAVIC2-ENTERPRISE-ADVANCED"
+ and self.width == 640
+ and self.height == 512
+ ):
return True
- #Added for support DJI H20T camera sensor
- if(self.camera_make == "DJI" and self.camera_model == "ZH20T" and self.width == 640 and self.height == 512):
+        # Added to support the DJI H20T camera sensor
+ if (
+ self.camera_make == "DJI"
+ and self.camera_model == "ZH20T"
+ and self.width == 640
+ and self.height == 512
+ ):
return True
- return self.band_name.upper() in ["LWIR"] # TODO: more?
-
+ return self.band_name.upper() in ["LWIR"] # TODO: more?
+
def is_rgb(self):
return self.band_name.upper() in ["RGB", "REDGREENBLUE"]
def camera_id(self):
return " ".join(
- [
- "v2",
- self.camera_make.strip(),
- self.camera_model.strip(),
- str(int(self.width)),
- str(int(self.height)),
- self.camera_projection,
- str(float(self.focal_ratio))[:6],
- ]
- ).lower()
+ [
+ "v2",
+ self.camera_make.strip(),
+ self.camera_model.strip(),
+ str(int(self.width)),
+ str(int(self.height)),
+ self.camera_projection,
+ str(float(self.focal_ratio))[:6],
+ ]
+ ).lower()
- def to_opensfm_exif(self, rolling_shutter = False, rolling_shutter_readout = 0):
+ def to_opensfm_exif(self, rolling_shutter=False, rolling_shutter_readout=0):
capture_time = 0.0
if self.utc_time is not None:
capture_time = self.utc_time / 1000.0
-
+
gps = {}
has_gps = self.latitude is not None and self.longitude is not None
if has_gps:
- gps['latitude'] = self.latitude
- gps['longitude'] = self.longitude
+ gps["latitude"] = self.latitude
+ gps["longitude"] = self.longitude
if self.altitude is not None:
- gps['altitude'] = self.altitude
+ gps["altitude"] = self.altitude
else:
- gps['altitude'] = 0.0
+ gps["altitude"] = 0.0
dop = self.get_gps_dop()
if dop is None:
- dop = 10.0 # Default
-
- gps['dop'] = dop
+ dop = 10.0 # Default
+
+ gps["dop"] = dop
d = {
"make": self.camera_make,
@@ -846,58 +1024,78 @@ class ODM_Photo:
"orientation": self.orientation,
"capture_time": capture_time,
"gps": gps,
- "camera": self.camera_id()
+ "camera": self.camera_id(),
}
if self.has_opk():
- d['opk'] = {
- 'omega': self.omega,
- 'phi': self.phi,
- 'kappa': self.kappa
- }
-
+ d["opk"] = {"omega": self.omega, "phi": self.phi, "kappa": self.kappa}
+
# Speed is not useful without GPS
if self.has_speed() and has_gps:
- d['speed'] = [self.speed_y, self.speed_x, self.speed_z]
-
+ d["speed"] = [self.speed_y, self.speed_x, self.speed_z]
+
if rolling_shutter:
- d['rolling_shutter'] = get_rolling_shutter_readout(self, rolling_shutter_readout)
-
+ d["rolling_shutter"] = get_rolling_shutter_readout(
+ self, rolling_shutter_readout
+ )
+
return d
def has_ypr(self):
- return self.yaw is not None and \
- self.pitch is not None and \
- self.roll is not None
-
+ return self.yaw is not None and self.pitch is not None and self.roll is not None
+
def has_opk(self):
- return self.omega is not None and \
- self.phi is not None and \
- self.kappa is not None
-
+ return (
+ self.omega is not None and self.phi is not None and self.kappa is not None
+ )
+
def has_speed(self):
- return self.speed_x is not None and \
- self.speed_y is not None and \
- self.speed_z is not None
+ return (
+ self.speed_x is not None
+ and self.speed_y is not None
+ and self.speed_z is not None
+ )
def has_geo(self):
- return self.latitude is not None and \
- self.longitude is not None
-
+ return self.latitude is not None and self.longitude is not None
+
def compute_opk(self):
if self.has_ypr() and self.has_geo():
- y, p, r = math.radians(self.yaw), math.radians(self.pitch), math.radians(self.roll)
+ y, p, r = (
+ math.radians(self.yaw),
+ math.radians(self.pitch),
+ math.radians(self.roll),
+ )
- # Ref: New Calibration and Computing Method for Direct
- # Georeferencing of Image and Scanner Data Using the
- # Position and Angular Data of an Hybrid Inertial Navigation System
+ # Ref: New Calibration and Computing Method for Direct
+ # Georeferencing of Image and Scanner Data Using the
+ # Position and Angular Data of an Hybrid Inertial Navigation System
# by Manfred Bäumker
# YPR rotation matrix
- cnb = np.array([[ math.cos(y) * math.cos(p), math.cos(y) * math.sin(p) * math.sin(r) - math.sin(y) * math.cos(r), math.cos(y) * math.sin(p) * math.cos(r) + math.sin(y) * math.sin(r)],
- [ math.sin(y) * math.cos(p), math.sin(y) * math.sin(p) * math.sin(r) + math.cos(y) * math.cos(r), math.sin(y) * math.sin(p) * math.cos(r) - math.cos(y) * math.sin(r)],
- [ -math.sin(p), math.cos(p) * math.sin(r), math.cos(p) * math.cos(r)],
- ])
+ cnb = np.array(
+ [
+ [
+ math.cos(y) * math.cos(p),
+ math.cos(y) * math.sin(p) * math.sin(r)
+ - math.sin(y) * math.cos(r),
+ math.cos(y) * math.sin(p) * math.cos(r)
+ + math.sin(y) * math.sin(r),
+ ],
+ [
+ math.sin(y) * math.cos(p),
+ math.sin(y) * math.sin(p) * math.sin(r)
+ + math.cos(y) * math.cos(r),
+ math.sin(y) * math.sin(p) * math.cos(r)
+ - math.cos(y) * math.sin(r),
+ ],
+ [
+ -math.sin(p),
+ math.cos(p) * math.sin(r),
+ math.cos(p) * math.cos(r),
+ ],
+ ]
+ )
# Convert between image and body coordinates
# Top of image pixels point to flying direction
@@ -906,22 +1104,20 @@ class ODM_Photo:
# camera mount orientations (e.g. backward or sideways)
# (Swap X/Y, flip Z)
- cbb = np.array([[0, 1, 0],
- [1, 0, 0],
- [0, 0, -1]])
-
+ cbb = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]])
+
delta = 1e-7
-
+
alt = self.altitude if self.altitude is not None else 0.0
p1 = np.array(ecef_from_lla(self.latitude + delta, self.longitude, alt))
p2 = np.array(ecef_from_lla(self.latitude - delta, self.longitude, alt))
xnp = p1 - p2
m = np.linalg.norm(xnp)
-
+
if m == 0:
log.ODM_WARNING("Cannot compute OPK angles, divider = 0")
return
-
+
# Unit vector pointing north
xnp /= m
@@ -948,6 +1144,9 @@ class ODM_Photo:
return self.width * self.height / 1e6
else:
return 0.0
-
+
def is_make_model(self, make, model):
- return self.camera_make.lower() == make.lower() and self.camera_model.lower() == model.lower()
+ return (
+ self.camera_make.lower() == make.lower()
+ and self.camera_model.lower() == model.lower()
+ )
diff --git a/opendm/point_cloud.py b/opendm/point_cloud.py
index 79b7083d..53d8a0b9 100644
--- a/opendm/point_cloud.py
+++ b/opendm/point_cloud.py
@@ -12,6 +12,7 @@ from opendm.dem.pdal import run_pipeline
from opendm.opc import classify
from opendm.dem import commands
+
def ply_info(input_ply):
if not os.path.exists(input_ply):
raise IOError("%s does not exist" % input_ply)
@@ -21,7 +22,7 @@ def ply_info(input_ply):
has_views = False
vertex_count = 0
- with open(input_ply, 'r', errors='ignore') as f:
+ with open(input_ply, "r", errors="ignore") as f:
line = f.readline().strip().lower()
i = 0
while line != "end_header":
@@ -37,35 +38,48 @@ def ply_info(input_ply):
i += 1
if i > 100:
raise IOError("Cannot find end_header field. Invalid PLY?")
-
return {
- 'has_normals': has_normals,
- 'vertex_count': vertex_count,
- 'has_views': has_views,
- 'header_lines': i + 1
+ "has_normals": has_normals,
+ "vertex_count": vertex_count,
+ "has_views": has_views,
+ "header_lines": i + 1,
}
def split(input_point_cloud, outdir, filename_template, capacity, dims=None):
- log.ODM_INFO("Splitting point cloud filtering in chunks of {} vertices".format(capacity))
+ log.ODM_INFO(
+        "Splitting point cloud filtering into chunks of {} vertices".format(capacity)
+ )
if not os.path.exists(input_point_cloud):
- log.ODM_ERROR("{} does not exist, cannot split point cloud. The program will now exit.".format(input_point_cloud))
+ log.ODM_ERROR(
+ "{} does not exist, cannot split point cloud. The program will now exit.".format(
+ input_point_cloud
+ )
+ )
sys.exit(1)
if not os.path.exists(outdir):
system.mkdir_p(outdir)
if len(os.listdir(outdir)) != 0:
- log.ODM_ERROR("%s already contains some files. The program will now exit.".format(outdir))
+ log.ODM_ERROR(
+            "{} already contains some files. The program will now exit.".format(outdir)
+ )
sys.exit(1)
- cmd = 'pdal split -i "%s" -o "%s" --capacity %s ' % (input_point_cloud, os.path.join(outdir, filename_template), capacity)
-
+ cmd = 'pdal split -i "%s" -o "%s" --capacity %s ' % (
+ input_point_cloud,
+ os.path.join(outdir, filename_template),
+ capacity,
+ )
+
if filename_template.endswith(".ply"):
- cmd += ("--writers.ply.sized_types=false "
- "--writers.ply.storage_mode=\"little endian\" ")
+ cmd += (
+ "--writers.ply.sized_types=false "
+ '--writers.ply.storage_mode="little endian" '
+ )
if dims is not None:
cmd += '--writers.ply.dims="%s"' % dims
system.run(cmd)
@@ -73,53 +87,71 @@ def split(input_point_cloud, outdir, filename_template, capacity, dims=None):
return [os.path.join(outdir, f) for f in os.listdir(outdir)]
-def filter(input_point_cloud, output_point_cloud, output_stats, standard_deviation=2.5, sample_radius=0, boundary=None, max_concurrency=1):
+def filter(
+ input_point_cloud,
+ output_point_cloud,
+ output_stats,
+ standard_deviation=2.5,
+ sample_radius=0,
+ boundary=None,
+ max_concurrency=1,
+):
"""
Filters a point cloud
"""
if not os.path.exists(input_point_cloud):
- log.ODM_ERROR("{} does not exist. The program will now exit.".format(input_point_cloud))
+ log.ODM_ERROR(
+ "{} does not exist. The program will now exit.".format(input_point_cloud)
+ )
sys.exit(1)
args = [
'--input "%s"' % input_point_cloud,
'--output "%s"' % output_point_cloud,
- '--concurrency %s' % max_concurrency
+ "--concurrency %s" % max_concurrency,
]
if sample_radius > 0:
log.ODM_INFO("Sampling points around a %sm radius" % sample_radius)
- args.append('--radius %s' % sample_radius)
+ args.append("--radius %s" % sample_radius)
meank = 16
- log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation))
- args.append('--meank %s' % meank)
- args.append('--std %s' % standard_deviation)
+ log.ODM_INFO(
+ "Filtering {} (statistical, meanK {}, standard deviation {})".format(
+ input_point_cloud, meank, standard_deviation
+ )
+ )
+ args.append("--meank %s" % meank)
+ args.append("--std %s" % standard_deviation)
args.append('--stats "%s"' % output_stats)
-
+
if boundary is not None:
log.ODM_INFO("Boundary {}".format(boundary))
- fd, boundary_json_file = tempfile.mkstemp(suffix='.boundary.json')
+ fd, boundary_json_file = tempfile.mkstemp(suffix=".boundary.json")
os.close(fd)
- with open(boundary_json_file, 'w') as f:
+ with open(boundary_json_file, "w") as f:
f.write(as_geojson(boundary))
args.append('--boundary "%s"' % boundary_json_file)
system.run('"%s" %s' % (context.fpcfilter_path, " ".join(args)))
if not os.path.exists(output_point_cloud):
- log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))
+ log.ODM_WARNING(
+ "{} not found, filtering has failed.".format(output_point_cloud)
+ )
def get_spacing(stats_file, resolution_fallback=5.0):
def fallback():
- log.ODM_WARNING("Cannot read %s, falling back to resolution estimate" % stats_file)
+ log.ODM_WARNING(
+ "Cannot read %s, falling back to resolution estimate" % stats_file
+ )
return (resolution_fallback / 100.0) / 2.0
if not os.path.isfile(stats_file):
return fallback()
-
- with open(stats_file, 'r') as f:
+
+ with open(stats_file, "r") as f:
j = json.loads(f.read())
if "spacing" in j:
d = j["spacing"]
@@ -130,17 +162,25 @@ def get_spacing(stats_file, resolution_fallback=5.0):
else:
return fallback()
+
def export_info_json(pointcloud_path, info_file_path):
- system.run('pdal info --dimensions "X,Y,Z" "{0}" > "{1}"'.format(pointcloud_path, info_file_path))
+ system.run(
+ 'pdal info --dimensions "X,Y,Z" "{0}" > "{1}"'.format(
+ pointcloud_path, info_file_path
+ )
+ )
def export_summary_json(pointcloud_path, summary_file_path):
- system.run('pdal info --summary "{0}" > "{1}"'.format(pointcloud_path, summary_file_path))
+ system.run(
+ 'pdal info --summary "{0}" > "{1}"'.format(pointcloud_path, summary_file_path)
+ )
+
def get_extent(input_point_cloud):
- fd, json_file = tempfile.mkstemp(suffix='.json')
+ fd, json_file = tempfile.mkstemp(suffix=".json")
os.close(fd)
-
+
# Get point cloud extent
fallback = False
@@ -151,38 +191,64 @@ def get_extent(input_point_cloud):
try:
if not fallback:
- run('pdal info --summary "{0}" > "{1}"'.format(input_point_cloud, json_file))
+ run(
+ 'pdal info --summary "{0}" > "{1}"'.format(input_point_cloud, json_file)
+ )
except:
fallback = True
run('pdal info "{0}" > "{1}"'.format(input_point_cloud, json_file))
bounds = {}
- with open(json_file, 'r') as f:
+ with open(json_file, "r") as f:
result = json.loads(f.read())
-
- if not fallback:
- summary = result.get('summary')
- if summary is None: raise Exception("Cannot compute summary for %s (summary key missing)" % input_point_cloud)
- bounds = summary.get('bounds')
- else:
- stats = result.get('stats')
- if stats is None: raise Exception("Cannot compute bounds for %s (stats key missing)" % input_point_cloud)
- bbox = stats.get('bbox')
- if bbox is None: raise Exception("Cannot compute bounds for %s (bbox key missing)" % input_point_cloud)
- native = bbox.get('native')
- if native is None: raise Exception("Cannot compute bounds for %s (native key missing)" % input_point_cloud)
- bounds = native.get('bbox')
- if bounds is None: raise Exception("Cannot compute bounds for %s (bounds key missing)" % input_point_cloud)
-
- if bounds.get('maxx', None) is None or \
- bounds.get('minx', None) is None or \
- bounds.get('maxy', None) is None or \
- bounds.get('miny', None) is None or \
- bounds.get('maxz', None) is None or \
- bounds.get('minz', None) is None:
- raise Exception("Cannot compute bounds for %s (invalid keys) %s" % (input_point_cloud, str(bounds)))
-
+ if not fallback:
+ summary = result.get("summary")
+ if summary is None:
+ raise Exception(
+ "Cannot compute summary for %s (summary key missing)"
+ % input_point_cloud
+ )
+ bounds = summary.get("bounds")
+ else:
+ stats = result.get("stats")
+ if stats is None:
+ raise Exception(
+ "Cannot compute bounds for %s (stats key missing)"
+ % input_point_cloud
+ )
+ bbox = stats.get("bbox")
+ if bbox is None:
+ raise Exception(
+ "Cannot compute bounds for %s (bbox key missing)"
+ % input_point_cloud
+ )
+ native = bbox.get("native")
+ if native is None:
+ raise Exception(
+ "Cannot compute bounds for %s (native key missing)"
+ % input_point_cloud
+ )
+ bounds = native.get("bbox")
+
+ if bounds is None:
+ raise Exception(
+ "Cannot compute bounds for %s (bounds key missing)" % input_point_cloud
+ )
+
+ if (
+ bounds.get("maxx", None) is None
+ or bounds.get("minx", None) is None
+ or bounds.get("maxy", None) is None
+ or bounds.get("miny", None) is None
+ or bounds.get("maxz", None) is None
+ or bounds.get("minz", None) is None
+ ):
+ raise Exception(
+ "Cannot compute bounds for %s (invalid keys) %s"
+ % (input_point_cloud, str(bounds))
+ )
+
os.remove(json_file)
return bounds
@@ -198,12 +264,12 @@ def merge(input_point_cloud_files, output_file, rerun=False):
os.remove(output_file)
kwargs = {
- 'all_inputs': " ".join(map(double_quote, input_point_cloud_files)),
- 'output': output_file
+ "all_inputs": " ".join(map(double_quote, input_point_cloud_files)),
+ "output": output_file,
}
system.run('lasmerge -i {all_inputs} -o "{output}"'.format(**kwargs))
-
+
def fast_merge_ply(input_point_cloud_files, output_file):
# Assumes that all input files share the same header/content format
@@ -213,33 +279,35 @@ def fast_merge_ply(input_point_cloud_files, output_file):
if num_files == 0:
log.ODM_WARNING("No input point cloud files to process")
return
-
+
if io.file_exists(output_file):
log.ODM_WARNING("Removing previous point cloud: %s" % output_file)
os.remove(output_file)
-
- vertex_count = sum([ply_info(pcf)['vertex_count'] for pcf in input_point_cloud_files])
+
+ vertex_count = sum(
+ [ply_info(pcf)["vertex_count"] for pcf in input_point_cloud_files]
+ )
master_file = input_point_cloud_files[0]
with open(output_file, "wb") as out:
with open(master_file, "r", errors="ignore") as fhead:
# Copy header
line = fhead.readline()
- out.write(line.encode('utf8'))
+ out.write(line.encode("utf8"))
i = 0
while line.strip().lower() != "end_header":
line = fhead.readline()
-
+
# Intercept element vertex field
if line.lower().startswith("element vertex "):
- out.write(("element vertex %s\n" % vertex_count).encode('utf8'))
+ out.write(("element vertex %s\n" % vertex_count).encode("utf8"))
else:
- out.write(line.encode('utf8'))
+ out.write(line.encode("utf8"))
i += 1
if i > 100:
raise IOError("Cannot find end_header field. Invalid PLY?")
-
+
for ipc in input_point_cloud_files:
i = 0
with open(ipc, "rb") as fin:
@@ -251,10 +319,10 @@ def fast_merge_ply(input_point_cloud_files, output_file):
i += 1
if i > 100:
raise IOError("Cannot find end_header field. Invalid PLY?")
-
+
# Write fields
out.write(fin.read())
-
+
return output_file
@@ -265,78 +333,107 @@ def merge_ply(input_point_cloud_files, output_file, dims=None):
return
cmd = [
- 'pdal',
- 'merge',
- '--writers.ply.sized_types=false',
+ "pdal",
+ "merge",
+ "--writers.ply.sized_types=false",
'--writers.ply.storage_mode="little endian"',
- ('--writers.ply.dims="%s"' % dims) if dims is not None else '',
- ' '.join(map(double_quote, input_point_cloud_files + [output_file])),
+ ('--writers.ply.dims="%s"' % dims) if dims is not None else "",
+ " ".join(map(double_quote, input_point_cloud_files + [output_file])),
]
- system.run(' '.join(cmd))
+ system.run(" ".join(cmd))
+
def post_point_cloud_steps(args, tree, rerun=False):
# Classify and rectify before generating derivate files
if args.pc_classify:
- pc_classify_marker = os.path.join(tree.odm_georeferencing, 'pc_classify_done.txt')
+ pc_classify_marker = os.path.join(
+ tree.odm_georeferencing, "pc_classify_done.txt"
+ )
if not io.file_exists(pc_classify_marker) or rerun:
- log.ODM_INFO("Classifying {} using Simple Morphological Filter (1/2)".format(tree.odm_georeferencing_model_laz))
- commands.classify(tree.odm_georeferencing_model_laz,
- args.smrf_scalar,
- args.smrf_slope,
- args.smrf_threshold,
- args.smrf_window
- )
+ log.ODM_INFO(
+ "Classifying {} using Simple Morphological Filter (1/2)".format(
+ tree.odm_georeferencing_model_laz
+ )
+ )
+ commands.classify(
+ tree.odm_georeferencing_model_laz,
+ args.smrf_scalar,
+ args.smrf_slope,
+ args.smrf_threshold,
+ args.smrf_window,
+ )
- log.ODM_INFO("Classifying {} using OpenPointClass (2/2)".format(tree.odm_georeferencing_model_laz))
+ log.ODM_INFO(
+ "Classifying {} using OpenPointClass (2/2)".format(
+ tree.odm_georeferencing_model_laz
+ )
+ )
classify(tree.odm_georeferencing_model_laz, args.max_concurrency)
- with open(pc_classify_marker, 'w') as f:
- f.write('Classify: smrf\n')
- f.write('Scalar: {}\n'.format(args.smrf_scalar))
- f.write('Slope: {}\n'.format(args.smrf_slope))
- f.write('Threshold: {}\n'.format(args.smrf_threshold))
- f.write('Window: {}\n'.format(args.smrf_window))
-
+ with open(pc_classify_marker, "w") as f:
+ f.write("Classify: smrf\n")
+ f.write("Scalar: {}\n".format(args.smrf_scalar))
+ f.write("Slope: {}\n".format(args.smrf_slope))
+ f.write("Threshold: {}\n".format(args.smrf_threshold))
+ f.write("Window: {}\n".format(args.smrf_window))
+
if args.pc_rectify:
commands.rectify(tree.odm_georeferencing_model_laz)
# XYZ point cloud output
if args.pc_csv:
log.ODM_INFO("Creating CSV file (XYZ format)")
-
+
if not io.file_exists(tree.odm_georeferencing_xyz_file) or rerun:
- system.run("pdal translate -i \"{}\" "
- "-o \"{}\" "
+ system.run(
+ 'pdal translate -i "{}" '
+ '-o "{}" '
"--writers.text.format=csv "
- "--writers.text.order=\"X,Y,Z\" "
+ '--writers.text.order="X,Y,Z" '
"--writers.text.keep_unspecified=false ".format(
- tree.odm_georeferencing_model_laz,
- tree.odm_georeferencing_xyz_file))
+ tree.odm_georeferencing_model_laz, tree.odm_georeferencing_xyz_file
+ )
+ )
else:
- log.ODM_WARNING("Found existing CSV file %s" % tree.odm_georeferencing_xyz_file)
+ log.ODM_WARNING(
+ "Found existing CSV file %s" % tree.odm_georeferencing_xyz_file
+ )
# LAS point cloud output
if args.pc_las:
log.ODM_INFO("Creating LAS file")
-
+
if not io.file_exists(tree.odm_georeferencing_model_las) or rerun:
- system.run("pdal translate -i \"{}\" "
- "-o \"{}\" ".format(
- tree.odm_georeferencing_model_laz,
- tree.odm_georeferencing_model_las))
+ system.run(
+ 'pdal translate -i "{}" '
+ '-o "{}" '.format(
+ tree.odm_georeferencing_model_laz, tree.odm_georeferencing_model_las
+ )
+ )
else:
- log.ODM_WARNING("Found existing LAS file %s" % tree.odm_georeferencing_model_las)
+ log.ODM_WARNING(
+ "Found existing LAS file %s" % tree.odm_georeferencing_model_las
+ )
# EPT point cloud output
if args.pc_ept:
log.ODM_INFO("Creating Entwine Point Tile output")
- entwine.build([tree.odm_georeferencing_model_laz], tree.entwine_pointcloud, max_concurrency=args.max_concurrency, rerun=rerun)
+ entwine.build(
+ [tree.odm_georeferencing_model_laz],
+ tree.entwine_pointcloud,
+ max_concurrency=args.max_concurrency,
+ rerun=rerun,
+ )
# COPC point clouds
if args.pc_copc:
log.ODM_INFO("Creating Cloud Optimized Point Cloud (COPC)")
- copc_output = io.related_file_path(tree.odm_georeferencing_model_laz, postfix=".copc")
- entwine.build_copc([tree.odm_georeferencing_model_laz], copc_output, convert_rgb_8_to_16=True)
\ No newline at end of file
+ copc_output = io.related_file_path(
+ tree.odm_georeferencing_model_laz, postfix=".copc"
+ )
+ entwine.build_copc(
+ [tree.odm_georeferencing_model_laz], copc_output, convert_rgb_8_to_16=True
+ )
diff --git a/opendm/progress.py b/opendm/progress.py
index 264db82d..20cce8e1 100644
--- a/opendm/progress.py
+++ b/opendm/progress.py
@@ -2,13 +2,14 @@ import socket
import os
from opendm import log
-PROGRESS_BROADCAST_PORT = 6367 #ODMR
+PROGRESS_BROADCAST_PORT = 6367 # ODMR
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except:
log.ODM_WARNING("Cannot create UDP socket, progress reporting will be disabled.")
sock = None
+
class Broadcaster:
def __init__(self, port):
self.port = port
@@ -28,13 +29,24 @@ class Broadcaster:
UDP_IP = "127.0.0.1"
if global_progress > 100:
- log.ODM_WARNING("Global progress is > 100 (%s), please contact the developers." % global_progress)
+ log.ODM_WARNING(
+ "Global progress is > 100 (%s), please contact the developers."
+ % global_progress
+ )
global_progress = 100
try:
- sock.sendto("PGUP/{}/{}/{}".format(self.pid, self.project_name, float(global_progress)).encode('utf8'),
- (UDP_IP, self.port))
+ sock.sendto(
+ "PGUP/{}/{}/{}".format(
+ self.pid, self.project_name, float(global_progress)
+ ).encode("utf8"),
+ (UDP_IP, self.port),
+ )
except Exception as e:
- log.ODM_WARNING("Failed to broadcast progress update on UDP port %s (%s)" % (str(self.port), str(e)))
+ log.ODM_WARNING(
+ "Failed to broadcast progress update on UDP port %s (%s)"
+ % (str(self.port), str(e))
+ )
-progressbc = Broadcaster(PROGRESS_BROADCAST_PORT)
\ No newline at end of file
+
+progressbc = Broadcaster(PROGRESS_BROADCAST_PORT)
diff --git a/opendm/pseudogeo.py b/opendm/pseudogeo.py
index 5d706fef..94617181 100644
--- a/opendm/pseudogeo.py
+++ b/opendm/pseudogeo.py
@@ -4,11 +4,14 @@ from osgeo.gdalconst import GA_Update
from opendm import io
from opendm import log
+
def get_pseudogeo_utm():
- return '+proj=utm +zone=30 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
+ return "+proj=utm +zone=30 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
+
def get_pseudogeo_scale():
- return 0.1 # Arbitrarily chosen
+ return 0.1 # Arbitrarily chosen
+
def add_pseudo_georeferencing(geotiff):
if not io.file_exists(geotiff):
@@ -16,15 +19,23 @@ def add_pseudo_georeferencing(geotiff):
return
try:
- log.ODM_INFO("Adding pseudo georeferencing (raster should show up at the equator) to %s" % geotiff)
+ log.ODM_INFO(
+ "Adding pseudo georeferencing (raster should show up at the equator) to %s"
+ % geotiff
+ )
dst_ds = gdal.Open(geotiff, GA_Update)
srs = osr.SpatialReference()
srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
srs.ImportFromProj4(get_pseudogeo_utm())
- dst_ds.SetProjection( srs.ExportToWkt() )
- dst_ds.SetGeoTransform( [ 0.0, get_pseudogeo_scale(), 0.0, 0.0, 0.0, -get_pseudogeo_scale() ] )
+ dst_ds.SetProjection(srs.ExportToWkt())
+ dst_ds.SetGeoTransform(
+ [0.0, get_pseudogeo_scale(), 0.0, 0.0, 0.0, -get_pseudogeo_scale()]
+ )
dst_ds = None
except Exception as e:
- log.ODM_WARNING("Cannot add pseudo georeferencing to %s (%s), skipping..." % (geotiff, str(e)))
\ No newline at end of file
+ log.ODM_WARNING(
+ "Cannot add pseudo georeferencing to %s (%s), skipping..."
+ % (geotiff, str(e))
+ )
diff --git a/opendm/remote.py b/opendm/remote.py
index 8e26f7de..a54f3a4f 100644
--- a/opendm/remote.py
+++ b/opendm/remote.py
@@ -20,33 +20,46 @@ try:
except ImportError:
import Queue as queue
+
class LocalRemoteExecutor:
"""
A class for performing OpenSfM reconstructions and full ODM pipeline executions
using a mix of local and remote processing. Tasks are executed locally one at a time
and remotely until a node runs out of available slots for processing. This allows us
- to use the processing power of the current machine as well as offloading tasks to a
+    to use the processing power of the current machine as well as offload tasks to a
network node.
"""
- def __init__(self, nodeUrl, rolling_shutter = False, rerun = False):
+
+ def __init__(self, nodeUrl, rolling_shutter=False, rerun=False):
self.node = Node.from_url(nodeUrl)
self.params = {
- 'tasks': [],
- 'threads': [],
- 'rolling_shutter': rolling_shutter,
- 'rerun': rerun
+ "tasks": [],
+ "threads": [],
+ "rolling_shutter": rolling_shutter,
+ "rerun": rerun,
}
self.node_online = True
- log.ODM_INFO("LRE: Initializing using cluster node %s:%s" % (self.node.host, self.node.port))
+ log.ODM_INFO(
+ "LRE: Initializing using cluster node %s:%s"
+ % (self.node.host, self.node.port)
+ )
try:
info = self.node.info()
- log.ODM_INFO("LRE: Node is online and running %s version %s" % (info.engine, info.engine_version))
+ log.ODM_INFO(
+ "LRE: Node is online and running %s version %s"
+ % (info.engine, info.engine_version)
+ )
except exceptions.NodeConnectionError:
- log.ODM_WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.")
+ log.ODM_WARNING(
+ "LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally."
+ )
self.node_online = False
except Exception as e:
- raise system.ExitException("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
+ raise system.ExitException(
+ "LRE: An unexpected problem happened while opening the node connection: %s"
+ % str(e)
+ )
def set_projects(self, paths):
self.project_paths = paths
@@ -66,7 +79,7 @@ class LocalRemoteExecutor:
error = None
local_processing = False
max_remote_tasks = None
-
+
calculate_task_limit_lock = threading.Lock()
finished_tasks = AtomicCounter(0)
remote_running_tasks = AtomicCounter(0)
@@ -83,28 +96,37 @@ class LocalRemoteExecutor:
except exceptions.OdmError:
removed = False
return removed
-
+
def cleanup_remote_tasks():
- if self.params['tasks']:
+ if self.params["tasks"]:
log.ODM_WARNING("LRE: Attempting to cleanup remote tasks")
else:
log.ODM_INFO("LRE: No remote tasks left to cleanup")
- for task in self.params['tasks']:
- log.ODM_INFO("LRE: Removing remote task %s... %s" % (task.uuid, 'OK' if remove_task_safe(task) else 'NO'))
+ for task in self.params["tasks"]:
+ log.ODM_INFO(
+ "LRE: Removing remote task %s... %s"
+ % (task.uuid, "OK" if remove_task_safe(task) else "NO")
+ )
- def handle_result(task, local, error = None, partial=False):
+ def handle_result(task, local, error=None, partial=False):
def cleanup_remote():
if not partial and task.remote_task:
- log.ODM_INFO("LRE: Cleaning up remote task (%s)... %s" % (task.remote_task.uuid, 'OK' if remove_task_safe(task.remote_task) else 'NO'))
- self.params['tasks'].remove(task.remote_task)
+ log.ODM_INFO(
+ "LRE: Cleaning up remote task (%s)... %s"
+ % (
+ task.remote_task.uuid,
+ "OK" if remove_task_safe(task.remote_task) else "NO",
+ )
+ )
+ self.params["tasks"].remove(task.remote_task)
task.remote_task = None
if error:
log.ODM_WARNING("LRE: %s failed with: %s" % (task, str(error)))
-
+
# Special case in which the error is caused by a SIGTERM signal
- # this means a local processing was terminated either by CTRL+C or
+ # this means a local processing was terminated either by CTRL+C or
# by canceling the task.
if str(error) == "Child was terminated by signal 15":
system.exit_gracefully()
@@ -116,46 +138,62 @@ class LocalRemoteExecutor:
with calculate_task_limit_lock:
if nonloc.max_remote_tasks is None:
node_task_limit = 0
- for t in self.params['tasks']:
+ for t in self.params["tasks"]:
try:
info = t.info(with_output=-3)
- if info.status == TaskStatus.RUNNING and info.processing_time >= 0 and len(info.output) >= 3:
+ if (
+ info.status == TaskStatus.RUNNING
+ and info.processing_time >= 0
+ and len(info.output) >= 3
+ ):
node_task_limit += 1
except exceptions.OdmError:
pass
nonloc.max_remote_tasks = max(1, node_task_limit)
- log.ODM_INFO("LRE: Node task limit reached. Setting max remote tasks to %s" % node_task_limit)
-
+ log.ODM_INFO(
+ "LRE: Node task limit reached. Setting max remote tasks to %s"
+ % node_task_limit
+ )
# Retry, but only if the error is not related to a task failure
- if task.retries < task.max_retries and not isinstance(error, exceptions.TaskFailedError):
+ if task.retries < task.max_retries and not isinstance(
+ error, exceptions.TaskFailedError
+ ):
# Put task back in queue
# Don't increment the retry counter if this task simply reached the task
# limit count.
if not task_limit_reached:
task.retries += 1
- task.wait_until = datetime.datetime.now() + datetime.timedelta(seconds=task.retries * task.retry_timeout)
+ task.wait_until = datetime.datetime.now() + datetime.timedelta(
+ seconds=task.retries * task.retry_timeout
+ )
cleanup_remote()
q.task_done()
- log.ODM_INFO("LRE: Re-queueing %s (retries: %s)" % (task, task.retries))
+ log.ODM_INFO(
+ "LRE: Re-queueing %s (retries: %s)" % (task, task.retries)
+ )
q.put(task)
- if not local: remote_running_tasks.increment(-1)
+ if not local:
+ remote_running_tasks.increment(-1)
return
else:
nonloc.error = error
finished_tasks.increment()
- if not local: remote_running_tasks.increment(-1)
+ if not local:
+ remote_running_tasks.increment(-1)
else:
if not partial:
log.ODM_INFO("LRE: %s finished successfully" % task)
finished_tasks.increment()
- if not local: remote_running_tasks.increment(-1)
+ if not local:
+ remote_running_tasks.increment(-1)
cleanup_remote()
- if not partial: q.task_done()
-
+ if not partial:
+ q.task_done()
+
def local_worker():
while True:
# Block until a new queue item is available
@@ -174,7 +212,6 @@ class LocalRemoteExecutor:
finally:
nonloc.local_processing = False
-
def remote_worker():
while True:
# Block until a new queue item is available
@@ -183,10 +220,13 @@ class LocalRemoteExecutor:
if task is None or nonloc.error is not None:
q.task_done()
break
-
+
# Yield to local processing
if not nonloc.local_processing:
- log.ODM_INFO("LRE: Yielding to local processing, sending %s back to the queue" % task)
+ log.ODM_INFO(
+ "LRE: Yielding to local processing, sending %s back to the queue"
+ % task
+ )
q.put(task)
q.task_done()
time.sleep(0.05)
@@ -194,7 +234,10 @@ class LocalRemoteExecutor:
# If we've found an estimate of the limit on the maximum number of tasks
# a node can process, we block until some tasks have completed
- if nonloc.max_remote_tasks is not None and remote_running_tasks.value >= nonloc.max_remote_tasks:
+ if (
+ nonloc.max_remote_tasks is not None
+ and remote_running_tasks.value >= nonloc.max_remote_tasks
+ ):
q.put(task)
q.task_done()
time.sleep(2)
@@ -206,7 +249,7 @@ class LocalRemoteExecutor:
task.process(False, handle_result)
except Exception as e:
handle_result(task, False, e)
-
+
# Create queue thread
local_thread = threading.Thread(target=local_worker)
if self.node_online:
@@ -221,12 +264,14 @@ class LocalRemoteExecutor:
# block until all tasks are done (or CTRL+C)
try:
- while finished_tasks.value < len(self.project_paths) and nonloc.error is None:
+ while (
+ finished_tasks.value < len(self.project_paths) and nonloc.error is None
+ ):
time.sleep(0.5)
except KeyboardInterrupt:
log.ODM_WARNING("LRE: CTRL+C")
system.exit_gracefully()
-
+
# stop workers
q.put(None)
if self.node_online:
@@ -238,73 +283,86 @@ class LocalRemoteExecutor:
remote_thread.join()
# Wait for all remains threads
- for thrds in self.params['threads']:
+ for thrds in self.params["threads"]:
thrds.join()
-
+
system.remove_cleanup_callback(cleanup_remote_tasks)
cleanup_remote_tasks()
if nonloc.error is not None:
# Try not to leak access token
if isinstance(nonloc.error, exceptions.NodeConnectionError):
- raise exceptions.NodeConnectionError("A connection error happened. Check the connection to the processing node and try again.")
+ raise exceptions.NodeConnectionError(
+ "A connection error happened. Check the connection to the processing node and try again."
+ )
else:
raise nonloc.error
-
+
class NodeTaskLimitReachedException(Exception):
pass
+
class Task:
def __init__(self, project_path, node, params, max_retries=5, retry_timeout=10):
self.project_path = project_path
self.node = node
self.params = params
- self.wait_until = datetime.datetime.now() # Don't run this task until a certain time
+ self.wait_until = (
+ datetime.datetime.now()
+ ) # Don't run this task until a certain time
self.max_retries = max_retries
self.retries = 0
self.retry_timeout = retry_timeout
self.remote_task = None
def process(self, local, done):
- def handle_result(error = None, partial=False):
+ def handle_result(error=None, partial=False):
done(self, local, error, partial)
- log.ODM_INFO("LRE: About to process %s %s" % (self, 'locally' if local else 'remotely'))
-
+ log.ODM_INFO(
+ "LRE: About to process %s %s" % (self, "locally" if local else "remotely")
+ )
+
if local:
- self._process_local(handle_result) # Block until complete
+ self._process_local(handle_result) # Block until complete
else:
now = datetime.datetime.now()
if self.wait_until > now:
wait_for = (self.wait_until - now).seconds + 1
- log.ODM_INFO("LRE: Waiting %s seconds before processing %s" % (wait_for, self))
+ log.ODM_INFO(
+ "LRE: Waiting %s seconds before processing %s" % (wait_for, self)
+ )
time.sleep(wait_for)
# TODO: we could consider uploading multiple tasks
# in parallel. But since we are using the same node
# perhaps this wouldn't be a big speedup.
- self._process_remote(handle_result) # Block until upload is complete
+ self._process_remote(handle_result) # Block until upload is complete
def path(self, *paths):
return os.path.join(self.project_path, *paths)
def touch(self, file):
- with open(file, 'w') as fout:
+ with open(file, "w") as fout:
fout.write("Done!\n")
def create_seed_payload(self, paths, touch_files=[]):
paths = filter(os.path.exists, map(lambda p: self.path(p), paths))
outfile = self.path("seed.zip")
- with zipfile.ZipFile(outfile, "w", compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zf:
+ with zipfile.ZipFile(
+ outfile, "w", compression=zipfile.ZIP_DEFLATED, allowZip64=True
+ ) as zf:
for p in paths:
if os.path.isdir(p):
for root, _, filenames in os.walk(p):
for filename in filenames:
filename = os.path.join(root, filename)
filename = os.path.normpath(filename)
- zf.write(filename, os.path.relpath(filename, self.project_path))
+ zf.write(
+ filename, os.path.relpath(filename, self.project_path)
+ )
else:
zf.write(p, os.path.relpath(p, self.project_path))
@@ -319,33 +377,41 @@ class Task:
done()
except Exception as e:
done(e)
-
+
def _process_remote(self, done):
try:
self.process_remote(done)
- done(error=None, partial=True) # Upload is completed, but processing is not (partial)
+ done(
+ error=None, partial=True
+ ) # Upload is completed, but processing is not (partial)
except Exception as e:
done(e)
- def execute_remote_task(self, done, seed_files = [], seed_touch_files = [], outputs = [], ):
+ def execute_remote_task(
+ self,
+ done,
+ seed_files=[],
+ seed_touch_files=[],
+ outputs=[],
+ ):
"""
Run a task by creating a seed file with all files in seed_files, optionally
creating empty files (for flag checks) specified in seed_touch_files
and returning the results specified in outputs. Yeah it's pretty cool!
"""
seed_file = self.create_seed_payload(seed_files, touch_files=seed_touch_files)
-
+
# Find all images
images = glob.glob(self.path("images/**"))
# Add GCP (optional)
if os.path.exists(self.path("gcp_list.txt")):
images.append(self.path("gcp_list.txt"))
-
+
# Add GEO (optional)
if os.path.exists(self.path("geo.txt")):
images.append(self.path("geo.txt"))
-
+
# Add seed file
images.append(seed_file)
@@ -358,48 +424,67 @@ class Task:
nonloc.last_update = time.time()
# Upload task
- task = self.node.create_task(images,
- get_submodel_args_dict(config.config()),
- progress_callback=print_progress,
- skip_post_processing=True,
- outputs=outputs)
+ task = self.node.create_task(
+ images,
+ get_submodel_args_dict(config.config()),
+ progress_callback=print_progress,
+ skip_post_processing=True,
+ outputs=outputs,
+ )
self.remote_task = task
# Cleanup seed file
os.remove(seed_file)
# Keep track of tasks for cleanup
- self.params['tasks'].append(task)
+ self.params["tasks"].append(task)
# Check status
info = task.info()
if info.status in [TaskStatus.RUNNING, TaskStatus.COMPLETED]:
+
def monitor():
class nonloc:
status_callback_calls = 0
last_update = 0
def status_callback(info):
- # If a task switches from RUNNING to QUEUED, then we need to
+ # If a task switches from RUNNING to QUEUED, then we need to
# stop the process and re-add the task to the queue.
if info.status == TaskStatus.QUEUED:
- log.ODM_WARNING("LRE: %s (%s) turned from RUNNING to QUEUED. Re-adding to back of the queue." % (self, task.uuid))
- raise NodeTaskLimitReachedException("Delayed task limit reached")
+ log.ODM_WARNING(
+ "LRE: %s (%s) turned from RUNNING to QUEUED. Re-adding to back of the queue."
+ % (self, task.uuid)
+ )
+ raise NodeTaskLimitReachedException(
+ "Delayed task limit reached"
+ )
elif info.status == TaskStatus.RUNNING:
# Print a status message once in a while
nonloc.status_callback_calls += 1
if nonloc.status_callback_calls > 30:
- log.ODM_INFO("LRE: %s (%s) is still running" % (self, task.uuid))
+ log.ODM_INFO(
+ "LRE: %s (%s) is still running" % (self, task.uuid)
+ )
nonloc.status_callback_calls = 0
+
try:
+
def print_progress(percentage):
- if (time.time() - nonloc.last_update >= 2) or int(percentage) == 100:
- log.ODM_INFO("LRE: Download of %s at [%s%%]" % (self, int(percentage)))
+ if (time.time() - nonloc.last_update >= 2) or int(
+ percentage
+ ) == 100:
+ log.ODM_INFO(
+ "LRE: Download of %s at [%s%%]"
+ % (self, int(percentage))
+ )
nonloc.last_update = time.time()
task.wait_for_completion(status_callback=status_callback)
log.ODM_INFO("LRE: Downloading assets for %s" % self)
- task.download_assets(self.project_path, progress_callback=print_progress)
+ task.download_assets(
+ self.project_path, progress_callback=print_progress
+ )
log.ODM_INFO("LRE: Downloaded and extracted assets for %s" % self)
done()
except exceptions.TaskFailedError as e:
@@ -409,30 +494,37 @@ class Task:
# Save to file
error_log_path = self.path("error.log")
- with open(error_log_path, 'w') as f:
- f.write('\n'.join(output_lines) + '\n')
+ with open(error_log_path, "w") as f:
+ f.write("\n".join(output_lines) + "\n")
- msg = "(%s) failed with task output: %s\nFull log saved at %s" % (task.uuid, "\n".join(output_lines[-10:]), error_log_path)
+ msg = (
+ "(%s) failed with task output: %s\nFull log saved at %s"
+ % (task.uuid, "\n".join(output_lines[-10:]), error_log_path)
+ )
done(exceptions.TaskFailedError(msg))
except:
- log.ODM_WARNING("LRE: Could not retrieve task output for %s (%s)" % (self, task.uuid))
+ log.ODM_WARNING(
+ "LRE: Could not retrieve task output for %s (%s)"
+ % (self, task.uuid)
+ )
done(e)
except Exception as e:
done(e)
# Launch monitor thread and return
t = threading.Thread(target=monitor)
- self.params['threads'].append(t)
+ self.params["threads"].append(t)
t.start()
elif info.status == TaskStatus.QUEUED:
raise NodeTaskLimitReachedException("Task limit reached")
else:
- raise Exception("Could not send task to node, task status is %s" % str(info.status))
+ raise Exception(
+ "Could not send task to node, task status is %s" % str(info.status)
+ )
-
def process_local(self):
raise NotImplementedError()
-
+
def process_remote(self, done):
raise NotImplementedError()
@@ -446,31 +538,47 @@ class ReconstructionTask(Task):
log.ODM_INFO("==================================")
log.ODM_INFO("Local Reconstruction %s" % octx.name())
log.ODM_INFO("==================================")
- octx.feature_matching(self.params['rerun'])
- octx.create_tracks(self.params['rerun'])
- octx.reconstruct(self.params['rolling_shutter'], True, self.params['rerun'])
-
+ octx.feature_matching(self.params["rerun"])
+ octx.create_tracks(self.params["rerun"])
+ octx.reconstruct(self.params["rolling_shutter"], True, self.params["rerun"])
+
def process_remote(self, done):
octx = OSFMContext(self.path("opensfm"))
- if not octx.is_feature_matching_done() or not octx.is_reconstruction_done() or self.params['rerun']:
- self.execute_remote_task(done, seed_files=["opensfm/exif",
- "opensfm/camera_models.json",
- "opensfm/reference_lla.json"],
- seed_touch_files=["opensfm/split_merge_stop_at_reconstruction.txt"],
- outputs=["opensfm/matches", "opensfm/features",
- "opensfm/reconstruction.json",
- "opensfm/tracks.csv",
- "cameras.json"])
+ if (
+ not octx.is_feature_matching_done()
+ or not octx.is_reconstruction_done()
+ or self.params["rerun"]
+ ):
+ self.execute_remote_task(
+ done,
+ seed_files=[
+ "opensfm/exif",
+ "opensfm/camera_models.json",
+ "opensfm/reference_lla.json",
+ ],
+ seed_touch_files=["opensfm/split_merge_stop_at_reconstruction.txt"],
+ outputs=[
+ "opensfm/matches",
+ "opensfm/features",
+ "opensfm/reconstruction.json",
+ "opensfm/tracks.csv",
+ "cameras.json",
+ ],
+ )
else:
- log.ODM_INFO("Already processed feature matching and reconstruction for %s" % octx.name())
+ log.ODM_INFO(
+ "Already processed feature matching and reconstruction for %s"
+ % octx.name()
+ )
done()
+
class ToolchainTask(Task):
def process_local(self):
completed_file = self.path("toolchain_completed.txt")
submodel_name = os.path.basename(self.project_path)
-
- if not os.path.exists(completed_file) or self.params['rerun']:
+
+ if not os.path.exists(completed_file) or self.params["rerun"]:
log.ODM_INFO("=============================")
log.ODM_INFO("Local Toolchain %s" % self)
log.ODM_INFO("=============================")
@@ -479,37 +587,48 @@ class ToolchainTask(Task):
argv = get_submodel_argv(config.config(), submodels_path, submodel_name)
# Re-run the ODM toolchain on the submodel
- system.run(" ".join(map(double_quote, map(str, argv))), env_vars=os.environ.copy())
+ system.run(
+ " ".join(map(double_quote, map(str, argv))), env_vars=os.environ.copy()
+ )
# This will only get executed if the command above succeeds
self.touch(completed_file)
else:
log.ODM_INFO("Already processed toolchain for %s" % submodel_name)
-
+
def process_remote(self, done):
completed_file = self.path("toolchain_completed.txt")
submodel_name = os.path.basename(self.project_path)
- def handle_result(error = None):
+ def handle_result(error=None):
# Mark task as completed if no error
if error is None:
self.touch(completed_file)
done(error=error)
- if not os.path.exists(completed_file) or self.params['rerun']:
- self.execute_remote_task(handle_result, seed_files=["opensfm/camera_models.json",
- "opensfm/reference_lla.json",
- "opensfm/reconstruction.json",
- "opensfm/tracks.csv"],
- seed_touch_files=["opensfm/features/empty",
- "opensfm/matches/empty",
- "opensfm/exif/empty"],
- outputs=["odm_orthophoto/cutline.gpkg",
- "odm_orthophoto/odm_orthophoto_cut.tif",
- "odm_orthophoto/odm_orthophoto_feathered.tif",
- "odm_dem",
- "odm_report",
- "odm_georeferencing"])
+ if not os.path.exists(completed_file) or self.params["rerun"]:
+ self.execute_remote_task(
+ handle_result,
+ seed_files=[
+ "opensfm/camera_models.json",
+ "opensfm/reference_lla.json",
+ "opensfm/reconstruction.json",
+ "opensfm/tracks.csv",
+ ],
+ seed_touch_files=[
+ "opensfm/features/empty",
+ "opensfm/matches/empty",
+ "opensfm/exif/empty",
+ ],
+ outputs=[
+ "odm_orthophoto/cutline.gpkg",
+ "odm_orthophoto/odm_orthophoto_cut.tif",
+ "odm_orthophoto/odm_orthophoto_feathered.tif",
+ "odm_dem",
+ "odm_report",
+ "odm_georeferencing",
+ ],
+ )
else:
log.ODM_INFO("Already processed toolchain for %s" % submodel_name)
handle_result()
diff --git a/opendm/rollingshutter.py b/opendm/rollingshutter.py
index 9447eeb4..23fa022b 100644
--- a/opendm/rollingshutter.py
+++ b/opendm/rollingshutter.py
@@ -2,60 +2,53 @@ from opendm import log
# Make Model (lowercase) --> readout time (ms)
RS_DATABASE = {
- 'autel robotics xt701': 25, # Autel Evo II 8k
- 'dji phantom vision fc200': 74, # Phantom 2
-
- 'dji fc300s': 33, # Phantom 3 Advanced
- 'dji fc300c': 33, # Phantom 3 Standard
- 'dji fc300x': 33, # Phantom 3 Professional
-
- 'dji fc330': 33, # Phantom 4
- 'dji fc6310': 33, # Phantom 4 Professional
-
- 'dji fc7203': lambda p: 19 if p.get_capture_megapixels() < 10 else 25, # DJI Mavic Mini v1 (at 16:9 => 9MP 19ms, at 4:3 => 12MP 25ms)
- 'dji fc2103': 32, # DJI Mavic Air 1
- 'dji fc3170': 27, # DJI Mavic Air 2
- 'dji fc3411': 32, # DJI Mavic Air 2S
-
- 'dji fc220': 64, # DJI Mavic Pro (Platinum)
- 'hasselblad l1d-20c': lambda p: 47 if p.get_capture_megapixels() < 17 else 56, # DJI Mavic 2 Pro (at 16:10 => 16.8MP 47ms, at 3:2 => 19.9MP 56ms. 4:3 has 17.7MP with same image height as 3:2 which can be concluded as same sensor readout)
- 'hasselblad l2d-20c': 16.6, # DJI Mavic 3 (not enterprise version)
-
- 'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
-
- 'dji fc350': 30, # Inspire 1
-
- 'dji mavic2-enterprise-advanced': 31, # DJI Mavic 2 Enterprise Advanced
- 'dji zenmuse z30': 8, # DJI Zenmuse Z30
-
- 'yuneec e90': 44, # Yuneec E90
-
- 'gopro hero4 black': 30, # GoPro Hero 4 Black
- 'gopro hero8 black': 17, # GoPro Hero 8 Black
-
- 'teracube teracube one': 32, # TeraCube TeraCube_One TR1907Q Mobile Phone
-
- 'fujifilm x-a5': 186, # FUJIFILM X-A5 Mirrorless Interchangeable Lens Camera
-
- 'fujifilm x-t2': 35, # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera
-
- 'autel robotics xl724': 29, # Autel Nano+
-
- 'parrot anafi': 39, # Parrot Anafi
-
- 'autel robotics xt705': 30, # Autel EVO II pro
-
- # Help us add more!
+ "autel robotics xt701": 25, # Autel Evo II 8k
+ "dji phantom vision fc200": 74, # Phantom 2
+ "dji fc300s": 33, # Phantom 3 Advanced
+ "dji fc300c": 33, # Phantom 3 Standard
+ "dji fc300x": 33, # Phantom 3 Professional
+ "dji fc330": 33, # Phantom 4
+ "dji fc6310": 33, # Phantom 4 Professional
+ "dji fc7203": lambda p: (
+ 19 if p.get_capture_megapixels() < 10 else 25
+ ), # DJI Mavic Mini v1 (at 16:9 => 9MP 19ms, at 4:3 => 12MP 25ms)
+ "dji fc2103": 32, # DJI Mavic Air 1
+ "dji fc3170": 27, # DJI Mavic Air 2
+ "dji fc3411": 32, # DJI Mavic Air 2S
+ "dji fc220": 64, # DJI Mavic Pro (Platinum)
+ "hasselblad l1d-20c": lambda p: (
+ 47 if p.get_capture_megapixels() < 17 else 56
+ ), # DJI Mavic 2 Pro (at 16:10 => 16.8MP 47ms, at 3:2 => 19.9MP 56ms. 4:3 has 17.7MP with same image height as 3:2 which can be concluded as same sensor readout)
+ "hasselblad l2d-20c": 16.6, # DJI Mavic 3 (not enterprise version)
+ "dji fc3582": lambda p: (
+ 26 if p.get_capture_megapixels() < 48 else 60
+ ), # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
+ "dji fc350": 30, # Inspire 1
+ "dji mavic2-enterprise-advanced": 31, # DJI Mavic 2 Enterprise Advanced
+ "dji zenmuse z30": 8, # DJI Zenmuse Z30
+ "yuneec e90": 44, # Yuneec E90
+ "gopro hero4 black": 30, # GoPro Hero 4 Black
+ "gopro hero8 black": 17, # GoPro Hero 8 Black
+ "teracube teracube one": 32, # TeraCube TeraCube_One TR1907Q Mobile Phone
+ "fujifilm x-a5": 186, # FUJIFILM X-A5 Mirrorless Interchangeable Lens Camera
+ "fujifilm x-t2": 35, # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera
+ "autel robotics xl724": 29, # Autel Nano+
+ "parrot anafi": 39, # Parrot Anafi
+ "autel robotics xt705": 30, # Autel EVO II pro
+ # Help us add more!
# See: https://github.com/OpenDroneMap/RSCalibration for instructions
}
-DEFAULT_RS_READOUT = 30 # Just a guess
+DEFAULT_RS_READOUT = 30 # Just a guess
+
def make_model_key(make, model):
return ("%s %s" % (make.strip(), model.strip())).lower().strip()
+
warn_db_missing = {}
info_db_found = {}
+
def get_rolling_shutter_readout(photo, override_value=0):
global warn_db_missing
global info_db_found
@@ -64,7 +57,7 @@ def get_rolling_shutter_readout(photo, override_value=0):
if override_value > 0:
return override_value
-
+
key = make_model_key(make, model)
if key in RS_DATABASE:
rsd = RS_DATABASE[key]
@@ -75,17 +68,25 @@ def get_rolling_shutter_readout(photo, override_value=0):
elif callable(rsd):
val = float(rsd(photo))
else:
- log.ODM_WARNING("Invalid rolling shutter calibration entry, returning default of %sms" % DEFAULT_RS_READOUT)
+ log.ODM_WARNING(
+ "Invalid rolling shutter calibration entry, returning default of %sms"
+ % DEFAULT_RS_READOUT
+ )
if not key in info_db_found:
- log.ODM_INFO("Rolling shutter profile for \"%s %s\" selected, using %sms as --rolling-shutter-readout." % (make, model, val))
+ log.ODM_INFO(
+ 'Rolling shutter profile for "%s %s" selected, using %sms as --rolling-shutter-readout.'
+ % (make, model, val)
+ )
info_db_found[key] = True
-
+
return val
else:
# Warn once
if not key in warn_db_missing:
- log.ODM_WARNING("Rolling shutter readout time for \"%s %s\" is not in our database, using default of %sms which might be incorrect. Use --rolling-shutter-readout to set an actual value (see https://github.com/OpenDroneMap/RSCalibration for instructions on how to calculate this value)" % (make, model, DEFAULT_RS_READOUT))
+ log.ODM_WARNING(
+ 'Rolling shutter readout time for "%s %s" is not in our database, using default of %sms which might be incorrect. Use --rolling-shutter-readout to set an actual value (see https://github.com/OpenDroneMap/RSCalibration for instructions on how to calculate this value)'
+ % (make, model, DEFAULT_RS_READOUT)
+ )
warn_db_missing[key] = True
return float(DEFAULT_RS_READOUT)
-
diff --git a/opendm/shots.py b/opendm/shots.py
index b6eeeb30..9d0755cf 100644
--- a/opendm/shots.py
+++ b/opendm/shots.py
@@ -7,10 +7,12 @@ from osgeo import gdal
import numpy as np
import cv2
+
def get_rotation_matrix(rotation):
"""Get rotation as a 3x3 matrix."""
return cv2.Rodrigues(rotation)[0]
+
def matrix_to_rotation(rotation_matrix):
R = np.array(rotation_matrix, dtype=float)
# if not np.isclose(np.linalg.det(R), 1):
@@ -19,11 +21,21 @@ def matrix_to_rotation(rotation_matrix):
# raise ValueError("Not orthogonal")
return cv2.Rodrigues(R)[0].ravel()
+
def get_origin(shot):
"""The origin of the pose in world coordinates."""
- return -get_rotation_matrix(np.array(shot['rotation'])).T.dot(np.array(shot['translation']))
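+    # Camera center in world coordinates: c = -R^T t, with R/t taken from the shot's world->camera pose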
+ return -get_rotation_matrix(np.array(shot["rotation"])).T.dot(
+ np.array(shot["translation"])
+ )
-def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None, a_matrix=None):
+
+def get_geojson_shots_from_opensfm(
+ reconstruction_file,
+ utm_srs=None,
+ utm_offset=None,
+ pseudo_geotiff=None,
+ a_matrix=None,
+):
"""
Extract shots from OpenSfM's reconstruction.json
"""
@@ -36,17 +48,21 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset
# the pseudo-georeferencing CRS UL corner is at 0,0
# but our shot coordinates aren't, so we need to offset them
raster = gdal.Open(pseudo_geotiff)
- ulx, xres, _, uly, _, yres = raster.GetGeoTransform()
+ ulx, xres, _, uly, _, yres = raster.GetGeoTransform()
lrx = ulx + (raster.RasterXSize * xres)
lry = uly + (raster.RasterYSize * yres)
- pseudo_geocoords = np.array([[1.0 / get_pseudogeo_scale() ** 2, 0, 0, ulx + lrx / 2.0],
- [0, 1.0 / get_pseudogeo_scale() ** 2, 0, uly + lry / 2.0],
- [0, 0, 1, 0],
- [0, 0, 0, 1]])
+ pseudo_geocoords = np.array(
+ [
+ [1.0 / get_pseudogeo_scale() ** 2, 0, 0, ulx + lrx / 2.0],
+ [0, 1.0 / get_pseudogeo_scale() ** 2, 0, uly + lry / 2.0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1],
+ ]
+ )
raster = None
pseudo = True
-
+
# Couldn't get a SRS?
if utm_srs is None:
return None
@@ -54,17 +70,17 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset
crstrans = transformer(CRS.from_proj4(utm_srs), CRS.from_epsg("4326"))
if os.path.exists(reconstruction_file):
- with open(reconstruction_file, 'r') as fin:
+ with open(reconstruction_file, "r") as fin:
reconstructions = json.loads(fin.read())
feats = []
added_shots = {}
for recon in reconstructions:
- cameras = recon.get('cameras', {})
+ cameras = recon.get("cameras", {})
- for filename in recon.get('shots', {}):
- shot = recon['shots'][filename]
- cam_id = shot.get('camera')
+ for filename in recon.get("shots", {}):
+ shot = recon["shots"][filename]
+ cam_id = shot.get("camera")
if (not cam_id in cameras) or (filename in added_shots):
continue
@@ -76,57 +92,69 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset
# Translation
utm_coords = np.dot(Rs, origin) + T
- trans_coords = crstrans.TransformPoint(utm_coords[0], utm_coords[1], utm_coords[2])
+ trans_coords = crstrans.TransformPoint(
+ utm_coords[0], utm_coords[1], utm_coords[2]
+ )
# Rotation
- rotation_matrix = get_rotation_matrix(np.array(shot['rotation']))
+ rotation_matrix = get_rotation_matrix(
+ np.array(shot["rotation"])
+ )
rotation = matrix_to_rotation(np.dot(rotation_matrix, Rs1))
translation = origin
else:
- rotation = shot['rotation']
+ rotation = shot["rotation"]
# Just add UTM offset
origin = get_origin(shot)
- utm_coords = [origin[0] + utm_offset[0],
- origin[1] + utm_offset[1],
- origin[2]]
+ utm_coords = [
+ origin[0] + utm_offset[0],
+ origin[1] + utm_offset[1],
+ origin[2],
+ ]
if a_matrix is not None:
- rotation = list(np.array(rotation).dot(a_matrix[:3,:3]))
- utm_coords = list(a_matrix.dot(np.hstack((np.array(utm_coords), 1)))[:-1])
-
- translation = utm_coords
- trans_coords = crstrans.TransformPoint(utm_coords[0], utm_coords[1], utm_coords[2])
+ rotation = list(np.array(rotation).dot(a_matrix[:3, :3]))
+ utm_coords = list(
+ a_matrix.dot(np.hstack((np.array(utm_coords), 1)))[:-1]
+ )
- feats.append({
- 'type': 'Feature',
- 'properties': {
- 'filename': filename,
- 'camera': cam_id,
- 'focal': cam.get('focal', cam.get('focal_x')), # Focal ratio = focal length (mm) / max(sensor_width, sensor_height) (mm)
- 'width': cam.get('width', 0),
- 'height': cam.get('height', 0),
- 'capture_time': shot.get('capture_time', 0),
- 'translation': list(translation),
- 'rotation': list(rotation)
- },
- 'geometry':{
- 'type': 'Point',
- 'coordinates': list(trans_coords)
+ translation = utm_coords
+ trans_coords = crstrans.TransformPoint(
+ utm_coords[0], utm_coords[1], utm_coords[2]
+ )
+
+ feats.append(
+ {
+ "type": "Feature",
+ "properties": {
+ "filename": filename,
+ "camera": cam_id,
+ "focal": cam.get(
+ "focal", cam.get("focal_x")
+ ), # Focal ratio = focal length (mm) / max(sensor_width, sensor_height) (mm)
+ "width": cam.get("width", 0),
+ "height": cam.get("height", 0),
+ "capture_time": shot.get("capture_time", 0),
+ "translation": list(translation),
+ "rotation": list(rotation),
+ },
+ "geometry": {
+ "type": "Point",
+ "coordinates": list(trans_coords),
+ },
}
- })
+ )
added_shots[filename] = True
- return {
- 'type': 'FeatureCollection',
- 'features': feats
- }
+ return {"type": "FeatureCollection", "features": feats}
else:
raise RuntimeError("%s does not exist." % reconstruction_file)
+
def merge_geojson_shots(geojson_shots_files, output_geojson_file):
result = {}
added_files = {}
@@ -135,29 +163,30 @@ def merge_geojson_shots(geojson_shots_files, output_geojson_file):
shots = json.loads(f.read())
if len(result) == 0:
- for feat in shots.get('features', []):
- added_files[feat['properties']['filename']] = True
+ for feat in shots.get("features", []):
+ added_files[feat["properties"]["filename"]] = True
# Use first file as base
result = shots
else:
# Append features if filename not already added
- for feat in shots.get('features', []):
- if not feat['properties']['filename'] in added_files:
- result['features'].append(feat)
-
+ for feat in shots.get("features", []):
+ if not feat["properties"]["filename"] in added_files:
+ result["features"].append(feat)
+
with open(output_geojson_file, "w") as f:
f.write(json.dumps(result))
+
def merge_cameras(cameras_json_files, output_cameras_file):
result = {}
for cameras_file in cameras_json_files:
with open(cameras_file, "r") as f:
cameras = json.loads(f.read())
-
+
for cam_id in cameras:
if not cam_id in result:
result[cam_id] = cameras[cam_id]
-
+
with open(output_cameras_file, "w") as f:
f.write(json.dumps(result))
diff --git a/opendm/skyremoval/guidedfilter.py b/opendm/skyremoval/guidedfilter.py
index 1bf29777..a8813a52 100644
--- a/opendm/skyremoval/guidedfilter.py
+++ b/opendm/skyremoval/guidedfilter.py
@@ -4,21 +4,32 @@ import numpy as np
# Kaiming He, Jian Sun
# https://arxiv.org/abs/1505.00996
+
def box(img, radius):
dst = np.zeros_like(img)
(r, c) = img.shape
s = [radius, 1]
c_sum = np.cumsum(img, 0)
- dst[0:radius+1, :, ...] = c_sum[radius:2*radius+1, :, ...]
- dst[radius+1:r-radius, :, ...] = c_sum[2*radius+1:r, :, ...] - c_sum[0:r-2*radius-1, :, ...]
- dst[r-radius:r, :, ...] = np.tile(c_sum[r-1:r, :, ...], s) - c_sum[r-2*radius-1:r-radius-1, :, ...]
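+    # Separable box sum via cumulative sums: a (2*radius+1) window along axis 0, then axis 1, with clamped borders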
+ dst[0 : radius + 1, :, ...] = c_sum[radius : 2 * radius + 1, :, ...]
+ dst[radius + 1 : r - radius, :, ...] = (
+ c_sum[2 * radius + 1 : r, :, ...] - c_sum[0 : r - 2 * radius - 1, :, ...]
+ )
+ dst[r - radius : r, :, ...] = (
+ np.tile(c_sum[r - 1 : r, :, ...], s)
+ - c_sum[r - 2 * radius - 1 : r - radius - 1, :, ...]
+ )
s = [1, radius]
c_sum = np.cumsum(dst, 1)
- dst[:, 0:radius+1, ...] = c_sum[:, radius:2*radius+1, ...]
- dst[:, radius+1:c-radius, ...] = c_sum[:, 2*radius+1 : c, ...] - c_sum[:, 0 : c-2*radius-1, ...]
- dst[:, c-radius: c, ...] = np.tile(c_sum[:, c-1:c, ...], s) - c_sum[:, c-2*radius-1 : c-radius-1, ...]
+ dst[:, 0 : radius + 1, ...] = c_sum[:, radius : 2 * radius + 1, ...]
+ dst[:, radius + 1 : c - radius, ...] = (
+ c_sum[:, 2 * radius + 1 : c, ...] - c_sum[:, 0 : c - 2 * radius - 1, ...]
+ )
+ dst[:, c - radius : c, ...] = (
+ np.tile(c_sum[:, c - 1 : c, ...], s)
+ - c_sum[:, c - 2 * radius - 1 : c - radius - 1, ...]
+ )
return dst
@@ -31,7 +42,9 @@ def guided_filter(img, guide, radius, eps):
mean_img = box(img, radius) / CNT
mean_guide = box(guide, radius) / CNT
- a = ((box(img * guide, radius) / CNT) - mean_img * mean_guide) / (((box(img * img, radius) / CNT) - mean_img * mean_img) + eps)
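+    # Per-pixel linear model of the guided filter: a = cov(img, guide) / (var(img) + eps)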
+ a = ((box(img * guide, radius) / CNT) - mean_img * mean_guide) / (
+ ((box(img * img, radius) / CNT) - mean_img * mean_img) + eps
+ )
b = mean_guide - a * mean_img
return (box(a, radius) / CNT) * img + (box(b, radius) / CNT)
diff --git a/opendm/skyremoval/skyfilter.py b/opendm/skyremoval/skyfilter.py
index bee0dce9..e566a66c 100644
--- a/opendm/skyremoval/skyfilter.py
+++ b/opendm/skyremoval/skyfilter.py
@@ -1,4 +1,3 @@
-
import time
import numpy as np
import cv2
@@ -12,30 +11,35 @@ from threading import Lock
mutex = Lock()
# Use GPU if it is available, otherwise CPU
-provider = "CUDAExecutionProvider" if "CUDAExecutionProvider" in ort.get_available_providers() else "CPUExecutionProvider"
+provider = (
+ "CUDAExecutionProvider"
+ if "CUDAExecutionProvider" in ort.get_available_providers()
+ else "CPUExecutionProvider"
+)
-class SkyFilter():
- def __init__(self, model, width = 384, height = 384):
+class SkyFilter:
+
+ def __init__(self, model, width=384, height=384):
self.model = model
self.width, self.height = width, height
- log.ODM_INFO(' ?> Using provider %s' % provider)
+ log.ODM_INFO(" ?> Using provider %s" % provider)
self.load_model()
-
def load_model(self):
- log.ODM_INFO(' -> Loading the model')
- self.session = ort.InferenceSession(self.model, providers=[provider])
-
+ log.ODM_INFO(" -> Loading the model")
+ self.session = ort.InferenceSession(self.model, providers=[provider])
def get_mask(self, img):
height, width, c = img.shape
# Resize image to fit the model input
- new_img = cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_AREA)
+ new_img = cv2.resize(
+ img, (self.width, self.height), interpolation=cv2.INTER_AREA
+ )
new_img = np.array(new_img, dtype=np.float32)
# Input vector for onnx model
@@ -55,34 +59,34 @@ class SkyFilter():
return self.refine(output, img)
-
def refine(self, pred, img):
guided_filter_radius, guided_filter_eps = 20, 0.01
- refined = guided_filter(img[:,:,2], pred[:,:,0], guided_filter_radius, guided_filter_eps)
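+        # Edge-aware refinement: guided-filter the low-res sky prediction against an image channel before thresholding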
+ refined = guided_filter(
+ img[:, :, 2], pred[:, :, 0], guided_filter_radius, guided_filter_eps
+ )
res = np.clip(refined, a_min=0, a_max=1)
-
+
# Convert res to CV_8UC1
- res = np.array(res * 255., dtype=np.uint8)
-
+ res = np.array(res * 255.0, dtype=np.uint8)
+
# Thresholding
res = cv2.threshold(res, 127, 255, cv2.THRESH_BINARY_INV)[1]
-
+
return res
-
def run_img(self, img_path, dest):
img = read_image(img_path)
- img = np.array(img / 255., dtype=np.float32)
+ img = np.array(img / 255.0, dtype=np.float32)
+
+ mask = self.get_mask(img)
- mask = self.get_mask(img)
-
img_name = os.path.basename(img_path)
fpath = os.path.join(dest, img_name)
fname, _ = os.path.splitext(fpath)
- mask_name = fname + '_mask.png'
+ mask_name = fname + "_mask.png"
cv2.imwrite(mask_name, mask)
-
+
return mask_name
diff --git a/opendm/system.py b/opendm/system.py
index 1ad88a52..8e30d2c3 100644
--- a/opendm/system.py
+++ b/opendm/system.py
@@ -13,34 +13,44 @@ from collections import deque
from opendm import context
from opendm import log
+
class SubprocessException(Exception):
def __init__(self, msg, errorCode):
super().__init__(msg)
self.errorCode = errorCode
+
class ExitException(Exception):
pass
+
def get_ccd_widths():
"""Return the CCD Width of the camera listed in the JSON defs file."""
with open(context.ccd_widths_path) as f:
sensor_data = json.loads(f.read())
return dict(zip(map(string.lower, sensor_data.keys()), sensor_data.values()))
+
running_subprocesses = []
cleanup_callbacks = []
+
def add_cleanup_callback(func):
global cleanup_callbacks
cleanup_callbacks.append(func)
+
def remove_cleanup_callback(func):
global cleanup_callbacks
try:
cleanup_callbacks.remove(func)
except ValueError as e:
- log.ODM_EXCEPTION("Tried to remove %s from cleanup_callbacks but got: %s" % (str(func), str(e)))
+ log.ODM_EXCEPTION(
+ "Tried to remove %s from cleanup_callbacks but got: %s"
+ % (str(func), str(e))
+ )
+
def exit_gracefully():
global running_subprocesses
@@ -53,44 +63,63 @@ def exit_gracefully():
for sp in running_subprocesses:
log.ODM_WARNING("Sending TERM signal to PID %s..." % sp.pid)
- if sys.platform == 'win32':
+ if sys.platform == "win32":
os.kill(sp.pid, signal.CTRL_C_EVENT)
else:
os.killpg(os.getpgid(sp.pid), signal.SIGTERM)
-
+
os._exit(1)
+
def sighandler(signum, frame):
exit_gracefully()
+
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGTERM, sighandler)
-def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_paths=context.python_packages_paths, quiet=False):
+
+def run(
+ cmd,
+ env_paths=[context.superbuild_bin_path],
+ env_vars={},
+ packages_paths=context.python_packages_paths,
+ quiet=False,
+):
"""Run a system command"""
global running_subprocesses
if not quiet:
- log.ODM_INFO('running %s' % cmd)
+ log.ODM_INFO("running %s" % cmd)
env = os.environ.copy()
sep = ":"
- if sys.platform == 'win32':
+ if sys.platform == "win32":
sep = ";"
if len(env_paths) > 0:
env["PATH"] = env["PATH"] + sep + sep.join(env_paths)
-
+
if len(packages_paths) > 0:
- env["PYTHONPATH"] = env.get("PYTHONPATH", "") + sep + sep.join(packages_paths)
- if sys.platform == 'darwin':
+ env["PYTHONPATH"] = env.get("PYTHONPATH", "") + sep + sep.join(packages_paths)
+ if sys.platform == "darwin":
# Propagate DYLD_LIBRARY_PATH
- cmd = "export DYLD_LIBRARY_PATH=\"%s\" && %s" % (env.get("DYLD_LIBRARY_PATH", ""), cmd)
+ cmd = 'export DYLD_LIBRARY_PATH="%s" && %s' % (
+ env.get("DYLD_LIBRARY_PATH", ""),
+ cmd,
+ )
for k in env_vars:
env[k] = str(env_vars[k])
- p = subprocess.Popen(cmd, shell=True, env=env, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ p = subprocess.Popen(
+ cmd,
+ shell=True,
+ env=env,
+ start_new_session=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
running_subprocesses.append(p)
lines = deque()
for line in io.TextIOWrapper(p.stdout):
@@ -107,14 +136,16 @@ def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_path
running_subprocesses.remove(p)
if retcode < 0:
- raise SubprocessException("Child was terminated by signal {}".format(-retcode), -retcode)
+ raise SubprocessException(
+ "Child was terminated by signal {}".format(-retcode), -retcode
+ )
elif retcode > 0:
raise SubprocessException("Child returned {}".format(retcode), retcode)
def now():
"""Return the current time"""
- return datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Z %Y')
+ return datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Z %Y")
def now_raw():
@@ -128,35 +159,43 @@ def benchmark(start, benchmarking_file, process):
"""
# Write to benchmark file
delta = (datetime.datetime.now() - start).total_seconds()
- with open(benchmarking_file, 'a') as b:
- b.write('%s runtime: %s seconds\n' % (process, delta))
+ with open(benchmarking_file, "a") as b:
+ b.write("%s runtime: %s seconds\n" % (process, delta))
+
def mkdir_p(path):
- """Make a directory including parent directories.
- """
+ """Make a directory including parent directories."""
try:
os.makedirs(path)
except os.error as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
+
# Python2 shutil.which
def which(program):
- path=os.getenv('PATH')
+ path = os.getenv("PATH")
for p in path.split(os.path.pathsep):
- p=os.path.join(p,program)
- if os.path.exists(p) and os.access(p,os.X_OK):
+ p = os.path.join(p, program)
+ if os.path.exists(p) and os.access(p, os.X_OK):
return p
+
def link_file(src, dst):
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if not os.path.isfile(dst):
- if sys.platform == 'win32':
+ if sys.platform == "win32":
os.link(src, dst)
else:
- os.symlink(os.path.relpath(os.path.abspath(src), os.path.dirname(os.path.abspath(dst))), dst)
+ os.symlink(
+ os.path.relpath(
+ os.path.abspath(src), os.path.dirname(os.path.abspath(dst))
+ ),
+ dst,
+ )
+
def move_files(src, dst):
if not os.path.isdir(dst):
@@ -166,6 +205,7 @@ def move_files(src, dst):
if os.path.isfile(os.path.join(src, f)):
shutil.move(os.path.join(src, f), dst)
+
def delete_files(folder, exclude=()):
if not os.path.isdir(folder):
return
@@ -173,4 +213,4 @@ def delete_files(folder, exclude=()):
for f in os.listdir(folder):
if os.path.isfile(os.path.join(folder, f)):
if not exclude or not f.endswith(exclude):
- os.unlink(os.path.join(folder, f))
\ No newline at end of file
+ os.unlink(os.path.join(folder, f))
diff --git a/opendm/thermal.py b/opendm/thermal.py
index f0ad2099..13358869 100644
--- a/opendm/thermal.py
+++ b/opendm/thermal.py
@@ -5,7 +5,8 @@ from opendm.thermal_tools import dji_unpack
from opendm.exiftool import extract_raw_thermal_image_data
from opendm.thermal_tools.thermal_utils import sensor_vals_to_temp
-def resize_to_match(image, match_photo = None):
+
+def resize_to_match(image, match_photo=None):
"""
Resize images to match the dimension of another photo
:param image numpy array containing image data to resize
@@ -15,12 +16,16 @@ def resize_to_match(image, match_photo = None):
if match_photo is not None:
h, w, _ = image.shape
if w != match_photo.width or h != match_photo.height:
- image = cv2.resize(image, None,
- fx=match_photo.width/w,
- fy=match_photo.height/h,
- interpolation=cv2.INTER_LANCZOS4)
+ image = cv2.resize(
+ image,
+ None,
+ fx=match_photo.width / w,
+ fy=match_photo.height / h,
+ interpolation=cv2.INTER_LANCZOS4,
+ )
return image
+
def dn_to_temperature(photo, image, images_path):
"""
Convert Digital Number values to temperature (C) values
@@ -37,34 +42,45 @@ def dn_to_temperature(photo, image, images_path):
# but not necessarily for others
if photo.camera_make == "MicaSense" and photo.camera_model[:5] == "Altum":
image = image.astype("float32")
- image -= (273.15 * 100.0) # Convert Kelvin to Celsius
+ image -= 273.15 * 100.0 # Convert Kelvin to Celsius
image *= 0.01
return image
elif photo.camera_make == "DJI" and photo.camera_model == "ZH20T":
filename, file_extension = os.path.splitext(photo.filename)
# DJI H20T high gain mode supports measurement of -40~150 celsius degrees
- if file_extension.lower() in [".tif", ".tiff"] and image.min() >= 23315: # Calibrated grayscale tif
+ if (
+ file_extension.lower() in [".tif", ".tiff"] and image.min() >= 23315
+ ): # Calibrated grayscale tif
image = image.astype("float32")
- image -= (273.15 * 100.0) # Convert Kelvin to Celsius
+ image -= 273.15 * 100.0 # Convert Kelvin to Celsius
image *= 0.01
return image
else:
return image
- elif photo.camera_make == "DJI" and photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
+ elif (
+ photo.camera_make == "DJI"
+ and photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED"
+ ):
image = dji_unpack.extract_temperatures_dji(photo, image, images_path)
image = image.astype("float32")
return image
else:
try:
- params, image = extract_raw_thermal_image_data(os.path.join(images_path, photo.filename))
+ params, image = extract_raw_thermal_image_data(
+ os.path.join(images_path, photo.filename)
+ )
image = sensor_vals_to_temp(image, **params)
except Exception as e:
- log.ODM_WARNING("Cannot radiometrically calibrate %s: %s" % (photo.filename, str(e)))
+ log.ODM_WARNING(
+ "Cannot radiometrically calibrate %s: %s" % (photo.filename, str(e))
+ )
image = image.astype("float32")
return image
else:
image = image.astype("float32")
- log.ODM_WARNING("Tried to radiometrically calibrate a non-thermal image with temperature values (%s)" % photo.filename)
+ log.ODM_WARNING(
+ "Tried to radiometrically calibrate a non-thermal image with temperature values (%s)"
+ % photo.filename
+ )
return image
-
diff --git a/opendm/thermal_tools/dji_unpack.py b/opendm/thermal_tools/dji_unpack.py
index eb706a4c..fc50e5e6 100644
--- a/opendm/thermal_tools/dji_unpack.py
+++ b/opendm/thermal_tools/dji_unpack.py
@@ -5,46 +5,53 @@ from opendm import log
from opendm.thermal_tools.thermal_utils import sensor_vals_to_temp
-def extract_temperatures_dji(photo, image, dataset_tree):
- """Extracts the DJI-encoded thermal image as 2D floating-point numpy array with temperatures in degC.
- The raw sensor values are obtained using the sample binaries provided in the official Thermal SDK by DJI.
- The executable file is run and generates a 16 bit unsigned RAW image with Little Endian byte order.
- Link to DJI Forum post: https://forum.dji.com/forum.php?mod=redirect&goto=findpost&ptid=230321&pid=2389016
- """
- # Hardcoded metadata for mean of values
- # This is added to support the possibility of extracting RJPEG from DJI M2EA
- meta = {
- "Emissivity": 0.95,
- "ObjectDistance": 50, #This is mean value of flights for better results. Need to be changed later, or improved by bypassing options from task broker
- "AtmosphericTemperature": 20,
- "ReflectedApparentTemperature": 30,
- "IRWindowTemperature": 20,
- "IRWindowTransmission": 1,
- "RelativeHumidity": 40,
- "PlanckR1": 21106.77,
- "PlanckB": 1501,
- "PlanckF": 1,
- "PlanckO": -7340,
- "PlanckR2": 0.012545258,
- }
- if photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
- # Adding support for MAVIC2-ENTERPRISE-ADVANCED Camera images
- im = Image.open(f"{dataset_tree}/{photo.filename}")
- # concatenate APP3 chunks of data
- a = im.applist[3][1]
- for i in range(4, 14):
- a += im.applist[i][1]
- # create image from bytes
- try:
- img = Image.frombytes("I;16L", (640, 512), a)
- except ValueError as e:
- log.ODM_ERROR("Error during extracting temperature values for file %s : %s" % photo.filename, e)
- else:
- log.ODM_WARNING("Only DJI M2EA currently supported, please wait for new updates")
- return image
- # Extract raw sensor values from generated image into numpy array
- raw_sensor_np = np.array(img)
- ## extracting the temperatures from thermal images
- thermal_np = sensor_vals_to_temp(raw_sensor_np, **meta)
- return thermal_np
\ No newline at end of file
+def extract_temperatures_dji(photo, image, dataset_tree):
+ """Extracts the DJI-encoded thermal image as 2D floating-point numpy array with temperatures in degC.
+ The raw sensor values are obtained using the sample binaries provided in the official Thermal SDK by DJI.
+ The executable file is run and generates a 16 bit unsigned RAW image with Little Endian byte order.
+ Link to DJI Forum post: https://forum.dji.com/forum.php?mod=redirect&goto=findpost&ptid=230321&pid=2389016
+ """
+ # Hardcoded metadata for mean of values
+ # This is added to support the possibility of extracting RJPEG from DJI M2EA
+ meta = {
+ "Emissivity": 0.95,
+ "ObjectDistance": 50, # This is mean value of flights for better results. Need to be changed later, or improved by bypassing options from task broker
+ "AtmosphericTemperature": 20,
+ "ReflectedApparentTemperature": 30,
+ "IRWindowTemperature": 20,
+ "IRWindowTransmission": 1,
+ "RelativeHumidity": 40,
+ "PlanckR1": 21106.77,
+ "PlanckB": 1501,
+ "PlanckF": 1,
+ "PlanckO": -7340,
+ "PlanckR2": 0.012545258,
+ }
+
+ if photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
+ # Adding support for MAVIC2-ENTERPRISE-ADVANCED Camera images
+ im = Image.open(f"{dataset_tree}/{photo.filename}")
+ # concatenate APP3 chunks of data
+ a = im.applist[3][1]
+ for i in range(4, 14):
+ a += im.applist[i][1]
+ # create image from bytes
+ try:
+ img = Image.frombytes("I;16L", (640, 512), a)
+ except ValueError as e:
+            log.ODM_ERROR(
+                "Error during extracting temperature values for file %s : %s"
+                % (photo.filename, e)
+            )
+ else:
+ log.ODM_WARNING(
+ "Only DJI M2EA currently supported, please wait for new updates"
+ )
+ return image
+ # Extract raw sensor values from generated image into numpy array
+ raw_sensor_np = np.array(img)
+ ## extracting the temperatures from thermal images
+ thermal_np = sensor_vals_to_temp(raw_sensor_np, **meta)
+ return thermal_np
diff --git a/opendm/thermal_tools/thermal_utils.py b/opendm/thermal_tools/thermal_utils.py
index 6dbfdf5f..ac9229a3 100644
--- a/opendm/thermal_tools/thermal_utils.py
+++ b/opendm/thermal_tools/thermal_utils.py
@@ -1,7 +1,9 @@
"""Thermal Image manipulation utilities."""
+
"""Based on https://github.com/detecttechnologies/thermal_base"""
import numpy as np
+
def sensor_vals_to_temp(
raw,
Emissivity=1.0,
@@ -16,7 +18,8 @@ def sensor_vals_to_temp(
PlanckF=1,
PlanckO=-7340,
PlanckR2=0.012545258,
- **kwargs,):
+ **kwargs,
+):
"""Convert raw values from the thermographic sensor sensor to temperatures in °C. Tested for Flir and DJI cams."""
# this calculation has been ported to python from https://github.com/gtatters/Thermimage/blob/master/R/raw2temp.R
# a detailed explanation of what is going on here can be found there
@@ -39,46 +42,60 @@ def sensor_vals_to_temp(
- 0.00027816 * (AtmosphericTemperature) ** 2
+ 0.00000068455 * (AtmosphericTemperature) ** 3
)
- tau1 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (1 - ATX) * np.exp(
- -np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o))
- )
- tau2 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (1 - ATX) * np.exp(
- -np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o))
- )
+ tau1 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (
+ 1 - ATX
+ ) * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o)))
+ tau2 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (
+ 1 - ATX
+ ) * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o)))
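+    # tau1/tau2: estimated atmospheric transmittance over half the object distance (identical in this port)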
# radiance from the environment
- raw_refl1 = PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)) - PlanckO
-
+ raw_refl1 = (
+ PlanckR1
+ / (
+ PlanckR2
+ * (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)
+ )
+ - PlanckO
+ )
+
# Reflected component
- raw_refl1_attn = (1 - Emissivity) / Emissivity * raw_refl1
-
+ raw_refl1_attn = (1 - Emissivity) / Emissivity * raw_refl1
+
# Emission from atmosphere 1
raw_atm1 = (
- PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF)) - PlanckO
- )
-
+ PlanckR1
+ / (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF))
+ - PlanckO
+ )
+
# attenuation for atmospheric 1 emission
- raw_atm1_attn = (1 - tau1) / Emissivity / tau1 * raw_atm1
-
+ raw_atm1_attn = (1 - tau1) / Emissivity / tau1 * raw_atm1
+
# Emission from window due to its own temp
raw_wind = (
- PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (IRWindowTemperature + 273.15)) - PlanckF)) - PlanckO
- )
+ PlanckR1
+ / (PlanckR2 * (np.exp(PlanckB / (IRWindowTemperature + 273.15)) - PlanckF))
+ - PlanckO
+ )
# Componen due to window emissivity
- raw_wind_attn = (
- emiss_wind / Emissivity / tau1 / IRWindowTransmission * raw_wind
- )
+ raw_wind_attn = emiss_wind / Emissivity / tau1 / IRWindowTransmission * raw_wind
# Reflection from window due to external objects
raw_refl2 = (
- PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)) - PlanckO
- )
+ PlanckR1
+ / (
+ PlanckR2
+ * (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)
+ )
+ - PlanckO
+ )
# component due to window reflectivity
- raw_refl2_attn = (
- refl_wind / Emissivity / tau1 / IRWindowTransmission * raw_refl2
- )
+ raw_refl2_attn = refl_wind / Emissivity / tau1 / IRWindowTransmission * raw_refl2
# Emission from atmosphere 2
raw_atm2 = (
- PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF)) - PlanckO
- )
+ PlanckR1
+ / (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF))
+ - PlanckO
+ )
# attenuation for atmospheric 2 emission
raw_atm2_attn = (
(1 - tau2) / Emissivity / tau1 / IRWindowTransmission / tau2 * raw_atm2
@@ -114,6 +131,7 @@ def normalize_temp_matrix(thermal_np):
thermal_np = num / den
return thermal_np
+
def clip_temp_to_roi(thermal_np, thermal_roi_values):
"""
Given an RoI within a temperature matrix, this function clips the temperature values in the entire thermal.
@@ -136,4 +154,4 @@ def clip_temp_to_roi(thermal_np, thermal_roi_values):
def scale_with_roi(thermal_np, thermal_roi_values):
"""Alias for clip_temp_to_roi, to be deprecated in the future."""
- return clip_temp_to_roi(thermal_np, thermal_roi_values)
\ No newline at end of file
+ return clip_temp_to_roi(thermal_np, thermal_roi_values)
diff --git a/opendm/tiles/gdal2tiles.py b/opendm/tiles/gdal2tiles.py
index 081c335a..11fd0c77 100644
--- a/opendm/tiles/gdal2tiles.py
+++ b/opendm/tiles/gdal2tiles.py
@@ -61,9 +61,17 @@ except Exception:
__version__ = "$Id$"
-resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
-profile_list = ('mercator', 'geodetic', 'raster')
-webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
+resampling_list = (
+ "average",
+ "near",
+ "bilinear",
+ "cubic",
+ "cubicspline",
+ "lanczos",
+ "antialias",
+)
+profile_list = ("mercator", "geodetic", "raster")
+webviewer_list = ("all", "google", "openlayers", "leaflet", "none")
# =============================================================================
# =============================================================================
@@ -226,7 +234,11 @@ class GlobalMercator(object):
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
- lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
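+        # Inverse spherical Mercator: the Gudermannian function 2*atan(exp(x)) - pi/2 recovers geodetic latitude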
+ lat = (
+ 180
+ / math.pi
+ * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
+ )
return lat, lon
def PixelsToMeters(self, px, py, zoom):
@@ -267,8 +279,10 @@ class GlobalMercator(object):
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
- minx, miny = self.PixelsToMeters(tx*self.tileSize, ty*self.tileSize, zoom)
- maxx, maxy = self.PixelsToMeters((tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom)
+ minx, miny = self.PixelsToMeters(tx * self.tileSize, ty * self.tileSize, zoom)
+ maxx, maxy = self.PixelsToMeters(
+ (tx + 1) * self.tileSize, (ty + 1) * self.tileSize, zoom
+ )
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
@@ -292,9 +306,9 @@ class GlobalMercator(object):
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i != -1:
- return i-1
+ return i - 1
else:
- return 0 # We don't want to scale up
+ return 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
@@ -309,7 +323,7 @@ class GlobalMercator(object):
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
- mask = 1 << (i-1)
+ mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
@@ -398,18 +412,18 @@ class GlobalGeodetic(object):
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i != 0:
- return i-1
+ return i - 1
else:
- return 0 # We don't want to scale up
+ return 0 # We don't want to scale up
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
- tx*self.tileSize*res - 180,
- ty*self.tileSize*res - 90,
- (tx+1)*self.tileSize*res - 180,
- (ty+1)*self.tileSize*res - 90
+ tx * self.tileSize * res - 180,
+ ty * self.tileSize * res - 90,
+ (tx + 1) * self.tileSize * res - 180,
+ (ty + 1) * self.tileSize * res - 90,
)
def TileLatLonBounds(self, tx, ty, zoom):
@@ -424,7 +438,7 @@ class Zoomify(object):
----------------------------------------
"""
- def __init__(self, width, height, tilesize=256, tileformat='jpg'):
+ def __init__(self, width, height, tilesize=256, tileformat="jpg"):
"""Initialization of the Zoomify tile tree"""
self.tilesize = tilesize
@@ -440,9 +454,12 @@ class Zoomify(object):
self.tierImageSize = []
self.tierImageSize.append(imagesize)
- while (imagesize[0] > tilesize or imagesize[1] > tilesize):
+ while imagesize[0] > tilesize or imagesize[1] > tilesize:
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
- tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))
+ tiles = (
+ math.ceil(imagesize[0] / tilesize),
+ math.ceil(imagesize[1] / tilesize),
+ )
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
@@ -455,26 +472,31 @@ class Zoomify(object):
# Number of tiles up to the given tier of pyramid.
self.tileCountUpToTier = []
self.tileCountUpToTier[0] = 0
- for i in range(1, self.numberOfTiers+1):
+ for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(
- self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +
- self.tileCountUpToTier[i-1]
+ self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1]
+ + self.tileCountUpToTier[i - 1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
- return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
- "%s-%s-%s.%s" % (z, x, y, self.tileformat))
+ return os.path.join(
+ "TileGroup%.0f" % math.floor(tileIndex / 256),
+ "%s-%s-%s.%s" % (z, x, y, self.tileformat),
+ )
class GDALError(Exception):
pass
+
import os
+
main_pid = os.getpid()
+
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
@@ -484,43 +506,46 @@ def exit_with_error(message, details=""):
sys.stderr.write("\n\n%s\n" % details)
import signal
+
os.kill(main_pid, signal.SIGINT)
sys.exit(2)
-def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None, **args):
+def generate_kml(
+ tx, ty, tz, tileext, tilesize, tileswne, options, children=None, **args
+):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
- args['tx'], args['ty'], args['tz'] = tx, ty, tz
- args['tileformat'] = tileext
- if 'tilesize' not in args:
- args['tilesize'] = tilesize
+ args["tx"], args["ty"], args["tz"] = tx, ty, tz
+ args["tileformat"] = tileext
+ if "tilesize" not in args:
+ args["tilesize"] = tilesize
- if 'minlodpixels' not in args:
- args['minlodpixels'] = int(args['tilesize'] / 2)
- if 'maxlodpixels' not in args:
- args['maxlodpixels'] = int(args['tilesize'] * 8)
+ if "minlodpixels" not in args:
+ args["minlodpixels"] = int(args["tilesize"] / 2)
+ if "maxlodpixels" not in args:
+ args["maxlodpixels"] = int(args["tilesize"] * 8)
if children == []:
- args['maxlodpixels'] = -1
+ args["maxlodpixels"] = -1
if tx is None:
tilekml = False
- args['title'] = options.title
+ args["title"] = options.title
else:
tilekml = True
- args['title'] = "%d/%d/%d.kml" % (tz, tx, ty)
- args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
+ args["title"] = "%d/%d/%d.kml" % (tz, tx, ty)
+ args["south"], args["west"], args["north"], args["east"] = tileswne(tx, ty, tz)
if tx == 0:
- args['drawOrder'] = 2 * tz + 1
+ args["drawOrder"] = 2 * tz + 1
elif tx is not None:
- args['drawOrder'] = 2 * tz
+ args["drawOrder"] = 2 * tz
else:
- args['drawOrder'] = 0
+ args["drawOrder"] = 0
url = options.url
if not url:
@@ -529,7 +554,8 @@ def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None
else:
url = ""
- s = """
+ s = (
+ """
%(title)s
@@ -538,9 +564,12 @@ def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None
checkHideChildren
- """ % args
+ """
+ % args
+ )
if tilekml:
- s += """
+ s += (
+ """
%(north).14f
@@ -565,7 +594,9 @@ def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None
%(west).14f
-""" % args
+"""
+ % args
+ )
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
@@ -590,8 +621,21 @@ def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None
- """ % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest,
- args['minlodpixels'], url, cz, cx, cy)
+ """ % (
+ cz,
+ cx,
+ cy,
+ args["tileformat"],
+ cnorth,
+ csouth,
+ ceast,
+ cwest,
+ args["minlodpixels"],
+ url,
+ cz,
+ cx,
+ cy,
+ )
s += """
@@ -599,32 +643,35 @@ def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None
return s
-def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
+def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=""):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tilesize = dstile.RasterXSize
tilebands = dstile.RasterCount
- if options.resampling == 'average':
+ if options.resampling == "average":
# Function: gdal.RegenerateOverview()
- for i in range(1, tilebands+1):
+ for i in range(1, tilebands + 1):
# Black border around NODATA
- res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
- 'average')
+ res = gdal.RegenerateOverview(
+ dsquery.GetRasterBand(i), dstile.GetRasterBand(i), "average"
+ )
if res != 0:
- exit_with_error("RegenerateOverview() failed on %s, error %d" % (
- tilefilename, res))
+ exit_with_error(
+ "RegenerateOverview() failed on %s, error %d" % (tilefilename, res)
+ )
- elif options.resampling == 'antialias':
+ elif options.resampling == "antialias":
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
- array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i+1),
- 0, 0, querysize, querysize)
- im = Image.fromarray(array, 'RGBA') # Always four bands
+ array[:, :, i] = gdalarray.BandReadAsArray(
+ dsquery.GetRasterBand(i + 1), 0, 0, querysize, querysize
+ )
+ im = Image.fromarray(array, "RGBA") # Always four bands
im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
@@ -633,29 +680,39 @@ def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
else:
- if options.resampling == 'near':
+ if options.resampling == "near":
gdal_resampling = gdal.GRA_NearestNeighbour
- elif options.resampling == 'bilinear':
+ elif options.resampling == "bilinear":
gdal_resampling = gdal.GRA_Bilinear
- elif options.resampling == 'cubic':
+ elif options.resampling == "cubic":
gdal_resampling = gdal.GRA_Cubic
- elif options.resampling == 'cubicspline':
+ elif options.resampling == "cubicspline":
gdal_resampling = gdal.GRA_CubicSpline
- elif options.resampling == 'lanczos':
+ elif options.resampling == "lanczos":
gdal_resampling = gdal.GRA_Lanczos
# Other algorithms are implemented by gdal.ReprojectImage().
- dsquery.SetGeoTransform((0.0, tilesize / float(querysize), 0.0, 0.0, 0.0,
- tilesize / float(querysize)))
+ dsquery.SetGeoTransform(
+ (
+ 0.0,
+ tilesize / float(querysize),
+ 0.0,
+ 0.0,
+ 0.0,
+ tilesize / float(querysize),
+ )
+ )
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
- exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
+ exit_with_error(
+ "ReprojectImage() failed on %s, error %d" % (tilefilename, res)
+ )
def setup_no_data_values(input_dataset, options):
@@ -664,13 +721,13 @@ def setup_no_data_values(input_dataset, options):
"""
in_nodata = []
if options.srcnodata:
- nds = list(map(float, options.srcnodata.split(',')))
+ nds = list(map(float, options.srcnodata.split(",")))
if len(nds) < input_dataset.RasterCount:
- in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
+ in_nodata = (nds * input_dataset.RasterCount)[: input_dataset.RasterCount]
else:
in_nodata = nds
else:
- for i in range(1, input_dataset.RasterCount+1):
+ for i in range(1, input_dataset.RasterCount + 1):
raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()
if raster_no_data is not None:
in_nodata.append(raster_no_data)
@@ -714,9 +771,9 @@ def setup_output_srs(input_srs, options):
"""
output_srs = osr.SpatialReference()
- if options.profile == 'mercator':
+ if options.profile == "mercator":
output_srs.ImportFromEPSG(3857)
- elif options.profile == 'geodetic':
+ elif options.profile == "geodetic":
output_srs.ImportFromEPSG(4326)
else:
output_srs = input_srs
@@ -725,8 +782,10 @@ def setup_output_srs(input_srs, options):
def has_georeference(dataset):
- return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
- dataset.GetGCPCount() != 0)
+ return (
+ dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
+ or dataset.GetGCPCount() != 0
+ )
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
@@ -737,12 +796,17 @@ def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
- if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
- to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
- from_srs.ExportToWkt(), to_srs.ExportToWkt())
+ if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (
+ from_dataset.GetGCPCount() != 0
+ ):
+ to_dataset = gdal.AutoCreateWarpedVRT(
+ from_dataset, from_srs.ExportToWkt(), to_srs.ExportToWkt()
+ )
if options and options.verbose:
- print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
+ print(
+ "Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')"
+ )
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
@@ -777,30 +841,31 @@ def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
# TODO: gbataille - Seems that I forgot tests there
if nodata_values != []:
- temp_file = gettempfilename('-gdal2tiles.vrt')
+ temp_file = gettempfilename("-gdal2tiles.vrt")
warped_vrt_dataset.GetDriver().CreateCopy(temp_file, warped_vrt_dataset)
- with open(temp_file, 'r') as f:
+ with open(temp_file, "r") as f:
vrt_string = f.read()
vrt_string = add_gdal_warp_options_to_string(
- vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
+ vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"}
+ )
-# TODO: gbataille - check the need for this replacement. Seems to work without
-# # replace BandMapping tag for NODATA bands....
-# for i in range(len(nodata_values)):
-# s = s.replace(
-# '' % ((i+1), (i+1)),
-# """
-#
-# %i
-# 0
-# %i
-# 0
-#
-# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
+ # TODO: gbataille - check the need for this replacement. Seems to work without
+ # # replace BandMapping tag for NODATA bands....
+ # for i in range(len(nodata_values)):
+ # s = s.replace(
+ # '' % ((i+1), (i+1)),
+ # """
+ #
+ # %i
+ # 0
+ # %i
+ # 0
+ #
+ # """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
# save the corrected VRT
- with open(temp_file, 'w') as f:
+ with open(temp_file, "w") as f:
f.write(vrt_string)
corrected_dataset = gdal.Open(temp_file)
@@ -808,7 +873,8 @@ def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
- 'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
+ "NODATA_VALUES", " ".join([str(i) for i in nodata_values])
+ )
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
@@ -842,8 +908,14 @@ def add_alpha_band_to_string_vrt(vrt_string):
index += 1
tb = ElementTree.TreeBuilder()
- tb.start("VRTRasterBand",
- {'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
+ tb.start(
+ "VRTRasterBand",
+ {
+ "dataType": "Byte",
+ "band": str(nb_bands + 1),
+ "subClass": "VRTWarpedRasterBand",
+ },
+ )
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
@@ -877,12 +949,12 @@ def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
not been forced by options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
- tempfilename = gettempfilename('-gdal2tiles.vrt')
+ tempfilename = gettempfilename("-gdal2tiles.vrt")
warped_vrt_dataset.GetDriver().CreateCopy(tempfilename, warped_vrt_dataset)
with open(tempfilename) as f:
orig_data = f.read()
alpha_data = add_alpha_band_to_string_vrt(orig_data)
- with open(tempfilename, 'w') as f:
+ with open(tempfilename, "w") as f:
f.write(alpha_data)
warped_vrt_dataset = gdal.Open(tempfilename)
@@ -903,9 +975,11 @@ def nb_data_bands(dataset):
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
- if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
- dataset.RasterCount == 4 or
- dataset.RasterCount == 2):
+ if (
+ (alphaband.GetMaskFlags() & gdal.GMF_ALPHA)
+ or dataset.RasterCount == 4
+ or dataset.RasterCount == 2
+ ):
return dataset.RasterCount - 1
else:
return dataset.RasterCount
@@ -913,16 +987,17 @@ def nb_data_bands(dataset):
def gettempfilename(suffix):
"""Returns a temporary filename"""
- if '_' in os.environ:
+ if "_" in os.environ:
# tempfile.mktemp() crashes on some Wine versions (the one of Ubuntu 12.04 particularly)
- if os.environ['_'].find('wine') >= 0:
- tmpdir = '.'
- if 'TMP' in os.environ:
- tmpdir = os.environ['TMP']
+ if os.environ["_"].find("wine") >= 0:
+ tmpdir = "."
+ if "TMP" in os.environ:
+ tmpdir = os.environ["TMP"]
import time
import random
+
random.seed(time.time())
- random_part = 'file%d' % random.randint(0, 1000000000)
+ random_part = "file%d" % random.randint(0, 1000000000)
return os.path.join(tmpdir, random_part + suffix)
return tempfile.mktemp(suffix)
@@ -939,7 +1014,7 @@ def create_base_tile(tile_job_info, tile_detail, queue=None):
tilebands = dataBandsCount + 1
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
- mem_drv = gdal.GetDriverByName('MEM')
+ mem_drv = gdal.GetDriverByName("MEM")
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
@@ -957,22 +1032,29 @@ def create_base_tile(tile_job_info, tile_detail, queue=None):
querysize = tile_detail.querysize
# Tile dataset in memory
- tilefilename = os.path.join(
- output, str(tz), str(tx), "%s.%s" % (ty, tileext))
- dstile = mem_drv.Create('', tilesize, tilesize, tilebands)
+ tilefilename = os.path.join(output, str(tz), str(tx), "%s.%s" % (ty, tileext))
+ dstile = mem_drv.Create("", tilesize, tilesize, tilebands)
data = alpha = None
if options.verbose:
- print("\tReadRaster Extent: ",
- (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
+ print(
+ "\tReadRaster Extent: ", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
+ )
# Query is in 'nearest neighbour' but can be bigger in then the tilesize
# We scale down the query to the tilesize by supplied algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
- data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
- band_list=list(range(1, dataBandsCount+1)))
+ data = ds.ReadRaster(
+ rx,
+ ry,
+ rxsize,
+ rysize,
+ wxsize,
+ wysize,
+ band_list=list(range(1, dataBandsCount + 1)),
+ )
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# The tile in memory is a transparent file by default. Write pixel values into it if
@@ -980,8 +1062,14 @@ def create_base_tile(tile_job_info, tile_detail, queue=None):
if data:
if tilesize == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
- dstile.WriteRaster(wx, wy, wxsize, wysize, data,
- band_list=list(range(1, dataBandsCount+1)))
+ dstile.WriteRaster(
+ wx,
+ wy,
+ wxsize,
+ wysize,
+ data,
+ band_list=list(range(1, dataBandsCount + 1)),
+ )
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
@@ -991,15 +1079,26 @@ def create_base_tile(tile_job_info, tile_detail, queue=None):
else:
# Big ReadRaster query in memory scaled to the tilesize - all but 'near'
# algo
- dsquery = mem_drv.Create('', querysize, querysize, tilebands)
+ dsquery = mem_drv.Create("", querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
- dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
- band_list=list(range(1, dataBandsCount+1)))
+ dsquery.WriteRaster(
+ wx,
+ wy,
+ wxsize,
+ wysize,
+ data,
+ band_list=list(range(1, dataBandsCount + 1)),
+ )
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
- scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
- tilefilename=tilefilename)
+ scale_query_to_tile(
+ dsquery,
+ dstile,
+ tile_job_info.tile_driver,
+ options,
+ tilefilename=tilefilename,
+ )
del dsquery
# Force freeing the memory to make sure the C++ destructor is called and the memory as well as
@@ -1007,7 +1106,7 @@ def create_base_tile(tile_job_info, tile_detail, queue=None):
del ds
del data
- if options.resampling != 'antialias':
+ if options.resampling != "antialias":
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
@@ -1015,13 +1114,20 @@ def create_base_tile(tile_job_info, tile_detail, queue=None):
# Create a KML file for this tile.
if tile_job_info.kml:
- kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
+ kmlfilename = os.path.join(output, str(tz), str(tx), "%d.kml" % ty)
if not options.resume or not os.path.exists(kmlfilename):
- with open(kmlfilename, 'wb') as f:
- f.write(generate_kml(
- tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
- tile_job_info.tile_swne, tile_job_info.options
- ).encode('utf-8'))
+ with open(kmlfilename, "wb") as f:
+ f.write(
+ generate_kml(
+ tx,
+ ty,
+ tz,
+ tile_job_info.tile_extension,
+ tile_job_info.tile_size,
+ tile_job_info.tile_swne,
+ tile_job_info.options,
+ ).encode("utf-8")
+ )
if queue:
queue.put("tile %s %s %s" % (tx, ty, tz))
@@ -1029,7 +1135,7 @@ def create_base_tile(tile_job_info, tile_detail, queue=None):
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
- mem_driver = gdal.GetDriverByName('MEM')
+ mem_driver = gdal.GetDriverByName("MEM")
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
@@ -1040,7 +1146,7 @@ def create_overview_tiles(tile_job_info, output_folder, options):
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
- tcount += (1 + abs(tmaxx-tminx)) * (1 + abs(tmaxy-tminy))
+ tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
@@ -1059,13 +1165,15 @@ def create_overview_tiles(tile_job_info, output_folder, options):
for tx in range(tminx, tmaxx + 1):
ti += 1
- tilefilename = os.path.join(output_folder,
- str(tz),
- str(tx),
- "%s.%s" % (ty, tile_job_info.tile_extension))
+ tilefilename = os.path.join(
+ output_folder,
+ str(tz),
+ str(tx),
+ "%s.%s" % (ty, tile_job_info.tile_extension),
+ )
if options.verbose:
- print(ti, '/', tcount, tilefilename)
+ print(ti, "/", tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
@@ -1078,11 +1186,16 @@ def create_overview_tiles(tile_job_info, output_folder, options):
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
- dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
- 2 * tile_job_info.tile_size, tilebands)
+ dsquery = mem_driver.Create(
+ "",
+ 2 * tile_job_info.tile_size,
+ 2 * tile_job_info.tile_size,
+ tilebands,
+ )
# TODO: fill the null value
- dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
- tilebands)
+ dstile = mem_driver.Create(
+ "", tile_job_info.tile_size, tile_job_info.tile_size, tilebands
+ )
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
@@ -1095,10 +1208,17 @@ def create_overview_tiles(tile_job_info, output_folder, options):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
dsquerytile = gdal.Open(
- os.path.join(output_folder, str(tz + 1), str(x),
- "%s.%s" % (y, tile_job_info.tile_extension)),
- gdal.GA_ReadOnly)
- if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
+ os.path.join(
+ output_folder,
+ str(tz + 1),
+ str(x),
+ "%s.%s" % (y, tile_job_info.tile_extension),
+ ),
+ gdal.GA_ReadOnly,
+ )
+ if (ty == 0 and y == 1) or (
+ ty != 0 and (y % (2 * ty)) != 0
+ ):
tileposy = 0
else:
tileposy = tile_job_info.tile_size
@@ -1109,36 +1229,56 @@ def create_overview_tiles(tile_job_info, output_folder, options):
else:
tileposx = 0
dsquery.WriteRaster(
- tileposx, tileposy, tile_job_info.tile_size,
+ tileposx,
+ tileposy,
tile_job_info.tile_size,
- dsquerytile.ReadRaster(0, 0,
- tile_job_info.tile_size,
- tile_job_info.tile_size),
- band_list=list(range(1, tilebands + 1)))
+ tile_job_info.tile_size,
+ dsquerytile.ReadRaster(
+ 0,
+ 0,
+ tile_job_info.tile_size,
+ tile_job_info.tile_size,
+ ),
+ band_list=list(range(1, tilebands + 1)),
+ )
children.append([x, y, tz + 1])
- scale_query_to_tile(dsquery, dstile, tile_driver, options,
- tilefilename=tilefilename)
+ scale_query_to_tile(
+ dsquery, dstile, tile_driver, options, tilefilename=tilefilename
+ )
# Write a copy of tile to png/jpg
- if options.resampling != 'antialias':
+ if options.resampling != "antialias":
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
- print("\tbuild from zoom", tz + 1,
- " tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
- (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
+ print(
+ "\tbuild from zoom",
+ tz + 1,
+ " tiles:",
+ (2 * tx, 2 * ty),
+ (2 * tx + 1, 2 * ty),
+ (2 * tx, 2 * ty + 1),
+ (2 * tx + 1, 2 * ty + 1),
+ )
# Create a KML file for this tile.
if tile_job_info.kml:
- with open(os.path.join(
- output_folder,
- '%d/%d/%d.kml' % (tz, tx, ty)
- ), 'wb') as f:
- f.write(generate_kml(
- tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
- get_tile_swne(tile_job_info, options), options, children
- ).encode('utf-8'))
+ with open(
+ os.path.join(output_folder, "%d/%d/%d.kml" % (tz, tx, ty)), "wb"
+ ) as f:
+ f.write(
+ generate_kml(
+ tx,
+ ty,
+ tz,
+ tile_job_info.tile_extension,
+ tile_job_info.tile_size,
+ get_tile_swne(tile_job_info, options),
+ options,
+ children,
+ ).encode("utf-8")
+ )
if not options.verbose and not options.quiet:
progress_bar.log_progress()
@@ -1148,69 +1288,159 @@ def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
+
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
- p.add_option("-p", "--profile", dest='profile',
- type='choice', choices=profile_list,
- help=("Tile cutting profile (%s) - default 'mercator' "
- "(Google Maps compatible)" % ",".join(profile_list)))
- p.add_option("-r", "--resampling", dest="resampling",
- type='choice', choices=resampling_list,
- help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
- p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
- help="The spatial reference system used for the source input data")
- p.add_option('-z', '--zoom', dest="zoom",
- help="Zoom levels to render (format:'2-5' or '10').")
- p.add_option('-e', '--resume', dest="resume", action="store_true",
- help="Resume mode. Generate only missing files.")
- p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
- help="NODATA transparency value to assign to the input data")
- p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
- help=("When using the geodetic profile, specifies the base resolution "
- "as 0.703125 or 2 tiles at zoom level 0."))
- p.add_option("-v", "--verbose",
- action="store_true", dest="verbose",
- help="Print status messages to stdout")
- p.add_option("-q", "--quiet",
- action="store_true", dest="quiet",
- help="Disable messages and status to stdout")
- p.add_option("--processes",
- dest="nb_processes",
- type='int',
- help="Number of processes to use for tiling")
+ p.add_option(
+ "-p",
+ "--profile",
+ dest="profile",
+ type="choice",
+ choices=profile_list,
+ help=(
+ "Tile cutting profile (%s) - default 'mercator' "
+ "(Google Maps compatible)" % ",".join(profile_list)
+ ),
+ )
+ p.add_option(
+ "-r",
+ "--resampling",
+ dest="resampling",
+ type="choice",
+ choices=resampling_list,
+ help="Resampling method (%s) - default 'average'" % ",".join(resampling_list),
+ )
+ p.add_option(
+ "-s",
+ "--s_srs",
+ dest="s_srs",
+ metavar="SRS",
+ help="The spatial reference system used for the source input data",
+ )
+ p.add_option(
+ "-z",
+ "--zoom",
+ dest="zoom",
+ help="Zoom levels to render (format:'2-5' or '10').",
+ )
+ p.add_option(
+ "-e",
+ "--resume",
+ dest="resume",
+ action="store_true",
+ help="Resume mode. Generate only missing files.",
+ )
+ p.add_option(
+ "-a",
+ "--srcnodata",
+ dest="srcnodata",
+ metavar="NODATA",
+ help="NODATA transparency value to assign to the input data",
+ )
+ p.add_option(
+ "-d",
+ "--tmscompatible",
+ dest="tmscompatible",
+ action="store_true",
+ help=(
+ "When using the geodetic profile, specifies the base resolution "
+ "as 0.703125 or 2 tiles at zoom level 0."
+ ),
+ )
+ p.add_option(
+ "-v",
+ "--verbose",
+ action="store_true",
+ dest="verbose",
+ help="Print status messages to stdout",
+ )
+ p.add_option(
+ "-q",
+ "--quiet",
+ action="store_true",
+ dest="quiet",
+ help="Disable messages and status to stdout",
+ )
+ p.add_option(
+ "--processes",
+ dest="nb_processes",
+ type="int",
+ help="Number of processes to use for tiling",
+ )
# KML options
- g = OptionGroup(p, "KML (Google Earth) options",
- "Options for generated Google Earth SuperOverlay metadata")
- g.add_option("-k", "--force-kml", dest='kml', action="store_true",
- help=("Generate KML for Google Earth - default for 'geodetic' profile and "
- "'raster' in EPSG:4326. For a dataset with different projection use "
- "with caution!"))
- g.add_option("-n", "--no-kml", dest='kml', action="store_false",
- help="Avoid automatic generation of KML files for EPSG:4326")
- g.add_option("-u", "--url", dest='url',
- help="URL address where the generated tiles are going to be published")
+ g = OptionGroup(
+ p,
+ "KML (Google Earth) options",
+ "Options for generated Google Earth SuperOverlay metadata",
+ )
+ g.add_option(
+ "-k",
+ "--force-kml",
+ dest="kml",
+ action="store_true",
+ help=(
+ "Generate KML for Google Earth - default for 'geodetic' profile and "
+ "'raster' in EPSG:4326. For a dataset with different projection use "
+ "with caution!"
+ ),
+ )
+ g.add_option(
+ "-n",
+ "--no-kml",
+ dest="kml",
+ action="store_false",
+ help="Avoid automatic generation of KML files for EPSG:4326",
+ )
+ g.add_option(
+ "-u",
+ "--url",
+ dest="url",
+ help="URL address where the generated tiles are going to be published",
+ )
p.add_option_group(g)
# HTML options
- g = OptionGroup(p, "Web viewer options",
- "Options for generated HTML viewers a la Google Maps")
- g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
- help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
- g.add_option("-t", "--title", dest='title',
- help="Title of the map")
- g.add_option("-c", "--copyright", dest='copyright',
- help="Copyright for the map")
- g.add_option("-g", "--googlekey", dest='googlekey',
- help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
- g.add_option("-b", "--bingkey", dest='bingkey',
- help="Bing Maps API key from https://www.bingmapsportal.com/")
+ g = OptionGroup(
+ p, "Web viewer options", "Options for generated HTML viewers a la Google Maps"
+ )
+ g.add_option(
+ "-w",
+ "--webviewer",
+ dest="webviewer",
+ type="choice",
+ choices=webviewer_list,
+ help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list),
+ )
+ g.add_option("-t", "--title", dest="title", help="Title of the map")
+ g.add_option("-c", "--copyright", dest="copyright", help="Copyright for the map")
+ g.add_option(
+ "-g",
+ "--googlekey",
+ dest="googlekey",
+ help="Google Maps API key from http://code.google.com/apis/maps/signup.html",
+ )
+ g.add_option(
+ "-b",
+ "--bingkey",
+ dest="bingkey",
+ help="Bing Maps API key from https://www.bingmapsportal.com/",
+ )
p.add_option_group(g)
- p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
- webviewer='all', copyright='', resampling='average', resume=False,
- googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
- processes=1)
+ p.set_defaults(
+ verbose=False,
+ profile="mercator",
+ kml=False,
+ url="",
+ webviewer="all",
+ copyright="",
+ resampling="average",
+ resume=False,
+ googlekey="INSERT_YOUR_KEY_HERE",
+ bingkey="INSERT_YOUR_KEY_HERE",
+ processes=1,
+ )
return p
@@ -1220,16 +1450,22 @@ def process_args(argv):
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
- if (len(args) == 0):
- exit_with_error("You need to specify at least an input file as argument to the script")
- if (len(args) > 2):
- exit_with_error("Processing of several input files is not supported.",
- "Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
- "files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
+ if len(args) == 0:
+ exit_with_error(
+ "You need to specify at least an input file as argument to the script"
+ )
+ if len(args) > 2:
+ exit_with_error(
+ "Processing of several input files is not supported.",
+ "Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
+ "files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args),
+ )
input_file = args[0]
if not os.path.isfile(input_file):
- exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
+ exit_with_error(
+ "The provided input file %s does not exist or is not a file" % input_file
+ )
if len(args) == 2:
output_folder = args[1]
@@ -1245,46 +1481,52 @@ def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
- if options.url and not options.url.endswith('/'):
- options.url += '/'
+ if options.url and not options.url.endswith("/"):
+ options.url += "/"
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
- options.url += os.path.basename(out_path) + '/'
+ options.url += os.path.basename(out_path) + "/"
# Supported options
- if options.resampling == 'average':
+ if options.resampling == "average":
try:
if gdal.RegenerateOverview:
pass
except Exception:
- exit_with_error("'average' resampling algorithm is not available.",
- "Please use -r 'near' argument or upgrade to newer version of GDAL.")
+ exit_with_error(
+ "'average' resampling algorithm is not available.",
+ "Please use -r 'near' argument or upgrade to newer version of GDAL.",
+ )
- elif options.resampling == 'antialias':
+ elif options.resampling == "antialias":
try:
- if numpy: # pylint:disable=W0125
+ if numpy: # pylint:disable=W0125
pass
except Exception:
- exit_with_error("'antialias' resampling algorithm is not available.",
- "Install PIL (Python Imaging Library) and numpy.")
+ exit_with_error(
+ "'antialias' resampling algorithm is not available.",
+ "Install PIL (Python Imaging Library) and numpy.",
+ )
try:
- os.path.basename(input_file).encode('ascii')
+ os.path.basename(input_file).encode("ascii")
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
- if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
+ if not full_ascii and "UTF-8" not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
- print("\nWARNING: "
- "You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
- "not UTF-8 compatible, and your input file contains non-ascii characters. "
- "The generated sample googlemaps, openlayers or "
- "leaflet files might contain some invalid characters as a result\n")
+ print(
+ "\nWARNING: "
+ "You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
+ "not UTF-8 compatible, and your input file contains non-ascii characters. "
+ "The generated sample googlemaps, openlayers or "
+ "leaflet files might contain some invalid characters as a result\n"
+ )
# Output the results
if options.verbose:
@@ -1292,7 +1534,7 @@ def options_post_processing(options, input_file, output_folder):
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
- print('')
+ print("")
return options
@@ -1330,6 +1572,7 @@ class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
+
src_file = ""
nb_data_bands = 0
output_file_path = ""
@@ -1393,10 +1636,10 @@ class GDAL2Tiles(object):
# Tile format
self.tilesize = 256
- self.tiledriver = 'PNG'
- self.tileext = 'png'
+ self.tiledriver = "PNG"
+ self.tileext = "png"
self.tmp_dir = tempfile.mkdtemp()
- self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
+ self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + ".vrt")
# Should we read bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
@@ -1417,18 +1660,18 @@ class GDAL2Tiles(object):
self.output_folder = output_folder
self.options = options
- if self.options.resampling == 'near':
+ if self.options.resampling == "near":
self.querysize = self.tilesize
- elif self.options.resampling == 'bilinear':
+ elif self.options.resampling == "bilinear":
self.querysize = self.tilesize * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
- minmax = self.options.zoom.split('-', 1)
- minmax.extend([''])
+ minmax = self.options.zoom.split("-", 1)
+ minmax.extend([""])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
@@ -1445,13 +1688,17 @@ class GDAL2Tiles(object):
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
- self.mem_drv = gdal.GetDriverByName('MEM')
+ self.mem_drv = gdal.GetDriverByName("MEM")
if not self.out_drv:
- raise Exception("The '%s' driver was not found, is it available in this GDAL build?",
- self.tiledriver)
+ raise Exception(
+ "The '%s' driver was not found, is it available in this GDAL build?",
+ self.tiledriver,
+ )
if not self.mem_drv:
- raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
+ raise Exception(
+ "The 'MEM' driver was not found, is it available in this GDAL build?"
+ )
# Open the input file
@@ -1461,14 +1708,21 @@ class GDAL2Tiles(object):
raise Exception("No input file was specified")
if self.options.verbose:
- print("Input file:",
- "( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
- input_dataset.RasterYSize,
- input_dataset.RasterCount))
+ print(
+ "Input file:",
+ "( %sP x %sL - %s bands)"
+ % (
+ input_dataset.RasterXSize,
+ input_dataset.RasterYSize,
+ input_dataset.RasterCount,
+ ),
+ )
if not input_dataset:
# Note: GDAL prints the ERROR message too
- exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
+ exit_with_error(
+ "It is not possible to open the input file '%s'." % self.input_file
+ )
# Read metadata from the input file
if input_dataset.RasterCount == 0:
@@ -1480,16 +1734,21 @@ class GDAL2Tiles(object):
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
- "gdal2tiles temp.vrt" % self.input_file
+ "gdal2tiles temp.vrt" % self.input_file,
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
- print("Preprocessed file:",
- "( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
- input_dataset.RasterYSize,
- input_dataset.RasterCount))
+ print(
+ "Preprocessed file:",
+ "( %sP x %sL - %s bands)"
+ % (
+ input_dataset.RasterXSize,
+ input_dataset.RasterYSize,
+ input_dataset.RasterCount,
+ ),
+ )
in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
@@ -1500,44 +1759,56 @@ class GDAL2Tiles(object):
self.warped_input_dataset = None
- if self.options.profile in ('mercator', 'geodetic'):
+ if self.options.profile in ("mercator", "geodetic"):
if not in_srs:
exit_with_error(
"Input file has unknown SRS.",
- "Use --s_srs ESPG:xyz (or similar) to provide source reference system.")
+ "Use --s_srs ESPG:xyz (or similar) to provide source reference system.",
+ )
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS "
- "software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
+ "software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs",
)
- if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
- (input_dataset.GetGCPCount() != 0)):
+ if (in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or (
+ input_dataset.GetGCPCount() != 0
+ ):
self.warped_input_dataset = reproject_dataset(
- input_dataset, in_srs, self.out_srs)
+ input_dataset, in_srs, self.out_srs
+ )
if in_nodata:
self.warped_input_dataset = update_no_data_values(
- self.warped_input_dataset, in_nodata, options=self.options)
+ self.warped_input_dataset, in_nodata, options=self.options
+ )
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
- self.warped_input_dataset, options=self.options)
+ self.warped_input_dataset, options=self.options
+ )
if self.warped_input_dataset and self.options.verbose:
- print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
- self.warped_input_dataset.RasterXSize,
- self.warped_input_dataset.RasterYSize,
- self.warped_input_dataset.RasterCount))
+ print(
+ "Projected file:",
+ "tiles.vrt",
+ "( %sP x %sL - %s bands)"
+ % (
+ self.warped_input_dataset.RasterXSize,
+ self.warped_input_dataset.RasterYSize,
+ self.warped_input_dataset.RasterCount,
+ ),
+ )
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
- self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename,
- self.warped_input_dataset)
+ self.warped_input_dataset.GetDriver().CreateCopy(
+ self.tmp_vrt_filename, self.warped_input_dataset
+ )
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
@@ -1560,23 +1831,35 @@ class GDAL2Tiles(object):
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
- exit_with_error("Georeference of the raster contains rotation or skew. "
- "Such raster is not supported. Please use gdalwarp first.")
+ exit_with_error(
+ "Georeference of the raster contains rotation or skew. "
+ "Such raster is not supported. Please use gdalwarp first."
+ )
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
- self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
+ self.omaxx = (
+ self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
+ )
self.omaxy = self.out_gt[3]
- self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
+ self.ominy = (
+ self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
+ )
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
- print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
+ print(
+ "Bounds (output srs):",
+ round(self.ominx, 13),
+ self.ominy,
+ self.omaxx,
+ self.omaxy,
+ )
# Calculating ranges for tiles in different zoom levels
- if self.options.profile == 'mercator':
+ if self.options.profile == "mercator":
self.mercator = GlobalMercator()
@@ -1590,7 +1873,7 @@ class GDAL2Tiles(object):
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
- tmaxx, tmaxy = min(2**tz-1, tmaxx), min(2**tz-1, tmaxy)
+ tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
@@ -1598,10 +1881,13 @@ class GDAL2Tiles(object):
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
- self.out_gt[1] *
- max(self.warped_input_dataset.RasterXSize,
- self.warped_input_dataset.RasterYSize) /
- float(self.tilesize))
+ self.out_gt[1]
+ * max(
+ self.warped_input_dataset.RasterXSize,
+ self.warped_input_dataset.RasterYSize,
+ )
+ / float(self.tilesize)
+ )
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
@@ -1609,17 +1895,21 @@ class GDAL2Tiles(object):
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
- print("Bounds (latlong):",
- self.mercator.MetersToLatLon(self.ominx, self.ominy),
- self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
- print('MinZoomLevel:', self.tminz)
- print("MaxZoomLevel:",
- self.tmaxz,
- "(",
- self.mercator.Resolution(self.tmaxz),
- ")")
+ print(
+ "Bounds (latlong):",
+ self.mercator.MetersToLatLon(self.ominx, self.ominy),
+ self.mercator.MetersToLatLon(self.omaxx, self.omaxy),
+ )
+ print("MinZoomLevel:", self.tminz)
+ print(
+ "MaxZoomLevel:",
+ self.tmaxz,
+ "(",
+ self.mercator.Resolution(self.tmaxz),
+ ")",
+ )
- if self.options.profile == 'geodetic':
+ if self.options.profile == "geodetic":
self.geodetic = GlobalGeodetic(self.options.tmscompatible)
@@ -1633,7 +1923,7 @@ class GDAL2Tiles(object):
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
- tmaxx, tmaxy = min(2**(tz+1)-1, tmaxx), min(2**tz-1, tmaxy)
+ tmaxx, tmaxy = min(2 ** (tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
@@ -1642,10 +1932,13 @@ class GDAL2Tiles(object):
# (closest possible zoom level up on the resolution of raster)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
- self.out_gt[1] *
- max(self.warped_input_dataset.RasterXSize,
- self.warped_input_dataset.RasterYSize) /
- float(self.tilesize))
+ self.out_gt[1]
+ * max(
+ self.warped_input_dataset.RasterXSize,
+ self.warped_input_dataset.RasterYSize,
+ )
+ / float(self.tilesize)
+ )
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
@@ -1653,16 +1946,29 @@ class GDAL2Tiles(object):
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
- print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
+ print(
+ "Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy
+ )
- if self.options.profile == 'raster':
+ if self.options.profile == "raster":
def log2(x):
return math.log10(x) / math.log10(2)
self.nativezoom = int(
- max(math.ceil(log2(self.warped_input_dataset.RasterXSize/float(self.tilesize))),
- math.ceil(log2(self.warped_input_dataset.RasterYSize/float(self.tilesize)))))
+ max(
+ math.ceil(
+ log2(
+ self.warped_input_dataset.RasterXSize / float(self.tilesize)
+ )
+ ),
+ math.ceil(
+ log2(
+ self.warped_input_dataset.RasterYSize / float(self.tilesize)
+ )
+ ),
+ )
+ )
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
@@ -1676,13 +1982,17 @@ class GDAL2Tiles(object):
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
- self.tminmax = list(range(0, self.tmaxz+1))
- self.tsize = list(range(0, self.tmaxz+1))
- for tz in range(0, self.tmaxz+1):
- tsize = 2.0**(self.nativezoom-tz)*self.tilesize
+ self.tminmax = list(range(0, self.tmaxz + 1))
+ self.tsize = list(range(0, self.tmaxz + 1))
+ for tz in range(0, self.tmaxz + 1):
+ tsize = 2.0 ** (self.nativezoom - tz) * self.tilesize
tminx, tminy = 0, 0
- tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
- tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
+ tmaxx = (
+ int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
+ )
+ tmaxy = (
+ int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
+ )
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
@@ -1691,11 +2001,13 @@ class GDAL2Tiles(object):
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
- pixelsizex = (2**(self.tmaxz-z) * self.out_gt[1]) # X-pixel size in level
- west = self.out_gt[0] + x*self.tilesize*pixelsizex
- east = west + self.tilesize*pixelsizex
- south = self.ominy + y*self.tilesize*pixelsizex
- north = south + self.tilesize*pixelsizex
+ pixelsizex = (
+ 2 ** (self.tmaxz - z) * self.out_gt[1]
+ ) # X-pixel size in level
+ west = self.out_gt[0] + x * self.tilesize * pixelsizex
+ east = west + self.tilesize * pixelsizex
+ south = self.ominy + y * self.tilesize * pixelsizex
+ north = south + self.tilesize * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
@@ -1704,7 +2016,7 @@ class GDAL2Tiles(object):
self.tileswne = rastertileswne
else:
- self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
+ self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
def generate_metadata(self):
"""
@@ -1715,7 +2027,7 @@ class GDAL2Tiles(object):
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
- if self.options.profile == 'mercator':
+ if self.options.profile == "mercator":
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
@@ -1724,27 +2036,39 @@ class GDAL2Tiles(object):
self.swne = (south, west, north, east)
# Generate googlemaps.html
- if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
- if (not self.options.resume or not
- os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
- with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
- f.write(self.generate_googlemaps().encode('utf-8'))
+ if (
+ self.options.webviewer in ("all", "google")
+ and self.options.profile == "mercator"
+ ):
+ if not self.options.resume or not os.path.exists(
+ os.path.join(self.output_folder, "googlemaps.html")
+ ):
+ with open(
+ os.path.join(self.output_folder, "googlemaps.html"), "wb"
+ ) as f:
+ f.write(self.generate_googlemaps().encode("utf-8"))
# Generate openlayers.html
- if self.options.webviewer in ('all', 'openlayers'):
- if (not self.options.resume or not
- os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
- with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
- f.write(self.generate_openlayers().encode('utf-8'))
+ if self.options.webviewer in ("all", "openlayers"):
+ if not self.options.resume or not os.path.exists(
+ os.path.join(self.output_folder, "openlayers.html")
+ ):
+ with open(
+ os.path.join(self.output_folder, "openlayers.html"), "wb"
+ ) as f:
+ f.write(self.generate_openlayers().encode("utf-8"))
# Generate leaflet.html
- if self.options.webviewer in ('all', 'leaflet'):
- if (not self.options.resume or not
- os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
- with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
- f.write(self.generate_leaflet().encode('utf-8'))
+ if self.options.webviewer in ("all", "leaflet"):
+ if not self.options.resume or not os.path.exists(
+ os.path.join(self.output_folder, "leaflet.html")
+ ):
+ with open(
+ os.path.join(self.output_folder, "leaflet.html"), "wb"
+ ) as f:
+ f.write(self.generate_leaflet().encode("utf-8"))
- elif self.options.profile == 'geodetic':
+ elif self.options.profile == "geodetic":
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
@@ -1753,13 +2077,16 @@ class GDAL2Tiles(object):
self.swne = (south, west, north, east)
# Generate openlayers.html
- if self.options.webviewer in ('all', 'openlayers'):
- if (not self.options.resume or not
- os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
- with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
- f.write(self.generate_openlayers().encode('utf-8'))
+ if self.options.webviewer in ("all", "openlayers"):
+ if not self.options.resume or not os.path.exists(
+ os.path.join(self.output_folder, "openlayers.html")
+ ):
+ with open(
+ os.path.join(self.output_folder, "openlayers.html"), "wb"
+ ) as f:
+ f.write(self.generate_openlayers().encode("utf-8"))
- elif self.options.profile == 'raster':
+ elif self.options.profile == "raster":
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
@@ -1767,34 +2094,50 @@ class GDAL2Tiles(object):
self.swne = (south, west, north, east)
# Generate openlayers.html
- if self.options.webviewer in ('all', 'openlayers'):
- if (not self.options.resume or not
- os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
- with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
- f.write(self.generate_openlayers().encode('utf-8'))
+ if self.options.webviewer in ("all", "openlayers"):
+ if not self.options.resume or not os.path.exists(
+ os.path.join(self.output_folder, "openlayers.html")
+ ):
+ with open(
+ os.path.join(self.output_folder, "openlayers.html"), "wb"
+ ) as f:
+ f.write(self.generate_openlayers().encode("utf-8"))
# Generate tilemapresource.xml.
- if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):
- with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
- f.write(self.generate_tilemapresource().encode('utf-8'))
+ if not self.options.resume or not os.path.exists(
+ os.path.join(self.output_folder, "tilemapresource.xml")
+ ):
+ with open(
+ os.path.join(self.output_folder, "tilemapresource.xml"), "wb"
+ ) as f:
+ f.write(self.generate_tilemapresource().encode("utf-8"))
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
- for x in range(xmin, xmax+1):
- for y in range(ymin, ymax+1):
+ for x in range(xmin, xmax + 1):
+ for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
- if (not self.options.resume or not
- os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
- with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
- f.write(generate_kml(
- None, None, None, self.tileext, self.tilesize, self.tileswne,
- self.options, children
- ).encode('utf-8'))
+ if not self.options.resume or not os.path.exists(
+ os.path.join(self.output_folder, "doc.kml")
+ ):
+ with open(os.path.join(self.output_folder, "doc.kml"), "wb") as f:
+ f.write(
+ generate_kml(
+ None,
+ None,
+ None,
+ self.tileext,
+ self.tilesize,
+ self.tileswne,
+ self.options,
+ children,
+ ).encode("utf-8")
+ )
def generate_base_tiles(self):
"""
@@ -1805,10 +2148,10 @@ class GDAL2Tiles(object):
print("Generating Base Tiles:")
if self.options.verbose:
- print('')
+ print("")
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
- print('')
+ print("")
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
@@ -1821,20 +2164,21 @@ class GDAL2Tiles(object):
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
- tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))
+ tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
tile_details = []
tz = self.tmaxz
- for ty in range(tmaxy, tminy-1, -1):
- for tx in range(tminx, tmaxx+1):
+ for ty in range(tmaxy, tminy - 1, -1):
+ for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(
- self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
+ self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext)
+ )
if self.options.verbose:
- print(ti, '/', tcount, tilefilename)
+ print(ti, "/", tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
@@ -1845,16 +2189,16 @@ class GDAL2Tiles(object):
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
- if self.options.profile == 'mercator':
+ if self.options.profile == "mercator":
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
- elif self.options.profile == 'geodetic':
+ elif self.options.profile == "geodetic":
b = self.geodetic.TileBounds(tx, ty, tz)
# Don't scale up by nearest neighbour, better change the querysize
# to the native resolution (and return smaller query tile) for scaling
- if self.options.profile in ('mercator', 'geodetic'):
+ if self.options.profile in ("mercator", "geodetic"):
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
@@ -1863,15 +2207,21 @@ class GDAL2Tiles(object):
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
- rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
+ rb, wb = self.geo_query(
+ ds, b[0], b[3], b[2], b[1], querysize=querysize
+ )
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
- else: # 'raster' profile:
+ else: # 'raster' profile:
- tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom
- xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
+ tsize = int(
+ self.tsize[tz]
+ ) # tilesize in raster coordinates for actual zoom
+ xsize = (
+ self.warped_input_dataset.RasterXSize
+ ) # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
if tz >= self.nativezoom:
querysize = self.tilesize
@@ -1891,8 +2241,8 @@ class GDAL2Tiles(object):
ry = ysize - (ty * tsize) - rysize
wx, wy = 0, 0
- wxsize = int(rxsize/float(tsize) * self.tilesize)
- wysize = int(rysize/float(tsize) * self.tilesize)
+ wxsize = int(rxsize / float(tsize) * self.tilesize)
+ wysize = int(rysize / float(tsize) * self.tilesize)
if wysize != self.tilesize:
wy = self.tilesize - wysize
@@ -1900,8 +2250,18 @@ class GDAL2Tiles(object):
# geo_query
tile_details.append(
TileDetail(
- tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
- wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
+ tx=tx,
+ ty=ty,
+ tz=tz,
+ rx=rx,
+ ry=ry,
+ rxsize=rxsize,
+ rysize=rysize,
+ wx=wx,
+ wy=wy,
+ wxsize=wxsize,
+ wysize=wysize,
+ querysize=querysize,
)
)
@@ -1952,7 +2312,7 @@ class GDAL2Tiles(object):
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
- if rx+rxsize > ds.RasterXSize:
+ if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
@@ -1963,7 +2323,7 @@ class GDAL2Tiles(object):
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
- if ry+rysize > ds.RasterYSize:
+ if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
@@ -1977,25 +2337,26 @@ class GDAL2Tiles(object):
"""
args = {}
- args['title'] = self.options.title
- args['south'], args['west'], args['north'], args['east'] = self.swne
- args['tilesize'] = self.tilesize
- args['tileformat'] = self.tileext
- args['publishurl'] = self.options.url
- args['profile'] = self.options.profile
+ args["title"] = self.options.title
+ args["south"], args["west"], args["north"], args["east"] = self.swne
+ args["tilesize"] = self.tilesize
+ args["tileformat"] = self.tileext
+ args["publishurl"] = self.options.url
+ args["profile"] = self.options.profile
- if self.options.profile == 'mercator':
- args['srs'] = "EPSG:3857"
- elif self.options.profile == 'geodetic':
- args['srs'] = "EPSG:4326"
+ if self.options.profile == "mercator":
+ args["srs"] = "EPSG:3857"
+ elif self.options.profile == "geodetic":
+ args["srs"] = "EPSG:4326"
elif self.options.s_srs:
- args['srs'] = self.options.s_srs
+ args["srs"] = self.options.s_srs
elif self.out_srs:
- args['srs'] = self.out_srs.ExportToWkt()
+ args["srs"] = self.out_srs.ExportToWkt()
else:
- args['srs'] = ""
+ args["srs"] = ""
- s = """
+ s = (
+ """
%(title)s
@@ -2004,17 +2365,30 @@ class GDAL2Tiles(object):
-""" % args # noqa
- for z in range(self.tminz, self.tmaxz+1):
- if self.options.profile == 'raster':
- s += """ \n""" % (
- args['publishurl'], z, (2**(self.nativezoom-z) * self.out_gt[1]), z)
- elif self.options.profile == 'mercator':
- s += """ \n""" % (
- args['publishurl'], z, 156543.0339/2**z, z)
- elif self.options.profile == 'geodetic':
- s += """ \n""" % (
- args['publishurl'], z, 0.703125/2**z, z)
+"""
+ % args
+ ) # noqa
+ for z in range(self.tminz, self.tmaxz + 1):
+ if self.options.profile == "raster":
+ s += (
+ """ \n"""
+ % (
+ args["publishurl"],
+ z,
+ (2 ** (self.nativezoom - z) * self.out_gt[1]),
+ z,
+ )
+ )
+ elif self.options.profile == "mercator":
+ s += (
+ """ \n"""
+ % (args["publishurl"], z, 156543.0339 / 2**z, z)
+ )
+ elif self.options.profile == "geodetic":
+ s += (
+ """ \n"""
+ % (args["publishurl"], z, 0.703125 / 2**z, z)
+ )
s += """
"""
@@ -2028,17 +2402,18 @@ class GDAL2Tiles(object):
publishurl
"""
args = {}
- args['title'] = self.options.title
- args['googlemapskey'] = self.options.googlekey
- args['south'], args['west'], args['north'], args['east'] = self.swne
- args['minzoom'] = self.tminz
- args['maxzoom'] = self.tmaxz
- args['tilesize'] = self.tilesize
- args['tileformat'] = self.tileext
- args['publishurl'] = self.options.url
- args['copyright'] = self.options.copyright
+ args["title"] = self.options.title
+ args["googlemapskey"] = self.options.googlekey
+ args["south"], args["west"], args["north"], args["east"] = self.swne
+ args["minzoom"] = self.tminz
+ args["maxzoom"] = self.tmaxz
+ args["tilesize"] = self.tilesize
+ args["tileformat"] = self.tileext
+ args["publishurl"] = self.options.url
+ args["copyright"] = self.options.copyright
- s = r"""
+ s = (
+ r"""
%(title)s
@@ -2261,7 +2636,9 @@ class GDAL2Tiles(object):
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
- """ % args # noqa
+ """
+ % args
+ ) # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
@@ -2278,7 +2655,8 @@ class GDAL2Tiles(object):
}
"""
if self.kml:
- s += """
+ s += (
+ """
function getEarthInstanceCB(object) {
var ge = object;
@@ -2298,8 +2676,11 @@ class GDAL2Tiles(object):
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
- """ % args # noqa
- s += """
+ """
+ % args
+ ) # noqa
+ s += (
+ """
onresize=function(){ resize(); };
//]]>
@@ -2313,7 +2694,9 @@ class GDAL2Tiles(object):
- """ % args # noqa
+ """
+ % args
+ ) # noqa
return s
@@ -2325,20 +2708,21 @@ class GDAL2Tiles(object):
"""
args = {}
- args['title'] = self.options.title.replace('"', '\\"')
- args['htmltitle'] = self.options.title
- args['south'], args['west'], args['north'], args['east'] = self.swne
- args['centerlon'] = (args['north'] + args['south']) / 2.
- args['centerlat'] = (args['west'] + args['east']) / 2.
- args['minzoom'] = self.tminz
- args['maxzoom'] = self.tmaxz
- args['beginzoom'] = self.tmaxz
- args['tilesize'] = self.tilesize # not used
- args['tileformat'] = self.tileext
- args['publishurl'] = self.options.url # not used
- args['copyright'] = self.options.copyright.replace('"', '\\"')
+ args["title"] = self.options.title.replace('"', '\\"')
+ args["htmltitle"] = self.options.title
+ args["south"], args["west"], args["north"], args["east"] = self.swne
+ args["centerlon"] = (args["north"] + args["south"]) / 2.0
+ args["centerlat"] = (args["west"] + args["east"]) / 2.0
+ args["minzoom"] = self.tminz
+ args["maxzoom"] = self.tmaxz
+ args["beginzoom"] = self.tmaxz
+ args["tilesize"] = self.tilesize # not used
+ args["tileformat"] = self.tileext
+ args["publishurl"] = self.options.url # not used
+ args["copyright"] = self.options.copyright.replace('"', '\\"')
- s = """
+ s = (
+ """
@@ -2444,7 +2828,9 @@ class GDAL2Tiles(object):
- """ % args # noqa
+ """
+ % args
+ ) # noqa
return s
@@ -2457,24 +2843,25 @@ class GDAL2Tiles(object):
"""
args = {}
- args['title'] = self.options.title
- args['bingkey'] = self.options.bingkey
- args['south'], args['west'], args['north'], args['east'] = self.swne
- args['minzoom'] = self.tminz
- args['maxzoom'] = self.tmaxz
- args['tilesize'] = self.tilesize
- args['tileformat'] = self.tileext
- args['publishurl'] = self.options.url
- args['copyright'] = self.options.copyright
+ args["title"] = self.options.title
+ args["bingkey"] = self.options.bingkey
+ args["south"], args["west"], args["north"], args["east"] = self.swne
+ args["minzoom"] = self.tminz
+ args["maxzoom"] = self.tmaxz
+ args["tilesize"] = self.tilesize
+ args["tileformat"] = self.tileext
+ args["publishurl"] = self.options.url
+ args["copyright"] = self.options.copyright
if self.options.tmscompatible:
- args['tmsoffset'] = "-1"
+ args["tmsoffset"] = "-1"
else:
- args['tmsoffset'] = ""
- if self.options.profile == 'raster':
- args['rasterzoomlevels'] = self.tmaxz+1
- args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]
+ args["tmsoffset"] = ""
+ if self.options.profile == "raster":
+ args["rasterzoomlevels"] = self.tmaxz + 1
+ args["rastermaxresolution"] = 2 ** (self.nativezoom) * self.out_gt[1]
- s = r"""
+ s = (
+ r"""
%(title)s
@@ -2488,14 +2875,20 @@ class GDAL2Tiles(object):
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
- """ % args # noqa
+ """
+ % args
+ ) # noqa
- if self.options.profile == 'mercator':
- s += """
+ if self.options.profile == "mercator":
+ s += (
+ """
- """ % args
+ """
+ % args
+ )
- s += """
+ s += (
+ """
-