fix: fixed mypy and black linter errors (Closes #1849)

pull/1850/head
itsIryna 2025-04-01 20:32:22 +02:00
parent 6114e5e934
commit 62b7f8e2a8
114 changed files with 9845 additions and 5538 deletions
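For context, most of the changes in this diff are mechanical black reformatting: string literals are normalized to double quotes, long call expressions are split with one argument per line and a trailing comma, and redundant parentheses around conditions are dropped. The sketch below is illustrative only (hypothetical flag names, not taken from the repository) and shows the before/after pattern that repeats throughout the changed scripts:

import argparse

parser = argparse.ArgumentParser(description="Illustration of the black reformatting pattern")

# Style used before this commit: single quotes, arguments packed onto
# hanging-indent continuation lines.
parser.add_argument('--old-style-flag',
                    type=str,
                    default='',
                    help='Hypothetical option, shown only to illustrate the old layout')

# Style produced by black: double quotes, one argument per line,
# trailing comma, lines kept under the default 88-character limit.
parser.add_argument(
    "--new-style-flag",
    type=str,
    default="",
    help="Hypothetical option, shown only to illustrate the reformatted layout",
)

args = parser.parse_args([])

The black portion of the commit changes only formatting; the runtime behavior of the scripts is unchanged.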

View file

@ -1,9 +1,13 @@
import sys, platform
if sys.platform != 'win32':
if sys.platform != "win32":
print("This script is for Windows only! Use configure.sh instead.")
exit(1)
if sys.version_info.major != 3 or sys.version_info.minor != 8:
print("You need to use Python 3.8.x (due to the requirements.txt). You are using %s instead." % platform.python_version())
print(
"You need to use Python 3.8.x (due to the requirements.txt). You are using %s instead."
% platform.python_version()
)
exit(1)
import argparse
@ -11,32 +15,41 @@ import subprocess
import os
import stat
import urllib.request
import shutil
import shutil
import zipfile
from venv import EnvBuilder
parser = argparse.ArgumentParser(description='ODM Windows Configure Script')
parser.add_argument('action',
type=str,
choices=["build", "clean", "dist", "vcpkg_export"],
help='Action: %(choices)s')
parser.add_argument('--build-vcpkg',
type=bool,
help='Build VCPKG environment from scratch instead of downloading prebuilt one.')
parser.add_argument('--vcpkg-archive-url',
type=str,
default='https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/vcpkg-export-250.zip',
required=False,
help='Path to VCPKG export archive')
parser.add_argument('--code-sign-cert-path',
type=str,
default='',
required=False,
help='Path to pfx code signing certificate')
parser = argparse.ArgumentParser(description="ODM Windows Configure Script")
parser.add_argument(
"action",
type=str,
choices=["build", "clean", "dist", "vcpkg_export"],
help="Action: %(choices)s",
)
parser.add_argument(
"--build-vcpkg",
type=bool,
help="Build VCPKG environment from scratch instead of downloading prebuilt one.",
)
parser.add_argument(
"--vcpkg-archive-url",
type=str,
default="https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/vcpkg-export-250.zip",
required=False,
help="Path to VCPKG export archive",
)
parser.add_argument(
"--code-sign-cert-path",
type=str,
default="",
required=False,
help="Path to pfx code signing certificate",
)
args = parser.parse_args()
def run(cmd, cwd=os.getcwd()):
env = os.environ.copy()
print(cmd)
@ -45,6 +58,7 @@ def run(cmd, cwd=os.getcwd()):
if retcode != 0:
raise Exception("Command returned %s" % retcode)
# https://izziswift.com/shutil-rmtree-fails-on-windows-with-access-is-denied/
def rmtree(top):
for root, dirs, files in os.walk(top, topdown=False):
@ -56,11 +70,13 @@ def rmtree(top):
os.rmdir(os.path.join(root, name))
os.rmdir(top)
def vcpkg_requirements():
with open("vcpkg-requirements.txt") as f:
pckgs = list(filter(lambda l: len(l) > 0, map(str.strip, f.read().split("\n"))))
return pckgs
def build():
# Create python virtual env
if not os.path.isdir("venv"):
@ -69,7 +85,7 @@ def build():
ebuilder.create("venv")
run("venv\\Scripts\\pip install --ignore-installed -r requirements.txt")
# Download / build VCPKG environment
if not os.path.isdir("vcpkg"):
if args.build_vcpkg:
@ -81,7 +97,9 @@ def build():
else:
if not os.path.exists("vcpkg-env.zip"):
print("Downloading %s" % args.vcpkg_archive_url)
with urllib.request.urlopen(args.vcpkg_archive_url) as response, open( "vcpkg-env.zip", 'wb') as out_file:
with urllib.request.urlopen(args.vcpkg_archive_url) as response, open(
"vcpkg-env.zip", "wb"
) as out_file:
shutil.copyfileobj(response, out_file)
if not os.path.exists("vcpkg"):
print("Extracting vcpkg-env.zip --> vcpkg/")
@ -92,20 +110,27 @@ def build():
if os.path.exists(top_dir):
os.rename(top_dir, "vcpkg")
else:
print("Warning! Something looks wrong in the VCPKG archive... check the vcpkg/ directory.")
print(
"Warning! Something looks wrong in the VCPKG archive... check the vcpkg/ directory."
)
safe_remove("vcpkg-env.zip")
if not os.path.exists(os.path.join("SuperBuild", "build")) or not os.path.exists(os.path.join("SuperBuild", "install")):
if not os.path.exists(os.path.join("SuperBuild", "build")) or not os.path.exists(
os.path.join("SuperBuild", "install")
):
print("Compiling SuperBuild")
build_dir = os.path.join("SuperBuild", "build")
if not os.path.isdir(build_dir):
os.mkdir(build_dir)
toolchain_file = os.path.join(os.getcwd(), "vcpkg", "scripts", "buildsystems", "vcpkg.cmake")
run("cmake .. -DCMAKE_TOOLCHAIN_FILE=\"%s\"" % toolchain_file, cwd=build_dir)
toolchain_file = os.path.join(
os.getcwd(), "vcpkg", "scripts", "buildsystems", "vcpkg.cmake"
)
run('cmake .. -DCMAKE_TOOLCHAIN_FILE="%s"' % toolchain_file, cwd=build_dir)
run("cmake --build . --config Release", cwd=build_dir)
def vcpkg_export():
if not os.path.exists("vcpkg"):
print("vcpkg directory does not exist. Did you build the environment?")
@ -115,16 +140,19 @@ def vcpkg_export():
out = "vcpkg-export-%s" % odm_version().replace(".", "")
run("vcpkg\\vcpkg export %s --output=%s --zip" % (" ".join(pkgs), out))
def odm_version():
with open("VERSION") as f:
return f.read().split("\n")[0].strip()
def safe_remove(path):
if os.path.isdir(path):
rmtree(path)
elif os.path.isfile(path):
os.remove(path)
def clean():
safe_remove("vcpkg-download.zip")
safe_remove("vcpkg")
@ -134,6 +162,7 @@ def clean():
safe_remove(os.path.join("SuperBuild", "src"))
safe_remove(os.path.join("SuperBuild", "install"))
def dist():
if not os.path.exists("SuperBuild\\install"):
print("You need to run configure.py build before you can run dist")
@ -147,7 +176,9 @@ def dist():
if not os.path.isfile(vcredist_path):
vcredist_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/VC_redist.x64.zip"
print("Downloading %s" % vcredist_url)
with urllib.request.urlopen(vcredist_url) as response, open(vcredist_path, 'wb') as out_file:
with urllib.request.urlopen(vcredist_url) as response, open(
vcredist_path, "wb"
) as out_file:
shutil.copyfileobj(response, out_file)
print("Extracting --> vc_redist.x64.exe")
@ -160,9 +191,11 @@ def dist():
python_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/python-3.8.1-embed-amd64-less-pth.zip"
if not os.path.exists(pythonzip_path):
print("Downloading %s" % python_url)
with urllib.request.urlopen(python_url) as response, open( pythonzip_path, 'wb') as out_file:
with urllib.request.urlopen(python_url) as response, open(
pythonzip_path, "wb"
) as out_file:
shutil.copyfileobj(response, out_file)
os.mkdir("python38")
print("Extracting --> python38/")
@ -174,7 +207,9 @@ def dist():
signtool_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/signtool.exe"
if not os.path.exists(signtool_path):
print("Downloading %s" % signtool_url)
with urllib.request.urlopen(signtool_url) as response, open(signtool_path, 'wb') as out_file:
with urllib.request.urlopen(signtool_url) as response, open(
signtool_path, "wb"
) as out_file:
shutil.copyfileobj(response, out_file)
# Download innosetup
@ -183,7 +218,9 @@ def dist():
innosetup_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/innosetup-portable-win32-6.0.5-3.zip"
if not os.path.exists(innosetupzip_path):
print("Downloading %s" % innosetup_url)
with urllib.request.urlopen(innosetup_url) as response, open(innosetupzip_path, 'wb') as out_file:
with urllib.request.urlopen(innosetup_url) as response, open(
innosetupzip_path, "wb"
) as out_file:
shutil.copyfileobj(response, out_file)
os.mkdir("innosetup")
@ -193,20 +230,24 @@ def dist():
z.extractall("innosetup")
# Run
cs_flags = '/DSKIP_SIGN=1'
cs_flags = "/DSKIP_SIGN=1"
if args.code_sign_cert_path:
cs_flags = '"/Ssigntool=%s sign /f %s /fd SHA1 /t http://timestamp.sectigo.com $f"' % (signtool_path, args.code_sign_cert_path)
run("innosetup\\iscc /Qp " + cs_flags + " \"innosetup.iss\"")
cs_flags = (
'"/Ssigntool=%s sign /f %s /fd SHA1 /t http://timestamp.sectigo.com $f"'
% (signtool_path, args.code_sign_cert_path)
)
run("innosetup\\iscc /Qp " + cs_flags + ' "innosetup.iss"')
print("Done! Setup created in dist/")
if args.action == 'build':
if args.action == "build":
build()
elif args.action == 'vcpkg_export':
elif args.action == "vcpkg_export":
vcpkg_export()
elif args.action == 'dist':
elif args.action == "dist":
dist()
elif args.action == 'clean':
elif args.action == "clean":
clean()
else:
args.print_help()

View file

@ -1,22 +1,21 @@
import bpy
def loadMesh(file):
bpy.utils.register_module('materials_utils')
bpy.utils.register_module("materials_utils")
bpy.ops.import_scene.obj(filepath=file,
axis_forward='Y',
axis_up='Z')
bpy.ops.import_scene.obj(filepath=file, axis_forward="Y", axis_up="Z")
bpy.ops.xps_tools.convert_to_cycles_all()
model = bpy.data.objects[-1]
minX = float('inf')
maxX = float('-inf')
minY = float('inf')
maxY = float('-inf')
minZ = float('inf')
maxZ = float('-inf')
minX = float("inf")
maxX = float("-inf")
minY = float("inf")
maxY = float("-inf")
minZ = float("inf")
maxZ = float("-inf")
for coord in model.bound_box:
x = coord[0]
y = coord[1]
@ -28,17 +27,21 @@ def loadMesh(file):
minZ = min(z, minZ)
maxZ = max(z, maxZ)
model.location[2] += (maxZ - minZ)/2
model.location[2] += (maxZ - minZ) / 2
surfaceShaderType = 'ShaderNodeEmission'
surfaceShaderName = 'Emission'
surfaceShaderType = "ShaderNodeEmission"
surfaceShaderName = "Emission"
for m in bpy.data.materials:
nt = m.node_tree
nt.nodes.remove(nt.nodes['Color Mult'])
nt.nodes.remove(nt.nodes['Diffuse BSDF'])
nt.nodes.remove(nt.nodes["Color Mult"])
nt.nodes.remove(nt.nodes["Diffuse BSDF"])
nt.nodes.new(surfaceShaderType)
nt.links.new(nt.nodes['Material Output'].inputs[0],
nt.nodes[surfaceShaderName].outputs[0])
nt.links.new(nt.nodes[surfaceShaderName].inputs[0],
nt.nodes['Diffuse Texture'].outputs[0])
nt.links.new(
nt.nodes["Material Output"].inputs[0],
nt.nodes[surfaceShaderName].outputs[0],
)
nt.links.new(
nt.nodes[surfaceShaderName].inputs[0],
nt.nodes["Diffuse Texture"].outputs[0],
)

View file

@ -16,50 +16,53 @@ from common import loadMesh
def main():
if len(sys.argv) < 5 or sys.argv[-2] != '--':
sys.exit('Please provide the ODM project path.')
if len(sys.argv) < 5 or sys.argv[-2] != "--":
sys.exit("Please provide the ODM project path.")
projectHome = sys.argv[-1]
loadMesh(projectHome +
'/odm_texturing/odm_textured_model_geo.obj')
loadMesh(projectHome + "/odm_texturing/odm_textured_model_geo.obj")
blendName = bpy.path.display_name_from_filepath(bpy.data.filepath)
fileName = projectHome + '/odm_photo/odm_' + blendName
render = bpy.data.scenes['Scene'].render
fileName = projectHome + "/odm_photo/odm_" + blendName
render = bpy.data.scenes["Scene"].render
render.filepath = fileName
bpy.ops.render.render(write_still=True)
width = render.resolution_x
height = render.resolution_y
if(render.use_multiview):
writeExif(fileName+render.views[0].file_suffix+'.jpg', width, height)
writeExif(fileName+render.views[1].file_suffix+'.jpg', width, height)
if render.use_multiview:
writeExif(fileName + render.views[0].file_suffix + ".jpg", width, height)
writeExif(fileName + render.views[1].file_suffix + ".jpg", width, height)
else:
writeExif(fileName+'.jpg', width, height)
writeExif(fileName + ".jpg", width, height)
def writeExif(fileName, width, height):
w = str(width)
h = str(height)
subprocess.run(['exiftool',
'-overwrite_original',
'-CroppedAreaImageWidthPixels=' + w,
'-CroppedAreaImageHeightPixels=' + h,
'-FullPanoWidthPixels=' + w,
'-FullPanoHeightPixels=' + h,
'-CroppedAreaLeftPixels=0',
'-CroppedAreaTopPixels=0',
'-ProjectionType=equirectangular',
'-UsePanoramaViewer=True',
'-PoseHeadingDegrees=0',
'-LargestValidInteriorRectLeft=0',
'-LargestValidInteriorRectTop=0',
'-LargestValidInteriorRectWidth=' + w,
'-LargestValidInteriorRectHeight=' + h,
fileName])
subprocess.run(
[
"exiftool",
"-overwrite_original",
"-CroppedAreaImageWidthPixels=" + w,
"-CroppedAreaImageHeightPixels=" + h,
"-FullPanoWidthPixels=" + w,
"-FullPanoHeightPixels=" + h,
"-CroppedAreaLeftPixels=0",
"-CroppedAreaTopPixels=0",
"-ProjectionType=equirectangular",
"-UsePanoramaViewer=True",
"-PoseHeadingDegrees=0",
"-LargestValidInteriorRectLeft=0",
"-LargestValidInteriorRectTop=0",
"-LargestValidInteriorRectWidth=" + w,
"-LargestValidInteriorRectHeight=" + h,
fileName,
]
)
if __name__ == '__main__':
if __name__ == "__main__":
main()

View file

@ -13,101 +13,105 @@ from common import loadMesh
def main():
if len(sys.argv) < 7 or sys.argv[-4] != '--':
sys.exit('Please provide the ODM project path, camera waypoints (xyz format), and number of frames.')
if len(sys.argv) < 7 or sys.argv[-4] != "--":
sys.exit(
"Please provide the ODM project path, camera waypoints (xyz format), and number of frames."
)
projectHome = sys.argv[-3]
waypointFile = sys.argv[-2]
numFrames = int(sys.argv[-1])
loadMesh(projectHome +
'/odm_texturing/odm_textured_model_geo.obj')
loadMesh(projectHome + "/odm_texturing/odm_textured_model_geo.obj")
waypoints = loadWaypoints(waypointFile)
numWaypoints = len(waypoints)
scene = bpy.data.scenes['Scene']
scene = bpy.data.scenes["Scene"]
# create path thru waypoints
curve = bpy.data.curves.new(name='CameraPath', type='CURVE')
curve.dimensions = '3D'
curve.twist_mode = 'Z_UP'
nurbs = curve.splines.new('NURBS')
nurbs.points.add(numWaypoints-1)
curve = bpy.data.curves.new(name="CameraPath", type="CURVE")
curve.dimensions = "3D"
curve.twist_mode = "Z_UP"
nurbs = curve.splines.new("NURBS")
nurbs.points.add(numWaypoints - 1)
weight = 1
for i in range(numWaypoints):
nurbs.points[i].co[0] = waypoints[i][0]
nurbs.points[i].co[1] = waypoints[i][1]
nurbs.points[i].co[2] = waypoints[i][2]
nurbs.points[i].co[3] = weight
nurbs.points[i].co[0] = waypoints[i][0]
nurbs.points[i].co[1] = waypoints[i][1]
nurbs.points[i].co[2] = waypoints[i][2]
nurbs.points[i].co[3] = weight
nurbs.use_endpoint_u = True
path = bpy.data.objects.new(name='CameraPath', object_data=curve)
path = bpy.data.objects.new(name="CameraPath", object_data=curve)
scene.objects.link(path)
camera = bpy.data.objects['Camera']
camera = bpy.data.objects["Camera"]
camera.location[0] = 0
camera.location[1] = 0
camera.location[2] = 0
followPath = camera.constraints.new(type='FOLLOW_PATH')
followPath.name = 'CameraFollowPath'
followPath = camera.constraints.new(type="FOLLOW_PATH")
followPath.name = "CameraFollowPath"
followPath.target = path
followPath.use_curve_follow = True
animateContext = bpy.context.copy()
animateContext['constraint'] = followPath
bpy.ops.constraint.followpath_path_animate(animateContext,
constraint='CameraFollowPath',
frame_start=0,
length=numFrames)
animateContext["constraint"] = followPath
bpy.ops.constraint.followpath_path_animate(
animateContext, constraint="CameraFollowPath", frame_start=0, length=numFrames
)
blendName = bpy.path.display_name_from_filepath(bpy.data.filepath)
fileName = projectHome + '/odm_video/odm_' + blendName.replace('photo', 'video')
fileName = projectHome + "/odm_video/odm_" + blendName.replace("photo", "video")
scene.frame_start = 0
scene.frame_end = numFrames
render = scene.render
render.filepath = fileName + '.mp4'
render.image_settings.file_format = 'FFMPEG'
if(render.use_multiview):
render.image_settings.stereo_3d_format.display_mode = 'TOPBOTTOM'
render.image_settings.views_format = 'STEREO_3D'
render.views[0].file_suffix = ''
format3d = 'top-bottom'
render.filepath = fileName + ".mp4"
render.image_settings.file_format = "FFMPEG"
if render.use_multiview:
render.image_settings.stereo_3d_format.display_mode = "TOPBOTTOM"
render.image_settings.views_format = "STEREO_3D"
render.views[0].file_suffix = ""
format3d = "top-bottom"
else:
width = render.resolution_x
height = render.resolution_y
format3d = 'none'
format3d = "none"
render.resolution_x = 4096
render.resolution_y = 2048
render.ffmpeg.audio_codec = 'AAC'
render.ffmpeg.codec = 'H264'
render.ffmpeg.format = 'MPEG4'
render.ffmpeg.audio_codec = "AAC"
render.ffmpeg.codec = "H264"
render.ffmpeg.format = "MPEG4"
render.ffmpeg.video_bitrate = 45000
bpy.ops.render.render(animation=True)
writeMetadata(fileName+'.mp4', format3d)
writeMetadata(fileName + ".mp4", format3d)
def loadWaypoints(filename):
waypoints = []
with open(filename) as f:
for line in f:
xyz = line.split()
waypoints.append((float(xyz[0]), float(xyz[1]), float(xyz[2])))
xyz = line.split()
waypoints.append((float(xyz[0]), float(xyz[1]), float(xyz[2])))
return waypoints
def writeMetadata(filename, format3d):
subprocess.run(['python',
'spatialmedia',
'-i',
'--stereo='+format3d,
filename,
filename+'.injected'])
subprocess.run(
[
"python",
"spatialmedia",
"-i",
"--stereo=" + format3d,
filename,
filename + ".injected",
]
)
# check metadata injector was successful
if os.path.exists(filename+'.injected'):
if os.path.exists(filename + ".injected"):
os.remove(filename)
os.rename(filename+'.injected', filename)
os.rename(filename + ".injected", filename)
if __name__ == '__main__':
if __name__ == "__main__":
main()

View file

@ -5,15 +5,16 @@
import os
import glob
import sys
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
import argparse
from opendm.dem import merge
parser = argparse.ArgumentParser(description='Merge and blend DEMs using OpenDroneMap\'s approach.')
parser.add_argument('input_dems',
type=str,
help='Path to input dems (.tif)')
parser = argparse.ArgumentParser(
description="Merge and blend DEMs using OpenDroneMap's approach."
)
parser.add_argument("input_dems", type=str, help="Path to input dems (.tif)")
args = parser.parse_args()
@ -21,10 +22,8 @@ if not os.path.exists(args.input_dems):
print("%s does not exist" % args.input_dems)
exit(1)
output_dem = os.path.join(args.input_dems, 'merged_blended_dem.tif')
input_dem_path = os.path.join(args.input_dems, '*.tif')
output_dem = os.path.join(args.input_dems, "merged_blended_dem.tif")
input_dem_path = os.path.join(args.input_dems, "*.tif")
input_dems = glob.glob(input_dem_path)
merge.euclidean_merge_dems(input_dems
,output_dem=output_dem
)
merge.euclidean_merge_dems(input_dems, output_dem=output_dem)

View file

@ -17,7 +17,7 @@ import argparse
# other imports
import PIL
from PIL import Image, ExifTags
from tqdm import tqdm # optional: see "swap with this for no tqdm" below
from tqdm import tqdm # optional: see "swap with this for no tqdm" below
parser = argparse.ArgumentParser()
@ -26,16 +26,50 @@ parser.add_argument("file_dir", help="input folder of images")
parser.add_argument("output_dir", help="output folder to copy images to")
# args with defaults
parser.add_argument("-b", "--bands", help="number of expected bands per capture", type=int, default=5)
parser.add_argument("-s", "--sequential", help="use sequential capture group in filenames rather than original capture ID", type=bool, default=True)
parser.add_argument("-z", "--zero_pad", help="if using sequential capture groups, zero-pad the group number to this many digits. 0 for no padding, -1 for auto padding", type=int, default=5)
parser.add_argument("-w", "--whitespace_replace", help="replace whitespace characters with this character", type=str, default="-")
parser.add_argument(
"-b", "--bands", help="number of expected bands per capture", type=int, default=5
)
parser.add_argument(
"-s",
"--sequential",
help="use sequential capture group in filenames rather than original capture ID",
type=bool,
default=True,
)
parser.add_argument(
"-z",
"--zero_pad",
help="if using sequential capture groups, zero-pad the group number to this many digits. 0 for no padding, -1 for auto padding",
type=int,
default=5,
)
parser.add_argument(
"-w",
"--whitespace_replace",
help="replace whitespace characters with this character",
type=str,
default="-",
)
# optional args no defaults
parser.add_argument("-l", "--logfile", help="write image metadata used to this CSV file", type=str)
parser.add_argument("-r", "--replace_filename", help="use this instead of using the original filename in new filenames", type=str)
parser.add_argument("-f", "--force", help="don't ask for confirmation", action="store_true")
parser.add_argument("-g", "--no_grouping", help="do not apply grouping, only validate and add band name", action="store_true")
parser.add_argument(
"-l", "--logfile", help="write image metadata used to this CSV file", type=str
)
parser.add_argument(
"-r",
"--replace_filename",
help="use this instead of using the original filename in new filenames",
type=str,
)
parser.add_argument(
"-f", "--force", help="don't ask for confirmation", action="store_true"
)
parser.add_argument(
"-g",
"--no_grouping",
help="do not apply grouping, only validate and add band name",
action="store_true",
)
args = parser.parse_args()
file_dir = args.file_dir
@ -53,9 +87,19 @@ auto_zero_pad = len(str(math.ceil(float(file_count) / float(expected_bands))))
if args.zero_pad >= 1:
if int("9" * args.zero_pad) < math.ceil(float(file_count) / float(expected_bands)):
raise ValueError("Zero pad must have more digits than maximum capture groups! Attempted to pad " + str(args.zero_pad) + " digits with "
+ str(file_count) + " files and " + str(expected_bands) + " bands (up to " + str(math.ceil(float(file_count) / float(expected_bands)))
+ " capture groups possible, try at least " + str(auto_zero_pad) + " digits to zero pad)")
raise ValueError(
"Zero pad must have more digits than maximum capture groups! Attempted to pad "
+ str(args.zero_pad)
+ " digits with "
+ str(file_count)
+ " files and "
+ str(expected_bands)
+ " bands (up to "
+ str(math.ceil(float(file_count) / float(expected_bands)))
+ " capture groups possible, try at least "
+ str(auto_zero_pad)
+ " digits to zero pad)"
)
if args.force is False:
print("Input dir: " + str(file_dir) + " (" + str(file_count) + " files)")
@ -84,7 +128,15 @@ print("Indexing images ...")
for filename in tqdm(os.listdir(file_dir)):
old_path = os.path.join(file_dir, filename)
file_name, file_ext = os.path.splitext(filename)
image_entry = {"name": filename, "valid": True, "band": "-", "ID": "-", "group": 0, "DateTime": "-", "error": "-"} # dashes to ensure CSV exports properly, can be blank
image_entry = {
"name": filename,
"valid": True,
"band": "-",
"ID": "-",
"group": 0,
"DateTime": "-",
"error": "-",
} # dashes to ensure CSV exports properly, can be blank
try:
img = Image.open(old_path)
except PIL.UnidentifiedImageError as img_err:
@ -102,9 +154,9 @@ for filename in tqdm(os.listdir(file_dir)):
# print(ExifTags.TAGS[key] + ":" + str(val)) # debugging
if ExifTags.TAGS[key] == "XMLPacket":
# find bandname
bandname_start = val.find(b'<Camera:BandName>')
bandname_end = val.find(b'</Camera:BandName>')
bandname_coded = val[(bandname_start + 17):bandname_end]
bandname_start = val.find(b"<Camera:BandName>")
bandname_end = val.find(b"</Camera:BandName>")
bandname_coded = val[(bandname_start + 17) : bandname_end]
bandname = bandname_coded.decode("UTF-8")
image_entry["band"] = str(bandname)
# find capture ID
@ -112,7 +164,9 @@ for filename in tqdm(os.listdir(file_dir)):
if ExifTags.TAGS[key] == "DateTime":
image_entry["DateTime"] = str(val)
image_entry["band"].replace(" ", "-")
if len(image_entry["band"]) >= 99: # if it's too long, wrong value (RGB pic has none)
if (
len(image_entry["band"]) >= 99
): # if it's too long, wrong value (RGB pic has none)
# no exif present
no_exif_n += 1
image_entry["valid"] = False
@ -121,7 +175,9 @@ for filename in tqdm(os.listdir(file_dir)):
no_exif_n += 1
image_entry["valid"] = False
image_entry["error"] = "No Capture ID found"
if (file_ext.lower() in [".jpg", ".jpeg"]) and (image_entry["band"] == "-"): # hack for DJI RGB jpgs
if (file_ext.lower() in [".jpg", ".jpeg"]) and (
image_entry["band"] == "-"
): # hack for DJI RGB jpgs
# handle = open(old_path, 'rb').read()
# xmp_start = handle.find(b'<x:xmpmeta')
# xmp_end = handle.find(b'</x:xmpmeta')
@ -149,7 +205,9 @@ if not args.no_grouping:
if not this_img["valid"]: # prefiltered in last loop
continue
same_id_images = [image for image in images if image["ID"] == this_img["ID"]]
if len(same_id_images) != expected_bands: # defaults to True, so only need to filter out not in
if (
len(same_id_images) != expected_bands
): # defaults to True, so only need to filter out not in
no_matching_bands_n += 1
this_img["valid"] = False
this_img["error"] = "Capture ID has too few/too many bands"
@ -158,7 +216,9 @@ if not args.no_grouping:
this_img["group"] = capture_ids[this_img["ID"]]
else:
capture_ids[this_img["ID"]] = new_capture_id
this_img["group"] = capture_ids[this_img["ID"]] # a little less efficient but we know it works this way
this_img["group"] = capture_ids[
this_img["ID"]
] # a little less efficient but we know it works this way
new_capture_id += 1
print(str(no_matching_bands_n) + " images had unexpected bands in same capture")
@ -193,7 +253,9 @@ for this_img in tqdm(images):
identifier = str(this_img["group"]).zfill(args.zero_pad)
else:
identifier = this_img["ID"]
file_name_full = identifier + "-" + file_name + "-" + this_img["band"] + file_ext
file_name_full = (
identifier + "-" + file_name + "-" + this_img["band"] + file_ext
)
else:
prefix = output_invalid
file_name_full = file_name + file_ext
@ -202,7 +264,7 @@ for this_img in tqdm(images):
if logfile:
header = images[0].keys()
with open(logfile, 'w', newline='') as logfile_handle:
with open(logfile, "w", newline="") as logfile_handle:
dict_writer = csv.DictWriter(logfile_handle, header)
dict_writer.writeheader()
dict_writer.writerows(images)

View file

@ -12,35 +12,35 @@ import grass.script as gscript
import grass.script.core
import grass.script.setup
rsurfName = 'odm_rsurf'
contourName = 'odm_contour'
orthophotoName = 'odm_orthophoto'
reliefName = 'odm_relief'
shadedReliefName = reliefName + '_shaded'
rsurfName = "odm_rsurf"
contourName = "odm_contour"
orthophotoName = "odm_orthophoto"
reliefName = "odm_relief"
shadedReliefName = reliefName + "_shaded"
overwrite = True
def main():
if len(sys.argv) < 2:
sys.exit('Please provide the ODM project path.')
sys.exit("Please provide the ODM project path.")
projectHome = sys.argv[1]
gisdb = projectHome+'/grassdata'
location = 'odm'
gisrc = gscript.setup.init(os.environ['GISBASE'], gisdb, location)
gisdb = projectHome + "/grassdata"
location = "odm"
gisrc = gscript.setup.init(os.environ["GISBASE"], gisdb, location)
# get srs and initial extents
with open(projectHome+'/odm_georeferencing/coords.txt') as f:
with open(projectHome + "/odm_georeferencing/coords.txt") as f:
srs = f.readline().split()
mean = f.readline().split()
meanX = float(mean[0])
meanY = float(mean[1])
minX = float('inf')
maxX = float('-inf')
minY = float('inf')
maxY = float('-inf')
minX = float("inf")
maxX = float("-inf")
minY = float("inf")
maxY = float("-inf")
for line in f:
xy = line.split()
x = float(xy[0])
@ -53,16 +53,29 @@ def main():
datum = srs[0]
proj = srs[1]
zone = srs[2]
gscript.core.create_location(gisdb, location, datum=datum,
proj4='+proj='+proj+' +zone='+zone,
overwrite=overwrite)
gscript.core.create_location(
gisdb,
location,
datum=datum,
proj4="+proj=" + proj + " +zone=" + zone,
overwrite=overwrite,
)
n = meanY + maxY
s = meanY + minY
e = meanX + maxX
w = meanX + minX
gscript.run_command('g.region', flags='s', n=n, s=s, e=e, w=w, res=0.01,
res3=0.01, overwrite=overwrite)
gscript.run_command(
"g.region",
flags="s",
n=n,
s=s,
e=e,
w=w,
res=0.01,
res3=0.01,
overwrite=overwrite,
)
contour(projectHome)
relief(projectHome)
@ -74,22 +87,29 @@ def contour(projectHome):
"""
Creates a contour map based on the ODM project DEM model.
"""
print 'Creating contour map'
print("Creating contour map")
step = 0.25
gscript.run_command('r.in.gdal', flags='o',
input=projectHome+'/odm_georeferencing/odm_georeferencing_model_dem.tif',
output=rsurfName, memory=2047,
overwrite=overwrite)
gscript.run_command('r.contour', input=rsurfName, output=contourName,
step=step, overwrite=overwrite)
gscript.run_command(
"r.in.gdal",
flags="o",
input=projectHome + "/odm_georeferencing/odm_georeferencing_model_dem.tif",
output=rsurfName,
memory=2047,
overwrite=overwrite,
)
gscript.run_command('v.out.ogr', input=contourName,
output=projectHome +
'/odm_georeferencing/odm_contour.shp',
overwrite=overwrite)
gscript.run_command(
"r.contour", input=rsurfName, output=contourName, step=step, overwrite=overwrite
)
gscript.run_command(
"v.out.ogr",
input=contourName,
output=projectHome + "/odm_georeferencing/odm_contour.shp",
overwrite=overwrite,
)
def relief(projectHome):
@ -97,53 +117,81 @@ def relief(projectHome):
Creates a textured relief map in GeoTIFF format.
NB: this is an RGBA raster and so is readable by image software.
"""
print 'Creating relief map'
print("Creating relief map")
gscript.run_command('r.in.gdal', flags='o',
input=projectHome+'/odm_orthophoto/odm_orthophoto.tif',
output=orthophotoName, memory=2047,
overwrite=overwrite)
gscript.run_command(
"r.in.gdal",
flags="o",
input=projectHome + "/odm_orthophoto/odm_orthophoto.tif",
output=orthophotoName,
memory=2047,
overwrite=overwrite,
)
gscript.run_command('r.composite', red=orthophotoName+'.red',
green=orthophotoName+'.green',
blue=orthophotoName+'.blue',
output=orthophotoName+'.rgb',
overwrite=overwrite)
gscript.run_command(
"r.composite",
red=orthophotoName + ".red",
green=orthophotoName + ".green",
blue=orthophotoName + ".blue",
output=orthophotoName + ".rgb",
overwrite=overwrite,
)
gscript.run_command('r.relief', input=rsurfName, output=reliefName,
overwrite=overwrite)
gscript.run_command(
"r.relief", input=rsurfName, output=reliefName, overwrite=overwrite
)
gscript.run_command('r.shade', shade=reliefName,
color=orthophotoName+'.rgb', output=shadedReliefName,
overwrite=overwrite)
gscript.run_command(
"r.shade",
shade=reliefName,
color=orthophotoName + ".rgb",
output=shadedReliefName,
overwrite=overwrite,
)
calc = ';'.join([
'$shadedRelief.red = ' +
'if(isnull($orthophoto.red), 0, r#$shadedRelief)',
'$shadedRelief.green = ' +
'if(isnull($orthophoto.green), 0, g#$shadedRelief)',
'$shadedRelief.blue = ' +
'if(isnull($orthophoto.blue), 0, b#$shadedRelief)',
'$shadedRelief.alpha = ' +
'if(isnull($orthophoto.alpha), 0, 255)'
])
gscript.mapcalc(calc, shadedRelief=shadedReliefName,
orthophoto=orthophotoName, overwrite=overwrite)
calc = ";".join(
[
"$shadedRelief.red = " + "if(isnull($orthophoto.red), 0, r#$shadedRelief)",
"$shadedRelief.green = "
+ "if(isnull($orthophoto.green), 0, g#$shadedRelief)",
"$shadedRelief.blue = "
+ "if(isnull($orthophoto.blue), 0, b#$shadedRelief)",
"$shadedRelief.alpha = " + "if(isnull($orthophoto.alpha), 0, 255)",
]
)
gscript.mapcalc(
calc,
shadedRelief=shadedReliefName,
orthophoto=orthophotoName,
overwrite=overwrite,
)
gscript.run_command('i.group', group=shadedReliefName+'.group',
input=shadedReliefName+'.red,' +
shadedReliefName+'.green,' +
shadedReliefName+'.blue,' +
shadedReliefName+'.alpha')
gscript.run_command(
"i.group",
group=shadedReliefName + ".group",
input=shadedReliefName
+ ".red,"
+ shadedReliefName
+ ".green,"
+ shadedReliefName
+ ".blue,"
+ shadedReliefName
+ ".alpha",
)
gscript.run_command('r.out.gdal', flags='cm',
input=shadedReliefName+'.group',
output=projectHome+'/odm_orthophoto/odm_relief.tif',
format='GTiff', type='Byte',
createopt='TILED=yes,COMPRESS=DEFLATE,PREDICTOR=2,' +
'BLOCKXSIZE=512,BLOCKYSIZE=512',
nodata=0, overwrite=overwrite)
gscript.run_command(
"r.out.gdal",
flags="cm",
input=shadedReliefName + ".group",
output=projectHome + "/odm_orthophoto/odm_relief.tif",
format="GTiff",
type="Byte",
createopt="TILED=yes,COMPRESS=DEFLATE,PREDICTOR=2,"
+ "BLOCKXSIZE=512,BLOCKYSIZE=512",
nodata=0,
overwrite=overwrite,
)
if __name__ == '__main__':
if __name__ == "__main__":
main()

View file

@ -11,6 +11,7 @@ import shutil
from tqdm import tqdm
import argparse
parser = argparse.ArgumentParser()
# Usage:
@ -18,13 +19,17 @@ parser = argparse.ArgumentParser()
parser.add_argument("file_dir", help="input folder of images")
parser.add_argument("output_dir", help="output folder to copy images to")
parser.add_argument("mask_file", help="filename or path to Mask file to be duplicated for all images")
parser.add_argument("-f", "--force", help="don't ask for confirmation", action="store_true")
parser.add_argument(
"mask_file", help="filename or path to Mask file to be duplicated for all images"
)
parser.add_argument(
"-f", "--force", help="don't ask for confirmation", action="store_true"
)
args = parser.parse_args()
file_dir = args.file_dir
mask_file_path = args.mask_file
mask_file_path = args.mask_file
output_dir = args.output_dir
file_count = len(os.listdir(file_dir))
@ -46,7 +51,7 @@ no_exif_n = 0
# for filename in os.listdir(file_dir):
for filename in tqdm(os.listdir(file_dir)):
old_path = mask_file_path
#print(mask_file_path)
# print(mask_file_path)
file_name, file_ext = os.path.splitext(filename)
try:
@ -57,6 +62,6 @@ for filename in tqdm(os.listdir(file_dir)):
sys.stderr.write(str(img_err) + "\n")
continue
new_path = os.path.join(output_dir, file_name + "_mask" + file_ext)
#print(new_path) # debugging
# print(new_path) # debugging
shutil.copy(old_path, new_path)
print("Done!")

View file

@ -1,5 +1,6 @@
import argparse
import sys
sys.path.append("../../")
import os
@ -11,20 +12,25 @@ from opendm import log
import shutil
parser = argparse.ArgumentParser(description='Quick Merge Preview')
parser.add_argument('input',
metavar='<paths>',
nargs='+',
help='Path to input images or image folder')
parser.add_argument('--size', '-s',
metavar='<percentage>',
type=str,
help='Size in percentage terms',
default='25%')
parser.add_argument('--force', '-f',
action='store_true',
default=False,
help="Force remove existing directories")
parser = argparse.ArgumentParser(description="Quick Merge Preview")
parser.add_argument(
"input", metavar="<paths>", nargs="+", help="Path to input images or image folder"
)
parser.add_argument(
"--size",
"-s",
metavar="<percentage>",
type=str,
help="Size in percentage terms",
default="25%",
)
parser.add_argument(
"--force",
"-f",
action="store_true",
default=False,
help="Force remove existing directories",
)
args = parser.parse_args()
@ -64,11 +70,11 @@ for f in input_images:
geojson = os.path.join(tmp_path, "%s.geojson" % name)
gpkg = os.path.join(tmp_path, "%s.gpkg" % name)
run("ddb geoproj \"%s\" \"%s\" -s \"%s\"" % (tmp_path, f, args.size))
run('ddb geoproj "%s" "%s" -s "%s"' % (tmp_path, f, args.size))
# Bounds (GPKG)
run("ddb info --format geojson --geometry polygon \"%s\" > \"%s\"" % (f, geojson))
run("ogr2ogr \"%s\" \"%s\"" % (gpkg, geojson))
run('ddb info --format geojson --geometry polygon "%s" > "%s"' % (f, geojson))
run('ogr2ogr "%s" "%s"' % (gpkg, geojson))
log.ODM_INFO("Computing cutlines")
@ -79,23 +85,17 @@ for f in projected_images:
name, _ = os.path.splitext(os.path.basename(f))
cutline_file = os.path.join(tmp_path, "%s_cutline.gpkg" % name)
bounds_file_path = os.path.join(tmp_path, "%s.gpkg" % name)
compute_cutline(f,
bounds_file_path,
cutline_file,
4,
scale=1)
compute_cutline(f, bounds_file_path, cutline_file, 4, scale=1)
cut_raster = os.path.join(tmp_path, "%s_cut.tif" % name)
orthophoto.compute_mask_raster(f, cutline_file,
cut_raster,
blend_distance=20, only_max_coords_feature=True)
orthophoto.compute_mask_raster(
f, cutline_file, cut_raster, blend_distance=20, only_max_coords_feature=True
)
feathered_raster = os.path.join(tmp_path, "%s_feathered.tif" % name)
orthophoto.feather_raster(f, feathered_raster,
blend_distance=20
)
orthophoto.feather_raster(f, feathered_raster, blend_distance=20)
all_orthos_and_ortho_cuts.append([feathered_raster, cut_raster])
@ -104,23 +104,26 @@ log.ODM_INFO("Merging...")
if len(all_orthos_and_ortho_cuts) > 1:
# TODO: histogram matching via rasterio
# currently parts have different color tones
output_file = os.path.join(cwd_path, 'mergepreview.tif')
output_file = os.path.join(cwd_path, "mergepreview.tif")
if os.path.isfile(output_file):
os.remove(output_file)
orthophoto.merge(all_orthos_and_ortho_cuts, output_file, {
'TILED': 'YES',
'COMPRESS': 'LZW',
'PREDICTOR': '2',
'BIGTIFF': 'IF_SAFER',
'BLOCKXSIZE': 512,
'BLOCKYSIZE': 512
})
orthophoto.merge(
all_orthos_and_ortho_cuts,
output_file,
{
"TILED": "YES",
"COMPRESS": "LZW",
"PREDICTOR": "2",
"BIGTIFF": "IF_SAFER",
"BLOCKXSIZE": 512,
"BLOCKYSIZE": 512,
},
)
log.ODM_INFO("Wrote %s" % output_file)
shutil.rmtree(tmp_path)
else:
log.ODM_ERROR("Error: no orthos found to merge")
exit(1)
exit(1)

View file

@ -9,49 +9,58 @@
import numpy
import argparse
import os.path
try:
from osgeo import gdal
from osgeo import osr
except ImportError:
raise ImportError("You need to install python-gdal : \
raise ImportError(
"You need to install python-gdal : \
run `sudo apt-get install libgdal-dev` \
# Check Gdal version with \
gdal-config --version \
#install corresponding gdal version with pip : \
pip3 install GDAL==2.4.0")
pip3 install GDAL==2.4.0"
)
def parse_args():
argument_parser = argparse.ArgumentParser('Createa from a multispectral orthophoto \
a Geotif with NDVI, NDRE and GNDVI agricultural indices')
argument_parser = argparse.ArgumentParser(
"Createa from a multispectral orthophoto \
a Geotif with NDVI, NDRE and GNDVI agricultural indices"
)
argument_parser.add_argument("orthophoto", metavar="<orthophoto.tif>",
type=argparse.FileType('r'),
help="The CIR orthophoto. Must be a GeoTiff.")
argument_parser.add_argument("-red", type=int,
help="Red band number")
argument_parser.add_argument("-green", type=int,
help="Green band number")
argument_parser.add_argument("-blue", type=int,
help="Blue band number")
argument_parser.add_argument("-re", type=int,
help="RedEdge band number")
argument_parser.add_argument("-nir", type=int,
help="NIR band number")
argument_parser.add_argument("out", metavar="<outfile.tif>",
type=argparse.FileType('w'),
help="The output file.")
argument_parser.add_argument("--overwrite", "-o",
action='store_true',
default=False,
help="Will overwrite output file if it exists. ")
argument_parser.add_argument(
"orthophoto",
metavar="<orthophoto.tif>",
type=argparse.FileType("r"),
help="The CIR orthophoto. Must be a GeoTiff.",
)
argument_parser.add_argument("-red", type=int, help="Red band number")
argument_parser.add_argument("-green", type=int, help="Green band number")
argument_parser.add_argument("-blue", type=int, help="Blue band number")
argument_parser.add_argument("-re", type=int, help="RedEdge band number")
argument_parser.add_argument("-nir", type=int, help="NIR band number")
argument_parser.add_argument(
"out",
metavar="<outfile.tif>",
type=argparse.FileType("w"),
help="The output file.",
)
argument_parser.add_argument(
"--overwrite",
"-o",
action="store_true",
default=False,
help="Will overwrite output file if it exists. ",
)
return argument_parser.parse_args()
if __name__ == "__main__":
# Suppress/hide warning when dividing by zero
numpy.seterr(divide='ignore', invalid='ignore')
numpy.seterr(divide="ignore", invalid="ignore")
rootdir = os.path.dirname(os.path.abspath(__file__))
@ -69,37 +78,48 @@ if __name__ == "__main__":
# parse out bands
print("Reading rasters")
red_matrix=orthophoto[args.red-1].astype(float)
green_matrix=orthophoto[args.green-1].astype(float)
blue_matrix=orthophoto[args.blue-1].astype(float)
re_matrix=orthophoto[args.re-1].astype(float)
nir_matrix=orthophoto[args.nir-1].astype(float)
red_matrix = orthophoto[args.red - 1].astype(float)
green_matrix = orthophoto[args.green - 1].astype(float)
blue_matrix = orthophoto[args.blue - 1].astype(float)
re_matrix = orthophoto[args.re - 1].astype(float)
nir_matrix = orthophoto[args.nir - 1].astype(float)
outfile = args.out
# NDVI
print("Computing NDVI")
#ndvi = calc_ndvi(nir_matrix, red_matrix)
ndvi = (nir_matrix.astype(float) - red_matrix.astype(float)) / (nir_matrix + red_matrix)
# ndvi = calc_ndvi(nir_matrix, red_matrix)
ndvi = (nir_matrix.astype(float) - red_matrix.astype(float)) / (
nir_matrix + red_matrix
)
# NDRE
print("Computing NDRE")
#ndre = calc_ndre(nir_matrix, re_matrix)
ndre = (nir_matrix.astype(float) - re_matrix.astype(float)) / (nir_matrix + re_matrix)
# ndre = calc_ndre(nir_matrix, re_matrix)
ndre = (nir_matrix.astype(float) - re_matrix.astype(float)) / (
nir_matrix + re_matrix
)
# GNDVI
# GNDVI
print("Computing GNDVI")
#gndvi = calc_gndvi(nir_matrix, green_matrix)
gndvi = (nir_matrix.astype(float) - green_matrix.astype(float)) / (nir_matrix + green_matrix)
# gndvi = calc_gndvi(nir_matrix, green_matrix)
gndvi = (nir_matrix.astype(float) - green_matrix.astype(float)) / (
nir_matrix + green_matrix
)
__import__("IPython").embed()
print("Saving Files")
# export raster
for name, matrix in zip(['ndvi', 'ndre', 'gndvi' ] ,[ndvi,ndre,gndvi] ):
for name, matrix in zip(["ndvi", "ndre", "gndvi"], [ndvi, ndre, gndvi]):
print(name)
out_driver = gdal.GetDriverByName('GTiff')\
.Create(name+'_'+outfile.name, int(ndvi.shape[1]), int(ndvi.shape[0]), 1, gdal.GDT_Float32)
out_driver = gdal.GetDriverByName("GTiff").Create(
name + "_" + outfile.name,
int(ndvi.shape[1]),
int(ndvi.shape[0]),
1,
gdal.GDT_Float32,
)
outband = out_driver.GetRasterBand(1)
outband.SetDescription(name.capitalize())
outband.WriteArray(matrix)
@ -108,5 +128,3 @@ if __name__ == "__main__":
out_driver.SetProjection(outcrs.ExportToWkt())
out_driver.SetGeoTransform(raster.GetGeoTransform())
outband.FlushCache()

View file

@ -4,31 +4,41 @@
import numpy
import argparse
import os.path
try:
from osgeo import gdal
from osgeo import osr
except ImportError:
raise ImportError("You need to install python-gdal. run `apt-get install python-gdal`")
raise ImportError(
"You need to install python-gdal. run `apt-get install python-gdal`"
)
exit()
def parse_args():
p = argparse.ArgumentParser("A script that calculates the NDVI of a CIR orthophoto")
p.add_argument("orthophoto", metavar="<orthophoto.tif>",
type=argparse.FileType('r'),
help="The CIR orthophoto. Must be a GeoTiff.")
p.add_argument("nir", metavar="N", type=int,
help="NIR band number")
p.add_argument("vis", metavar="N", type=int,
help="Vis band number")
p.add_argument("out", metavar="<outfile.tif>",
type=argparse.FileType('w'),
help="The output file. Also must be in GeoTiff format")
p.add_argument("--overwrite", "-o",
action='store_true',
default=False,
help="Will overwrite output file if it exists. ")
p.add_argument(
"orthophoto",
metavar="<orthophoto.tif>",
type=argparse.FileType("r"),
help="The CIR orthophoto. Must be a GeoTiff.",
)
p.add_argument("nir", metavar="N", type=int, help="NIR band number")
p.add_argument("vis", metavar="N", type=int, help="Vis band number")
p.add_argument(
"out",
metavar="<outfile.tif>",
type=argparse.FileType("w"),
help="The output file. Also must be in GeoTiff format",
)
p.add_argument(
"--overwrite",
"-o",
action="store_true",
default=False,
help="Will overwrite output file if it exists. ",
)
return p.parse_args()
@ -44,7 +54,10 @@ def calc_ndvi(nir, vis):
# for each cell, calculate ndvi (masking out where divide by 0)
ndvi = numpy.empty(nir.shape, dtype=float)
mask = numpy.not_equal((nirb + visb), 0.0)
return numpy.choose(mask, (-1.0, numpy.true_divide(numpy.subtract(nirb, visb), numpy.add(nirb, visb))))
return numpy.choose(
mask,
(-1.0, numpy.true_divide(numpy.subtract(nirb, visb), numpy.add(nirb, visb))),
)
if __name__ == "__main__":
@ -71,8 +84,9 @@ if __name__ == "__main__":
ndvi = calc_ndvi(nirb, visb)
# export raster
out_driver = gdal.GetDriverByName('GTiff')\
.Create(outfile.name, int(ndvi.shape[1]), int(ndvi.shape[0]), 1, gdal.GDT_Float32)
out_driver = gdal.GetDriverByName("GTiff").Create(
outfile.name, int(ndvi.shape[1]), int(ndvi.shape[0]), 1, gdal.GDT_Float32
)
outband = out_driver.GetRasterBand(1)
outband.WriteArray(ndvi)
outcrs = osr.SpatialReference()

View file

@ -4,57 +4,76 @@
import argparse
import sys
try:
from osgeo import gdal
except ImportError:
raise ImportError("You need to install python-gdal : \
raise ImportError(
"You need to install python-gdal : \
run `sudo apt-get install libgdal-dev` \
# Check Gdal version with \
gdal-config --version \
#install corresponding gdal version with pip : \
pip3 install GDAL==2.4.0")
pip3 install GDAL==2.4.0"
)
def parse_args():
""" Parse arguments """
"""Parse arguments"""
argument_parser = argparse.ArgumentParser(
"A script that rename inplace Sentera AGX710 Geotiff orthophoto. ")
argument_parser.add_argument("orthophoto", metavar="<orthophoto.tif>",
type=argparse.FileType('r'),
help="The input orthophoto. Must be a GeoTiff.")
"A script that rename inplace Sentera AGX710 Geotiff orthophoto. "
)
argument_parser.add_argument(
"orthophoto",
metavar="<orthophoto.tif>",
type=argparse.FileType("r"),
help="The input orthophoto. Must be a GeoTiff.",
)
return argument_parser.parse_args()
def rename_sentera_agx710_layers(name):
""" Only rename Geotif built from Sentera AGX710 images with ODM """
"""Only rename Geotif built from Sentera AGX710 images with ODM"""
if raster.RasterCount != 7:
raise ImportError(F'File {name} does not have 7 layers as a regular\
Geotif built from Sentera AGX710 images with ODM')
raise ImportError(
f"File {name} does not have 7 layers as a regular\
Geotif built from Sentera AGX710 images with ODM"
)
if 'RedGreenBlue' in raster.GetRasterBand(1).GetDescription() and \
'RedEdgeGarbageNIR' in raster.GetRasterBand(2).GetDescription():
if (
"RedGreenBlue" in raster.GetRasterBand(1).GetDescription()
and "RedEdgeGarbageNIR" in raster.GetRasterBand(2).GetDescription()
):
print("Sentera AGX710 Geotiff file has been detected.\
Layers are name are :")
print("RedGreenBlue for Band 1\nRedEdgeGarbageNIR for Band 2\
\nNone for Band 3\nNone for Band 4\nNone for Band 5\nNone for Band 6")
print(
"Sentera AGX710 Geotiff file has been detected.\
Layer names are:"
)
print(
"RedGreenBlue for Band 1\nRedEdgeGarbageNIR for Band 2\
\nNone for Band 3\nNone for Band 4\nNone for Band 5\nNone for Band 6"
)
print("\nAfter renaming bands will be :")
print("Red for Band 1\nGreen for Band 2\nBlue for Band 3\n\
RedEdge for Band 4\nGarbage for Band 5\nNIR for Band 6")
print(
"Red for Band 1\nGreen for Band 2\nBlue for Band 3\n\
RedEdge for Band 4\nGarbage for Band 5\nNIR for Band 6"
)
answer = input(
"Are you sure you want to rename the layers of the input file ? [yes/no] ")
if answer =='yes':
raster.GetRasterBand(1).SetDescription('Red')
raster.GetRasterBand(2).SetDescription('Green')
raster.GetRasterBand(3).SetDescription('Blue')
raster.GetRasterBand(4).SetDescription('RedEdge')
raster.GetRasterBand(5).SetDescription('Garbage')
raster.GetRasterBand(6).SetDescription('NIR')
"Are you sure you want to rename the layers of the input file ? [yes/no] "
)
if answer == "yes":
raster.GetRasterBand(1).SetDescription("Red")
raster.GetRasterBand(2).SetDescription("Green")
raster.GetRasterBand(3).SetDescription("Blue")
raster.GetRasterBand(4).SetDescription("RedEdge")
raster.GetRasterBand(5).SetDescription("Garbage")
raster.GetRasterBand(6).SetDescription("NIR")
# raster.GetRasterBand(7).SetDescription('Alpha')
else:
print("No renaming")
else :
print(F'No need for band renaming in {name}')
else:
print(f"No need for band renaming in {name}")
sys.exit()

View file

@ -4,6 +4,7 @@
import os
import sys
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
from math import sqrt
@ -19,62 +20,88 @@ default_dem_path = "odm_dem/dsm.tif"
default_outdir = "orthorectified"
default_image_list = "img_list.txt"
parser = argparse.ArgumentParser(description='Orthorectification Tool')
parser.add_argument('dataset',
type=str,
help='Path to ODM dataset')
parser.add_argument('--dem',
type=str,
default=default_dem_path,
help='Absolute path to DEM to use to orthorectify images. Default: %(default)s')
parser.add_argument('--no-alpha',
type=bool,
help="Don't output an alpha channel")
parser.add_argument('--interpolation',
type=str,
choices=('nearest', 'bilinear'),
default='bilinear',
help="Type of interpolation to use to sample pixel values.Default: %(default)s")
parser.add_argument('--outdir',
type=str,
default=default_outdir,
help="Output directory where to store results. Default: %(default)s")
parser.add_argument('--image-list',
type=str,
default=default_image_list,
help="Path to file that contains the list of image filenames to orthorectify. By default all images in a dataset are processed. Default: %(default)s")
parser.add_argument('--images',
type=str,
default="",
help="Comma-separated list of filenames to rectify. Use as an alternative to --image-list. Default: process all images.")
parser.add_argument('--threads',
type=int,
default=multiprocessing.cpu_count(),
help="Number of CPU processes to use. Default: %(default)s")
parser.add_argument('--skip-visibility-test',
type=bool,
help="Skip visibility testing (faster but leaves artifacts due to relief displacement)")
parser = argparse.ArgumentParser(description="Orthorectification Tool")
parser.add_argument("dataset", type=str, help="Path to ODM dataset")
parser.add_argument(
"--dem",
type=str,
default=default_dem_path,
help="Absolute path to DEM to use to orthorectify images. Default: %(default)s",
)
parser.add_argument("--no-alpha", type=bool, help="Don't output an alpha channel")
parser.add_argument(
"--interpolation",
type=str,
choices=("nearest", "bilinear"),
default="bilinear",
help="Type of interpolation to use to sample pixel values.Default: %(default)s",
)
parser.add_argument(
"--outdir",
type=str,
default=default_outdir,
help="Output directory where to store results. Default: %(default)s",
)
parser.add_argument(
"--image-list",
type=str,
default=default_image_list,
help="Path to file that contains the list of image filenames to orthorectify. By default all images in a dataset are processed. Default: %(default)s",
)
parser.add_argument(
"--images",
type=str,
default="",
help="Comma-separated list of filenames to rectify. Use as an alternative to --image-list. Default: process all images.",
)
parser.add_argument(
"--threads",
type=int,
default=multiprocessing.cpu_count(),
help="Number of CPU processes to use. Default: %(default)s",
)
parser.add_argument(
"--skip-visibility-test",
type=bool,
help="Skip visibility testing (faster but leaves artifacts due to relief displacement)",
)
args = parser.parse_args()
dataset_path = args.dataset
dem_path = os.path.join(dataset_path, default_dem_path) if args.dem == default_dem_path else args.dem
dem_path = (
os.path.join(dataset_path, default_dem_path)
if args.dem == default_dem_path
else args.dem
)
interpolation = args.interpolation
with_alpha = not args.no_alpha
image_list = os.path.join(dataset_path, default_image_list) if args.image_list == default_image_list else args.image_list
image_list = (
os.path.join(dataset_path, default_image_list)
if args.image_list == default_image_list
else args.image_list
)
cwd_path = os.path.join(dataset_path, default_outdir) if args.outdir == default_outdir else args.outdir
cwd_path = (
os.path.join(dataset_path, default_outdir)
if args.outdir == default_outdir
else args.outdir
)
if not os.path.exists(cwd_path):
os.makedirs(cwd_path)
target_images = [] # all
target_images = [] # all
if args.images:
target_images = list(map(str.strip, args.images.split(",")))
print("Processing %s images" % len(target_images))
elif args.image_list:
with open(image_list) as f:
target_images = list(filter(lambda filename: filename != '', map(str.strip, f.read().split("\n"))))
target_images = list(
filter(
lambda filename: filename != "", map(str.strip, f.read().split("\n"))
)
)
print("Processing %s images" % len(target_images))
if not os.path.exists(dem_path):
@ -91,31 +118,32 @@ def bilinear_interpolate(im, x, y):
y0 = np.floor(y).astype(int)
y1 = y0 + 1
x0 = np.clip(x0, 0, im.shape[1]-1)
x1 = np.clip(x1, 0, im.shape[1]-1)
y0 = np.clip(y0, 0, im.shape[0]-1)
y1 = np.clip(y1, 0, im.shape[0]-1)
x0 = np.clip(x0, 0, im.shape[1] - 1)
x1 = np.clip(x1, 0, im.shape[1] - 1)
y0 = np.clip(y0, 0, im.shape[0] - 1)
y1 = np.clip(y1, 0, im.shape[0] - 1)
Ia = im[ y0, x0 ]
Ib = im[ y1, x0 ]
Ic = im[ y0, x1 ]
Id = im[ y1, x1 ]
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1-x) * (y1-y)
wb = (x1-x) * (y-y0)
wc = (x-x0) * (y1-y)
wd = (x-x0) * (y-y0)
wa = (x1 - x) * (y1 - y)
wb = (x1 - x) * (y - y0)
wc = (x - x0) * (y1 - y)
wd = (x - x0) * (y - y0)
return wa * Ia + wb * Ib + wc * Ic + wd * Id
return wa*Ia + wb*Ib + wc*Ic + wd*Id
# Read DEM
print("Reading DEM: %s" % dem_path)
with rasterio.open(dem_path) as dem_raster:
dem = dem_raster.read()[0]
dem_has_nodata = dem_raster.profile.get('nodata') is not None
dem_has_nodata = dem_raster.profile.get("nodata") is not None
if dem_has_nodata:
m = ma.array(dem, mask=dem==dem_raster.nodata)
m = ma.array(dem, mask=dem == dem_raster.nodata)
dem_min_value = m.min()
dem_max_value = m.max()
else:
@ -124,10 +152,10 @@ with rasterio.open(dem_path) as dem_raster:
print("DEM Minimum: %s" % dem_min_value)
print("DEM Maximum: %s" % dem_max_value)
h, w = dem.shape
crs = dem_raster.profile.get('crs')
crs = dem_raster.profile.get("crs")
dem_offset_x, dem_offset_y = (0, 0)
if crs:
@ -138,20 +166,23 @@ with rasterio.open(dem_path) as dem_raster:
if not os.path.exists(coords_file):
print("Whoops! Cannot find %s (we need that!)" % coords_file)
exit(1)
with open(coords_file) as f:
l = f.readline() # discard
l = f.readline() # discard
# second line is a northing/easting offset
l = f.readline().rstrip()
dem_offset_x, dem_offset_y = map(float, l.split(" "))
print("DEM offset: (%s, %s)" % (dem_offset_x, dem_offset_y))
print("DEM dimensions: %sx%s pixels" % (w, h))
# Read reconstruction
udata = dataset.UndistortedDataSet(dataset.DataSet(os.path.join(dataset_path, "opensfm")), undistorted_data_path=os.path.join(dataset_path, "opensfm", "undistorted"))
udata = dataset.UndistortedDataSet(
dataset.DataSet(os.path.join(dataset_path, "opensfm")),
undistorted_data_path=os.path.join(dataset_path, "opensfm", "undistorted"),
)
reconstructions = udata.load_undistorted_reconstruction()
if len(reconstructions) == 0:
raise Exception("No reconstructions available")
@ -168,7 +199,9 @@ with rasterio.open(dem_path) as dem_raster:
r = shot.pose.get_rotation_matrix()
Xs, Ys, Zs = shot.pose.get_origin()
cam_grid_y, cam_grid_x = dem_raster.index(Xs + dem_offset_x, Ys + dem_offset_y)
cam_grid_y, cam_grid_x = dem_raster.index(
Xs + dem_offset_x, Ys + dem_offset_y
)
a1 = r[0][0]
b1 = r[0][1]
@ -185,8 +218,10 @@ with rasterio.open(dem_path) as dem_raster:
for j in range(0, h):
for i in range(0, w):
distance_map[j][i] = sqrt((cam_grid_x - i) ** 2 + (cam_grid_y - j) ** 2)
distance_map[distance_map==0] = 1e-7
distance_map[j][i] = sqrt(
(cam_grid_x - i) ** 2 + (cam_grid_y - j) ** 2
)
distance_map[distance_map == 0] = 1e-7
print("Camera pose: (%f, %f, %f)" % (Xs, Ys, Zs))
@ -195,7 +230,7 @@ with rasterio.open(dem_path) as dem_raster:
half_img_h = (img_h - 1) / 2.0
print("Image dimensions: %sx%s pixels" % (img_w, img_h))
f = shot.camera.focal * max(img_h, img_w)
has_nodata = dem_raster.profile.get('nodata') is not None
has_nodata = dem_raster.profile.get("nodata") is not None
def process_pixels(step):
imgout = np.full((num_bands, dem_bbox_h, dem_bbox_w), np.nan)
@ -226,9 +261,9 @@ with rasterio.open(dem_path) as dem_raster:
Ya -= dem_offset_y
# Colinearity function http://web.pdx.edu/~jduh/courses/geog493f14/Week03.pdf
dx = (Xa - Xs)
dy = (Ya - Ys)
dz = (Za - Zs)
dx = Xa - Xs
dy = Ya - Ys
dz = Za - Zs
den = a3 * dx + b3 * dy + c3 * dz
x = half_img_w - (f * (a1 * dx + b1 * dy + c1 * dz) / den)
@ -237,12 +272,29 @@ with rasterio.open(dem_path) as dem_raster:
if x >= 0 and y >= 0 and x <= img_w - 1 and y <= img_h - 1:
# Visibility test
if not args.skip_visibility_test:
check_dem_points = np.column_stack(line(i, j, cam_grid_x, cam_grid_y))
check_dem_points = check_dem_points[np.all(np.logical_and(np.array([0, 0]) <= check_dem_points, check_dem_points < [w, h]), axis=1)]
check_dem_points = np.column_stack(
line(i, j, cam_grid_x, cam_grid_y)
)
check_dem_points = check_dem_points[
np.all(
np.logical_and(
np.array([0, 0]) <= check_dem_points,
check_dem_points < [w, h],
),
axis=1,
)
]
visible = True
for p in check_dem_points:
ray_z = Zs + (distance_map[p[1]][p[0]] / distance_map[j][i]) * dz
ray_z = (
Zs
+ (
distance_map[p[1]][p[0]]
/ distance_map[j][i]
)
* dz
)
if ray_z > dem_max_value:
break
@ -252,7 +304,7 @@ with rasterio.open(dem_path) as dem_raster:
if not visible:
continue
if interpolation == 'bilinear':
if interpolation == "bilinear":
xi = img_w - 1 - x
yi = img_h - 1 - y
values = bilinear_interpolate(shot_image, xi, yi)
@ -294,9 +346,54 @@ with rasterio.open(dem_path) as dem_raster:
:param cpy principal point Y (image coordinates)
"""
Za = dem_min_value
m = (a3*b1*cpy - a1*b3*cpy - (a3*b2 - a2*b3)*cpx - (a2*b1 - a1*b2)*f)
Xa = dem_offset_x + (m*Xs + (b3*c1*cpy - b1*c3*cpy - (b3*c2 - b2*c3)*cpx - (b2*c1 - b1*c2)*f)*Za - (b3*c1*cpy - b1*c3*cpy - (b3*c2 - b2*c3)*cpx - (b2*c1 - b1*c2)*f)*Zs)/m
Ya = dem_offset_y + (m*Ys - (a3*c1*cpy - a1*c3*cpy - (a3*c2 - a2*c3)*cpx - (a2*c1 - a1*c2)*f)*Za + (a3*c1*cpy - a1*c3*cpy - (a3*c2 - a2*c3)*cpx - (a2*c1 - a1*c2)*f)*Zs)/m
m = (
a3 * b1 * cpy
- a1 * b3 * cpy
- (a3 * b2 - a2 * b3) * cpx
- (a2 * b1 - a1 * b2) * f
)
Xa = (
dem_offset_x
+ (
m * Xs
+ (
b3 * c1 * cpy
- b1 * c3 * cpy
- (b3 * c2 - b2 * c3) * cpx
- (b2 * c1 - b1 * c2) * f
)
* Za
- (
b3 * c1 * cpy
- b1 * c3 * cpy
- (b3 * c2 - b2 * c3) * cpx
- (b2 * c1 - b1 * c2) * f
)
* Zs
)
/ m
)
Ya = (
dem_offset_y
+ (
m * Ys
- (
a3 * c1 * cpy
- a1 * c3 * cpy
- (a3 * c2 - a2 * c3) * cpx
- (a2 * c1 - a1 * c2) * f
)
* Za
+ (
a3 * c1 * cpy
- a1 * c3 * cpy
- (a3 * c2 - a2 * c3) * cpx
- (a2 * c1 - a1 * c2) * f
)
* Zs
)
/ m
)
y, x = dem_raster.index(Xa, Ya)
return (x, y)
@ -313,11 +410,21 @@ with rasterio.open(dem_path) as dem_raster:
dem_bbox_miny = min(h - 1, max(0, dem_bbox_y.min()))
dem_bbox_maxx = min(w - 1, max(0, dem_bbox_x.max()))
dem_bbox_maxy = min(h - 1, max(0, dem_bbox_y.max()))
dem_bbox_w = 1 + dem_bbox_maxx - dem_bbox_minx
dem_bbox_h = 1 + dem_bbox_maxy - dem_bbox_miny
print("Iterating over DEM box: [(%s, %s), (%s, %s)] (%sx%s pixels)" % (dem_bbox_minx, dem_bbox_miny, dem_bbox_maxx, dem_bbox_maxy, dem_bbox_w, dem_bbox_h))
print(
"Iterating over DEM box: [(%s, %s), (%s, %s)] (%sx%s pixels)"
% (
dem_bbox_minx,
dem_bbox_miny,
dem_bbox_maxx,
dem_bbox_maxy,
dem_bbox_w,
dem_bbox_h,
)
)
if max_workers > 1:
with multiprocessing.Pool(max_workers) as p:
@ -325,7 +432,9 @@ with rasterio.open(dem_path) as dem_raster:
else:
results = [process_pixels(0)]
results = list(filter(lambda r: r[1][0] <= r[1][2] and r[1][1] <= r[1][3], results))
results = list(
filter(lambda r: r[1][0] <= r[1][2] and r[1][1] <= r[1][3], results)
)
# Merge image
imgout, _ = results[0]
@ -335,7 +444,7 @@ with rasterio.open(dem_path) as dem_raster:
resimg, _ = results[j % max_workers]
for b in range(num_bands):
imgout[b][im_j] = resimg[b][im_j]
# Merge bounds
minx = dem_bbox_w
miny = dem_bbox_h
@ -347,10 +456,10 @@ with rasterio.open(dem_path) as dem_raster:
miny = min(bounds[1], miny)
maxx = max(bounds[2], maxx)
maxy = max(bounds[3], maxy)
print("Output bounds: (%s, %s), (%s, %s) pixels" % (minx, miny, maxx, maxy))
if minx <= maxx and miny <= maxy:
imgout = imgout[:,miny:maxy+1,minx:maxx+1]
imgout = imgout[:, miny : maxy + 1, minx : maxx + 1]
if with_alpha:
alpha = np.zeros((imgout.shape[1], imgout.shape[2]), dtype=np.uint8)
@ -361,26 +470,34 @@ with rasterio.open(dem_path) as dem_raster:
# Cast
imgout = imgout.astype(shot_image.dtype)
dem_transform = dem_raster.profile['transform']
offset_x, offset_y = dem_raster.xy(dem_bbox_miny + miny, dem_bbox_minx + minx, offset='ul')
dem_transform = dem_raster.profile["transform"]
offset_x, offset_y = dem_raster.xy(
dem_bbox_miny + miny, dem_bbox_minx + minx, offset="ul"
)
profile = {
'driver': 'GTiff',
'width': imgout.shape[2],
'height': imgout.shape[1],
'count': num_bands + 1 if with_alpha else num_bands,
'dtype': imgout.dtype.name,
'transform': rasterio.transform.Affine(dem_transform[0], dem_transform[1], offset_x,
dem_transform[3], dem_transform[4], offset_y),
'nodata': None,
'crs': crs
"driver": "GTiff",
"width": imgout.shape[2],
"height": imgout.shape[1],
"count": num_bands + 1 if with_alpha else num_bands,
"dtype": imgout.dtype.name,
"transform": rasterio.transform.Affine(
dem_transform[0],
dem_transform[1],
offset_x,
dem_transform[3],
dem_transform[4],
offset_y,
),
"nodata": None,
"crs": crs,
}
outfile = os.path.join(cwd_path, shot.id)
if not outfile.endswith(".tif"):
outfile = outfile + ".tif"
with rasterio.open(outfile, 'w', BIGTIFF="IF_SAFER", **profile) as wout:
with rasterio.open(outfile, "w", BIGTIFF="IF_SAFER", **profile) as wout:
for b in range(num_bands):
wout.write(imgout[b], b + 1)
if with_alpha:

Wyświetl plik

@ -4,33 +4,42 @@
import os
import sys
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
import argparse
import multiprocessing
from opendm.dem import commands
parser = argparse.ArgumentParser(description='Generate DEMs from point clouds using ODM\'s algorithm.')
parser.add_argument('point_cloud',
type=str,
help='Path to point cloud file (.las, .laz, .ply)')
parser.add_argument('--type',
type=str,
choices=("dsm", "dtm"),
default="dsm",
help="Type of DEM. Default: %(default)s")
parser.add_argument('--resolution',
type=float,
default=0.05,
help='Resolution in m/px. Default: %(default)s')
parser.add_argument('--gapfill-steps',
default=3,
type=int,
help='Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. '
'Starting with a radius equal to the output resolution, N different DEMs are generated with '
'progressively bigger radius using the inverse distance weighted (IDW) algorithm '
'and merged together. Remaining gaps are then merged using nearest neighbor interpolation. '
'Default: %(default)s')
parser = argparse.ArgumentParser(
description="Generate DEMs from point clouds using ODM's algorithm."
)
parser.add_argument(
"point_cloud", type=str, help="Path to point cloud file (.las, .laz, .ply)"
)
parser.add_argument(
"--type",
type=str,
choices=("dsm", "dtm"),
default="dsm",
help="Type of DEM. Default: %(default)s",
)
parser.add_argument(
"--resolution",
type=float,
default=0.05,
help="Resolution in m/px. Default: %(default)s",
)
parser.add_argument(
"--gapfill-steps",
default=3,
type=int,
help="Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. "
"Starting with a radius equal to the output resolution, N different DEMs are generated with "
"progressively bigger radius using the inverse distance weighted (IDW) algorithm "
"and merged together. Remaining gaps are then merged using nearest neighbor interpolation. "
"Default: %(default)s",
)
args = parser.parse_args()
if not os.path.exists(args.point_cloud):
@ -41,15 +50,18 @@ outdir = os.path.dirname(args.point_cloud)
radius_steps = [args.resolution / 2.0]
for _ in range(args.gapfill_steps - 1):
radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary, maybe there's a better value?
radius_steps.append(
radius_steps[-1] * 2
) # 2 is arbitrary, maybe there's a better value?
commands.create_dem(args.point_cloud,
args.type,
output_type='idw' if args.type == 'dtm' else 'max',
radiuses=list(map(str, radius_steps)),
gapfill=args.gapfill_steps > 0,
outdir=outdir,
resolution=args.resolution,
decimation=1,
max_workers=multiprocessing.cpu_count()
)
commands.create_dem(
args.point_cloud,
args.type,
output_type="idw" if args.type == "dtm" else "max",
radiuses=list(map(str, radius_steps)),
gapfill=args.gapfill_steps > 0,
outdir=outdir,
resolution=args.resolution,
decimation=1,
max_workers=multiprocessing.cpu_count(),
)
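As a quick check of the gap-filling radii described in the --gapfill-steps help: with the default resolution of 0.05 m/px and 3 steps, the search radius doubles at each step.

resolution, gapfill_steps = 0.05, 3
radius_steps = [resolution / 2.0]
for _ in range(gapfill_steps - 1):
    radius_steps.append(radius_steps[-1] * 2)
print(radius_steps)   # [0.025, 0.05, 0.1]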

Wyświetl plik

@ -7,35 +7,46 @@ import piexif
import multiprocessing
from multiprocessing.pool import ThreadPool
import sys
sys.path.append("../../")
from opendm.gcp import GCPFile
parser = argparse.ArgumentParser(description='Exif Image Resize')
parser.add_argument('--input', '-i',
metavar='<path>',
required=True,
help='Path to input image/GCP or image folder')
parser.add_argument('--output', '-o',
metavar='<path>',
required=True,
help='Path to output image/GCP or image folder')
parser.add_argument('--force', '-f',
action='store_true',
default=False,
help='Overwrite results')
parser.add_argument('amount',
metavar='<pixel|percentage%>',
type=str,
help='Pixel of largest side or percentage to resize images by')
parser = argparse.ArgumentParser(description="Exif Image Resize")
parser.add_argument(
"--input",
"-i",
metavar="<path>",
required=True,
help="Path to input image/GCP or image folder",
)
parser.add_argument(
"--output",
"-o",
metavar="<path>",
required=True,
help="Path to output image/GCP or image folder",
)
parser.add_argument(
"--force", "-f", action="store_true", default=False, help="Overwrite results"
)
parser.add_argument(
"amount",
metavar="<pixel|percentage%>",
type=str,
help="Pixel of largest side or percentage to resize images by",
)
args = parser.parse_args()
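The positional amount argument accepts either an absolute pixel size for the largest side or a percentage. Two illustrative invocations; the script filename is an assumption.

# python resize.py -i images/ -o resized/ 2000    # largest side resized to 2000 px
# python resize.py -i images/ -o resized/ 25%     # every image scaled to 25%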
def die(msg):
print(msg)
exit(1)
class nonloc:
errors = 0
def resize_image(image_path, out_path, resize_to, out_path_is_file=False):
"""
:param image_path: path to the image
@ -64,24 +75,36 @@ def resize_image(image_path, out_path, resize_to, out_path_is_file=False):
im.thumbnail((resized_width, resized_height), Image.LANCZOS)
driver = ext[1:].upper()
if driver == 'JPG':
driver = 'JPEG'
if driver == "JPG":
driver = "JPEG"
if 'exif' in im.info:
exif_dict = piexif.load(im.info['exif'])
exif_dict['Exif'][piexif.ExifIFD.PixelXDimension] = resized_width
exif_dict['Exif'][piexif.ExifIFD.PixelYDimension] = resized_height
im.save(resized_image_path, driver, exif=piexif.dump(exif_dict), quality=100)
if "exif" in im.info:
exif_dict = piexif.load(im.info["exif"])
exif_dict["Exif"][piexif.ExifIFD.PixelXDimension] = resized_width
exif_dict["Exif"][piexif.ExifIFD.PixelYDimension] = resized_height
im.save(
resized_image_path, driver, exif=piexif.dump(exif_dict), quality=100
)
else:
im.save(resized_image_path, driver, quality=100)
im.close()
print("{} ({}x{}) --> {} ({}x{})".format(image_path, width, height, resized_image_path, resized_width, resized_height))
print(
"{} ({}x{}) --> {} ({}x{})".format(
image_path,
width,
height,
resized_image_path,
resized_width,
resized_height,
)
)
except (IOError, ValueError) as e:
print("Error: Cannot resize {}: {}.".format(image_path, str(e)))
nonloc.errors += 1
def resize_gcp(gcp_path, out_path, resize_to, out_path_is_file=False):
"""
:param gcp_path: path to the GCP
@ -110,6 +133,7 @@ def resize_gcp(gcp_path, out_path, resize_to, out_path_is_file=False):
print("Error: Cannot resize {}: {}.".format(gcp_path, str(e)))
nonloc.errors += 1
if not args.amount.endswith("%"):
args.amount = float(args.amount)
if args.amount <= 0:
@ -157,13 +181,15 @@ if create_dir:
pool = ThreadPool(processes=multiprocessing.cpu_count())
def resize(file):
_, ext = os.path.splitext(file)
if ext.lower() == ".txt":
return resize_gcp(file, args.output, args.amount, not create_dir)
else:
return resize_image(file, args.output, args.amount, not create_dir)
pool.map(resize, files + gcps)
print("Process completed, {} errors.".format(nonloc.errors))

Wyświetl plik

@ -3,28 +3,34 @@
import rasterio, os, sys
import numpy as np
class bcolors:
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
try:
file = sys.argv[1]
typ = sys.argv[2]
(fileRoot, fileExt) = os.path.splitext(file)
outFileName = fileRoot + "_" + typ + fileExt
if typ not in ['vari', 'tgi', 'ngrdi']:
if typ not in ["vari", "tgi", "ngrdi"]:
raise IndexError
except (TypeError, IndexError, NameError):
print bcolors.FAIL + 'Arguments messed up. Check arguments order and index name' + bcolors.ENDC
print 'Usage: ./vegind.py orto index'
print ' orto - filepath to RGB orthophoto'
print ' index - Vegetation Index'
print bcolors.OKGREEN + 'Available indexes: vari, ngrdi, tgi' + bcolors.ENDC
print(
bcolors.FAIL
+ "Arguments messed up. Check arguments order and index name"
+ bcolors.ENDC
)
print("Usage: ./vegind.py orto index")
print(" orto - filepath to RGB orthophoto")
print(" index - Vegetation Index")
print(bcolors.OKGREEN + "Available indexes: vari, ngrdi, tgi" + bcolors.ENDC)
sys.exit()
@ -38,12 +44,13 @@ def calcNgrdi(red, green):
:param green: green visible channel
:return: ngrdi index array
"""
mask = np.not_equal(np.add(red,green), 0.0)
return np.choose(mask, (-9999.0, np.true_divide(
np.subtract(green,red),
np.add(red,green))))
mask = np.not_equal(np.add(red, green), 0.0)
return np.choose(
mask, (-9999.0, np.true_divide(np.subtract(green, red), np.add(red, green)))
)
def calcVari(red,green,blue):
def calcVari(red, green, blue):
"""
Calculates Visible Atmospheric Resistant Index
Gitelson, A.A., Kaufman, Y.J., Stark, R., Rundquist, D., 2002.
@ -54,10 +61,19 @@ def calcVari(red,green,blue):
:param blue: blue visible channel
:return: vari index array, that will be saved to tiff
"""
mask = np.not_equal(np.subtract(np.add(green,red),blue), 0.0)
return np.choose(mask, (-9999.0, np.true_divide(np.subtract(green,red),np.subtract(np.add(green,red),blue))))
mask = np.not_equal(np.subtract(np.add(green, red), blue), 0.0)
return np.choose(
mask,
(
-9999.0,
np.true_divide(
np.subtract(green, red), np.subtract(np.add(green, red), blue)
),
),
)
def calcTgi(red,green,blue):
def calcTgi(red, green, blue):
"""
Calculates Triangular Greenness Index
Hunt, E. Raymond Jr.; Doraiswamy, Paul C.; McMurtrey, James E.; Daughtry, Craig S.T.; Perry, Eileen M.; and Akhmedov, Bakhyt,
@ -69,8 +85,12 @@ def calcTgi(red,green,blue):
:param blue: blue channel
:return: tgi index array, that will be saved to tiff
"""
mask = np.not_equal(green-red+blue-255.0, 0.0)
return np.choose(mask, (-9999.0, np.subtract(green, np.multiply(0.39,red), np.multiply(0.61, blue))))
mask = np.not_equal(green - red + blue - 255.0, 0.0)
return np.choose(
mask,
(-9999.0, np.subtract(green, np.multiply(0.39, red), np.multiply(0.61, blue))),
)
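A side note on calcTgi: np.subtract(a, b, c) treats its third positional argument as the out buffer, so the expression above likely computes only green - 0.39*red. A plain element-wise sketch of the index the docstring describes, assuming the intended formula is TGI = G - 0.39*R - 0.61*B.

import numpy as np

def calc_tgi_elementwise(red, green, blue):
    # Triangular Greenness Index written as plain arithmetic, with the same
    # -9999.0 nodata convention as the other indices in this script.
    mask = np.not_equal(green - red + blue - 255.0, 0.0)
    return np.where(mask, green - 0.39 * red - 0.61 * blue, -9999.0)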
try:
with rasterio.Env():
@ -80,16 +100,16 @@ try:
red = np.float32(ds.read(1))
green = np.float32(ds.read(2))
blue = np.float32(ds.read(3))
np.seterr(divide='ignore', invalid='ignore')
if typ == 'ngrdi':
indeks = calcNgrdi(red,green)
elif typ == 'vari':
np.seterr(divide="ignore", invalid="ignore")
if typ == "ngrdi":
indeks = calcNgrdi(red, green)
elif typ == "vari":
indeks = calcVari(red, green, blue)
elif typ == 'tgi':
elif typ == "tgi":
indeks = calcTgi(red, green, blue)
with rasterio.open(outFileName, 'w', BIGTIFF="IF_SAFER", **profile) as dst:
with rasterio.open(outFileName, "w", BIGTIFF="IF_SAFER", **profile) as dst:
dst.write(indeks.astype(rasterio.float32), 1)
except rasterio.errors.RasterioIOError:
print bcolors.FAIL + 'Orthophoto file not found or access denied' + bcolors.ENDC
print(bcolors.FAIL + "Orthophoto file not found or access denied" + bcolors.ENDC)
sys.exit()

Wyświetl plik

@ -7,6 +7,7 @@ import sys
import rawpy
import cv2
def read_image(img_path):
if img_path[-4:].lower() in [".dng", ".raw", ".nef"]:
try:
@ -20,24 +21,24 @@ def read_image(img_path):
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def get_model(namespace, url, version, name = "model.onnx"):
def get_model(namespace, url, version, name="model.onnx"):
version = version.replace(".", "_")
base_dir = os.path.join(os.path.dirname(__file__), "..")
if sys.platform == 'win32':
base_dir = os.path.join(os.getenv('PROGRAMDATA'),"ODM")
if sys.platform == "win32":
base_dir = os.path.join(os.getenv("PROGRAMDATA"), "ODM")
base_dir = os.path.join(os.path.abspath(base_dir), "storage", "models")
namespace_dir = os.path.join(base_dir, namespace)
versioned_dir = os.path.join(namespace_dir, version)
if not os.path.isdir(versioned_dir):
os.makedirs(versioned_dir, exist_ok=True)
# Check if we need to download it
model_file = os.path.join(versioned_dir, name)
if not os.path.isfile(model_file):
@ -62,14 +63,16 @@ def get_model(namespace, url, version, name = "model.onnx"):
if os.path.basename(downloaded_file).lower().endswith(".zip"):
log.ODM_INFO("Extracting %s ..." % downloaded_file)
with zipfile.ZipFile(downloaded_file, 'r') as z:
with zipfile.ZipFile(downloaded_file, "r") as z:
z.extractall(versioned_dir)
os.remove(downloaded_file)
if not os.path.isfile(model_file):
log.ODM_WARNING("Cannot find %s (is the URL to the AI model correct?)" % model_file)
log.ODM_WARNING(
"Cannot find %s (is the URL to the AI model correct?)" % model_file
)
return None
else:
return model_file
else:
return model_file
return model_file

Wyświetl plik

@ -13,42 +13,50 @@ from opendm import io
from opendm import system
from opendm.concurrency import get_max_memory
def get_point_cloud_crs(file):
pipeline = pdal.Pipeline(json.dumps([ file ]))
pipeline = pdal.Pipeline(json.dumps([file]))
metadata = pipeline.quickinfo
reader_metadata = [val for key, val in metadata.items() if "readers" in key]
crs = CRS.from_string(reader_metadata[0]["srs"]["horizontal"])
return str(crs)
def get_raster_crs(file):
with rasterio.open(file, 'r') as f:
with rasterio.open(file, "r") as f:
return str(f.crs)
def reproject_point_cloud(file, out_srs):
out_file = io.related_file_path(file, postfix="_reprojected_tmp")
pipeline = pdal.Pipeline(json.dumps([ file, {
"type": "filters.reprojection",
"out_srs": out_srs
}, out_file]))
pipeline = pdal.Pipeline(
json.dumps(
[file, {"type": "filters.reprojection", "out_srs": out_srs}, out_file]
)
)
pipeline.execute()
return out_file
def reproject_raster(file, out_srs):
out_file = io.related_file_path(file, postfix="_reprojected_tmp")
kwargs = {
'input': double_quote(file),
'output': double_quote(out_file),
'out_srs': out_srs,
'max_memory': get_max_memory()
"input": double_quote(file),
"output": double_quote(out_file),
"out_srs": out_srs,
"max_memory": get_max_memory(),
}
system.run('gdalwarp '
'-t_srs {out_srs} '
'{input} '
'{output} '
'--config GDAL_CACHEMAX {max_memory}% '.format(**kwargs))
system.run(
"gdalwarp "
"-t_srs {out_srs} "
"{input} "
"{output} "
"--config GDAL_CACHEMAX {max_memory}% ".format(**kwargs)
)
return out_file
def compute_alignment_matrix(input_laz, align_file, stats_dir):
if os.path.exists(stats_dir):
shutil.rmtree(stats_dir)
@ -70,7 +78,7 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
else:
log.ODM_WARNING("Unsupported alignment file: %s" % align_file)
return
to_delete = []
try:
@ -81,7 +89,9 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
align_file = repr_func(align_file, input_crs)
to_delete.append(align_file)
conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir))
conf = dataclasses.asdict(
codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir)
)
fnd_obj, aoi_obj = codem.preprocess(conf)
fnd_obj.prep()
aoi_obj.prep()
@ -102,46 +112,53 @@ def compute_alignment_matrix(input_laz, align_file, stats_dir):
)
reg = app_reg.get_registration_transformation()
# Write JSON to stats folder
with open(os.path.join(stats_dir, "registration.json"), 'w') as f:
del dsm_reg.registration_parameters['matrix']
del icp_reg.registration_parameters['matrix']
with open(os.path.join(stats_dir, "registration.json"), "w") as f:
del dsm_reg.registration_parameters["matrix"]
del icp_reg.registration_parameters["matrix"]
f.write(json.dumps({
'coarse': dsm_reg.registration_parameters,
'fine': icp_reg.registration_parameters,
}, indent=4))
f.write(
json.dumps(
{
"coarse": dsm_reg.registration_parameters,
"fine": icp_reg.registration_parameters,
},
indent=4,
)
)
matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4))
matrix = np.fromstring(reg["matrix"], dtype=float, sep=" ").reshape((4, 4))
return matrix
finally:
for f in to_delete:
if os.path.isfile(f):
os.unlink(f)
def transform_point_cloud(input_laz, a_matrix, output_laz):
pipe = [
input_laz,
{
'type': 'filters.transformation',
'matrix': " ".join(list(map(str, a_matrix.flatten()))),
"type": "filters.transformation",
"matrix": " ".join(list(map(str, a_matrix.flatten()))),
},
output_laz,
]
p = pdal.Pipeline(json.dumps(pipe))
p.execute()
def transform_obj(input_obj, a_matrix, geo_offset, output_obj):
g_off = np.array([geo_offset[0], geo_offset[1], 0, 0])
with open(input_obj, 'r') as fin:
with open(output_obj, 'w') as fout:
with open(input_obj, "r") as fin:
with open(output_obj, "w") as fout:
lines = fin.readlines()
for line in lines:
if line.startswith("v "):
v = np.fromstring(line.strip()[2:] + " 1", sep=' ', dtype=float)
v = np.fromstring(line.strip()[2:] + " 1", sep=" ", dtype=float)
vt = (a_matrix.dot((v + g_off)) - g_off)[:3]
fout.write("v " + " ".join(map(str, list(vt))) + '\n')
fout.write("v " + " ".join(map(str, list(vt))) + "\n")
else:
fout.write(line)
fout.write(line)

Wyświetl plik

@ -3,6 +3,7 @@ from shlex import _find_unsafe
import json
import os
def double_quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
@ -12,7 +13,8 @@ def double_quote(s):
# use double quotes, and prefix double quotes with a \
# the string $"b is then quoted as "$\"b"
return '"' + s.replace('"', '\\\"') + '"'
return '"' + s.replace('"', '\\"') + '"'
def args_to_dict(args):
args_dict = vars(args)
@ -23,20 +25,22 @@ def args_to_dict(args):
continue
# Don't leak token
if k == 'sm_cluster' and args_dict[k] is not None:
if k == "sm_cluster" and args_dict[k] is not None:
result[k] = True
else:
result[k] = args_dict[k]
return result
def save_opts(opts_json, args):
try:
with open(opts_json, "w", encoding='utf-8') as f:
with open(opts_json, "w", encoding="utf-8") as f:
f.write(json.dumps(args_to_dict(args)))
except Exception as e:
log.ODM_WARNING("Cannot save options to %s: %s" % (opts_json, str(e)))
def compare_args(opts_json, args, rerun_stages):
if not os.path.isfile(opts_json):
return {}
@ -55,22 +59,31 @@ def compare_args(opts_json, args, rerun_stages):
if stage is not None and cur_value != prev_value:
diff[opt] = prev_value
return diff
except:
return {}
def find_rerun_stage(opts_json, args, rerun_stages, processopts):
# Find the proper rerun stage if one is not explicitly set
if not ('rerun_is_set' in args or 'rerun_from_is_set' in args or 'rerun_all_is_set' in args):
if not (
"rerun_is_set" in args
or "rerun_from_is_set" in args
or "rerun_all_is_set" in args
):
args_diff = compare_args(opts_json, args, rerun_stages)
if args_diff:
if 'split_is_set' in args:
return processopts[processopts.index('dataset'):], args_diff
if "split_is_set" in args:
return processopts[processopts.index("dataset") :], args_diff
try:
stage_idxs = [processopts.index(rerun_stages[opt]) for opt in args_diff.keys() if rerun_stages[opt] is not None]
return processopts[min(stage_idxs):], args_diff
stage_idxs = [
processopts.index(rerun_stages[opt])
for opt in args_diff.keys()
if rerun_stages[opt] is not None
]
return processopts[min(stage_idxs) :], args_diff
except ValueError as e:
print(str(e))
return None, {}
return None, {}

Wyświetl plik

@ -1,4 +1,3 @@
import time
import numpy as np
import cv2
@ -13,18 +12,22 @@ mutex = Lock()
# Implementation based on https://github.com/danielgatis/rembg by Daniel Gatis
# Use GPU if it is available, otherwise CPU
provider = "CUDAExecutionProvider" if "CUDAExecutionProvider" in ort.get_available_providers() else "CPUExecutionProvider"
provider = (
"CUDAExecutionProvider"
if "CUDAExecutionProvider" in ort.get_available_providers()
else "CPUExecutionProvider"
)
class BgFilter():
class BgFilter:
def __init__(self, model):
self.model = model
log.ODM_INFO(' ?> Using provider %s' % provider)
log.ODM_INFO(" ?> Using provider %s" % provider)
self.load_model()
def load_model(self):
log.ODM_INFO(' -> Loading the model')
log.ODM_INFO(" -> Loading the model")
self.session = ort.InferenceSession(self.model, providers=[provider])
@ -53,7 +56,10 @@ class BgFilter():
ort_outs = self.session.run(
None,
self.normalize(
img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), (320, 320) # <-- image size
img,
(0.485, 0.456, 0.406),
(0.229, 0.224, 0.225),
(320, 320), # <-- image size
),
)
@ -75,13 +81,13 @@ class BgFilter():
def run_img(self, img_path, dest):
img = read_image(img_path)
mask = self.get_mask(img)
mask = self.get_mask(img)
img_name = os.path.basename(img_path)
fpath = os.path.join(dest, img_name)
fname, _ = os.path.splitext(fpath)
mask_name = fname + '_mask.png'
mask_name = fname + "_mask.png"
cv2.imwrite(mask_name, mask)
return mask_name

Wyświetl plik

@ -10,7 +10,10 @@ from opendm.utils import double_quote
from osgeo import ogr
from opendm.shots import get_origin
def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_offset=(0, 0)):
def compute_boundary_from_shots(
reconstruction_json, buffer=0, reconstruction_offset=(0, 0)
):
if not os.path.isfile(reconstruction_json):
raise IOError(reconstruction_json + " does not exist.")
@ -20,15 +23,18 @@ def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_of
mp = ogr.Geometry(ogr.wkbMultiPoint)
for shot_image in reconstruction['shots']:
shot = reconstruction['shots'][shot_image]
if shot.get('gps_dop', 999999) < 999999:
camera = reconstruction['cameras'][shot['camera']]
for shot_image in reconstruction["shots"]:
shot = reconstruction["shots"][shot_image]
if shot.get("gps_dop", 999999) < 999999:
camera = reconstruction["cameras"][shot["camera"]]
p = ogr.Geometry(ogr.wkbPoint)
origin = get_origin(shot)
p.AddPoint_2D(origin[0] + reconstruction_offset[0], origin[1] + reconstruction_offset[1])
p.AddPoint_2D(
origin[0] + reconstruction_offset[0],
origin[1] + reconstruction_offset[1],
)
mp.AddGeometry(p)
if mp.GetGeometryCount() < 3:
@ -39,24 +45,27 @@ def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_of
return load_boundary(boundary.ExportToJson())
def load_boundary(boundary_json, reproject_to_proj4=None):
if not isinstance(boundary_json, str):
boundary_json = json.dumps(boundary_json)
with fiona.open(io.BytesIO(boundary_json.encode('utf-8')), 'r') as src:
with fiona.open(io.BytesIO(boundary_json.encode("utf-8")), "r") as src:
if len(src) != 1:
raise IOError("Boundary must have a single polygon (found: %s)" % len(src))
geom = src[0]['geometry']
if geom['type'] != 'Polygon':
raise IOError("Boundary must have a polygon feature (found: %s)" % geom['type'])
geom = src[0]["geometry"]
rings = geom['coordinates']
if geom["type"] != "Polygon":
raise IOError(
"Boundary must have a polygon feature (found: %s)" % geom["type"]
)
rings = geom["coordinates"]
if len(rings) == 0:
raise IOError("Boundary geometry has no rings")
coords = rings[0]
if len(coords) == 0:
raise IOError("Boundary geometry has no coordinates")
@ -64,58 +73,75 @@ def load_boundary(boundary_json, reproject_to_proj4=None):
dimensions = len(coords[0])
if reproject_to_proj4 is not None:
t = transformer(CRS.from_proj4(fiona.crs.to_string(src.crs)),
CRS.from_proj4(reproject_to_proj4))
t = transformer(
CRS.from_proj4(fiona.crs.to_string(src.crs)),
CRS.from_proj4(reproject_to_proj4),
)
coords = [t.TransformPoint(*c)[:dimensions] for c in coords]
return coords
def boundary_offset(boundary, reconstruction_offset):
if boundary is None or reconstruction_offset is None:
return boundary
res = []
dims = len(boundary[0])
for c in boundary:
if dims == 2:
res.append((c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1]))
res.append(
(c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1])
)
else:
res.append((c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1], c[2]))
res.append(
(c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1], c[2])
)
return res
def as_polygon(boundary):
if boundary is None:
return None
return "POLYGON((" + ", ".join([" ".join(map(str, c)) for c in boundary]) + "))"
def as_geojson(boundary):
return '{"type":"FeatureCollection","features":[{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[%s]}}]}' % str(list(map(list, boundary)))
return (
'{"type":"FeatureCollection","features":[{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[%s]}}]}'
% str(list(map(list, boundary)))
)
def export_to_bounds_files(boundary, proj4, bounds_json_file, bounds_gpkg_file):
with open(bounds_json_file, "w") as f:
f.write(json.dumps({
"type": "FeatureCollection",
"name": "bounds",
"features": [{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [boundary]
f.write(
json.dumps(
{
"type": "FeatureCollection",
"name": "bounds",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {"type": "Polygon", "coordinates": [boundary]},
}
],
}
}]
}))
)
)
if os.path.isfile(bounds_gpkg_file):
os.remove(bounds_gpkg_file)
kwargs = {
'proj4': proj4,
'input': double_quote(bounds_json_file),
'output': double_quote(bounds_gpkg_file)
"proj4": proj4,
"input": double_quote(bounds_json_file),
"output": double_quote(bounds_gpkg_file),
}
system.run('ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs))
system.run(
'ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs)
)

Wyświetl plik

@ -1,29 +1,30 @@
import os, json
from opendm import log
def get_cameras_from_opensfm(reconstruction_file):
"""
Extract the cameras from OpenSfM's reconstruction.json
"""
if os.path.exists(reconstruction_file):
with open(reconstruction_file, 'r') as fin:
with open(reconstruction_file, "r") as fin:
reconstructions = json.loads(fin.read())
result = {}
for recon in reconstructions:
if 'cameras' in recon:
for camera_id in recon['cameras']:
if "cameras" in recon:
for camera_id in recon["cameras"]:
# Strip "v2" from OpenSfM camera IDs
new_camera_id = camera_id
if new_camera_id.startswith("v2 "):
new_camera_id = new_camera_id[3:]
result[new_camera_id] = recon['cameras'][camera_id]
result[new_camera_id] = recon["cameras"][camera_id]
# Remove "_prior" keys
keys = list(result[new_camera_id].keys())
for k in keys:
if k.endswith('_prior'):
if k.endswith("_prior"):
result[new_camera_id].pop(k)
return result
else:
@ -47,11 +48,26 @@ def get_opensfm_camera_models(cameras):
osfm_camera_id = "v2 " + camera_id
else:
osfm_camera_id = camera_id
# Add "_prior" keys
camera = cameras[camera_id]
prior_fields = ["focal","focal_x","focal_y","c_x","c_y","k1","k2","p1","p2","k3"]
valid_fields = ["id","width","height","projection_type"] + prior_fields + [f + "_prior" for f in prior_fields]
prior_fields = [
"focal",
"focal_x",
"focal_y",
"c_x",
"c_y",
"k1",
"k2",
"p1",
"p2",
"k3",
]
valid_fields = (
["id", "width", "height", "projection_type"]
+ prior_fields
+ [f + "_prior" for f in prior_fields]
)
keys = list(camera.keys())
for param in keys:

Wyświetl plik

@ -5,6 +5,7 @@ from opendm.concurrency import get_max_memory
from opendm import io
from opendm import log
def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLATE"):
"""
Guarantee that the .tif passed as an argument is a Cloud Optimized GeoTIFF (cogeo)
@ -20,43 +21,44 @@ def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLAT
log.ODM_INFO("Optimizing %s as Cloud Optimized GeoTIFF" % src_path)
tmpfile = io.related_file_path(src_path, postfix='_cogeo')
swapfile = io.related_file_path(src_path, postfix='_cogeo_swap')
tmpfile = io.related_file_path(src_path, postfix="_cogeo")
swapfile = io.related_file_path(src_path, postfix="_cogeo_swap")
kwargs = {
'threads': max_workers if max_workers else 'ALL_CPUS',
'blocksize': blocksize,
'max_memory': get_max_memory(),
'src_path': src_path,
'tmpfile': tmpfile,
'compress': compression,
'predictor': '2' if compression in ['LZW', 'DEFLATE'] else '1',
"threads": max_workers if max_workers else "ALL_CPUS",
"blocksize": blocksize,
"max_memory": get_max_memory(),
"src_path": src_path,
"tmpfile": tmpfile,
"compress": compression,
"predictor": "2" if compression in ["LZW", "DEFLATE"] else "1",
}
try:
system.run("gdal_translate "
"-of COG "
"-co NUM_THREADS={threads} "
"-co BLOCKSIZE={blocksize} "
"-co COMPRESS={compress} "
"-co PREDICTOR={predictor} "
"-co BIGTIFF=IF_SAFER "
"-co RESAMPLING=NEAREST "
"--config GDAL_CACHEMAX {max_memory}% "
"--config GDAL_NUM_THREADS {threads} "
"\"{src_path}\" \"{tmpfile}\" ".format(**kwargs))
system.run(
"gdal_translate "
"-of COG "
"-co NUM_THREADS={threads} "
"-co BLOCKSIZE={blocksize} "
"-co COMPRESS={compress} "
"-co PREDICTOR={predictor} "
"-co BIGTIFF=IF_SAFER "
"-co RESAMPLING=NEAREST "
"--config GDAL_CACHEMAX {max_memory}% "
"--config GDAL_NUM_THREADS {threads} "
'"{src_path}" "{tmpfile}" '.format(**kwargs)
)
except Exception as e:
log.ODM_WARNING("Cannot create Cloud Optimized GeoTIFF: %s" % str(e))
if os.path.isfile(tmpfile):
shutil.move(src_path, swapfile) # Move to swap location
shutil.move(src_path, swapfile) # Move to swap location
try:
shutil.move(tmpfile, src_path)
except IOError as e:
log.ODM_WARNING("Cannot move %s to %s: %s" % (tmpfile, src_path, str(e)))
shutil.move(swapfile, src_path) # Attempt to restore
shutil.move(swapfile, src_path) # Attempt to restore
if os.path.isfile(swapfile):
os.remove(swapfile)
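A minimal call sketch for convert_to_cogeo; the module path and argument values are illustrative. The function rewrites the GeoTIFF in place, restoring the original file if the conversion fails.

from opendm.cogeo import convert_to_cogeo   # module path assumed

convert_to_cogeo(
    "odm_orthophoto/odm_orthophoto.tif",    # illustrative path
    blocksize=256,
    max_workers=4,
    compression="DEFLATE",
)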

Wyświetl plik

@ -1,6 +1,7 @@
from vmem import virtual_memory
import os
import sys
try:
import Queue as queue
except:
@ -9,7 +10,8 @@ import threading
import time
from opendm import log
def get_max_memory(minimum = 5, use_at_most = 0.5):
def get_max_memory(minimum=5, use_at_most=0.5):
"""
:param minimum minimum value to return (return value will never be lower than this)
:param use_at_most use at most this fraction of the available memory. 0.5 = use at most 50% of available memory
@ -17,7 +19,8 @@ def get_max_memory(minimum = 5, use_at_most = 0.5):
"""
return max(minimum, (100 - virtual_memory().percent) * use_at_most)
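A worked example of the formula above, with an illustrative memory reading.

percent_used = 40                       # hypothetical virtual_memory().percent
minimum, use_at_most = 5, 0.5
print(max(minimum, (100 - percent_used) * use_at_most))   # 30.0 (percent)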
def get_max_memory_mb(minimum = 100, use_at_most = 0.5):
def get_max_memory_mb(minimum=100, use_at_most=0.5):
"""
:param minimum minimum value to return (return value will never be lower than this)
:param use_at_most use at most this fraction of the available memory. 0.5 = use at most 50% of available memory
@ -25,13 +28,15 @@ def get_max_memory_mb(minimum = 100, use_at_most = 0.5):
"""
return max(minimum, (virtual_memory().available / 1024 / 1024) * use_at_most)
def get_total_memory():
return virtual_memory().total
def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
"""
Our own implementation for parallel processing
which handles gracefully CTRL+C and reverts to
which handles gracefully CTRL+C and reverts to
single thread processing in case of errors
:param items list of objects
:param func function to execute on each object
@ -92,7 +97,9 @@ def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
if error is not None and single_thread_fallback:
# Try to reprocess using a single thread
# in case this was a memory error
log.ODM_WARNING("Failed to run process in parallel, retrying with a single thread...")
log.ODM_WARNING(
"Failed to run process in parallel, retrying with a single thread..."
)
use_single_thread = True
else:
use_single_thread = True
@ -100,4 +107,4 @@ def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
if use_single_thread:
# Boring, single thread processing
for q in items:
process_one(q)
process_one(q)
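A minimal usage sketch of parallel_map. The snippet suggests results are not returned by the function, so this example collects them by side effect; that is an assumption.

from opendm.concurrency import parallel_map

results = []

def square(n):
    results.append(n * n)   # gathered by side effect; list.append is GIL-safe

parallel_map(square, [1, 2, 3, 4], max_workers=2)
print(sorted(results))   # [1, 4, 9, 16]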

Plik diff jest za duży Load Diff

Wyświetl plik

@ -6,16 +6,19 @@ import multiprocessing
current_path = os.path.abspath(os.path.dirname(__file__))
root_path, _ = os.path.split(current_path)
superbuild_path = os.path.join(root_path, 'SuperBuild')
superbuild_bin_path = os.path.join(superbuild_path, 'install', 'bin')
superbuild_path = os.path.join(root_path, "SuperBuild")
superbuild_bin_path = os.path.join(superbuild_path, "install", "bin")
# add opencv,opensfm to python path
python_packages_paths = [os.path.join(superbuild_path, p) for p in [
'install/lib/python3.9/dist-packages',
'install/lib/python3.8/dist-packages',
'install/lib/python3/dist-packages',
'install/bin/opensfm',
]]
python_packages_paths = [
os.path.join(superbuild_path, p)
for p in [
"install/lib/python3.9/dist-packages",
"install/lib/python3.8/dist-packages",
"install/lib/python3/dist-packages",
"install/bin/opensfm",
]
]
for p in python_packages_paths:
sys.path.append(p)
@ -23,25 +26,37 @@ for p in python_packages_paths:
# define opensfm path
opensfm_path = os.path.join(superbuild_bin_path, "opensfm")
poisson_recon_path = os.path.join(superbuild_bin_path, 'PoissonRecon')
dem2mesh_path = os.path.join(superbuild_bin_path, 'dem2mesh')
dem2points_path = os.path.join(superbuild_bin_path, 'dem2points')
poisson_recon_path = os.path.join(superbuild_bin_path, "PoissonRecon")
dem2mesh_path = os.path.join(superbuild_bin_path, "dem2mesh")
dem2points_path = os.path.join(superbuild_bin_path, "dem2points")
# define mvstex path
mvstex_path = os.path.join(superbuild_bin_path, "texrecon")
# openmvs paths
omvs_densify_path = os.path.join(superbuild_bin_path, "OpenMVS", "DensifyPointCloud")
omvs_reconstructmesh_path = os.path.join(superbuild_bin_path, "OpenMVS", "ReconstructMesh")
omvs_reconstructmesh_path = os.path.join(
superbuild_bin_path, "OpenMVS", "ReconstructMesh"
)
fpcfilter_path = os.path.join(superbuild_bin_path, "FPCFilter")
odm_orthophoto_path = os.path.join(superbuild_bin_path, "odm_orthophoto")
settings_path = os.path.join(root_path, 'settings.yaml')
settings_path = os.path.join(root_path, "settings.yaml")
# Define supported image extensions
supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp', '.raw', '.dng', '.nef'}
supported_video_extensions = {'.mp4', '.mov', '.lrv', '.ts'}
supported_extensions = {
".jpg",
".jpeg",
".png",
".tif",
".tiff",
".bmp",
".raw",
".dng",
".nef",
}
supported_video_extensions = {".mp4", ".mov", ".lrv", ".ts"}
# Define the number of cores
num_cores = multiprocessing.cpu_count()
@ -49,4 +64,4 @@ num_cores = multiprocessing.cpu_count()
# Print python paths if invoked as a script
if __name__ == "__main__":
print("export PYTHONPATH=" + ":".join(python_packages_paths))
print("export PYTHONPATH=" + ":".join(python_packages_paths))

Wyświetl plik

@ -7,8 +7,9 @@ import json, os
from opendm.concurrency import get_max_memory
from opendm.utils import double_quote
class Cropper:
def __init__(self, storage_dir, files_prefix = "crop"):
def __init__(self, storage_dir, files_prefix="crop"):
self.storage_dir = storage_dir
self.files_prefix = files_prefix
@ -16,19 +17,25 @@ class Cropper:
"""
@return a path relative to storage_dir and prefixed with files_prefix
"""
return os.path.join(self.storage_dir, '{}.{}'.format(self.files_prefix, suffix))
return os.path.join(self.storage_dir, "{}.{}".format(self.files_prefix, suffix))
@staticmethod
def crop(gpkg_path, geotiff_path, gdal_options, keep_original=True, warp_options=[]):
def crop(
gpkg_path, geotiff_path, gdal_options, keep_original=True, warp_options=[]
):
if not os.path.exists(gpkg_path) or not os.path.exists(geotiff_path):
log.ODM_WARNING("Either {} or {} does not exist, will skip cropping.".format(gpkg_path, geotiff_path))
log.ODM_WARNING(
"Either {} or {} does not exist, will skip cropping.".format(
gpkg_path, geotiff_path
)
)
return geotiff_path
log.ODM_INFO("Cropping %s" % geotiff_path)
# Rename original file
# path/to/odm_orthophoto.tif --> path/to/odm_orthophoto.original.tif
path, filename = os.path.split(geotiff_path)
# path = path/to
# filename = odm_orthophoto.tif
@ -42,46 +49,50 @@ class Cropper:
try:
kwargs = {
'gpkg_path': double_quote(gpkg_path),
'geotiffInput': double_quote(original_geotiff),
'geotiffOutput': double_quote(geotiff_path),
'options': ' '.join(map(lambda k: '-co {}={}'.format(k, gdal_options[k]), gdal_options)),
'warpOptions': ' '.join(warp_options),
'max_memory': get_max_memory()
"gpkg_path": double_quote(gpkg_path),
"geotiffInput": double_quote(original_geotiff),
"geotiffOutput": double_quote(geotiff_path),
"options": " ".join(
map(lambda k: "-co {}={}".format(k, gdal_options[k]), gdal_options)
),
"warpOptions": " ".join(warp_options),
"max_memory": get_max_memory(),
}
run('gdalwarp -cutline {gpkg_path} '
'-crop_to_cutline '
'{options} '
'{warpOptions} '
'{geotiffInput} '
'{geotiffOutput} '
'--config GDAL_CACHEMAX {max_memory}%'.format(**kwargs))
run(
"gdalwarp -cutline {gpkg_path} "
"-crop_to_cutline "
"{options} "
"{warpOptions} "
"{geotiffInput} "
"{geotiffOutput} "
"--config GDAL_CACHEMAX {max_memory}%".format(**kwargs)
)
if not keep_original:
os.remove(original_geotiff)
except Exception as e:
log.ODM_WARNING('Something went wrong while cropping: {}'.format(e))
log.ODM_WARNING("Something went wrong while cropping: {}".format(e))
# Revert rename
os.replace(original_geotiff, geotiff_path)
return geotiff_path
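With hypothetical paths and gdal_options, the formatted command above expands to roughly the string below; all values are illustrative, not taken from a real run.

example_cmd = (
    'gdalwarp -cutline "crop.gpkg" '
    "-crop_to_cutline "
    "-co TILED=YES -co COMPRESS=DEFLATE "
    '"odm_orthophoto.original.tif" "odm_orthophoto.tif" '
    "--config GDAL_CACHEMAX 30%"
)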
@staticmethod
def merge_bounds(input_bound_files, output_bounds, buffer_distance = 0):
def merge_bounds(input_bound_files, output_bounds, buffer_distance=0):
"""
Merge multiple bound files into a single bound computed from the convex hull
of all bounds (minus a buffer distance in meters)
"""
geomcol = ogr.Geometry(ogr.wkbGeometryCollection)
driver = ogr.GetDriverByName('GPKG')
driver = ogr.GetDriverByName("GPKG")
srs = None
for input_bound_file in input_bound_files:
ds = driver.Open(input_bound_file, 0) # ready-only
ds = driver.Open(input_bound_file, 0)  # read-only
layer = ds.GetLayer()
srs = layer.GetSpatialRef()
@ -89,7 +100,7 @@ class Cropper:
# Collect all Geometry
for feature in layer:
geomcol.AddGeometry(feature.GetGeometryRef())
ds = None
# Calculate convex hull
@ -121,7 +132,9 @@ class Cropper:
# Save and close output data source
out_ds = None
def create_bounds_geojson(self, pointcloud_path, buffer_distance = 0, decimation_step=40):
def create_bounds_geojson(
self, pointcloud_path, buffer_distance=0, decimation_step=40
):
"""
Compute a buffered polygon around the data extents (not just a bounding box)
of the given point cloud.
@ -129,51 +142,71 @@ class Cropper:
@return filename to GeoJSON containing the polygon
"""
if not os.path.exists(pointcloud_path):
log.ODM_WARNING('Point cloud does not exist, cannot generate bounds {}'.format(pointcloud_path))
return ''
log.ODM_WARNING(
"Point cloud does not exist, cannot generate bounds {}".format(
pointcloud_path
)
)
return ""
# Do decimation prior to extracting boundary information
decimated_pointcloud_path = self.path('decimated.las')
decimated_pointcloud_path = self.path("decimated.las")
run("pdal translate -i \"{}\" "
"-o \"{}\" "
run(
'pdal translate -i "{}" '
'-o "{}" '
"decimation "
"--filters.decimation.step={} ".format(pointcloud_path, decimated_pointcloud_path, decimation_step))
"--filters.decimation.step={} ".format(
pointcloud_path, decimated_pointcloud_path, decimation_step
)
)
if not os.path.exists(decimated_pointcloud_path):
log.ODM_WARNING('Could not decimate point cloud, thus cannot generate GPKG bounds {}'.format(decimated_pointcloud_path))
return ''
log.ODM_WARNING(
"Could not decimate point cloud, thus cannot generate GPKG bounds {}".format(
decimated_pointcloud_path
)
)
return ""
# Use PDAL to dump boundary information
# then read the information back
boundary_file_path = self.path('boundary.json')
boundary_file_path = self.path("boundary.json")
run(
'pdal info --boundary --filters.hexbin.edge_size=1 --filters.hexbin.threshold=0 "{0}" > "{1}"'.format(
decimated_pointcloud_path, boundary_file_path
)
)
run('pdal info --boundary --filters.hexbin.edge_size=1 --filters.hexbin.threshold=0 "{0}" > "{1}"'.format(decimated_pointcloud_path, boundary_file_path))
pc_geojson_boundary_feature = None
with open(boundary_file_path, 'r') as f:
with open(boundary_file_path, "r") as f:
json_f = json.loads(f.read())
pc_geojson_boundary_feature = json_f['boundary']['boundary_json']
pc_geojson_boundary_feature = json_f["boundary"]["boundary_json"]
if pc_geojson_boundary_feature is None: raise RuntimeError("Could not determine point cloud boundaries")
if pc_geojson_boundary_feature is None:
raise RuntimeError("Could not determine point cloud boundaries")
# Write bounds to GeoJSON
tmp_bounds_geojson_path = self.path('tmp-bounds.geojson')
tmp_bounds_geojson_path = self.path("tmp-bounds.geojson")
with open(tmp_bounds_geojson_path, "w") as f:
f.write(json.dumps({
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": pc_geojson_boundary_feature
}]
}))
f.write(
json.dumps(
{
"type": "FeatureCollection",
"features": [
{"type": "Feature", "geometry": pc_geojson_boundary_feature}
],
}
)
)
# Create a convex hull around the boundary
# as to encompass the entire area (no holes)
driver = ogr.GetDriverByName('GeoJSON')
ds = driver.Open(tmp_bounds_geojson_path, 0) # ready-only
# as to encompass the entire area (no holes)
driver = ogr.GetDriverByName("GeoJSON")
ds = driver.Open(tmp_bounds_geojson_path, 0)  # read-only
layer = ds.GetLayer()
# Collect all Geometry
@ -191,7 +224,7 @@ class Cropper:
BUFFER_SMOOTH_DISTANCE = 3
if buffer_distance > 0:
# For small areas, check that buffering doesn't obliterate
# For small areas, check that buffering doesn't obliterate
# our hull
tmp = convexhull.Buffer(-(buffer_distance + BUFFER_SMOOTH_DISTANCE))
tmp = tmp.Buffer(BUFFER_SMOOTH_DISTANCE)
@ -201,7 +234,7 @@ class Cropper:
log.ODM_WARNING("Very small crop area detected, we will not smooth it.")
# Save to a new file
bounds_geojson_path = self.path('bounds.geojson')
bounds_geojson_path = self.path("bounds.geojson")
if os.path.exists(bounds_geojson_path):
os.remove(bounds_geojson_path)
@ -220,50 +253,65 @@ class Cropper:
# Remove decimated point cloud
if os.path.exists(decimated_pointcloud_path):
os.remove(decimated_pointcloud_path)
# Remove tmp bounds
if os.path.exists(tmp_bounds_geojson_path):
os.remove(tmp_bounds_geojson_path)
return bounds_geojson_path
def create_bounds_gpkg(self, pointcloud_path, buffer_distance = 0, decimation_step=40):
def create_bounds_gpkg(
self, pointcloud_path, buffer_distance=0, decimation_step=40
):
"""
Compute a buffered polygon around the data extents (not just a bounding box)
of the given point cloud.
@return filename to Geopackage containing the polygon
"""
if not os.path.exists(pointcloud_path):
log.ODM_WARNING('Point cloud does not exist, cannot generate GPKG bounds {}'.format(pointcloud_path))
return ''
log.ODM_WARNING(
"Point cloud does not exist, cannot generate GPKG bounds {}".format(
pointcloud_path
)
)
return ""
bounds_geojson_path = self.create_bounds_geojson(pointcloud_path, buffer_distance, decimation_step)
bounds_geojson_path = self.create_bounds_geojson(
pointcloud_path, buffer_distance, decimation_step
)
summary_file_path = os.path.join(self.storage_dir, '{}.summary.json'.format(self.files_prefix))
summary_file_path = os.path.join(
self.storage_dir, "{}.summary.json".format(self.files_prefix)
)
export_summary_json(pointcloud_path, summary_file_path)
pc_proj4 = None
with open(summary_file_path, 'r') as f:
with open(summary_file_path, "r") as f:
json_f = json.loads(f.read())
pc_proj4 = json_f['summary']['srs']['proj4']
pc_proj4 = json_f["summary"]["srs"]["proj4"]
if pc_proj4 is None: raise RuntimeError("Could not determine point cloud proj4 declaration")
if pc_proj4 is None:
raise RuntimeError("Could not determine point cloud proj4 declaration")
bounds_gpkg_path = os.path.join(self.storage_dir, '{}.bounds.gpkg'.format(self.files_prefix))
bounds_gpkg_path = os.path.join(
self.storage_dir, "{}.bounds.gpkg".format(self.files_prefix)
)
if os.path.isfile(bounds_gpkg_path):
os.remove(bounds_gpkg_path)
# Convert bounds to GPKG
kwargs = {
'input': double_quote(bounds_geojson_path),
'output': double_quote(bounds_gpkg_path),
'proj4': pc_proj4
"input": double_quote(bounds_geojson_path),
"output": double_quote(bounds_gpkg_path),
"proj4": pc_proj4,
}
run('ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs))
run(
'ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(
**kwargs
)
)
return bounds_gpkg_path

Wyświetl plik

@ -7,7 +7,7 @@ import math
import sys
from opendm import log
from opendm import io
from opendm import concurrency
from opendm import concurrency
from opendm import get_image_size
from opendm import system
@ -18,68 +18,79 @@ import shapely
from shapely.geometry import LineString, mapping, shape
from shapely.ops import polygonize, unary_union
if sys.platform == 'win32':
# Temporary fix for: ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer
if sys.platform == "win32":
# Temporary fix for: ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer
# https://github.com/Toblerity/Shapely/issues/1005
shapely.speedups.disable()
def write_raster(data, file):
profile = {
'driver': 'GTiff',
'width': data.shape[1],
'height': data.shape[0],
'count': 1,
'dtype': 'float32',
'transform': None,
'nodata': None,
'crs': None
"driver": "GTiff",
"width": data.shape[1],
"height": data.shape[0],
"count": 1,
"dtype": "float32",
"transform": None,
"nodata": None,
"crs": None,
}
with rasterio.open(file, 'w', BIGTIFF="IF_SAFER", **profile) as wout:
with rasterio.open(file, "w", BIGTIFF="IF_SAFER", **profile) as wout:
wout.write(data, 1)
def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrency=1, scale=1):
def compute_cutline(
orthophoto_file, crop_area_file, destination, max_concurrency=1, scale=1
):
if io.file_exists(orthophoto_file) and io.file_exists(crop_area_file):
log.ODM_INFO("Computing cutline")
scale = max(0.0001, min(1, scale))
scaled_orthophoto = None
if scale < 1:
log.ODM_INFO("Scaling orthophoto to %s%% to compute cutline" % (scale * 100))
log.ODM_INFO(
"Scaling orthophoto to %s%% to compute cutline" % (scale * 100)
)
scaled_orthophoto = io.related_file_path(orthophoto_file, postfix=".scaled")
# Scale orthophoto before computing cutline
system.run("gdal_translate -outsize {}% 0 "
system.run(
"gdal_translate -outsize {}% 0 "
"-co NUM_THREADS={} "
"--config GDAL_CACHEMAX {}% "
'"{}" "{}"'.format(
scale * 100,
max_concurrency,
concurrency.get_max_memory(),
orthophoto_file,
scaled_orthophoto
))
scale * 100,
max_concurrency,
concurrency.get_max_memory(),
orthophoto_file,
scaled_orthophoto,
)
)
orthophoto_file = scaled_orthophoto
# open raster
f = rasterio.open(orthophoto_file)
rast = f.read(1) # First band only
f = rasterio.open(orthophoto_file)
rast = f.read(1) # First band only
height, width = rast.shape
number_lines = int(max(8, math.ceil(min(width, height) / 256.0)))
line_hor_offset = int(width / number_lines)
line_ver_offset = int(height / number_lines)
if line_hor_offset <= 2 or line_ver_offset <= 2:
log.ODM_WARNING("Cannot compute cutline, orthophoto is too small (%sx%spx)" % (width, height))
log.ODM_WARNING(
"Cannot compute cutline, orthophoto is too small (%sx%spx)"
% (width, height)
)
return
crop_f = fiona.open(crop_area_file, 'r')
crop_f = fiona.open(crop_area_file, "r")
if len(crop_f) == 0:
log.ODM_WARNING("Crop area is empty, cannot compute cutline")
return
crop_poly = shape(crop_f[1]['geometry'])
crop_poly = shape(crop_f[1]["geometry"])
crop_f.close()
linestrings = []
@ -93,35 +104,51 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
cost_map = np.full((height, width), 1, dtype=np.float32)
# Write edges to cost map
cost_map[edges==True] = 0 # Low cost
cost_map[edges == True] = 0 # Low cost
# Write "barrier, floor is lava" costs
if direction == 'vertical':
lines = [((i, 0), (i, height - 1)) for i in range(line_hor_offset, width - line_hor_offset, line_hor_offset)]
if direction == "vertical":
lines = [
((i, 0), (i, height - 1))
for i in range(
line_hor_offset, width - line_hor_offset, line_hor_offset
)
]
points = []
pad_x = int(line_hor_offset / 2.0)
for i in range(0, len(lines)):
a,b = lines[i]
points.append(((a[0] - pad_x , a[1]), (b[0] - pad_x, b[1])))
a,b = lines[-1]
points.append(((a[0] + pad_x , a[1]), (b[0] + pad_x, b[1])))
a, b = lines[i]
points.append(((a[0] - pad_x, a[1]), (b[0] - pad_x, b[1])))
a, b = lines[-1]
points.append(((a[0] + pad_x, a[1]), (b[0] + pad_x, b[1])))
else:
lines = [((0, j), (width - 1, j)) for j in range(line_ver_offset, height - line_ver_offset, line_ver_offset)]
lines = [
((0, j), (width - 1, j))
for j in range(
line_ver_offset, height - line_ver_offset, line_ver_offset
)
]
points = []
pad_y = int(line_ver_offset / 2.0)
for i in range(0, len(lines)):
a,b = lines[i]
points.append(((a[0] , a[1] - pad_y), (b[0], b[1] - pad_y)))
a,b = lines[-1]
points.append(((a[0] , a[1] + pad_y), (b[0], b[1] + pad_y)))
a, b = lines[i]
points.append(((a[0], a[1] - pad_y), (b[0], b[1] - pad_y)))
a, b = lines[-1]
points.append(((a[0], a[1] + pad_y), (b[0], b[1] + pad_y)))
for a, b in lines:
rr,cc = line(*a, *b)
cost_map[cc, rr] = 9999 # Lava
rr, cc = line(*a, *b)
cost_map[cc, rr] = 9999 # Lava
# Calculate route
for a, b in points:
line_coords, cost = route_through_array(cost_map, (a[1], a[0]), (b[1], b[0]), fully_connected=True, geometric=True)
line_coords, cost = route_through_array(
cost_map,
(a[1], a[0]),
(b[1], b[0]),
fully_connected=True,
geometric=True,
)
# Convert to geographic
geo_line_coords = [f.xy(*c) for c in line_coords]
@ -129,11 +156,10 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
# Simplify
ls = LineString(geo_line_coords)
linestrings.append(ls.simplify(0.05, preserve_topology=False))
compute_linestrings('vertical')
compute_linestrings('horizontal')
compute_linestrings("vertical")
compute_linestrings("horizontal")
# Generate polygons and keep only those inside the crop area
log.ODM_INFO("Generating polygons... this could take a bit.")
polygons = []
@ -148,7 +174,7 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
log.ODM_INFO("Merging polygons")
cutline_polygons = unary_union(polygons)
if not hasattr(cutline_polygons, '__getitem__'):
if not hasattr(cutline_polygons, "__getitem__"):
cutline_polygons = [cutline_polygons]
largest_cutline = cutline_polygons[0]
@ -157,27 +183,21 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
if p.area > max_area:
max_area = p.area
largest_cutline = p
log.ODM_INFO("Largest cutline found: %s m^2" % max_area)
meta = {
'crs': {'init': str(f.crs).lower() },
'driver': 'GPKG',
'schema': {
'properties': {},
'geometry': 'Polygon'
}
"crs": {"init": str(f.crs).lower()},
"driver": "GPKG",
"schema": {"properties": {}, "geometry": "Polygon"},
}
# Remove previous
if os.path.exists(destination):
os.remove(destination)
with fiona.open(destination, 'w', **meta) as sink:
sink.write({
'geometry': mapping(largest_cutline),
'properties': {}
})
with fiona.open(destination, "w", **meta) as sink:
sink.write({"geometry": mapping(largest_cutline), "properties": {}})
f.close()
log.ODM_INFO("Wrote %s" % destination)
@ -185,4 +205,7 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
if scaled_orthophoto is not None and os.path.exists(scaled_orthophoto):
os.remove(scaled_orthophoto)
else:
log.ODM_WARNING("We've been asked to compute cutline, but either %s or %s is missing. Skipping..." % (orthophoto_file, crop_area_file))
log.ODM_WARNING(
"We've been asked to compute cutline, but either %s or %s is missing. Skipping..."
% (orthophoto_file, crop_area_file)
)

Wyświetl plik

@ -30,6 +30,7 @@ except ModuleNotFoundError:
except:
pass
def classify(lasFile, scalar, slope, threshold, window):
start = datetime.now()
@ -38,57 +39,83 @@ def classify(lasFile, scalar, slope, threshold, window):
except:
log.ODM_WARNING("Error creating classified file %s" % lasFile)
log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
log.ODM_INFO("Created %s in %s" % (lasFile, datetime.now() - start))
return lasFile
def rectify(lasFile, reclassify_threshold=5, min_area=750, min_points=500):
start = datetime.now()
try:
log.ODM_INFO("Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(lasFile, reclassify_threshold, min_area, min_points))
log.ODM_INFO(
"Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(
lasFile, reclassify_threshold, min_area, min_points
)
)
run_rectification(
input=lasFile, output=lasFile, \
reclassify_plan='median', reclassify_threshold=reclassify_threshold, \
extend_plan='surrounding', extend_grid_distance=5, \
min_area=min_area, min_points=min_points)
input=lasFile,
output=lasFile,
reclassify_plan="median",
reclassify_threshold=reclassify_threshold,
extend_plan="surrounding",
extend_grid_distance=5,
min_area=min_area,
min_points=min_points,
)
log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
log.ODM_INFO("Created %s in %s" % (lasFile, datetime.now() - start))
except Exception as e:
log.ODM_WARNING("Error rectifying ground in file %s: %s" % (lasFile, str(e)))
return lasFile
error = None
def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True,
outdir='', resolution=0.1, max_workers=1, max_tile_size=4096,
decimation=None, with_euclidean_map=False,
apply_smoothing=True, max_tiles=None):
""" Create DEM from multiple radii, and optionally gapfill """
def create_dem(
input_point_cloud,
dem_type,
output_type="max",
radiuses=["0.56"],
gapfill=True,
outdir="",
resolution=0.1,
max_workers=1,
max_tile_size=4096,
decimation=None,
with_euclidean_map=False,
apply_smoothing=True,
max_tiles=None,
):
"""Create DEM from multiple radii, and optionally gapfill"""
start = datetime.now()
kwargs = {
'input': input_point_cloud,
'outdir': outdir,
'outputType': output_type,
'radiuses': ",".join(map(str, radiuses)),
'resolution': resolution,
'maxTiles': 0 if max_tiles is None else max_tiles,
'decimation': 1 if decimation is None else decimation,
'classification': 2 if dem_type == 'dtm' else -1,
'tileSize': max_tile_size
"input": input_point_cloud,
"outdir": outdir,
"outputType": output_type,
"radiuses": ",".join(map(str, radiuses)),
"resolution": resolution,
"maxTiles": 0 if max_tiles is None else max_tiles,
"decimation": 1 if decimation is None else decimation,
"classification": 2 if dem_type == "dtm" else -1,
"tileSize": max_tile_size,
}
system.run('renderdem "{input}" '
'--outdir "{outdir}" '
'--output-type {outputType} '
'--radiuses {radiuses} '
'--resolution {resolution} '
'--max-tiles {maxTiles} '
'--decimation {decimation} '
'--classification {classification} '
'--tile-size {tileSize} '
'--force '.format(**kwargs), env_vars={'OMP_NUM_THREADS': max_workers})
system.run(
'renderdem "{input}" '
'--outdir "{outdir}" '
"--output-type {outputType} "
"--radiuses {radiuses} "
"--resolution {resolution} "
"--max-tiles {maxTiles} "
"--decimation {decimation} "
"--classification {classification} "
"--tile-size {tileSize} "
"--force ".format(**kwargs),
env_vars={"OMP_NUM_THREADS": max_workers},
)
output_file = "%s.tif" % dem_type
output_path = os.path.abspath(os.path.join(outdir, output_file))
@ -99,7 +126,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
filename = os.path.basename(p)
m = re.match(r"^r([\d\.]+)_x\d+_y\d+\.tif", filename)
if m is not None:
tiles.append({'filename': p, 'radius': float(m.group(1))})
tiles.append({"filename": p, "radius": float(m.group(1))})
if len(tiles) == 0:
raise system.ExitException("No DEM tiles were generated, something went wrong")
@ -107,31 +134,33 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
log.ODM_INFO("Generated %s tiles" % len(tiles))
# Sort tiles by decreasing radius
tiles.sort(key=lambda t: float(t['radius']), reverse=True)
tiles.sort(key=lambda t: float(t["radius"]), reverse=True)
# Create virtual raster
tiles_vrt_path = os.path.abspath(os.path.join(outdir, "tiles.vrt"))
tiles_file_list = os.path.abspath(os.path.join(outdir, "tiles_list.txt"))
with open(tiles_file_list, 'w') as f:
with open(tiles_file_list, "w") as f:
for t in tiles:
f.write(t['filename'] + '\n')
f.write(t["filename"] + "\n")
run('gdalbuildvrt -input_file_list "%s" "%s" ' % (tiles_file_list, tiles_vrt_path))
merged_vrt_path = os.path.abspath(os.path.join(outdir, "merged.vrt"))
geotiff_small_path = os.path.abspath(os.path.join(outdir, 'tiles.small.tif'))
geotiff_small_filled_path = os.path.abspath(os.path.join(outdir, 'tiles.small_filled.tif'))
geotiff_path = os.path.abspath(os.path.join(outdir, 'tiles.tif'))
geotiff_small_path = os.path.abspath(os.path.join(outdir, "tiles.small.tif"))
geotiff_small_filled_path = os.path.abspath(
os.path.join(outdir, "tiles.small_filled.tif")
)
geotiff_path = os.path.abspath(os.path.join(outdir, "tiles.tif"))
# Build GeoTIFF
kwargs = {
'max_memory': get_max_memory(),
'threads': max_workers if max_workers else 'ALL_CPUS',
'tiles_vrt': tiles_vrt_path,
'merged_vrt': merged_vrt_path,
'geotiff': geotiff_path,
'geotiff_small': geotiff_small_path,
'geotiff_small_filled': geotiff_small_filled_path
"max_memory": get_max_memory(),
"threads": max_workers if max_workers else "ALL_CPUS",
"tiles_vrt": tiles_vrt_path,
"merged_vrt": merged_vrt_path,
"geotiff": geotiff_path,
"geotiff_small": geotiff_small_path,
"geotiff_small_filled": geotiff_small_filled_path,
}
if gapfill:
@ -139,41 +168,62 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
# behaves strangely when reading data directly from a .VRT
# so we need to convert to GeoTIFF first.
# Scale to 10% size
run('gdal_translate '
'-co NUM_THREADS={threads} '
'-co BIGTIFF=IF_SAFER '
'-co COMPRESS=DEFLATE '
'--config GDAL_CACHEMAX {max_memory}% '
'-outsize 10% 0 '
'"{tiles_vrt}" "{geotiff_small}"'.format(**kwargs))
run(
"gdal_translate "
"-co NUM_THREADS={threads} "
"-co BIGTIFF=IF_SAFER "
"-co COMPRESS=DEFLATE "
"--config GDAL_CACHEMAX {max_memory}% "
"-outsize 10% 0 "
'"{tiles_vrt}" "{geotiff_small}"'.format(**kwargs)
)
# Fill scaled
gdal_fillnodata(['.',
'-co', 'NUM_THREADS=%s' % kwargs['threads'],
'-co', 'BIGTIFF=IF_SAFER',
'-co', 'COMPRESS=DEFLATE',
'--config', 'GDAL_CACHE_MAX', str(kwargs['max_memory']) + '%',
'-b', '1',
'-of', 'GTiff',
kwargs['geotiff_small'], kwargs['geotiff_small_filled']])
gdal_fillnodata(
[
".",
"-co",
"NUM_THREADS=%s" % kwargs["threads"],
"-co",
"BIGTIFF=IF_SAFER",
"-co",
"COMPRESS=DEFLATE",
"--config",
"GDAL_CACHE_MAX",
str(kwargs["max_memory"]) + "%",
"-b",
"1",
"-of",
"GTiff",
kwargs["geotiff_small"],
kwargs["geotiff_small_filled"],
]
)
# Merge filled scaled DEM with unfilled DEM using bilinear interpolation
run('gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"' % (merged_vrt_path, geotiff_small_filled_path, tiles_vrt_path))
run('gdal_translate '
'-co NUM_THREADS={threads} '
'-co TILED=YES '
'-co BIGTIFF=IF_SAFER '
'-co COMPRESS=DEFLATE '
'--config GDAL_CACHEMAX {max_memory}% '
'"{merged_vrt}" "{geotiff}"'.format(**kwargs))
run(
'gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"'
% (merged_vrt_path, geotiff_small_filled_path, tiles_vrt_path)
)
run(
"gdal_translate "
"-co NUM_THREADS={threads} "
"-co TILED=YES "
"-co BIGTIFF=IF_SAFER "
"-co COMPRESS=DEFLATE "
"--config GDAL_CACHEMAX {max_memory}% "
'"{merged_vrt}" "{geotiff}"'.format(**kwargs)
)
else:
run('gdal_translate '
'-co NUM_THREADS={threads} '
'-co TILED=YES '
'-co BIGTIFF=IF_SAFER '
'-co COMPRESS=DEFLATE '
'--config GDAL_CACHEMAX {max_memory}% '
'"{tiles_vrt}" "{geotiff}"'.format(**kwargs))
run(
"gdal_translate "
"-co NUM_THREADS={threads} "
"-co TILED=YES "
"-co BIGTIFF=IF_SAFER "
"-co COMPRESS=DEFLATE "
"--config GDAL_CACHEMAX {max_memory}% "
'"{tiles_vrt}" "{geotiff}"'.format(**kwargs)
)
if apply_smoothing:
median_smoothing(geotiff_path, output_path, num_workers=max_workers)
@ -185,19 +235,29 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
if with_euclidean_map:
emap_path = io.related_file_path(output_path, postfix=".euclideand")
compute_euclidean_map(tiles_vrt_path, emap_path, overwrite=True)
for cleanup_file in [tiles_vrt_path, tiles_file_list, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
if os.path.exists(cleanup_file): os.remove(cleanup_file)
for cleanup_file in [
tiles_vrt_path,
tiles_file_list,
merged_vrt_path,
geotiff_small_path,
geotiff_small_filled_path,
]:
if os.path.exists(cleanup_file):
os.remove(cleanup_file)
for t in tiles:
if os.path.exists(t['filename']): os.remove(t['filename'])
if os.path.exists(t["filename"]):
os.remove(t["filename"])
log.ODM_INFO('Completed %s in %s' % (output_file, datetime.now() - start))
log.ODM_INFO("Completed %s in %s" % (output_file, datetime.now() - start))
def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
if not os.path.exists(geotiff_path):
log.ODM_WARNING("Cannot compute euclidean map (file does not exist: %s)" % geotiff_path)
log.ODM_WARNING(
"Cannot compute euclidean map (file does not exist: %s)" % geotiff_path
)
return
nodata = -9999
@ -212,55 +272,73 @@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
if gdal_proximity is not None:
try:
gdal_proximity(['gdal_proximity.py',
geotiff_path, output_path, '-values', str(nodata),
'-co', 'TILED=YES',
'-co', 'BIGTIFF=IF_SAFER',
'-co', 'COMPRESS=DEFLATE',
])
gdal_proximity(
[
"gdal_proximity.py",
geotiff_path,
output_path,
"-values",
str(nodata),
"-co",
"TILED=YES",
"-co",
"BIGTIFF=IF_SAFER",
"-co",
"COMPRESS=DEFLATE",
]
)
except Exception as e:
log.ODM_WARNING("Cannot compute euclidean distance: %s" % str(e))
if os.path.exists(output_path):
return output_path
else:
log.ODM_WARNING("Cannot compute euclidean distance file: %s" % output_path)
log.ODM_WARNING(
"Cannot compute euclidean distance file: %s" % output_path
)
else:
log.ODM_WARNING("Cannot compute euclidean map, gdal_proximity is missing")
else:
log.ODM_INFO("Found a euclidean distance map: %s" % output_path)
return output_path
def median_smoothing(geotiff_path, output_path, window_size=512, num_workers=1, radius=4):
""" Apply median smoothing """
def median_smoothing(
geotiff_path, output_path, window_size=512, num_workers=1, radius=4
):
"""Apply median smoothing"""
start = datetime.now()
if not os.path.exists(geotiff_path):
raise Exception('File %s does not exist!' % geotiff_path)
raise Exception("File %s does not exist!" % geotiff_path)
kwargs = {
'input': geotiff_path,
'output': output_path,
'window': window_size,
'radius': radius,
"input": geotiff_path,
"output": output_path,
"window": window_size,
"radius": radius,
}
system.run('fastrasterfilter "{input}" '
'--output "{output}" '
'--window-size {window} '
'--radius {radius} '
'--co TILED=YES '
'--co BIGTIFF=IF_SAFER '
'--co COMPRESS=DEFLATE '.format(**kwargs), env_vars={'OMP_NUM_THREADS': num_workers})
system.run(
'fastrasterfilter "{input}" '
'--output "{output}" '
"--window-size {window} "
"--radius {radius} "
"--co TILED=YES "
"--co BIGTIFF=IF_SAFER "
"--co COMPRESS=DEFLATE ".format(**kwargs),
env_vars={"OMP_NUM_THREADS": num_workers},
)
log.ODM_INFO('Completed smoothing to create %s in %s' % (output_path, datetime.now() - start))
log.ODM_INFO(
"Completed smoothing to create %s in %s" % (output_path, datetime.now() - start)
)
return output_path
def get_dem_radius_steps(stats_file, steps, resolution, multiplier = 1.0):
def get_dem_radius_steps(stats_file, steps, resolution, multiplier=1.0):
radius_steps = [point_cloud.get_spacing(stats_file, resolution) * multiplier]
for _ in range(steps - 1):
radius_steps.append(radius_steps[-1] * math.sqrt(2))
return radius_steps
return radius_steps
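
The sqrt(2) radius progression above is easy to exercise on its own. A minimal standalone sketch, assuming an initial point spacing of 0.05 (in the pipeline this value is read from the point cloud statistics file):

import math

def radius_steps(initial_spacing, steps, multiplier=1.0):
    # Each radius grows by sqrt(2), roughly doubling the search area per step.
    radii = [initial_spacing * multiplier]
    for _ in range(steps - 1):
        radii.append(radii[-1] * math.sqrt(2))
    return radii

print(radius_steps(0.05, 3))  # [0.05, 0.0707..., 0.0999...]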

View file

@ -4,6 +4,7 @@ from ..point_cloud import PointCloud
EPSILON = 0.00001
class PolyBounds(object):
def __init__(self, points):
self.__points = points
@ -36,6 +37,7 @@ class PolyBounds(object):
def corners(self):
return self._corners
class BoxBounds(object):
def __init__(self, x_min, x_max, y_min, y_max):
self._corners = (x_min, x_max, y_min, y_max)
@ -70,15 +72,15 @@ class BoxBounds(object):
def area(self):
(x_min, x_max, y_min, y_max) = self._corners
return (x_max - x_min) * (y_max - y_min)
return (x_max - x_min) * (y_max - y_min)
def divide_by_point(self, point):
"""Divide the box into four boxes, marked by the point. It is assumed that the point is inside the box"""
[x_point, y_point] = point
(x_min, x_max, y_min, y_max) = self._corners
return [
BoxBounds(x_min, x_point, y_min, y_point),
BoxBounds(x_min, x_point, y_min, y_point),
BoxBounds(x_point + EPSILON, x_max, y_min, y_point),
BoxBounds(x_min, x_point, y_point + EPSILON, y_max),
BoxBounds(x_point + EPSILON, x_max, y_point + EPSILON, y_max)
BoxBounds(x_min, x_point, y_point + EPSILON, y_max),
BoxBounds(x_point + EPSILON, x_max, y_point + EPSILON, y_max),
]
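
A hedged, self-contained sketch of the quadrant split that BoxBounds.divide_by_point performs above; boxes are plain (x_min, x_max, y_min, y_max) tuples and the helper name is illustrative only:

EPSILON = 0.00001

def divide_box(corners, point):
    # Split an axis-aligned box into four sub-boxes meeting at `point`,
    # nudging the right/upper halves by EPSILON so the boxes do not overlap.
    x_min, x_max, y_min, y_max = corners
    x_p, y_p = point
    return [
        (x_min, x_p, y_min, y_p),                      # lower-left
        (x_p + EPSILON, x_max, y_min, y_p),            # lower-right
        (x_min, x_p, y_p + EPSILON, y_max),            # upper-left
        (x_p + EPSILON, x_max, y_p + EPSILON, y_max),  # upper-right
    ]

print(divide_box((0.0, 10.0, 0.0, 10.0), (4.0, 6.0)))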

View file

@ -2,12 +2,20 @@ import numpy as np
from scipy.spatial import ConvexHull
from .types import BoxBounds, PolyBounds
def calculate_convex_hull_bounds(points):
hull = ConvexHull(points)
return PolyBounds(points[hull.vertices])
def box_from_point_and_size(center, width, height):
return BoxBounds(center[0] - width / 2, center[0] + width / 2, center[1] - height / 2, center[1] + height / 2)
return BoxBounds(
center[0] - width / 2,
center[0] + width / 2,
center[1] - height / 2,
center[1] + height / 2,
)
def box_from_cloud(point_cloud):
xy = point_cloud.get_xy()

View file

@ -1,6 +1,7 @@
import numpy as np
from abc import ABCMeta, abstractmethod
class Dimension(object):
__metaclass__ = ABCMeta

View file

@ -2,6 +2,7 @@ import numpy as np
from sklearn.linear_model import RANSACRegressor
from .dimension import Dimension
class DistanceDimension(Dimension):
"""Assign each point the distance to the estimated ground"""
@ -32,14 +33,14 @@ class DistanceDimension(Dimension):
super(DistanceDimension, self)._set_values(point_cloud, diff)
def get_name(self):
return 'distance_to_ground'
return "distance_to_ground"
def get_las_type(self):
return 'float64'
return "float64"
def __calculate_angle(self, model):
"Calculate the angle between the estimated plane and the XY plane"
a = model.estimator_.coef_[0]
b = model.estimator_.coef_[1]
angle = np.arccos(1 / np.sqrt(a ** 2 + b ** 2 + 1))
angle = np.arccos(1 / np.sqrt(a**2 + b**2 + 1))
return np.degrees(angle)
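
For reference, the formula in __calculate_angle follows from the fitted plane z = a*x + b*y + c having normal vector (a, b, -1). A small sketch with made-up coefficients:

import numpy as np

def plane_tilt_degrees(a, b):
    # cos(angle) between the plane normal (a, b, -1) and the vertical axis
    # reduces to 1 / sqrt(a^2 + b^2 + 1) after normalisation.
    return np.degrees(np.arccos(1.0 / np.sqrt(a**2 + b**2 + 1.0)))

print(plane_tilt_degrees(0.0, 0.0))  # 0.0   (horizontal plane)
print(plane_tilt_degrees(1.0, 0.0))  # 45.0  (plane z = x)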

View file

@ -1,6 +1,7 @@
import numpy as np
from .dimension import Dimension
class ExtendedDimension(Dimension):
"""Whether the point was added or was already on the original point cloud"""
@ -17,7 +18,7 @@ class ExtendedDimension(Dimension):
super(ExtendedDimension, self)._set_values(point_cloud, added)
def get_name(self):
return 'extended'
return "extended"
def get_las_type(self):
return 'uint16'
return "uint16"

View file

@ -1,6 +1,7 @@
import numpy as np
from .dimension import Dimension
class PartitionDimension(Dimension):
"""Group points by partition"""
@ -15,11 +16,13 @@ class PartitionDimension(Dimension):
def assign(self, *point_clouds, **kwargs):
for point_cloud in point_clouds:
super(PartitionDimension, self)._set_values(point_cloud, np.full(point_cloud.len(), self.counter))
super(PartitionDimension, self)._set_values(
point_cloud, np.full(point_cloud.len(), self.counter)
)
self.counter += 1
def get_name(self):
return self.name
def get_las_type(self):
return 'uint32'
return "uint32"

View file

@ -1,6 +1,7 @@
import numpy as np
from .dimension import Dimension
class UserDataDimension(Dimension):
"""A dimension that stores the user data of a point cloud."""
@ -16,10 +17,12 @@ class UserDataDimension(Dimension):
# Simply copy the value of the UserData dimension from the original point cloud
# to the new point cloud
for point_cloud in point_clouds:
super(UserDataDimension, self)._set_values(point_cloud, point_cloud.user_data)
super(UserDataDimension, self)._set_values(
point_cloud, point_cloud.user_data
)
def get_name(self):
return 'UserData'
return "UserData"
def get_las_type(self):
return 'uint8'
return "uint8"

View file

@ -3,9 +3,11 @@ from sklearn.neighbors import BallTree
EPSILON = 0.00001
def build_grid(bounds, point_cloud, distance):
"""First, a 2D grid is built with a distance of 'distance' between points, inside the given bounds.
Then, only points that don't have a point cloud neighbour closer than 'distance' are left. The rest are filtered out."""
Then, only points that don't have a point cloud neighbour closer than 'distance' are left. The rest are filtered out.
"""
# Generate a grid of 2D points inside the bounds, with a distance of 'distance' between them
grid = __build_grid(bounds, distance)
@ -16,14 +18,20 @@ def build_grid(bounds, point_cloud, distance):
# Filter out the grid points that have a neighbor closer than 'distance' from the given point cloud
return __calculate_lonely_points(grid_inside, point_cloud, distance)
def __build_grid(bounds, distance):
x_min, x_max, y_min, y_max = bounds.corners()
grid = [[x, y] for x in np.arange(x_min, x_max + distance, distance) for y in np.arange(y_min, y_max + distance, distance)]
grid = [
[x, y]
for x in np.arange(x_min, x_max + distance, distance)
for y in np.arange(y_min, y_max + distance, distance)
]
return np.array(grid)
def __calculate_lonely_points(grid, point_cloud, distance):
# Generate BallTree for point cloud
ball_tree = BallTree(point_cloud.get_xy(), metric='manhattan')
ball_tree = BallTree(point_cloud.get_xy(), metric="manhattan")
# Calculate for each of the points in the grid, the amount of neighbors in the original ground cloud
count = ball_tree.query_radius(grid, distance - EPSILON, count_only=True)
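
The grid logic above can be tried on synthetic data. A minimal sketch (the random cloud and bounds values are made up) that keeps only grid nodes with no Manhattan-metric neighbour within `distance`:

import numpy as np
from sklearn.neighbors import BallTree

def lonely_grid_points(cloud_xy, bounds, distance):
    x_min, x_max, y_min, y_max = bounds
    grid = np.array([[x, y]
                     for x in np.arange(x_min, x_max + distance, distance)
                     for y in np.arange(y_min, y_max + distance, distance)])
    tree = BallTree(cloud_xy, metric="manhattan")
    # Count point-cloud neighbours around each grid node; keep the lonely ones.
    counts = tree.query_radius(grid, distance - 1e-5, count_only=True)
    return grid[counts == 0]

cloud = np.random.uniform(0, 10, size=(200, 2))
print(lonely_grid_points(cloud, (0, 20, 0, 20), 2.0).shape)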

View file

@ -1,5 +1,7 @@
import time
from opendm.dem.ground_rectification.extra_dimensions.userdata_dimension import UserDataDimension
from opendm.dem.ground_rectification.extra_dimensions.userdata_dimension import (
UserDataDimension,
)
import pdal
import numpy as np
from opendm import log
@ -7,8 +9,11 @@ from ..point_cloud import PointCloud
import pdb
import json
def read_cloud(point_cloud_path):
pipeline = pdal.Pipeline('[{"type":"readers.las","filename":"%s"}]' % point_cloud_path)
pipeline = pdal.Pipeline(
'[{"type":"readers.las","filename":"%s"}]' % point_cloud_path
)
pipeline.execute()
arrays = pipeline.arrays[0]
@ -43,33 +48,37 @@ def write_cloud(metadata, point_cloud, output_point_cloud_path):
red, green, blue = np.hsplit(point_cloud.rgb, 3)
arrays = np.zeros(len(x),
dtype=[('X', '<f8'),
('Y', '<f8'),
('Z', '<f8'),
('Intensity', '<u2'),
('ReturnNumber', 'u1'),
('NumberOfReturns', 'u1'),
('ScanDirectionFlag', 'u1'),
('EdgeOfFlightLine', 'u1'),
('Classification', 'u1'),
('ScanAngleRank', '<f4'),
('UserData', 'u1'),
('PointSourceId', '<u2'),
('GpsTime', '<f8'),
('Red', '<u2'),
('Green', '<u2'),
('Blue', '<u2')])
arrays['X'] = x.ravel()
arrays['Y'] = y.ravel()
arrays['Z'] = point_cloud.z
arrays['Classification'] = point_cloud.classification.astype(np.uint8).ravel()
arrays['Red'] = red.astype(np.uint8).ravel()
arrays['Green'] = green.astype(np.uint8).ravel()
arrays['Blue'] = blue.astype(np.uint8).ravel()
arrays = np.zeros(
len(x),
dtype=[
("X", "<f8"),
("Y", "<f8"),
("Z", "<f8"),
("Intensity", "<u2"),
("ReturnNumber", "u1"),
("NumberOfReturns", "u1"),
("ScanDirectionFlag", "u1"),
("EdgeOfFlightLine", "u1"),
("Classification", "u1"),
("ScanAngleRank", "<f4"),
("UserData", "u1"),
("PointSourceId", "<u2"),
("GpsTime", "<f8"),
("Red", "<u2"),
("Green", "<u2"),
("Blue", "<u2"),
],
)
arrays["X"] = x.ravel()
arrays["Y"] = y.ravel()
arrays["Z"] = point_cloud.z
arrays["Classification"] = point_cloud.classification.astype(np.uint8).ravel()
arrays["Red"] = red.astype(np.uint8).ravel()
arrays["Green"] = green.astype(np.uint8).ravel()
arrays["Blue"] = blue.astype(np.uint8).ravel()
if "UserData" in point_cloud.extra_dimensions:
arrays['UserData'] = point_cloud.extra_dimensions["UserData"].ravel()
arrays["UserData"] = point_cloud.extra_dimensions["UserData"].ravel()
writer_pipeline = {
"pipeline": [
@ -77,7 +86,7 @@ def write_cloud(metadata, point_cloud, output_point_cloud_path):
"type": "writers.las",
"filename": output_point_cloud_path,
"compression": "lazperf",
"extra_dims": "all"
"extra_dims": "all",
}
]
}
@ -108,12 +117,14 @@ def write_cloud(metadata, point_cloud, output_point_cloud_path):
vlr_field = "vlr_%d" % i
if vlr_field in metadata:
vlr = metadata[vlr_field]
writer_pipeline["pipeline"][0]["vlrs"].append({
"record_id": vlr["record_id"],
"user_id": vlr["user_id"],
"description": vlr["description"],
"data": vlr["data"]
})
writer_pipeline["pipeline"][0]["vlrs"].append(
{
"record_id": vlr["record_id"],
"user_id": vlr["user_id"],
"description": vlr["description"],
"data": vlr["data"],
}
)
i += 1
else:
break
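
A hedged usage sketch of the reader half of this module: run a single-stage PDAL pipeline from Python, as read_cloud does, and inspect the structured array it returns ("input.las" is a placeholder path):

import json
import pdal

pipeline_spec = [{"type": "readers.las", "filename": "input.las"}]
pipeline = pdal.Pipeline(json.dumps(pipeline_spec))
pipeline.execute()

points = pipeline.arrays[0]   # structured numpy array, one record per point
print(points.dtype.names)     # e.g. ('X', 'Y', 'Z', 'Intensity', ...)
print(points["Z"][:5])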

View file

@ -1,6 +1,7 @@
from .partition_plan import PartitionPlan, Partition
from ..bounds.utils import box_from_cloud
class OnePartition(PartitionPlan):
"""This partition plan does nothing. It returns all the cloud points in one partition."""

View file

@ -1,7 +1,9 @@
from abc import ABCMeta, abstractmethod
class PartitionPlan(object):
"""We want to partition the ground in different areas. There are many ways to do so, and each of them will be a different partition plan."""
__metaclass__ = ABCMeta
def __init__(self):
@ -11,7 +13,8 @@ class PartitionPlan(object):
def execute(self):
"""This method is expected to return a list of Partition instances"""
class Partition:
def __init__(self, point_cloud, **kwargs):
self.point_cloud = point_cloud
self.bounds = kwargs['bounds']
self.bounds = kwargs["bounds"]

View file

@ -3,11 +3,12 @@ from abc import abstractmethod
from ..bounds.utils import box_from_cloud
from .partition_plan import PartitionPlan, Partition
class QuadPartitions(PartitionPlan):
"""This partition plan starts with one big partition that includes the whole point cloud. It then divides it into four partitions, based on some criteria.
Each of these partitions is then divided into four other partitions and so on. The algorithm has two possible stopping criteria:
if subdividing a partition would imply that one of the new partitions contains fewer than a given number of points, or that one of the new partitions has an area smaller than the given size,
then the partition is not divided."""
Each of these partitions is then divided into four other partitions and so on. The algorithm has two possible stopping criteria:
if subdividing a partition would imply that one of the new partitions contains fewer than a given number of points, or that one of the new partitions has an area smaller than the given size,
then the partition is not divided."""
def __init__(self, point_cloud):
super(QuadPartitions, self).__init__()
@ -19,7 +20,12 @@ class QuadPartitions(PartitionPlan):
def execute(self, **kwargs):
initial_bounding_box = box_from_cloud(self.point_cloud)
return self._divide_until(self.point_cloud, initial_bounding_box, kwargs['min_points'], kwargs['min_area'])
return self._divide_until(
self.point_cloud,
initial_bounding_box,
kwargs["min_points"],
kwargs["min_area"],
)
def _divide_until(self, point_cloud, bounding_box, min_points, min_area):
dividing_point = self.choose_divide_point(point_cloud, bounding_box)
@ -27,19 +33,26 @@ class QuadPartitions(PartitionPlan):
for new_box in new_boxes:
if new_box.area() < min_area:
return [Partition(point_cloud, bounds=bounding_box)] # If by dividing, I break the minimum area threshold, don't do it
return [
Partition(point_cloud, bounds=bounding_box)
] # If by dividing, I break the minimum area threshold, don't do it
subdivisions = []
for new_box in new_boxes:
mask = new_box.calculate_mask(point_cloud)
if np.count_nonzero(mask) < min_points:
return [Partition(point_cloud, bounds=bounding_box)] # If by dividing, I break the minimum amount of points in a zone, don't do it
return [
Partition(point_cloud, bounds=bounding_box)
] # If by dividing, I break the minimum amount of points in a zone, don't do it
subdivisions += self._divide_until(point_cloud[mask], new_box, min_points, min_area)
subdivisions += self._divide_until(
point_cloud[mask], new_box, min_points, min_area
)
return subdivisions
class UniformPartitions(QuadPartitions):
"""This kind of partitioner takes the current bounding box, and divides it by four uniform partitions"""
@ -49,6 +62,7 @@ class UniformPartitions(QuadPartitions):
def choose_divide_point(self, point_cloud, bounding_box):
return bounding_box.center()
class MedianPartitions(QuadPartitions):
"""This kind of partitioner takes the current point cloud, and divides it by the median, so that all four new partitions have the same amount of points"""

View file

@ -4,13 +4,13 @@ from .surrounding_partitions import SurroundingPartitions
def select_partition_plan(name, point_cloud):
if name == 'one':
if name == "one":
return OnePartition(point_cloud)
elif name == 'uniform':
elif name == "uniform":
return UniformPartitions(point_cloud)
elif name == 'median':
elif name == "median":
return MedianPartitions(point_cloud)
elif name == 'surrounding':
elif name == "surrounding":
return SurroundingPartitions(point_cloud)
else:
raise Exception('Incorrect partition name.')
raise Exception("Incorrect partition name.")

View file

@ -13,40 +13,62 @@ DEFAULT_DISTANCE = 5
MIN_PERCENTAGE_OF_POINTS_IN_CONVEX_HULL = 90
EPSILON = 0.0001
class SurroundingPartitions(PartitionPlan):
def __init__(self, point_cloud):
super(SurroundingPartitions, self).__init__()
self.point_cloud = point_cloud
self.chebyshev_ball_tree = BallTree(point_cloud.xy, metric='chebyshev')
self.manhattan_ball_tree = BallTree(point_cloud.xy, metric='manhattan')
self.chebyshev_ball_tree = BallTree(point_cloud.xy, metric="chebyshev")
self.manhattan_ball_tree = BallTree(point_cloud.xy, metric="manhattan")
def execute(self, **kwargs):
distance = kwargs['distance'] if 'distance' in kwargs else DEFAULT_DISTANCE
bounds = kwargs['bounds'] if 'bounds' in kwargs else box_from_cloud(self.point_cloud)
min_points = kwargs['min_points']
min_area = kwargs['min_area']
distance = kwargs["distance"] if "distance" in kwargs else DEFAULT_DISTANCE
bounds = (
kwargs["bounds"] if "bounds" in kwargs else box_from_cloud(self.point_cloud)
)
min_points = kwargs["min_points"]
min_area = kwargs["min_area"]
result = ExecutionResult(self.point_cloud.len())
grid = build_grid(bounds, self.point_cloud, distance)
if grid.shape[0] >= 1:
db = DBSCAN(eps=distance + EPSILON, min_samples=1, metric='manhattan', n_jobs=-1).fit(grid)
db = DBSCAN(
eps=distance + EPSILON, min_samples=1, metric="manhattan", n_jobs=-1
).fit(grid)
clusters = set(db.labels_)
for cluster in clusters:
cluster_members = grid[db.labels_ == cluster]
point_cloud_neighbors, point_cloud_neighbors_mask = self.__find_cluster_neighbors(cluster_members, distance)
point_cloud_neighbors, point_cloud_neighbors_mask = (
self.__find_cluster_neighbors(cluster_members, distance)
)
if self.__is_cluster_surrounded(cluster_members, point_cloud_neighbors):
result.add_cluster_partition(cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask)
result.add_cluster_partition(
cluster_members,
point_cloud_neighbors,
point_cloud_neighbors_mask,
)
else:
point_cloud_neighbors, point_cloud_neighbors_mask, bounding_box = self.__find_points_for_non_surrounded_cluster(bounds, cluster_members, distance, min_area, min_points)
result.add_zone_partition(cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask, bounding_box)
point_cloud_neighbors, point_cloud_neighbors_mask, bounding_box = (
self.__find_points_for_non_surrounded_cluster(
bounds, cluster_members, distance, min_area, min_points
)
)
result.add_zone_partition(
cluster_members,
point_cloud_neighbors,
point_cloud_neighbors_mask,
bounding_box,
)
return result.build_result(self.point_cloud)
def __find_points_for_non_surrounded_cluster(self, bounds, cluster_members, distance, min_area, min_points):
def __find_points_for_non_surrounded_cluster(
self, bounds, cluster_members, distance, min_area, min_points
):
(center_x, center_y) = bounds.center()
[x_min, y_min] = np.amin(cluster_members, axis=0)
@ -84,22 +106,35 @@ class SurroundingPartitions(PartitionPlan):
return ratio > MIN_PERCENTAGE_OF_POINTS_IN_CONVEX_HULL
def __find_cluster_neighbors(self, cluster_members, distance):
mask_per_point = self.manhattan_ball_tree.query_radius(cluster_members, distance * 3)
mask_per_point = self.manhattan_ball_tree.query_radius(
cluster_members, distance * 3
)
all_neighbor_mask = np.concatenate(mask_per_point)
point_cloud_neighbors = self.point_cloud[all_neighbor_mask]
return point_cloud_neighbors, all_neighbor_mask
class ExecutionResult:
def __init__(self, cloud_size):
self.partitions = [ ]
self.partitions = []
self.marked_as_neighbors = np.zeros(cloud_size, dtype=bool)
def add_cluster_partition(self, cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask):
convex_hull = calculate_convex_hull_bounds(np.concatenate((point_cloud_neighbors.get_xy(), cluster_members)))
def add_cluster_partition(
self, cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask
):
convex_hull = calculate_convex_hull_bounds(
np.concatenate((point_cloud_neighbors.get_xy(), cluster_members))
)
self.marked_as_neighbors[point_cloud_neighbors_mask] = True
self.partitions.append(Partition(point_cloud_neighbors, bounds=convex_hull))
def add_zone_partition(self, cluster_members, point_cloud_neighbors, point_cloud_neighbors_mask, bounding_box):
def add_zone_partition(
self,
cluster_members,
point_cloud_neighbors,
point_cloud_neighbors_mask,
bounding_box,
):
self.marked_as_neighbors[point_cloud_neighbors_mask] = True
self.partitions.append(Partition(point_cloud_neighbors, bounds=bounding_box))
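
A toy illustration of the clustering step used above: DBSCAN with min_samples=1 groups the lonely grid points into connected holes in the ground coverage, so each hole is processed as a unit (the coordinates below are made up):

import numpy as np
from sklearn.cluster import DBSCAN

grid = np.array([[0, 0], [0, 1], [1, 0],   # one hole
                 [10, 10], [10, 11]])      # another, far away
db = DBSCAN(eps=1.0001, min_samples=1, metric="manhattan").fit(grid)
for label in sorted(set(db.labels_)):
    members = grid[db.labels_ == label]
    print("cluster", label, "->", len(members), "grid points")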

View file

@ -1,9 +1,20 @@
import numpy as np
from numpy.lib.recfunctions import append_fields
class PointCloud:
"""Representation of a 3D point cloud"""
def __init__(self, xy, z, classification, rgb, indices, extra_dimensions, extra_dimensions_metadata):
def __init__(
self,
xy,
z,
classification,
rgb,
indices,
extra_dimensions,
extra_dimensions_metadata,
):
self.xy = xy
self.z = z
self.classification = classification
@ -17,17 +28,35 @@ class PointCloud:
xy = np.column_stack((x, y))
rgb = np.column_stack((red, green, blue))
indices = indices if indices is not None else np.arange(0, len(x))
return PointCloud(xy, z, classification, rgb, indices, { }, { })
return PointCloud(xy, z, classification, rgb, indices, {}, {})
@staticmethod
def with_xy(xy):
[x, y] = np.hsplit(xy, 2)
empty = np.empty(xy.shape[0])
return PointCloud.with_dimensions(x.ravel(), y.ravel(), empty, np.empty(xy.shape[0], dtype=np.uint8), empty, empty, empty)
return PointCloud.with_dimensions(
x.ravel(),
y.ravel(),
empty,
np.empty(xy.shape[0], dtype=np.uint8),
empty,
empty,
empty,
)
def __getitem__(self, mask):
masked_dimensions = { name: values[mask] for name, values in self.extra_dimensions.items() }
return PointCloud(self.xy[mask], self.z[mask], self.classification[mask], self.rgb[mask], self.indices[mask], masked_dimensions, self.extra_dimensions_metadata)
masked_dimensions = {
name: values[mask] for name, values in self.extra_dimensions.items()
}
return PointCloud(
self.xy[mask],
self.z[mask],
self.classification[mask],
self.rgb[mask],
self.indices[mask],
masked_dimensions,
self.extra_dimensions_metadata,
)
def concatenate(self, other_cloud):
for name, dimension in self.extra_dimensions_metadata.items():
@ -36,13 +65,20 @@ class PointCloud:
for name, dimension in other_cloud.extra_dimensions_metadata.items():
if name not in self.extra_dimensions:
dimension.assign_default(self)
new_indices = np.arange(len(self.indices), len(self.indices) + len(other_cloud.indices))
new_indices = np.arange(
len(self.indices), len(self.indices) + len(other_cloud.indices)
)
self.xy = np.concatenate((self.xy, other_cloud.xy))
self.z = np.concatenate((self.z, other_cloud.z))
self.classification = np.concatenate((self.classification, other_cloud.classification))
self.classification = np.concatenate(
(self.classification, other_cloud.classification)
)
self.rgb = np.concatenate((self.rgb, other_cloud.rgb))
self.indices = np.concatenate((self.indices, new_indices))
self.extra_dimensions = { name: np.concatenate((values, other_cloud.extra_dimensions[name])) for name, values in self.extra_dimensions.items() }
self.extra_dimensions = {
name: np.concatenate((values, other_cloud.extra_dimensions[name]))
for name, values in self.extra_dimensions.items()
}
def update(self, other_cloud):
for name, dimension in self.extra_dimensions_metadata.items():

View file

@ -14,16 +14,30 @@ from .point_cloud import PointCloud
EPSILON = 0.00001
def run_rectification(**kwargs):
header, point_cloud = read_cloud(kwargs['input'])
header, point_cloud = read_cloud(kwargs["input"])
if 'reclassify_plan' in kwargs and kwargs['reclassify_plan'] is not None:
point_cloud = reclassify_cloud(point_cloud, kwargs['reclassify_plan'], kwargs['reclassify_threshold'], kwargs['min_points'], kwargs['min_area'])
if "reclassify_plan" in kwargs and kwargs["reclassify_plan"] is not None:
point_cloud = reclassify_cloud(
point_cloud,
kwargs["reclassify_plan"],
kwargs["reclassify_threshold"],
kwargs["min_points"],
kwargs["min_area"],
)
if 'extend_plan' in kwargs and kwargs['extend_plan'] is not None:
point_cloud = extend_cloud(point_cloud, kwargs['extend_plan'], kwargs['extend_grid_distance'], kwargs['min_points'], kwargs['min_area'])
if "extend_plan" in kwargs and kwargs["extend_plan"] is not None:
point_cloud = extend_cloud(
point_cloud,
kwargs["extend_plan"],
kwargs["extend_grid_distance"],
kwargs["min_points"],
kwargs["min_area"],
)
write_cloud(header, point_cloud, kwargs["output"])
write_cloud(header, point_cloud, kwargs['output'])
def reclassify_cloud(point_cloud, plan, threshold, min_points, min_area):
# Get only ground
@ -33,10 +47,13 @@ def reclassify_cloud(point_cloud, plan, threshold, min_points, min_area):
partition_plan = select_partition_plan(plan, ground_cloud)
# Execute the partition plan, and get all the partitions
partitions = [result for result in partition_plan.execute(min_points=min_points, min_area=min_area)]
partitions = [
result
for result in partition_plan.execute(min_points=min_points, min_area=min_area)
]
# Add 'distance to ground' and 'partition number' dimensions to the cloud
for dimension in [DistanceDimension(), PartitionDimension('reclassify_partition')]:
for dimension in [DistanceDimension(), PartitionDimension("reclassify_partition")]:
# Calculate new dimension for partition
for partition in partitions:
@ -46,13 +63,14 @@ def reclassify_cloud(point_cloud, plan, threshold, min_points, min_area):
point_cloud.update(partition.point_cloud)
# Calculate the points that need to be reclassified
mask = point_cloud.get_extra_dimension_values('distance_to_ground') > threshold
mask = point_cloud.get_extra_dimension_values("distance_to_ground") > threshold
# Reclassify them as 'unclassified'
point_cloud.classification[mask] = 1
return point_cloud
def extend_cloud(point_cloud, plan, distance, min_points, min_area):
# Get only ground
ground_cloud = point_cloud[point_cloud.classification == 2]
@ -70,10 +88,12 @@ def extend_cloud(point_cloud, plan, distance, min_points, min_area):
partition_plan = select_partition_plan(plan, ground_cloud)
# Execute the partition plan, and get all the partitions
partitions = partition_plan.execute(distance=distance, min_points=min_points, min_area=min_area, bounds=bounds)
partitions = partition_plan.execute(
distance=distance, min_points=min_points, min_area=min_area, bounds=bounds
)
# Create dimensions
partition_dimension = PartitionDimension('extend_partition')
partition_dimension = PartitionDimension("extend_partition")
extended_dimension = ExtendedDimension()
for partition in partitions:
@ -98,7 +118,6 @@ def extend_cloud(point_cloud, plan, distance, min_points, min_area):
# Update new information to the original point cloud
point_cloud.update(partition.point_cloud)
# Calculate the bounding box of the original cloud
bbox = point_cloud.get_bounding_box()
@ -111,9 +130,12 @@ def extend_cloud(point_cloud, plan, distance, min_points, min_area):
# Add the new points to the original point cloud
return point_cloud
def __calculate_new_points(grid_points_inside, partition_point_cloud):
# Calculate RANSAC model
model = RANSACRegressor().fit(partition_point_cloud.get_xy(), partition_point_cloud.get_z())
model = RANSACRegressor().fit(
partition_point_cloud.get_xy(), partition_point_cloud.get_z()
)
# With the ransac model, calculate the altitude for each grid point
grid_points_altitude = model.predict(grid_points_inside.get_xy())
@ -131,24 +153,78 @@ def __calculate_new_points(grid_points_inside, partition_point_cloud):
[x, y] = np.hsplit(grid_points_inside.get_xy(), 2)
# Return point cloud
return PointCloud.with_dimensions(x.ravel(), y.ravel(), grid_points_altitude, classification, red, green, blue, grid_points_inside.indices)
return PointCloud.with_dimensions(
x.ravel(),
y.ravel(),
grid_points_altitude,
classification,
red,
green,
blue,
grid_points_inside.indices,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='This script takes a pre-classified point cloud, and then it re-classifies wrongly classified ground points to non-ground points and finally adds ground points where needed.')
parser.add_argument('input', type=str, help='The path where to find the pre-classified point cloud.')
parser.add_argument('output', type=str, help='The path where to save the rectified point cloud.')
parser.add_argument('--reclassify_plan', type=str, help='The partition plan to use for reclassification. Must be one of (one, uniform, median, surrounding)')
parser.add_argument('--reclassify_threshold', type=float, help='Every point with a distance to the estimated ground that is higher than the threshold will be reclassified as non ground', default=5)
parser.add_argument('--extend_plan', type=str, help='The partition plan to use for extending the ground. Must be one of (one, uniform, median, surrounding)')
parser.add_argument('--extend_grid_distance', type=float, help='The distance between points on the grid that will be added to the point cloud.', default=5)
parser.add_argument('--min_area', type=int, help='Some partition plans need a minimum area as a stopping criteria.', default=750)
parser.add_argument('--min_points', type=int, help='Some partition plans need a minimum number of points as a stopping criteria.', default=500)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="This script takes a pre-classified point cloud, and then it re-clasiffies wrongly classified ground point to non-ground points and finally adds ground points where needed."
)
parser.add_argument(
"input", type=str, help="The path where to find the pre-classified point cloud."
)
parser.add_argument(
"output", type=str, help="The path where to save the rectified point cloud."
)
parser.add_argument(
"--reclassify_plan",
type=str,
help="The partition plan to use reclasiffication. Must be one of(one, uniform, median, surrounding)",
)
parser.add_argument(
"--reclassify_threshold",
type=float,
help="Every point with a distance to the estimated ground that is higher than the threshold will be reclassified as non ground",
default=5,
)
parser.add_argument(
"--extend_plan",
type=str,
help="The partition plan to use for extending the ground. Must be one of(one, uniform, median, surrounding)",
)
parser.add_argument(
"--extend_grid_distance",
type=float,
help="The distance between points on the grid that will be added to the point cloud.",
default=5,
)
parser.add_argument(
"--min_area",
type=int,
help="Some partition plans need a minimum area as a stopping criteria.",
default=750,
)
parser.add_argument(
"--min_points",
type=int,
help="Some partition plans need a minimum number of points as a stopping criteria.",
default=500,
)
args = parser.parse_args()
if args.reclassify_plan is None and args.extend_plan is None:
raise Exception("Please set a reclassifying or extension plan. Otherwise there is nothing for me to do.")
raise Exception(
"Please set a reclassifying or extension plan. Otherwise there is nothing for me to do."
)
run(input=args.input, reclassify_plan=args.reclassify_plan, reclassify_threshold=args.reclassify_threshold, \
extend_plan=args.extend_plan, extend_grid_distance=args.extend_grid_distance, \
output=args.output, min_points=args.min_points, min_area=args.min_area, debug=False)
run(
input=args.input,
reclassify_plan=args.reclassify_plan,
reclassify_threshold=args.reclassify_threshold,
extend_plan=args.extend_plan,
extend_grid_distance=args.extend_grid_distance,
output=args.output,
min_points=args.min_points,
min_area=args.min_area,
debug=False,
)
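
A short sketch of the RANSAC step inside __calculate_new_points: fit a robust plane to (synthetic) ground points and predict the altitude of new grid points. All values below are made up:

import numpy as np
from sklearn.linear_model import RANSACRegressor

rng = np.random.default_rng(0)
ground_xy = rng.uniform(0, 50, size=(500, 2))
ground_z = 0.02 * ground_xy[:, 0] + 0.01 * ground_xy[:, 1] + rng.normal(0, 0.05, 500)

model = RANSACRegressor().fit(ground_xy, ground_z)   # robust plane fit
grid_xy = np.array([[10.0, 10.0], [25.0, 40.0]])
print(model.predict(grid_xy))                        # estimated ground heights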

View file

@ -9,19 +9,22 @@ from opendm import log
from opendm import io
import os
def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_map_source=None):
def euclidean_merge_dems(
input_dems, output_dem, creation_options={}, euclidean_map_source=None
):
"""
Based on https://github.com/mapbox/rio-merge-rgba
and ideas from Anna Petrasova
implementation by Piero Toffanin
Computes a merged DEM by computing/using a euclidean
distance to NODATA cells map for all DEMs and then blending all overlapping DEM cells
Computes a merged DEM by computing/using a euclidean
distance to NODATA cells map for all DEMs and then blending all overlapping DEM cells
by a weighted average based on such euclidean distance.
"""
inputs = []
bounds=None
precision=7
bounds = None
precision = 7
existing_dems = []
for dem in input_dems:
@ -41,13 +44,19 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
profile = first.profile
for dem in existing_dems:
eumap = compute_euclidean_map(dem, io.related_file_path(dem, postfix=".euclideand", replace_base=euclidean_map_source), overwrite=False)
eumap = compute_euclidean_map(
dem,
io.related_file_path(
dem, postfix=".euclideand", replace_base=euclidean_map_source
),
overwrite=False,
)
if eumap and io.file_exists(eumap):
inputs.append((dem, eumap))
log.ODM_INFO("%s valid DEM rasters to merge" % len(inputs))
sources = [(rasterio.open(d), rasterio.open(e)) for d,e in inputs]
sources = [(rasterio.open(d), rasterio.open(e)) for d, e in inputs]
# Extent from option or extent of all inputs.
if bounds:
@ -82,10 +91,10 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
profile["transform"] = output_transform
profile["height"] = output_height
profile["width"] = output_width
profile["tiled"] = creation_options.get('TILED', 'YES') == 'YES'
profile["blockxsize"] = creation_options.get('BLOCKXSIZE', 512)
profile["blockysize"] = creation_options.get('BLOCKYSIZE', 512)
profile["compress"] = creation_options.get('COMPRESS', 'LZW')
profile["tiled"] = creation_options.get("TILED", "YES") == "YES"
profile["blockxsize"] = creation_options.get("BLOCKXSIZE", 512)
profile["blockysize"] = creation_options.get("BLOCKYSIZE", 512)
profile["compress"] = creation_options.get("COMPRESS", "LZW")
profile["nodata"] = src_nodata
# Creation opts
@ -123,17 +132,35 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
nodata = src_d.nodatavals[0]
# Alternative, custom get_window using rounding
src_window_d = tuple(zip(rowcol(
src_d.transform, left, top, op=round, precision=precision
), rowcol(
src_d.transform, right, bottom, op=round, precision=precision
)))
src_window_d = tuple(
zip(
rowcol(
src_d.transform, left, top, op=round, precision=precision
),
rowcol(
src_d.transform,
right,
bottom,
op=round,
precision=precision,
),
)
)
src_window_e = tuple(zip(rowcol(
src_e.transform, left, top, op=round, precision=precision
), rowcol(
src_e.transform, right, bottom, op=round, precision=precision
)))
src_window_e = tuple(
zip(
rowcol(
src_e.transform, left, top, op=round, precision=precision
),
rowcol(
src_e.transform,
right,
bottom,
op=round,
precision=precision,
),
)
)
temp_d = np.zeros(dst_shape, dtype=dtype)
temp_d = src_d.read(
@ -147,12 +174,12 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
# Set NODATA areas in the euclidean map to a very low value
# so that:
# - Areas with overlap prioritize DEM layers' cells that
# - Areas with overlap prioritize DEM layers' cells that
# are far away from NODATA areas
# - Areas that have no overlap are included in the final result
# even if they are very close to a NODATA cell
temp_e[temp_e==0] = small_distance
temp_e[temp_d==nodata] = 0
temp_e[temp_e == 0] = small_distance
temp_e[temp_d == nodata] = 0
np.multiply(temp_d, temp_e, out=temp_d)
np.add(dstarr, temp_d, out=dstarr)
@ -163,9 +190,11 @@ def euclidean_merge_dems(input_dems, output_dem, creation_options={}, euclidean_
# Perform nearest neighbor interpolation on areas where two or more rasters overlap
# but where both rasters have only interpolated data. This prevents the creation
# of artifacts that average areas of interpolation.
indices = ndimage.distance_transform_edt(np.logical_and(distsum < 1, distsum > small_distance),
return_distances=False,
return_indices=True)
indices = ndimage.distance_transform_edt(
np.logical_and(distsum < 1, distsum > small_distance),
return_distances=False,
return_indices=True,
)
dstarr = dstarr[tuple(indices)]
dstarr[dstarr == 0.0] = src_nodata
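
A toy numpy illustration (not the rasterio windowed implementation above) of the blending rule: each DEM contributes to a cell in proportion to its distance from the nearest NODATA cell, so overlapping rasters cross-fade instead of leaving hard seams:

import numpy as np

nodata = -9999.0
dem_a = np.array([[10.0, 10.0, nodata]])
dem_b = np.array([[nodata, 12.0, 12.0]])
dist_a = np.array([[2.0, 1.0, 0.0]])   # distance-to-NODATA maps
dist_b = np.array([[0.0, 1.0, 2.0]])

num = np.zeros_like(dem_a)
den = np.zeros_like(dem_a)
for dem, dist in ((dem_a, dist_a), (dem_b, dist_b)):
    w = np.where(dem == nodata, 0.0, np.maximum(dist, 1e-6))
    num += np.where(dem == nodata, 0.0, dem) * w
    den += w

merged = np.where(den > 0, num / den, nodata)
print(merged)   # [[10. 11. 12.]] -- weighted average where both overlap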

View file

@ -45,128 +45,118 @@ from datetime import datetime
def json_base():
""" Create initial JSON for PDAL pipeline """
return {'pipeline': []}
"""Create initial JSON for PDAL pipeline"""
return {"pipeline": []}
def json_gdal_base(filename, output_type, radius, resolution=1, bounds=None):
""" Create initial JSON for PDAL pipeline containing a Writer element """
"""Create initial JSON for PDAL pipeline containing a Writer element"""
json = json_base()
d = {
'type': 'writers.gdal',
'resolution': resolution,
'radius': radius,
'filename': filename,
'output_type': output_type,
'data_type': 'float'
"type": "writers.gdal",
"resolution": resolution,
"radius": radius,
"filename": filename,
"output_type": output_type,
"data_type": "float",
}
if bounds is not None:
d['bounds'] = "([%s,%s],[%s,%s])" % (bounds['minx'], bounds['maxx'], bounds['miny'], bounds['maxy'])
d["bounds"] = "([%s,%s],[%s,%s])" % (
bounds["minx"],
bounds["maxx"],
bounds["miny"],
bounds["maxy"],
)
json['pipeline'].insert(0, d)
json["pipeline"].insert(0, d)
return json
def json_las_base(fout):
""" Create initial JSON for writing to a LAS file """
"""Create initial JSON for writing to a LAS file"""
json = json_base()
json['pipeline'].insert(0, {
'type': 'writers.las',
'filename': fout
})
json["pipeline"].insert(0, {"type": "writers.las", "filename": fout})
return json
def json_add_decimation_filter(json, step):
""" Add decimation Filter element and return """
json['pipeline'].insert(0, {
'type': 'filters.decimation',
'step': step
})
"""Add decimation Filter element and return"""
json["pipeline"].insert(0, {"type": "filters.decimation", "step": step})
return json
def json_add_classification_filter(json, classification, equality="equals"):
""" Add classification Filter element and return """
limits = 'Classification[{0}:{0}]'.format(classification)
if equality == 'max':
limits = 'Classification[:{0}]'.format(classification)
"""Add classification Filter element and return"""
limits = "Classification[{0}:{0}]".format(classification)
if equality == "max":
limits = "Classification[:{0}]".format(classification)
json['pipeline'].insert(0, {
'type': 'filters.range',
'limits': limits
})
json["pipeline"].insert(0, {"type": "filters.range", "limits": limits})
return json
def is_ply_file(filename):
_, ext = os.path.splitext(filename)
return ext.lower() == '.ply'
return ext.lower() == ".ply"
def json_add_reader(json, filename):
""" Add Reader Element and return """
reader_type = 'readers.las' # default
"""Add Reader Element and return"""
reader_type = "readers.las" # default
if is_ply_file(filename):
reader_type = 'readers.ply'
reader_type = "readers.ply"
json['pipeline'].insert(0, {
'type': reader_type,
'filename': os.path.abspath(filename)
})
json["pipeline"].insert(
0, {"type": reader_type, "filename": os.path.abspath(filename)}
)
return json
def json_add_readers(json, filenames):
""" Add merge Filter element and readers to a Writer element and return Filter element """
"""Add merge Filter element and readers to a Writer element and return Filter element"""
for f in filenames:
json_add_reader(json, f)
if len(filenames) > 1:
json['pipeline'].insert(0, {
'type': 'filters.merge'
})
json["pipeline"].insert(0, {"type": "filters.merge"})
return json
""" Run PDAL commands """
def run_pipeline(json):
""" Run PDAL Pipeline with provided JSON """
"""Run PDAL Pipeline with provided JSON"""
# write to temp file
f, jsonfile = tempfile.mkstemp(suffix='.json')
os.write(f, jsonlib.dumps(json).encode('utf8'))
f, jsonfile = tempfile.mkstemp(suffix=".json")
os.write(f, jsonlib.dumps(json).encode("utf8"))
os.close(f)
cmd = [
'pdal',
'pipeline',
'-i %s' % double_quote(jsonfile)
]
system.run(' '.join(cmd))
cmd = ["pdal", "pipeline", "-i %s" % double_quote(jsonfile)]
system.run(" ".join(cmd))
os.remove(jsonfile)
def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window):
""" Run PDAL translate """
"""Run PDAL translate"""
cmd = [
'pdal',
'translate',
'-i %s' % fin,
'-o %s' % fout,
'smrf',
'--filters.smrf.scalar=%s' % scalar,
'--filters.smrf.slope=%s' % slope,
'--filters.smrf.threshold=%s' % threshold,
'--filters.smrf.window=%s' % window,
"pdal",
"translate",
"-i %s" % fin,
"-o %s" % fout,
"smrf",
"--filters.smrf.scalar=%s" % scalar,
"--filters.smrf.slope=%s" % slope,
"--filters.smrf.threshold=%s" % threshold,
"--filters.smrf.window=%s" % window,
]
system.run(' '.join(cmd))
system.run(" ".join(cmd))
def merge_point_clouds(input_files, output_file):
@ -175,20 +165,20 @@ def merge_point_clouds(input_files, output_file):
return
cmd = [
'pdal',
'merge',
' '.join(map(double_quote, input_files + [output_file])),
"pdal",
"merge",
" ".join(map(double_quote, input_files + [output_file])),
]
system.run(' '.join(cmd))
system.run(" ".join(cmd))
def translate(input, output):
cmd = [
'pdal',
'translate',
"pdal",
"translate",
'-i "%s"' % input,
'-o "%s"' % output,
]
system.run(' '.join(cmd))
system.run(" ".join(cmd))

View file

@ -1,10 +1,9 @@
def get_dem_vars(args):
return {
'TILED': 'YES',
'COMPRESS': 'DEFLATE',
'BLOCKXSIZE': 512,
'BLOCKYSIZE': 512,
'BIGTIFF': 'IF_SAFER',
'NUM_THREADS': args.max_concurrency,
"TILED": "YES",
"COMPRESS": "DEFLATE",
"BLOCKXSIZE": 512,
"BLOCKYSIZE": 512,
"BIGTIFF": "IF_SAFER",
"NUM_THREADS": args.max_concurrency,
}

View file

@ -21,12 +21,12 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
# for DLS correction, we need the sun position at the time the image was taken
# this can be computed using the pysolar package (ver 0.6)
# https://pypi.python.org/pypi/Pysolar/0.6
# we import multiple times with checking here because the case of Pysolar is
# we import multiple times with checking here because the case of Pysolar is
# different depending on the python version :(
import imp
@ -34,52 +34,62 @@ havePysolar = False
try:
import pysolar.solar as pysolar
havePysolar = True
except ImportError:
try:
import Pysolar.solar as pysolar
havePysolar = True
except ImportError:
import pysolar.solar as pysolar
havePysolar = True
finally:
havePysolar = True
finally:
if not havePysolar:
print("Unable to import pysolar")
def fresnel(phi):
return __multilayer_transmission(phi, n=[1.000277,1.6,1.38])
return __multilayer_transmission(phi, n=[1.000277, 1.6, 1.38])
# define functions to compute the DLS-Sun angle:
def __fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=[.5, .5]):
def __fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=[0.5, 0.5]):
"""compute fresnel transmission between media with refractive indices n1 and n2"""
# computes the reflection and transmittance
# for incidence angles phi for transition from medium
# with refractive index n1 to n2
# teflon e.g. n2=1.38
# polycarbonate n2=1.6
# polycarbonate n2=1.6
# polarization=[.5,.5] - unpolarized light
# polarization=[1.,0] - s-polarized light - perpendicular to plane of incidence
# polarization=[0,1.] - p-polarized light - parallel to plane of incidence
f1 = np.cos(phi)
f2 = np.sqrt(1-(n1/n2*np.sin(phi))**2)
Rs = ((n1*f1-n2*f2)/(n1*f1+n2*f2))**2
Rp = ((n1*f2-n2*f1)/(n1*f2+n2*f1))**2
T = 1.-polarization[0]*Rs-polarization[1]*Rp
if T > 1: T= 0.
if T < 0: T = 0.
if np.isnan(T): T = 0.
f2 = np.sqrt(1 - (n1 / n2 * np.sin(phi)) ** 2)
Rs = ((n1 * f1 - n2 * f2) / (n1 * f1 + n2 * f2)) ** 2
Rp = ((n1 * f2 - n2 * f1) / (n1 * f2 + n2 * f1)) ** 2
T = 1.0 - polarization[0] * Rs - polarization[1] * Rp
if T > 1:
T = 0.0
if T < 0:
T = 0.0
if np.isnan(T):
T = 0.0
return T
def __multilayer_transmission(phi, n, polarization=[.5, .5]):
def __multilayer_transmission(phi, n, polarization=[0.5, 0.5]):
T = 1.0
phi_eff = np.copy(phi)
for i in range(0,len(n)-1):
for i in range(0, len(n) - 1):
n1 = n[i]
n2 = n[i+1]
phi_eff = np.arcsin(np.sin(phi_eff)/n1)
n2 = n[i + 1]
phi_eff = np.arcsin(np.sin(phi_eff) / n1)
T *= __fresnel_transmission(phi_eff, n1, n2, polarization=polarization)
return T
# get the position of the sun in North-East-Down (NED) coordinate system
def ned_from_pysolar(sunAzimuth, sunAltitude):
"""Convert pysolar coordinates to NED coordinates."""
@ -90,6 +100,7 @@ def ned_from_pysolar(sunAzimuth, sunAltitude):
)
return np.array(elements).transpose()
# get the sensor orientation in North-East-Down coordinates
# pose is a yaw/pitch/roll tuple of angles measured for the DLS
# ori is the 3D orientation vector of the DLS in body coordinates (typically [0,0,-1])
@ -109,6 +120,7 @@ def get_orientation(pose, ori):
n = np.dot(R, ori)
return n
# from the current position (lat,lon,alt) tuple
# and time (UTC), as well as the sensor orientation (yaw,pitch,roll) tuple
# compute a sensor sun angle - this is needed as the actual sun irradiance
@ -118,27 +130,31 @@ def get_orientation(pose, ori):
# For clear sky, I_direct/I_diffuse ~ 6 and we can simplify this to
# I_measured = I_direct * (cos (sun_sensor_angle) + 1/6)
def compute_sun_angle(
position,
pose,
utc_datetime,
sensor_orientation,
):
""" compute the sun angle using pysolar functions"""
"""compute the sun angle using pysolar functions"""
altitude = 0
azimuth = 0
import warnings
with warnings.catch_warnings(): # Ignore pysolar leap seconds offset warning
with warnings.catch_warnings(): # Ignore pysolar leap seconds offset warning
warnings.simplefilter("ignore")
try:
altitude = pysolar.get_altitude(position[0], position[1], utc_datetime)
azimuth = pysolar.get_azimuth(position[0], position[1], utc_datetime)
except AttributeError: # catch 0.6 version of pysolar required for python 2.7 support
except (
AttributeError
): # catch 0.6 version of pysolar required for python 2.7 support
altitude = pysolar.GetAltitude(position[0], position[1], utc_datetime)
azimuth = 180-pysolar.GetAzimuth(position[0], position[1], utc_datetime)
azimuth = 180 - pysolar.GetAzimuth(position[0], position[1], utc_datetime)
sunAltitude = np.radians(np.array(altitude))
sunAzimuth = np.radians(np.array(azimuth))
sunAzimuth = sunAzimuth % (2 * np.pi ) #wrap range 0 to 2*pi
sunAzimuth = sunAzimuth % (2 * np.pi) # wrap range 0 to 2*pi
nSun = ned_from_pysolar(sunAzimuth, sunAltitude)
nSensor = np.array(get_orientation(pose, sensor_orientation))
angle = np.arccos(np.dot(nSun, nSensor))
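
A worked example of the Fresnel transmission model above, for unpolarised light entering a Teflon-like diffuser (n2 ~ 1.38) from air; invalid values are zeroed as in the original:

import numpy as np

def fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=(0.5, 0.5)):
    f1 = np.cos(phi)
    f2 = np.sqrt(1 - (n1 / n2 * np.sin(phi)) ** 2)
    rs = ((n1 * f1 - n2 * f2) / (n1 * f1 + n2 * f2)) ** 2
    rp = ((n1 * f2 - n2 * f1) / (n1 * f2 + n2 * f1)) ** 2
    t = 1.0 - polarization[0] * rs - polarization[1] * rp
    if t > 1 or t < 0 or np.isnan(t):
        t = 0.0
    return float(t)

print(fresnel_transmission(np.radians(0)))   # ~0.975 at normal incidence
print(fresnel_transmission(np.radians(60)))  # ~0.93, lower at oblique angles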

View file

@ -20,47 +20,73 @@ def build(input_point_cloud_files, output_path, max_concurrency=8, rerun=False):
if io.dir_exists(output_path):
log.ODM_WARNING("Removing previous EPT directory: %s" % output_path)
shutil.rmtree(output_path)
if io.dir_exists(tmpdir):
log.ODM_WARNING("Removing previous EPT temp directory: %s" % tmpdir)
shutil.rmtree(tmpdir)
if rerun:
dir_cleanup()
# Attempt with entwine (faster, more memory hungry)
try:
build_entwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=max_concurrency)
build_entwine(
input_point_cloud_files,
tmpdir,
output_path,
max_concurrency=max_concurrency,
)
except Exception as e:
log.ODM_WARNING("Cannot build EPT using entwine (%s), attempting with untwine..." % str(e))
log.ODM_WARNING(
"Cannot build EPT using entwine (%s), attempting with untwine..." % str(e)
)
dir_cleanup()
build_untwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=max_concurrency)
build_untwine(
input_point_cloud_files,
tmpdir,
output_path,
max_concurrency=max_concurrency,
)
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
def build_entwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=8, reproject=None):
def build_entwine(
input_point_cloud_files, tmpdir, output_path, max_concurrency=8, reproject=None
):
kwargs = {
'threads': max_concurrency,
'tmpdir': tmpdir,
'all_inputs': "-i " + " ".join(map(double_quote, input_point_cloud_files)),
'outputdir': output_path,
'reproject': (" -r %s " % reproject) if reproject is not None else ""
"threads": max_concurrency,
"tmpdir": tmpdir,
"all_inputs": "-i " + " ".join(map(double_quote, input_point_cloud_files)),
"outputdir": output_path,
"reproject": (" -r %s " % reproject) if reproject is not None else "",
}
system.run('entwine build --threads {threads} --tmp "{tmpdir}" {all_inputs} -o "{outputdir}" {reproject}'.format(**kwargs))
system.run(
'entwine build --threads {threads} --tmp "{tmpdir}" {all_inputs} -o "{outputdir}" {reproject}'.format(
**kwargs
)
)
def build_untwine(input_point_cloud_files, tmpdir, output_path, max_concurrency=8, rerun=False):
def build_untwine(
input_point_cloud_files, tmpdir, output_path, max_concurrency=8, rerun=False
):
kwargs = {
# 'threads': max_concurrency,
'tmpdir': tmpdir,
'files': "--files " + " ".join(map(double_quote, input_point_cloud_files)),
'outputdir': output_path
"tmpdir": tmpdir,
"files": "--files " + " ".join(map(double_quote, input_point_cloud_files)),
"outputdir": output_path,
}
# Run untwine
system.run('untwine --temp_dir "{tmpdir}" {files} --output_dir "{outputdir}"'.format(**kwargs))
system.run(
'untwine --temp_dir "{tmpdir}" {files} --output_dir "{outputdir}"'.format(
**kwargs
)
)
def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False):
if len(input_point_cloud_files) == 0:
@ -68,7 +94,7 @@ def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False):
return
base_path, ext = os.path.splitext(output_file)
tmpdir = io.related_file_path(base_path, postfix="-tmp")
if os.path.exists(tmpdir):
log.ODM_WARNING("Removing previous directory %s" % tmpdir)
@ -91,28 +117,39 @@ def build_copc(input_point_cloud_files, output_file, convert_rgb_8_to_16=False):
filename, ext = os.path.splitext(base)
out_16 = os.path.join(tmpdir16, "%s_16%s" % (filename, ext))
try:
system.run('pdal translate -i "{input}" -o "{output}" assign '
'--filters.assign.value="Red = Red / 255 * 65535" '
'--filters.assign.value="Green = Green / 255 * 65535" '
'--filters.assign.value="Blue = Blue / 255 * 65535" '.format(input=f, output=out_16))
system.run(
'pdal translate -i "{input}" -o "{output}" assign '
'--filters.assign.value="Red = Red / 255 * 65535" '
'--filters.assign.value="Green = Green / 255 * 65535" '
'--filters.assign.value="Blue = Blue / 255 * 65535" '.format(
input=f, output=out_16
)
)
converted.append(out_16)
except Exception as e:
log.ODM_WARNING("Cannot convert point cloud to 16bit RGB, COPC is not going to follow the official spec: %s" % str(e))
log.ODM_WARNING(
"Cannot convert point cloud to 16bit RGB, COPC is not going to follow the official spec: %s"
% str(e)
)
ok = False
break
if ok:
input_point_cloud_files = converted
kwargs = {
'tmpdir': tmpdir,
'files': "--files " + " ".join(map(double_quote, input_point_cloud_files)),
'output': output_file
"tmpdir": tmpdir,
"files": "--files " + " ".join(map(double_quote, input_point_cloud_files)),
"output": output_file,
}
# Run untwine
system.run('untwine --temp_dir "{tmpdir}" {files} -o "{output}" --single_file'.format(**kwargs))
system.run(
'untwine --temp_dir "{tmpdir}" {files} -o "{output}" --single_file'.format(
**kwargs
)
)
for d in cleanup:
if os.path.exists(d):
shutil.rmtree(d)
shutil.rmtree(d)

View file

@ -7,22 +7,29 @@ from opendm.system import run
from opendm import log
from opendm.utils import double_quote
def extract_raw_thermal_image_data(image_path):
try:
f, tmp_file_path = tempfile.mkstemp(suffix='.json')
f, tmp_file_path = tempfile.mkstemp(suffix=".json")
os.close(f)
try:
output = run("exiftool -b -x ThumbnailImage -x PreviewImage -j \"%s\" > \"%s\"" % (image_path, tmp_file_path), quiet=True)
output = run(
'exiftool -b -x ThumbnailImage -x PreviewImage -j "%s" > "%s"'
% (image_path, tmp_file_path),
quiet=True,
)
with open(tmp_file_path) as f:
j = json.loads(f.read())
if isinstance(j, list):
j = j[0] # single file
j = j[0] # single file
if "RawThermalImage" in j:
imageBytes = base64.b64decode(j["RawThermalImage"][len("base64:"):])
imageBytes = base64.b64decode(
j["RawThermalImage"][len("base64:") :]
)
with MemoryFile(imageBytes) as memfile:
with memfile.open() as dataset:
@ -30,13 +37,15 @@ def extract_raw_thermal_image_data(image_path):
bands, h, w = img.shape
if bands != 1:
raise Exception("Raw thermal image has more than one band? This is not supported")
raise Exception(
"Raw thermal image has more than one band? This is not supported"
)
# (1, 512, 640) --> (512, 640, 1)
img = img[0][:,:,None]
img = img[0][:, :, None]
del j["RawThermalImage"]
return extract_temperature_params_from(j), img
else:
raise Exception("Invalid JSON (not a list)")
@ -51,6 +60,7 @@ def extract_raw_thermal_image_data(image_path):
log.ODM_WARNING("Cannot create temporary file: %s" % str(e))
return {}, None
def unit(unit):
def _convert(v):
if isinstance(v, float):
@ -64,8 +74,10 @@ def unit(unit):
return float(v)
else:
return float(v)
return _convert
def extract_temperature_params_from(tags):
# Defaults
meta = {
@ -90,5 +102,5 @@ def extract_temperature_params_from(tags):
# All or nothing
raise Exception("Cannot find %s in tags" % m)
params[m] = (meta[m])(tags[m])
return params
return params

View file

@ -4,6 +4,7 @@ from opendm import log
from opendm import location
from pyproj import CRS
class GCPFile:
def __init__(self, gcp_path):
self.gcp_path = gcp_path
@ -11,18 +12,18 @@ class GCPFile:
self.raw_srs = ""
self.srs = None
self.read()
def read(self):
if self.exists():
with open(self.gcp_path, 'r') as f:
with open(self.gcp_path, "r") as f:
contents = f.read().strip()
# Strip eventual BOM characters
contents = contents.replace('\ufeff', '')
lines = list(map(str.strip, contents.split('\n')))
contents = contents.replace("\ufeff", "")
lines = list(map(str.strip, contents.split("\n")))
if lines:
self.raw_srs = lines[0] # SRS
self.raw_srs = lines[0] # SRS
self.srs = location.parse_srs_header(self.raw_srs)
for line in lines[1:]:
@ -36,7 +37,7 @@ class GCPFile:
def iter_entries(self):
for entry in self.entries:
yield self.parse_entry(entry)
def check_entries(self):
coords = {}
gcps = {}
@ -54,24 +55,36 @@ class GCPFile:
description = "insufficient" if coords[k] < 2 else "not ideal"
for entry in gcps[k]:
log.ODM_WARNING(str(entry))
log.ODM_WARNING("The number of images where the GCP %s has been tagged are %s" % (k, description))
log.ODM_WARNING("You should tag at least %s more images" % (3 - coords[k]))
log.ODM_WARNING(
"The number of images where the GCP %s has been tagged are %s"
% (k, description)
)
log.ODM_WARNING(
"You should tag at least %s more images" % (3 - coords[k])
)
log.ODM_WARNING("=====================================")
errors += 1
if len(coords) < 3:
log.ODM_WARNING("Low number of GCPs detected (%s). For best results use at least 5." % (3 - len(coords)))
log.ODM_WARNING(
"Low number of GCPs detected (%s). For best results use at least 5."
% len(coords)
)
log.ODM_WARNING("=====================================")
errors += 1
if errors > 0:
log.ODM_WARNING("Some issues detected with GCPs (but we're going to process this anyway)")
log.ODM_WARNING(
"Some issues detected with GCPs (but we're going to process this anyway)"
)
def parse_entry(self, entry):
if entry:
parts = entry.split()
x, y, z, px, py, filename = parts[:6]
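# Illustrative GCP entry (values assumed): "544256.7 5320919.9 5.0 3044 2622 IMG_0525.JPG"
# i.e. geographic x y z, then pixel px py, then the image filename (any extra columns go to extras).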
extras = " ".join(parts[6:])
return GCPEntry(float(x), float(y), float(z), float(px), float(py), filename, extras)
return GCPEntry(
float(x), float(y), float(z), float(px), float(py), filename, extras
)
def get_entry(self, n):
if n < self.entries_count():
@ -79,7 +92,7 @@ class GCPFile:
def entries_count(self):
return len(self.entries)
def exists(self):
return bool(self.gcp_path and os.path.exists(self.gcp_path))
@ -97,8 +110,8 @@ class GCPFile:
entry.py *= ratio
output.append(str(entry))
with open(gcp_file_output, 'w') as f:
f.write('\n'.join(output) + '\n')
with open(gcp_file_output, "w") as f:
f.write("\n".join(output) + "\n")
return gcp_file_output
@ -114,11 +127,17 @@ class GCPFile:
utm_zone, hemisphere = location.get_utm_zone_and_hemisphere_from(lon, lat)
return "WGS84 UTM %s%s" % (utm_zone, hemisphere)
def create_utm_copy(self, gcp_file_output, filenames=None, rejected_entries=None, include_extras=True):
def create_utm_copy(
self,
gcp_file_output,
filenames=None,
rejected_entries=None,
include_extras=True,
):
"""
Creates a new GCP file from an existing GCP file
by optionally including only filenames and reprojecting each point to
a UTM CRS. Rejected entries can be recorded by passing a list object to
by optionally including only filenames and reprojecting each point to
a UTM CRS. Rejected entries can be recorded by passing a list object to
rejected_entries.
"""
if os.path.exists(gcp_file_output):
@ -130,15 +149,17 @@ class GCPFile:
for entry in self.iter_entries():
if filenames is None or entry.filename in filenames:
entry.x, entry.y, entry.z = transformer.TransformPoint(entry.x, entry.y, entry.z)
entry.x, entry.y, entry.z = transformer.TransformPoint(
entry.x, entry.y, entry.z
)
if not include_extras:
entry.extras = ''
entry.extras = ""
output.append(str(entry))
elif isinstance(rejected_entries, list):
rejected_entries.append(entry)
with open(gcp_file_output, 'w') as f:
f.write('\n'.join(output) + '\n')
with open(gcp_file_output, "w") as f:
f.write("\n".join(output) + "\n")
return gcp_file_output
@ -151,7 +172,7 @@ class GCPFile:
"""
if not self.exists() or not os.path.exists(images_dir):
return None
if os.path.exists(gcp_file_output):
os.remove(gcp_file_output)
@ -159,23 +180,23 @@ class GCPFile:
output = [self.raw_srs]
files_found = 0
for entry in self.iter_entries():
if entry.filename in files:
output.append(str(entry))
files_found += 1
if files_found >= min_images:
with open(gcp_file_output, 'w') as f:
f.write('\n'.join(output) + '\n')
with open(gcp_file_output, "w") as f:
f.write("\n".join(output) + "\n")
return gcp_file_output
def make_micmac_copy(self, output_dir, precisionxy=1, precisionz=1, utm_zone = None):
def make_micmac_copy(self, output_dir, precisionxy=1, precisionz=1, utm_zone=None):
"""
Convert this GCP file to a format compatible with MicMac.
:param output_dir directory where to save the two MicMac GCP files. The directory must exist.
:param utm_zone UTM zone to use for output coordinates (UTM string, PROJ4 or EPSG definition).
:param utm_zone UTM zone to use for output coordinates (UTM string, PROJ4 or EPSG definition).
If one is not specified, the nearest UTM zone will be selected.
:param precisionxy horizontal precision of GCP measurements in meters.
:param precisionz vertical precision of GCP measurements in meters.
@ -187,8 +208,8 @@ class GCPFile:
if not isinstance(precisionz, float) and not isinstance(precisionz, int):
raise AssertionError("precisionz must be a number")
gcp_3d_file = os.path.join(output_dir, '3d_gcp.txt')
gcp_2d_file = os.path.join(output_dir, '2d_gcp.txt')
gcp_3d_file = os.path.join(output_dir, "3d_gcp.txt")
gcp_2d_file = os.path.join(output_dir, "2d_gcp.txt")
if os.path.exists(gcp_3d_file):
os.remove(gcp_3d_file)
@ -209,21 +230,27 @@ class GCPFile:
gcps[k] = [entry]
else:
gcps[k].append(entry)
with open(gcp_3d_file, 'w') as f3:
with open(gcp_2d_file, 'w') as f2:
with open(gcp_3d_file, "w") as f3:
with open(gcp_2d_file, "w") as f2:
gcp_n = 1
for k in gcps:
f3.write("GCP{} {} {} {}\n".format(gcp_n, k, precisionxy, precisionz))
f3.write(
"GCP{} {} {} {}\n".format(gcp_n, k, precisionxy, precisionz)
)
for entry in gcps[k]:
f2.write("GCP{} {} {} {}\n".format(gcp_n, entry.filename, entry.px, entry.py))
f2.write(
"GCP{} {} {} {}\n".format(
gcp_n, entry.filename, entry.px, entry.py
)
)
gcp_n += 1
return (gcp_3d_file, gcp_2d_file)
class GCPEntry:
def __init__(self, x, y, z, px, py, filename, extras=""):
self.x = x
@ -236,9 +263,8 @@ class GCPEntry:
def coords_key(self):
return "{} {} {}".format(self.x, self.y, self.z)
def __str__(self):
return "{} {} {} {} {} {} {}".format(self.x, self.y, self.z,
self.px, self.py,
self.filename,
self.extras).rstrip()
return "{} {} {} {} {} {} {}".format(
self.x, self.y, self.z, self.px, self.py, self.filename, self.extras
).rstrip()

View file

@ -4,19 +4,20 @@ from opendm import log
from opendm import location
from pyproj import CRS
class GeoFile:
def __init__(self, geo_path):
self.geo_path = geo_path
self.entries = {}
self.srs = None
with open(self.geo_path, 'r') as f:
with open(self.geo_path, "r") as f:
contents = f.read().strip()
# Strip eventual BOM characters
contents = contents.replace('\ufeff', '')
lines = list(map(str.strip, contents.split('\n')))
# Strip eventual BOM characters
contents = contents.replace("\ufeff", "")
lines = list(map(str.strip, contents.split("\n")))
if lines:
self.raw_srs = lines[0] # SRS
self.srs = location.parse_srs_header(self.raw_srs)
@ -47,23 +48,45 @@ class GeoFile:
horizontal_accuracy = vertical_accuracy = None
if len(parts) >= 9:
horizontal_accuracy,vertical_accuracy = [float(p) for p in parts[7:9]]
horizontal_accuracy, vertical_accuracy = [
float(p) for p in parts[7:9]
]
i = 9
extras = " ".join(parts[i:])
self.entries[filename] = GeoEntry(filename, x, y, z,
yaw, pitch, roll,
horizontal_accuracy, vertical_accuracy,
extras)
self.entries[filename] = GeoEntry(
filename,
x,
y,
z,
yaw,
pitch,
roll,
horizontal_accuracy,
vertical_accuracy,
extras,
)
else:
log.ODM_WARNING("Malformed geo line: %s" % line)
def get_entry(self, filename):
return self.entries.get(filename)
class GeoEntry:
def __init__(self, filename, x, y, z, yaw=None, pitch=None, roll=None, horizontal_accuracy=None, vertical_accuracy=None, extras=None):
def __init__(
self,
filename,
x,
y,
z,
yaw=None,
pitch=None,
roll=None,
horizontal_accuracy=None,
vertical_accuracy=None,
extras=None,
):
self.filename = filename
self.x = x
self.y = y
@ -76,11 +99,18 @@ class GeoEntry:
self.extras = extras
def __str__(self):
return "{} ({} {} {}) ({} {} {}) ({} {}) {}".format(self.filename,
self.x, self.y, self.z,
self.yaw, self.pitch, self.roll,
self.horizontal_accuracy, self.vertical_accuracy,
self.extras).rstrip()
return "{} ({} {} {}) ({} {} {}) ({} {}) {}".format(
self.filename,
self.x,
self.y,
self.z,
self.yaw,
self.pitch,
self.roll,
self.horizontal_accuracy,
self.vertical_accuracy,
self.extras,
).rstrip()
def position_string(self):
return "{} {} {}".format(self.x, self.y, self.z)

View file

@ -5,11 +5,12 @@ from opendm import log
Image.MAX_IMAGE_PIXELS = None
def get_image_size(file_path, fallback_on_error=True):
"""
Return (width, height) for a given img file
"""
try:
if file_path[-4:].lower() in [".dng", ".raw", ".nef"]:
with rawpy.imread(file_path) as img:
@ -20,11 +21,14 @@ def get_image_size(file_path, fallback_on_error=True):
width, height = img.size
except Exception as e:
if fallback_on_error:
log.ODM_WARNING("Cannot read %s with image library, fallback to cv2: %s" % (file_path, str(e)))
log.ODM_WARNING(
"Cannot read %s with image library, fallback to cv2: %s"
% (file_path, str(e))
)
img = cv2.imread(file_path)
width = img.shape[1]
height = img.shape[0]
else:
raise e
return (width, height)
return (width, height)

View file

@ -17,7 +17,7 @@ def load_obj(obj_path, _info=print):
obj_base_path = os.path.dirname(os.path.abspath(obj_path))
obj = {
'materials': {},
"materials": {},
}
vertices = []
uvs = []
@ -33,7 +33,7 @@ def load_obj(obj_path, _info=print):
if line.startswith("mtllib "):
# Materials
mtl_file = "".join(line.split()[1:]).strip()
obj['materials'].update(load_mtl(mtl_file, obj_base_path, _info=_info))
obj["materials"].update(load_mtl(mtl_file, obj_base_path, _info=_info))
elif line.startswith("v "):
# Vertices
vertices.append(list(map(float, line.split()[1:4])))
@ -44,37 +44,52 @@ def load_obj(obj_path, _info=print):
normals.append(list(map(float, line.split()[1:4])))
elif line.startswith("usemtl "):
mtl_name = "".join(line.split()[1:]).strip()
if not mtl_name in obj['materials']:
if not mtl_name in obj["materials"]:
raise Exception("%s material is missing" % mtl_name)
current_material = mtl_name
elif line.startswith("f "):
if current_material not in faces:
faces[current_material] = []
a,b,c = line.split()[1:]
a, b, c = line.split()[1:]
if a.count("/") == 2:
av, at, an = map(int, a.split("/")[0:3])
bv, bt, bn = map(int, b.split("/")[0:3])
cv, ct, cn = map(int, c.split("/")[0:3])
faces[current_material].append((av - 1, bv - 1, cv - 1, at - 1, bt - 1, ct - 1, an - 1, bn - 1, cn - 1))
faces[current_material].append(
(
av - 1,
bv - 1,
cv - 1,
at - 1,
bt - 1,
ct - 1,
an - 1,
bn - 1,
cn - 1,
)
)
else:
av, at = map(int, a.split("/")[0:2])
bv, bt = map(int, b.split("/")[0:2])
cv, ct = map(int, c.split("/")[0:2])
faces[current_material].append((av - 1, bv - 1, cv - 1, at - 1, bt - 1, ct - 1))
faces[current_material].append(
(av - 1, bv - 1, cv - 1, at - 1, bt - 1, ct - 1)
)
obj['vertices'] = np.array(vertices, dtype=np.float32)
obj['uvs'] = np.array(uvs, dtype=np.float32)
obj['normals'] = np.array(normals, dtype=np.float32)
obj['faces'] = faces
obj["vertices"] = np.array(vertices, dtype=np.float32)
obj["uvs"] = np.array(uvs, dtype=np.float32)
obj["normals"] = np.array(normals, dtype=np.float32)
obj["faces"] = faces
obj['materials'] = convert_materials_to_jpeg(obj['materials'])
obj["materials"] = convert_materials_to_jpeg(obj["materials"])
return obj
def convert_materials_to_jpeg(materials):
min_value = 0
@ -93,14 +108,16 @@ def convert_materials_to_jpeg(materials):
try:
data_range = np.iinfo(image.dtype)
min_value = min(min_value, 0)
value_range = max(value_range, float(data_range.max) - float(data_range.min))
value_range = max(
value_range, float(data_range.max) - float(data_range.min)
)
except ValueError:
# For floats use the actual range of the image values
min_value = min(min_value, float(image.min()))
value_range = max(value_range, float(image.max()) - min_value)
if value_range == 0:
value_range = 255 # Should never happen
value_range = 255 # Should never happen
for mat in materials:
image = materials[mat]
@ -117,7 +134,14 @@ def convert_materials_to_jpeg(materials):
with MemoryFile() as memfile:
bands, h, w = image.shape
bands = min(3, bands)
with memfile.open(driver='JPEG', jpeg_quality=90, count=bands, width=w, height=h, dtype=rasterio.dtypes.uint8) as dst:
with memfile.open(
driver="JPEG",
jpeg_quality=90,
count=bands,
width=w,
height=h,
dtype=rasterio.dtypes.uint8,
) as dst:
for b in range(1, min(3, bands) + 1):
dst.write(image[b - 1], b)
memfile.seek(0)
@ -125,12 +149,13 @@ def convert_materials_to_jpeg(materials):
return materials
def load_mtl(mtl_file, obj_base_path, _info=print):
mtl_file = os.path.join(obj_base_path, mtl_file)
if not os.path.isfile(mtl_file):
raise IOError("Cannot open %s" % mtl_file)
mats = {}
current_mtl = ""
@ -143,30 +168,34 @@ def load_mtl(mtl_file, obj_base_path, _info=print):
map_kd = os.path.join(obj_base_path, map_kd_filename)
if not os.path.isfile(map_kd):
raise IOError("Cannot open %s" % map_kd)
_info("Loading %s" % map_kd_filename)
with rasterio.open(map_kd, 'r') as src:
with rasterio.open(map_kd, "r") as src:
mats[current_mtl] = src.read()
return mats
def paddedBuffer(buf, boundary):
r = len(buf) % boundary
if r == 0:
return buf
if r == 0:
return buf
pad = boundary - r
return buf + b'\x00' * pad
return buf + b"\x00" * pad
def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _info=print):
def obj2glb(
input_obj, output_glb, rtc=(None, None), draco_compression=True, _info=print
):
_info("Converting %s --> %s" % (input_obj, output_glb))
obj = load_obj(input_obj, _info=_info)
vertices = obj['vertices']
uvs = obj['uvs']
vertices = obj["vertices"]
uvs = obj["uvs"]
# Flip Y
uvs = (([0, 1] - (uvs * [0, 1])) + uvs * [1, 0]).astype(np.float32)
normals = obj['normals']
normals = obj["normals"]
binary = b''
binary = b""
accessors = []
bufferViews = []
primitives = []
@ -175,26 +204,29 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
images = []
bufOffset = 0
def addBufferView(buf, target=None):
nonlocal bufferViews, bufOffset
bufferViews += [pygltflib.BufferView(
buffer=0,
byteOffset=bufOffset,
byteLength=len(buf),
target=target,
)]
bufferViews += [
pygltflib.BufferView(
buffer=0,
byteOffset=bufOffset,
byteLength=len(buf),
target=target,
)
]
bufOffset += len(buf)
return len(bufferViews) - 1
for material in obj['faces'].keys():
faces = obj['faces'][material]
for material in obj["faces"].keys():
faces = obj["faces"][material]
faces = np.array(faces, dtype=np.uint32)
prim_vertices = vertices[faces[:,0:3].flatten()]
prim_uvs = uvs[faces[:,3:6].flatten()]
prim_vertices = vertices[faces[:, 0:3].flatten()]
prim_uvs = uvs[faces[:, 3:6].flatten()]
if faces.shape[1] == 9:
prim_normals = normals[faces[:,6:9].flatten()]
prim_normals = normals[faces[:, 6:9].flatten()]
normals_blob = prim_normals.tobytes()
else:
prim_normals = None
@ -206,13 +238,13 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
binary += vertices_blob + uvs_blob
if normals_blob is not None:
binary += normals_blob
verticesBufferView = addBufferView(vertices_blob, pygltflib.ARRAY_BUFFER)
uvsBufferView = addBufferView(uvs_blob, pygltflib.ARRAY_BUFFER)
normalsBufferView = None
if normals_blob is not None:
normalsBufferView = addBufferView(normals_blob, pygltflib.ARRAY_BUFFER)
accessors += [
pygltflib.Accessor(
bufferView=verticesBufferView,
@ -244,50 +276,59 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
)
]
primitives += [pygltflib.Primitive(
attributes=pygltflib.Attributes(POSITION=verticesBufferView, TEXCOORD_0=uvsBufferView, NORMAL=normalsBufferView), material=len(primitives)
)]
primitives += [
pygltflib.Primitive(
attributes=pygltflib.Attributes(
POSITION=verticesBufferView,
TEXCOORD_0=uvsBufferView,
NORMAL=normalsBufferView,
),
material=len(primitives),
)
]
for material in obj['faces'].keys():
texture_blob = paddedBuffer(obj['materials'][material], 4)
for material in obj["faces"].keys():
texture_blob = paddedBuffer(obj["materials"][material], 4)
binary += texture_blob
textureBufferView = addBufferView(texture_blob)
images += [pygltflib.Image(bufferView=textureBufferView, mimeType="image/jpeg")]
textures += [pygltflib.Texture(source=len(images) - 1, sampler=0)]
mat = pygltflib.Material(pbrMetallicRoughness=pygltflib.PbrMetallicRoughness(baseColorTexture=pygltflib.TextureInfo(index=len(textures) - 1), metallicFactor=0, roughnessFactor=1),
alphaMode=pygltflib.OPAQUE)
mat.extensions = {
'KHR_materials_unlit': {}
}
mat = pygltflib.Material(
pbrMetallicRoughness=pygltflib.PbrMetallicRoughness(
baseColorTexture=pygltflib.TextureInfo(index=len(textures) - 1),
metallicFactor=0,
roughnessFactor=1,
),
alphaMode=pygltflib.OPAQUE,
)
mat.extensions = {"KHR_materials_unlit": {}}
materials += [mat]
gltf = pygltflib.GLTF2(
scene=0,
scenes=[pygltflib.Scene(nodes=[0])],
nodes=[pygltflib.Node(mesh=0)],
meshes=[pygltflib.Mesh(
primitives=primitives
)],
meshes=[pygltflib.Mesh(primitives=primitives)],
materials=materials,
textures=textures,
samplers=[pygltflib.Sampler(magFilter=pygltflib.LINEAR, minFilter=pygltflib.LINEAR)],
samplers=[
pygltflib.Sampler(magFilter=pygltflib.LINEAR, minFilter=pygltflib.LINEAR)
],
images=images,
accessors=accessors,
bufferViews=bufferViews,
buffers=[pygltflib.Buffer(byteLength=len(binary))],
)
gltf.extensionsRequired = ['KHR_materials_unlit']
gltf.extensionsUsed = ['KHR_materials_unlit']
gltf.extensionsRequired = ["KHR_materials_unlit"]
gltf.extensionsUsed = ["KHR_materials_unlit"]
if rtc != (None, None) and len(rtc) >= 2:
gltf.extensionsUsed.append('CESIUM_RTC')
gltf.extensionsUsed.append("CESIUM_RTC")
gltf.extensions = {
'CESIUM_RTC': {
'center': [float(rtc[0]), float(rtc[1]), 0.0]
}
"CESIUM_RTC": {"center": [float(rtc[0]), float(rtc[1]), 0.0]}
}
gltf.set_binary_blob(binary)
@ -300,11 +341,13 @@ def obj2glb(input_obj, output_glb, rtc=(None, None), draco_compression=True, _in
_info("Compressing with draco")
try:
compressed_glb = io.related_file_path(output_glb, postfix="_compressed")
system.run('draco_transcoder -i "{}" -o "{}" -qt 16 -qp 16'.format(output_glb, compressed_glb))
system.run(
'draco_transcoder -i "{}" -o "{}" -qt 16 -qp 16'.format(
output_glb, compressed_glb
)
)
if os.path.isfile(compressed_glb) and os.path.isfile(output_glb):
os.remove(output_glb)
os.rename(compressed_glb, output_glb)
except Exception as e:
log.ODM_WARNING("Cannot compress GLB with draco: %s" % str(e))

View file

@ -5,8 +5,10 @@ import ctypes
from opendm import log
from repoze.lru import lru_cache
def gpu_disabled_by_user_env():
return bool(os.environ.get('ODM_NO_GPU'))
return bool(os.environ.get("ODM_NO_GPU"))
@lru_cache(maxsize=None)
def has_popsift_and_can_handle_texsize(width, height):
@ -16,7 +18,10 @@ def has_popsift_and_can_handle_texsize(width, height):
compute_major, compute_minor = get_cuda_compute_version(0)
if compute_major < 3 or (compute_major == 3 and compute_minor < 5):
# Not supported
log.ODM_INFO("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor))
log.ODM_INFO(
"CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)"
% (compute_major, compute_minor)
)
return False
except Exception as e:
log.ODM_INFO("Using CPU for feature extraction: %s" % str(e))
@ -24,6 +29,7 @@ def has_popsift_and_can_handle_texsize(width, height):
try:
from opensfm import pypopsift
return pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
except (ModuleNotFoundError, ImportError):
return False
@ -31,25 +37,26 @@ def has_popsift_and_can_handle_texsize(width, height):
log.ODM_WARNING(str(e))
return False
@lru_cache(maxsize=None)
def get_cuda_compute_version(device_id = 0):
def get_cuda_compute_version(device_id=0):
cuda_lib = "libcuda.so"
if sys.platform == 'win32':
cuda_lib = os.path.join(os.environ.get('SYSTEMROOT'), 'system32', 'nvcuda.dll')
if sys.platform == "win32":
cuda_lib = os.path.join(os.environ.get("SYSTEMROOT"), "system32", "nvcuda.dll")
if not os.path.isfile(cuda_lib):
cuda_lib = "nvcuda.dll"
nvcuda = ctypes.cdll.LoadLibrary(cuda_lib)
nvcuda.cuInit.argtypes = (ctypes.c_uint32, )
nvcuda.cuInit.restypes = (ctypes.c_int32)
nvcuda.cuInit.argtypes = (ctypes.c_uint32,)
nvcuda.cuInit.restype = ctypes.c_int32
if nvcuda.cuInit(0) != 0:
raise Exception("Cannot initialize CUDA")
nvcuda.cuDeviceGetCount.argtypes = (ctypes.POINTER(ctypes.c_int32), )
nvcuda.cuDeviceGetCount.restypes = (ctypes.c_int32)
nvcuda.cuDeviceGetCount.argtypes = (ctypes.POINTER(ctypes.c_int32),)
nvcuda.cuDeviceGetCount.restype = ctypes.c_int32
device_count = ctypes.c_int32()
if nvcuda.cuDeviceGetCount(ctypes.byref(device_count)) != 0:
raise Exception("Cannot get device count")
@ -57,16 +64,26 @@ def get_cuda_compute_version(device_id = 0):
if device_count.value == 0:
raise Exception("No devices")
nvcuda.cuDeviceComputeCapability.argtypes = (ctypes.POINTER(ctypes.c_int32), ctypes.POINTER(ctypes.c_int32), ctypes.c_int32)
nvcuda.cuDeviceComputeCapability.restypes = (ctypes.c_int32)
nvcuda.cuDeviceComputeCapability.argtypes = (
ctypes.POINTER(ctypes.c_int32),
ctypes.POINTER(ctypes.c_int32),
ctypes.c_int32,
)
nvcuda.cuDeviceComputeCapability.restype = ctypes.c_int32
compute_major = ctypes.c_int32()
compute_minor = ctypes.c_int32()
if nvcuda.cuDeviceComputeCapability(ctypes.byref(compute_major), ctypes.byref(compute_minor), device_id) != 0:
if (
nvcuda.cuDeviceComputeCapability(
ctypes.byref(compute_major), ctypes.byref(compute_minor), device_id
)
!= 0
):
raise Exception("Cannot get CUDA compute version")
return (compute_major.value, compute_minor.value)
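# Illustrative usage (device index assumed): get_cuda_compute_version(0) returns a tuple
# such as (7, 5) for a compute-capability 7.5 GPU, and raises if CUDA cannot be initialized.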
def has_gpu(args):
if gpu_disabled_by_user_env():
log.ODM_INFO("Disabling GPU features (ODM_NO_GPU is set)")
@ -75,8 +92,10 @@ def has_gpu(args):
log.ODM_INFO("Disabling GPU features (--no-gpu is set)")
return False
if sys.platform == 'win32':
nvcuda_path = os.path.join(os.environ.get('SYSTEMROOT'), 'system32', 'nvcuda.dll')
if sys.platform == "win32":
nvcuda_path = os.path.join(
os.environ.get("SYSTEMROOT"), "system32", "nvcuda.dll"
)
if os.path.isfile(nvcuda_path):
log.ODM_INFO("CUDA drivers detected")
return True
@ -84,7 +103,7 @@ def has_gpu(args):
log.ODM_INFO("No CUDA drivers detected, using CPU")
return False
else:
if shutil.which('nvidia-smi') is not None:
if shutil.which("nvidia-smi") is not None:
log.ODM_INFO("nvidia-smi detected")
return True
else:

View file

@ -6,6 +6,7 @@ from repoze.lru import lru_cache
from opendm import log
from opendm.shots import get_origin
def rounded_gsd(reconstruction_json, default_value=None, ndigits=0, ignore_gsd=False):
"""
:param reconstruction_json path to OpenSfM's reconstruction.json
@ -22,7 +23,14 @@ def rounded_gsd(reconstruction_json, default_value=None, ndigits=0, ignore_gsd=F
return default_value
def image_max_size(photos, target_resolution, reconstruction_json, gsd_error_estimate = 0.5, ignore_gsd=False, has_gcp=False):
def image_max_size(
photos,
target_resolution,
reconstruction_json,
gsd_error_estimate=0.5,
ignore_gsd=False,
has_gcp=False,
):
"""
:param photos images database
:param target_resolution resolution the user wants have in cm / pixel
@ -37,7 +45,9 @@ def image_max_size(photos, target_resolution, reconstruction_json, gsd_error_est
if ignore_gsd:
isf = 1.0
else:
isf = image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimate, has_gcp=has_gcp)
isf = image_scale_factor(
target_resolution, reconstruction_json, gsd_error_estimate, has_gcp=has_gcp
)
for p in photos:
max_width = max(p.width, max_width)
@ -45,7 +55,10 @@ def image_max_size(photos, target_resolution, reconstruction_json, gsd_error_est
return int(math.ceil(max(max_width, max_height) * isf))
def image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimate = 0.5, has_gcp=False):
def image_scale_factor(
target_resolution, reconstruction_json, gsd_error_estimate=0.5, has_gcp=False
):
"""
:param target_resolution resolution the user wants have in cm / pixel
:param reconstruction_json path to OpenSfM's reconstruction.json
@ -66,8 +79,15 @@ def image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimat
return 1.0
def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, gsd_scaling = 1.0, ignore_gsd=False,
ignore_resolution=False, has_gcp=False):
def cap_resolution(
resolution,
reconstruction_json,
gsd_error_estimate=0.1,
gsd_scaling=1.0,
ignore_gsd=False,
ignore_resolution=False,
has_gcp=False,
):
"""
:param resolution resolution in cm / pixel
:param reconstruction_json path to OpenSfM's reconstruction.json
@ -81,19 +101,28 @@ def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, gs
if ignore_gsd:
return resolution
gsd = opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=has_gcp or ignore_resolution)
gsd = opensfm_reconstruction_average_gsd(
reconstruction_json, use_all_shots=has_gcp or ignore_resolution
)
if gsd is not None:
gsd = gsd * (1 - gsd_error_estimate) * gsd_scaling
if gsd > resolution or ignore_resolution:
log.ODM_WARNING('Maximum resolution set to {} * (GSD - {}%) '
'({:.2f} cm / pixel, requested resolution was {:.2f} cm / pixel)'
.format(gsd_scaling, gsd_error_estimate * 100, gsd, resolution))
log.ODM_WARNING(
"Maximum resolution set to {} * (GSD - {}%) "
"({:.2f} cm / pixel, requested resolution was {:.2f} cm / pixel)".format(
gsd_scaling, gsd_error_estimate * 100, gsd, resolution
)
)
return gsd
else:
return resolution
else:
log.ODM_WARNING('Cannot calculate GSD, using requested resolution of {:.2f}'.format(resolution))
log.ODM_WARNING(
"Cannot calculate GSD, using requested resolution of {:.2f}".format(
resolution
)
)
return resolution
@ -102,7 +131,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
"""
Computes the average Ground Sampling Distance of an OpenSfM reconstruction.
:param reconstruction_json path to OpenSfM's reconstruction.json
:return Ground Sampling Distance value (cm / pixel) or None if
:return Ground Sampling Distance value (cm / pixel) or None if
a GSD estimate cannot be computed
"""
if not os.path.isfile(reconstruction_json):
@ -115,34 +144,41 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
reconstruction = data[0]
point_heights = []
for pointId in reconstruction['points']:
point = reconstruction['points'][pointId]
point_heights.append(point['coordinates'][2])
for pointId in reconstruction["points"]:
point = reconstruction["points"][pointId]
point_heights.append(point["coordinates"][2])
ground_height = np.median(point_heights)
gsds = []
for shotImage in reconstruction['shots']:
shot = reconstruction['shots'][shotImage]
if use_all_shots or shot.get('gps_dop', 999999) < 999999:
camera = reconstruction['cameras'][shot['camera']]
for shotImage in reconstruction["shots"]:
shot = reconstruction["shots"][shotImage]
if use_all_shots or shot.get("gps_dop", 999999) < 999999:
camera = reconstruction["cameras"][shot["camera"]]
shot_origin = get_origin(shot)
shot_height = shot_origin[2]
focal_ratio = camera.get('focal', camera.get('focal_x'))
focal_ratio = camera.get("focal", camera.get("focal_x"))
if not focal_ratio:
log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json)
log.ODM_WARNING(
"Cannot parse focal values from %s. This is likely an unsupported camera model."
% reconstruction_json
)
return None
gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
shot_height - ground_height,
camera['width']))
gsds.append(
calculate_gsd_from_focal_ratio(
focal_ratio, shot_height - ground_height, camera["width"]
)
)
if len(gsds) > 0:
mean = np.mean(gsds)
if mean < 0:
log.ODM_WARNING("Negative GSD estimated, this might indicate a flipped Z-axis.")
log.ODM_WARNING(
"Negative GSD estimated, this might indicate a flipped Z-axis."
)
return abs(mean)
return None
@ -160,9 +196,9 @@ def calculate_gsd(sensor_width, flight_height, focal_length, image_width):
>>> calculate_gsd(13.2, 100, 8.8, 0)
"""
if sensor_width != 0:
return calculate_gsd_from_focal_ratio(focal_length / sensor_width,
flight_height,
image_width)
return calculate_gsd_from_focal_ratio(
focal_length / sensor_width, flight_height, image_width
)
else:
return None
@ -176,5 +212,5 @@ def calculate_gsd_from_focal_ratio(focal_ratio, flight_height, image_width):
"""
if focal_ratio == 0 or image_width == 0:
return None
return ((flight_height * 100) / image_width) / focal_ratio
return ((flight_height * 100) / image_width) / focal_ratio
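# Worked example (values assumed): with sensor_width=13.2 mm, focal_length=8.8 mm,
# flight_height=100 m and image_width=4000 px, focal_ratio = 8.8 / 13.2 ≈ 0.667 and
# GSD = ((100 * 100) / 4000) / 0.667 ≈ 3.75 cm / pixel.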

View file

@ -2,6 +2,7 @@ import os
import shutil, errno
import json
def absolute_path_file(path_file):
return os.path.abspath(path_file)
@ -30,7 +31,9 @@ def copy(src, dst):
except OSError as e:
if e.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
else:
raise
def rename_file(src, dst):
try:
@ -46,7 +49,7 @@ def rename_file(src, dst):
# find a file in the root directory
def find(filename, folder):
for root, dirs, files in os.walk(folder):
return '/'.join((root, filename)) if filename in files else None
return "/".join((root, filename)) if filename in files else None
def related_file_path(input_file_path, prefix="", postfix="", replace_base=None):
@ -68,6 +71,7 @@ def related_file_path(input_file_path, prefix="", postfix="", replace_base=None)
return os.path.join(path, "{}{}{}{}".format(prefix, basename, postfix, ext))
def path_or_json_string_to_dict(string):
if string == "":
return {}
@ -79,13 +83,14 @@ def path_or_json_string_to_dict(string):
raise ValueError("{0} is not a valid JSON string.".format(string))
elif file_exists(string):
try:
with open(string, 'r') as f:
with open(string, "r") as f:
return json.loads(f.read())
except:
raise ValueError("{0} is not a valid JSON file.".format(string))
else:
raise ValueError("{0} is not a valid JSON file or string.".format(string))
def touch(file):
with open(file, 'w') as fout:
fout.write("Done!\n")
with open(file, "w") as fout:
fout.write("Done!\n")

View file

@ -3,9 +3,10 @@ from opendm import log
from pyproj import Proj, Transformer, CRS
from osgeo import osr
def extract_utm_coords(photos, images_path, output_coords_file):
"""
Create a coordinate file containing the GPS positions of all cameras
Create a coordinate file containing the GPS positions of all cameras
to be used later in the ODM toolchain for automatic georeferencing
:param photos ([ODM_Photo]) list of photos
:param images_path (str) path to dataset images
@ -13,8 +14,10 @@ def extract_utm_coords(photos, images_path, output_coords_file):
:return None
"""
if len(photos) == 0:
raise Exception("No input images, cannot create coordinates file of GPS positions")
raise Exception(
"No input images, cannot create coordinates file of GPS positions"
)
utm_zone = None
hemisphere = None
coords = []
@ -23,21 +26,27 @@ def extract_utm_coords(photos, images_path, output_coords_file):
if photo.latitude is None or photo.longitude is None:
log.ODM_WARNING("GPS position not available for %s" % photo.filename)
continue
if utm_zone is None:
utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(photo.longitude, photo.latitude)
utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(
photo.longitude, photo.latitude
)
try:
alt = photo.altitude if photo.altitude is not None else 0
coord = convert_to_utm(photo.longitude, photo.latitude, alt, utm_zone, hemisphere)
coord = convert_to_utm(
photo.longitude, photo.latitude, alt, utm_zone, hemisphere
)
except:
raise Exception("Failed to convert GPS position to UTM for %s" % photo.filename)
raise Exception(
"Failed to convert GPS position to UTM for %s" % photo.filename
)
coords.append(coord)
if utm_zone is None:
raise Exception("No images seem to have GPS information")
# Calculate average
dx = 0.0
dy = 0.0
@ -55,13 +64,16 @@ def extract_utm_coords(photos, images_path, output_coords_file):
f.write("%s %s\n" % (dx, dy))
for coord in coords:
f.write("%s %s %s\n" % (coord[0] - dx, coord[1] - dy, coord[2]))
def transform2(from_srs, to_srs, x, y):
return transformer(from_srs, to_srs).TransformPoint(x, y, 0)[:2]
def transform3(from_srs, to_srs, x, y, z):
return transformer(from_srs, to_srs).TransformPoint(x, y, z)
def proj_srs_convert(srs):
"""
Convert a Proj SRS object to osr SRS object
@ -74,16 +86,18 @@ def proj_srs_convert(srs):
else:
proj4 = srs.to_proj4()
res.ImportFromProj4(proj4)
res.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return res
def transformer(from_srs, to_srs):
src = proj_srs_convert(from_srs)
tgt = proj_srs_convert(to_srs)
return osr.CoordinateTransformation(src, tgt)
def get_utm_zone_and_hemisphere_from(lon, lat):
"""
Calculate the UTM zone and hemisphere that a longitude/latitude pair falls on
@ -91,10 +105,11 @@ def get_utm_zone_and_hemisphere_from(lon, lat):
:param lat latitude
:return [utm_zone, hemisphere]
"""
utm_zone = (int(math.floor((lon + 180.0)/6.0)) % 60) + 1
hemisphere = 'S' if lat < 0 else 'N'
utm_zone = (int(math.floor((lon + 180.0) / 6.0)) % 60) + 1
hemisphere = "S" if lat < 0 else "N"
return [utm_zone, hemisphere]
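# Illustrative check (coordinates assumed): lon=12.49, lat=41.89 (Rome) gives
# floor((12.49 + 180) / 6) % 60 + 1 = 33 and hemisphere "N", i.e. UTM zone 33N.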
def convert_to_utm(lon, lat, alt, utm_zone, hemisphere):
"""
Convert longitude, latitude and elevation values to UTM
@ -105,14 +120,17 @@ def convert_to_utm(lon, lat, alt, utm_zone, hemisphere):
:param hemisphere one of 'N' or 'S'
:return [x,y,z] UTM coordinates
"""
if hemisphere == 'N':
p = Proj(proj='utm',zone=utm_zone,ellps='WGS84', preserve_units=True)
if hemisphere == "N":
p = Proj(proj="utm", zone=utm_zone, ellps="WGS84", preserve_units=True)
else:
p = Proj(proj='utm',zone=utm_zone,ellps='WGS84', preserve_units=True, south=True)
x,y = p(lon, lat)
p = Proj(
proj="utm", zone=utm_zone, ellps="WGS84", preserve_units=True, south=True
)
x, y = p(lon, lat)
return [x, y, alt]
def parse_srs_header(header):
"""
Parse a header coming from GCP or coordinate file
@ -120,48 +138,51 @@ def parse_srs_header(header):
:return Proj object
"""
header = header.strip()
ref = header.split(' ')
ref = header.split(" ")
try:
if ref[0] == 'WGS84' and ref[1] == 'UTM':
if ref[0] == "WGS84" and ref[1] == "UTM":
datum = ref[0]
utm_pole = (ref[2][len(ref[2]) - 1]).upper()
utm_zone = int(ref[2][:len(ref[2]) - 1])
proj_args = {
'zone': utm_zone,
'datum': datum
}
utm_zone = int(ref[2][: len(ref[2]) - 1])
proj4 = '+proj=utm +zone={zone} +datum={datum} +units=m +no_defs=True'
if utm_pole == 'S':
proj4 += ' +south=True'
proj_args = {"zone": utm_zone, "datum": datum}
proj4 = "+proj=utm +zone={zone} +datum={datum} +units=m +no_defs=True"
if utm_pole == "S":
proj4 += " +south=True"
srs = CRS.from_proj4(proj4.format(**proj_args))
elif '+proj' in header:
srs = CRS.from_proj4(header.strip('\''))
elif "+proj" in header:
srs = CRS.from_proj4(header.strip("'"))
elif header.lower().startswith("epsg:"):
srs = CRS.from_epsg(header.lower()[5:])
else:
raise RuntimeError('Could not parse coordinates. Bad SRS supplied: %s' % header)
raise RuntimeError(
"Could not parse coordinates. Bad SRS supplied: %s" % header
)
except RuntimeError as e:
log.ODM_ERROR('Uh oh! There seems to be a problem with your coordinates/GCP file.\n\n'
'The line: %s\n\n'
'Is not valid. Projections that are valid include:\n'
' - EPSG:*****\n'
' - WGS84 UTM **(N|S)\n'
' - Any valid proj4 string (for example, +proj=utm +zone=32 +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs)\n\n'
' Some valid EPSG codes are not yet available in OpenDroneMap and need substituted with valid proj4 strings\n'
' Try searching for equivalent proj4 strings at spatialreference.org or epsg.io.\n'
'Modify your input and try again.' % header)
log.ODM_ERROR(
"Uh oh! There seems to be a problem with your coordinates/GCP file.\n\n"
"The line: %s\n\n"
"Is not valid. Projections that are valid include:\n"
" - EPSG:*****\n"
" - WGS84 UTM **(N|S)\n"
" - Any valid proj4 string (for example, +proj=utm +zone=32 +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs)\n\n"
" Some valid EPSG codes are not yet available in OpenDroneMap and need substituted with valid proj4 strings\n"
" Try searching for equivalent proj4 strings at spatialreference.org or epsg.io.\n"
"Modify your input and try again." % header
)
raise RuntimeError(e)
return srs
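# Illustrative headers this parser accepts (example values assumed): "WGS84 UTM 16N",
# "EPSG:32616", or a proj4 string such as "+proj=utm +zone=16 +datum=WGS84 +units=m +no_defs".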
def utm_srs_from_ll(lon, lat):
utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(lon, lat)
return parse_srs_header("WGS84 UTM %s%s" % (utm_zone, hemisphere))
def utm_transformers_from_ll(lon, lat):
source_srs = CRS.from_epsg(4326)
target_srs = utm_srs_from_ll(lon, lat)

View file

@ -11,38 +11,41 @@ from repoze.lru import lru_cache
from opendm.arghelpers import double_quote, args_to_dict
from vmem import virtual_memory
if sys.platform == 'win32' or os.getenv('no_ansiesc'):
# No colors on Windows (sorry !) or existing no_ansiesc env variable
HEADER = ''
OKBLUE = ''
OKGREEN = ''
DEFAULT = ''
WARNING = ''
FAIL = ''
ENDC = ''
if sys.platform == "win32" or os.getenv("no_ansiesc"):
# No colors on Windows (sorry !) or existing no_ansiesc env variable
HEADER = ""
OKBLUE = ""
OKGREEN = ""
DEFAULT = ""
WARNING = ""
FAIL = ""
ENDC = ""
else:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
DEFAULT = '\033[39m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
DEFAULT = "\033[39m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
lock = threading.Lock()
@lru_cache(maxsize=None)
def odm_version():
with open(os.path.join(os.path.dirname(__file__), "..", "VERSION")) as f:
return f.read().split("\n")[0].strip()
def memory():
mem = virtual_memory()
return {
'total': round(mem.total / 1024 / 1024),
'available': round(mem.available / 1024 / 1024)
"total": round(mem.total / 1024 / 1024),
"available": round(mem.available / 1024 / 1024),
}
class ODMLogger:
def __init__(self):
self.json = None
@ -55,74 +58,76 @@ class ODMLogger:
print("%s%s %s%s" % (startc, level, msg, ENDC))
sys.stdout.flush()
if self.json is not None:
self.json['stages'][-1]['messages'].append({
'message': msg,
'type': level_name.lower()
})
self.json["stages"][-1]["messages"].append(
{"message": msg, "type": level_name.lower()}
)
def init_json_output(self, output_files, args):
self.json_output_files = output_files
self.json_output_file = output_files[0]
self.json = {}
self.json['odmVersion'] = odm_version()
self.json['memory'] = memory()
self.json['cpus'] = multiprocessing.cpu_count()
self.json['images'] = -1
self.json['options'] = args_to_dict(args)
self.json['startTime'] = self.start_time.isoformat()
self.json['stages'] = []
self.json['processes'] = []
self.json['success'] = False
self.json["odmVersion"] = odm_version()
self.json["memory"] = memory()
self.json["cpus"] = multiprocessing.cpu_count()
self.json["images"] = -1
self.json["options"] = args_to_dict(args)
self.json["startTime"] = self.start_time.isoformat()
self.json["stages"] = []
self.json["processes"] = []
self.json["success"] = False
def log_json_stage_run(self, name, start_time):
if self.json is not None:
self.json['stages'].append({
'name': name,
'startTime': start_time.isoformat(),
'messages': [],
})
self.json["stages"].append(
{
"name": name,
"startTime": start_time.isoformat(),
"messages": [],
}
)
def log_json_images(self, count):
if self.json is not None:
self.json['images'] = count
def log_json_stage_error(self, error, exit_code, stack_trace = ""):
self.json["images"] = count
def log_json_stage_error(self, error, exit_code, stack_trace=""):
if self.json is not None:
self.json['error'] = {
'code': exit_code,
'message': error
}
self.json['stackTrace'] = list(map(str.strip, stack_trace.split("\n")))
self.json["error"] = {"code": exit_code, "message": error}
self.json["stackTrace"] = list(map(str.strip, stack_trace.split("\n")))
self._log_json_end_time()
def log_json_success(self):
if self.json is not None:
self.json['success'] = True
self.json["success"] = True
self._log_json_end_time()
def log_json_process(self, cmd, exit_code, output = []):
def log_json_process(self, cmd, exit_code, output=[]):
if self.json is not None:
d = {
'command': cmd,
'exitCode': exit_code,
"command": cmd,
"exitCode": exit_code,
}
if output:
d['output'] = output
d["output"] = output
self.json['processes'].append(d)
self.json["processes"].append(d)
def _log_json_end_time(self):
if self.json is not None:
end_time = datetime.datetime.now()
self.json['endTime'] = end_time.isoformat()
self.json['totalTime'] = round((end_time - self.start_time).total_seconds(), 2)
self.json["endTime"] = end_time.isoformat()
self.json["totalTime"] = round(
(end_time - self.start_time).total_seconds(), 2
)
if self.json["stages"]:
last_stage = self.json["stages"][-1]
last_stage["endTime"] = end_time.isoformat()
start_time = dateutil.parser.isoparse(last_stage["startTime"])
last_stage["totalTime"] = round(
(end_time - start_time).total_seconds(), 2
)
if self.json['stages']:
last_stage = self.json['stages'][-1]
last_stage['endTime'] = end_time.isoformat()
start_time = dateutil.parser.isoparse(last_stage['startTime'])
last_stage['totalTime'] = round((end_time - start_time).total_seconds(), 2)
def info(self, msg):
self.log(DEFAULT, msg, "INFO")
@ -138,13 +143,14 @@ class ODMLogger:
def close(self):
if self.json is not None and self.json_output_file is not None:
try:
with open(self.json_output_file, 'w') as f:
with open(self.json_output_file, "w") as f:
f.write(json.dumps(self.json, indent=4))
for f in self.json_output_files[1:]:
shutil.copy(self.json_output_file, f)
except Exception as e:
print("Cannot write log.json: %s" % str(e))
logger = ODMLogger()
ODM_INFO = logger.info

View file

@ -9,42 +9,69 @@ from opendm import point_cloud
from scipy import signal
import numpy as np
def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, available_cores=None, method='gridded', smooth_dsm=True, max_tiles=None):
def create_25dmesh(
inPointCloud,
outMesh,
radius_steps=["0.05"],
dsm_resolution=0.05,
depth=8,
samples=1,
maxVertexCount=100000,
available_cores=None,
method="gridded",
smooth_dsm=True,
max_tiles=None,
):
# Create DSM from point cloud
# Create temporary directory
mesh_directory = os.path.dirname(outMesh)
tmp_directory = os.path.join(mesh_directory, 'tmp')
tmp_directory = os.path.join(mesh_directory, "tmp")
if os.path.exists(tmp_directory):
shutil.rmtree(tmp_directory)
os.mkdir(tmp_directory)
log.ODM_INFO('Created temporary directory: %s' % tmp_directory)
log.ODM_INFO("Created temporary directory: %s" % tmp_directory)
log.ODM_INFO('Creating DSM for 2.5D mesh')
log.ODM_INFO("Creating DSM for 2.5D mesh")
commands.create_dem(
inPointCloud,
'mesh_dsm',
output_type='max',
radiuses=radius_steps,
gapfill=True,
outdir=tmp_directory,
resolution=dsm_resolution,
max_workers=available_cores,
apply_smoothing=smooth_dsm,
max_tiles=max_tiles
)
inPointCloud,
"mesh_dsm",
output_type="max",
radiuses=radius_steps,
gapfill=True,
outdir=tmp_directory,
resolution=dsm_resolution,
max_workers=available_cores,
apply_smoothing=smooth_dsm,
max_tiles=max_tiles,
)
if method == 'gridded':
mesh = dem_to_mesh_gridded(os.path.join(tmp_directory, 'mesh_dsm.tif'), outMesh, maxVertexCount, maxConcurrency=max(1, available_cores))
elif method == 'poisson':
dsm_points = dem_to_points(os.path.join(tmp_directory, 'mesh_dsm.tif'), os.path.join(tmp_directory, 'dsm_points.ply'))
mesh = screened_poisson_reconstruction(dsm_points, outMesh, depth=depth,
samples=samples,
maxVertexCount=maxVertexCount,
threads=max(1, available_cores - 1)), # poissonrecon can get stuck on some machines if --threads == all cores
if method == "gridded":
mesh = dem_to_mesh_gridded(
os.path.join(tmp_directory, "mesh_dsm.tif"),
outMesh,
maxVertexCount,
maxConcurrency=max(1, available_cores),
)
elif method == "poisson":
dsm_points = dem_to_points(
os.path.join(tmp_directory, "mesh_dsm.tif"),
os.path.join(tmp_directory, "dsm_points.ply"),
)
mesh = screened_poisson_reconstruction(
dsm_points,
outMesh,
depth=depth,
samples=samples,
maxVertexCount=maxVertexCount,
threads=max(1, available_cores - 1),
)  # poissonrecon can get stuck on some machines if --threads == all cores
else:
raise 'Not a valid method: ' + method
raise "Not a valid method: " + method
# Cleanup tmp
if os.path.exists(tmp_directory):
@ -54,26 +81,28 @@ def create_25dmesh(inPointCloud, outMesh, radius_steps=["0.05"], dsm_resolution=
def dem_to_points(inGeotiff, outPointCloud):
log.ODM_INFO('Sampling points from DSM: %s' % inGeotiff)
log.ODM_INFO("Sampling points from DSM: %s" % inGeotiff)
kwargs = {
'bin': context.dem2points_path,
'outfile': outPointCloud,
'infile': inGeotiff
"bin": context.dem2points_path,
"outfile": outPointCloud,
"infile": inGeotiff,
}
system.run('"{bin}" -inputFile "{infile}" '
'-outputFile "{outfile}" '
'-skirtHeightThreshold 1.5 '
'-skirtIncrements 0.2 '
'-skirtHeightCap 100 '
'-verbose '.format(**kwargs))
system.run(
'"{bin}" -inputFile "{infile}" '
'-outputFile "{outfile}" '
"-skirtHeightThreshold 1.5 "
"-skirtIncrements 0.2 "
"-skirtHeightCap 100 "
"-verbose ".format(**kwargs)
)
return outPointCloud
def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
log.ODM_INFO('Creating mesh from DSM: %s' % inGeotiff)
log.ODM_INFO("Creating mesh from DSM: %s" % inGeotiff)
mesh_path, mesh_filename = os.path.split(outMesh)
# mesh_path = path/to
@ -85,47 +114,53 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
outMeshDirty = os.path.join(mesh_path, "{}.dirty{}".format(basename, ext))
# This should work without issues most of the time,
# This should work without issues most of the time,
# but just in case we lower maxConcurrency if it fails.
while True:
try:
kwargs = {
'bin': context.dem2mesh_path,
'outfile': outMeshDirty,
'infile': inGeotiff,
'maxVertexCount': maxVertexCount,
'maxConcurrency': maxConcurrency
"bin": context.dem2mesh_path,
"outfile": outMeshDirty,
"infile": inGeotiff,
"maxVertexCount": maxVertexCount,
"maxConcurrency": maxConcurrency,
}
system.run('"{bin}" -inputFile "{infile}" '
system.run(
'"{bin}" -inputFile "{infile}" '
'-outputFile "{outfile}" '
'-maxTileLength 2000 '
'-maxVertexCount {maxVertexCount} '
'-maxConcurrency {maxConcurrency} '
'-edgeSwapThreshold 0.15 '
'-verbose '.format(**kwargs))
"-maxTileLength 2000 "
"-maxVertexCount {maxVertexCount} "
"-maxConcurrency {maxConcurrency} "
"-edgeSwapThreshold 0.15 "
"-verbose ".format(**kwargs)
)
break
except Exception as e:
maxConcurrency = math.floor(maxConcurrency / 2)
if maxConcurrency >= 1:
log.ODM_WARNING("dem2mesh failed, retrying with lower concurrency (%s) in case this is a memory issue" % maxConcurrency)
log.ODM_WARNING(
"dem2mesh failed, retrying with lower concurrency (%s) in case this is a memory issue"
% maxConcurrency
)
else:
raise e
# Cleanup and reduce vertex count if necessary
# Cleanup and reduce vertex count if necessary
# (as dem2mesh cannot guarantee that we'll have the target vertex count)
cleanupArgs = {
'reconstructmesh': context.omvs_reconstructmesh_path,
'outfile': outMesh,
'infile': outMeshDirty,
'max_faces': maxVertexCount * 2
"reconstructmesh": context.omvs_reconstructmesh_path,
"outfile": outMesh,
"infile": outMeshDirty,
"max_faces": maxVertexCount * 2,
}
system.run('"{reconstructmesh}" -i "{infile}" '
'-o "{outfile}" '
'--archive-type 3 '
'--remove-spikes 0 --remove-spurious 0 --smooth 0 '
'--target-face-num {max_faces} -v 0'.format(**cleanupArgs))
system.run(
'"{reconstructmesh}" -i "{infile}" '
'-o "{outfile}" '
"--archive-type 3 "
"--remove-spikes 0 --remove-spurious 0 --smooth 0 "
"--target-face-num {max_faces} -v 0".format(**cleanupArgs)
)
# Delete intermediate results
os.remove(outMeshDirty)
@ -133,7 +168,15 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
return outMesh
def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = 1, maxVertexCount=100000, pointWeight=4, threads=context.num_cores):
def screened_poisson_reconstruction(
inPointCloud,
outMesh,
depth=8,
samples=1,
maxVertexCount=100000,
pointWeight=4,
threads=context.num_cores,
):
mesh_path, mesh_filename = os.path.split(outMesh)
# mesh_path = path/to
@ -146,38 +189,42 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
outMeshDirty = os.path.join(mesh_path, "{}.dirty{}".format(basename, ext))
if os.path.isfile(outMeshDirty):
os.remove(outMeshDirty)
# Since PoissonRecon has some kind of a race condition on ppc64el, and this helps...
if platform.machine() == 'ppc64le':
log.ODM_WARNING("ppc64le platform detected, forcing single-threaded operation for PoissonRecon")
if platform.machine() == "ppc64le":
log.ODM_WARNING(
"ppc64le platform detected, forcing single-threaded operation for PoissonRecon"
)
threads = 1
while True:
poissonReconArgs = {
'bin': context.poisson_recon_path,
'outfile': outMeshDirty,
'infile': inPointCloud,
'depth': depth,
'samples': samples,
'pointWeight': pointWeight,
'threads': int(threads)
"bin": context.poisson_recon_path,
"outfile": outMeshDirty,
"infile": inPointCloud,
"depth": depth,
"samples": samples,
"pointWeight": pointWeight,
"threads": int(threads),
}
# Run PoissonRecon
try:
system.run('"{bin}" --in "{infile}" '
'--out "{outfile}" '
'--depth {depth} '
'--pointWeight {pointWeight} '
'--samplesPerNode {samples} '
'--threads {threads} '
'--bType 2 '
'--linearFit '.format(**poissonReconArgs))
system.run(
'"{bin}" --in "{infile}" '
'--out "{outfile}" '
"--depth {depth} "
"--pointWeight {pointWeight} "
"--samplesPerNode {samples} "
"--threads {threads} "
"--bType 2 "
"--linearFit ".format(**poissonReconArgs)
)
except Exception as e:
log.ODM_WARNING(str(e))
if os.path.isfile(outMeshDirty):
break # Done!
break # Done!
else:
# PoissonRecon will sometimes fail due to race conditions
@ -187,22 +234,26 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
if threads < 1:
break
else:
log.ODM_WARNING("PoissonRecon failed with %s threads, let's retry with %s..." % (threads * 2, threads))
log.ODM_WARNING(
"PoissonRecon failed with %s threads, let's retry with %s..."
% (threads * 2, threads)
)
# Cleanup and reduce vertex count if necessary
cleanupArgs = {
'reconstructmesh': context.omvs_reconstructmesh_path,
'outfile': outMesh,
'infile':outMeshDirty,
'max_faces': maxVertexCount * 2
"reconstructmesh": context.omvs_reconstructmesh_path,
"outfile": outMesh,
"infile": outMeshDirty,
"max_faces": maxVertexCount * 2,
}
system.run('"{reconstructmesh}" -i "{infile}" '
'-o "{outfile}" '
'--archive-type 3 '
'--remove-spikes 0 --remove-spurious 20 --smooth 0 '
'--target-face-num {max_faces} -v 0'.format(**cleanupArgs))
system.run(
'"{reconstructmesh}" -i "{infile}" '
'-o "{outfile}" '
"--archive-type 3 "
"--remove-spikes 0 --remove-spurious 20 --smooth 0 "
"--target-face-num {max_faces} -v 0".format(**cleanupArgs)
)
# Delete intermediate results
os.remove(outMeshDirty)

View file

@ -14,6 +14,7 @@ from skimage.filters import rank, gaussian
# Loosely based on https://github.com/micasense/imageprocessing/blob/master/micasense/utils.py
def dn_to_radiance(photo, image):
"""
Convert Digital Number values to Radiance values
@ -24,8 +25,10 @@ def dn_to_radiance(photo, image):
image = image.astype("float32")
if len(image.shape) != 3:
raise ValueError("Image should have shape length of 3 (got: %s)" % len(image.shape))
raise ValueError(
"Image should have shape length of 3 (got: %s)" % len(image.shape)
)
# Thermal (this should never happen, but just in case..)
if photo.is_thermal():
return image
@ -50,8 +53,10 @@ def dn_to_radiance(photo, image):
if bit_depth_max:
image /= bit_depth_max
else:
log.ODM_WARNING("Cannot normalize DN for %s, bit depth is missing" % photo.filename)
log.ODM_WARNING(
"Cannot normalize DN for %s, bit depth is missing" % photo.filename
)
if V is not None:
# vignette correction
V = np.repeat(V[:, :, np.newaxis], image.shape[2], axis=2)
@ -62,17 +67,17 @@ def dn_to_radiance(photo, image):
R = 1.0 / (1.0 + a2 * y / exposure_time - a3 * y)
R = np.repeat(R[:, :, np.newaxis], image.shape[2], axis=2)
image *= R
# Floor any negative radiances to zero (can happen due to noise around blackLevel)
if dark_level is not None:
image[image < 0] = 0
# apply the radiometric calibration - i.e. scale by the gain-exposure product and
# multiply with the radiometric calibration coefficient
if gain is not None and exposure_time is not None:
image /= (gain * exposure_time)
image /= gain * exposure_time
if a1 is not None:
# multiply with the radiometric calibration coefficient
image *= a1
@ -82,6 +87,7 @@ def dn_to_radiance(photo, image):
return image
def vignette_map(photo):
x_vc, y_vc = photo.get_vignetting_center()
polynomial = photo.get_vignetting_polynomial()
@ -111,9 +117,10 @@ def vignette_map(photo):
vignette = 1.0 / vignette
return vignette, x, y
return None, None, None
def dn_to_reflectance(photo, image, use_sun_sensor=True):
radiance = dn_to_radiance(photo, image)
irradiance = compute_irradiance(photo, use_sun_sensor=use_sun_sensor)
@ -122,6 +129,7 @@ def dn_to_reflectance(photo, image, use_sun_sensor=True):
reflectance[reflectance > 1.0] = 1.0
return reflectance
def compute_irradiance(photo, use_sun_sensor=True):
# Thermal (this should never happen, but just in case..)
if photo.is_thermal():
@ -136,70 +144,85 @@ def compute_irradiance(photo, use_sun_sensor=True):
if use_sun_sensor and photo.get_sun_sensor():
# Estimate it
dls_orientation_vector = np.array([0,0,-1])
sun_vector_ned, sensor_vector_ned, sun_sensor_angle, \
solar_elevation, solar_azimuth = dls.compute_sun_angle([photo.latitude, photo.longitude],
photo.get_dls_pose(),
photo.get_utc_time(),
dls_orientation_vector)
dls_orientation_vector = np.array([0, 0, -1])
(
sun_vector_ned,
sensor_vector_ned,
sun_sensor_angle,
solar_elevation,
solar_azimuth,
) = dls.compute_sun_angle(
[photo.latitude, photo.longitude],
photo.get_dls_pose(),
photo.get_utc_time(),
dls_orientation_vector,
)
angular_correction = dls.fresnel(sun_sensor_angle)
# TODO: support for direct and scattered irradiance
direct_to_diffuse_ratio = 6.0 # Assumption, clear skies
direct_to_diffuse_ratio = 6.0 # Assumption, clear skies
spectral_irradiance = photo.get_sun_sensor()
percent_diffuse = 1.0 / direct_to_diffuse_ratio
sensor_irradiance = spectral_irradiance / angular_correction
# Find direct irradiance in the plane normal to the sun
untilted_direct_irr = sensor_irradiance / (percent_diffuse + np.cos(sun_sensor_angle))
untilted_direct_irr = sensor_irradiance / (
percent_diffuse + np.cos(sun_sensor_angle)
)
direct_irradiance = untilted_direct_irr
scattered_irradiance = untilted_direct_irr * percent_diffuse
# compute irradiance on the ground using the solar altitude angle
horizontal_irradiance = direct_irradiance * np.sin(solar_elevation) + scattered_irradiance
horizontal_irradiance = (
direct_irradiance * np.sin(solar_elevation) + scattered_irradiance
)
return horizontal_irradiance
elif use_sun_sensor:
log.ODM_WARNING("No sun sensor values found for %s" % photo.filename)
return 1.0
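The sun-sensor branch above reduces to a short closed-form computation: divide the raw spectral irradiance by the Fresnel angular correction, split it into direct and diffuse parts under the clear-sky 6:1 assumption, and project the direct part onto the horizontal plane using the solar elevation. A minimal standalone sketch of that arithmetic (the angular_correction argument stands in for dls.fresnel(sun_sensor_angle), which is not reproduced here):

import numpy as np

def horizontal_irradiance(spectral_irradiance, sun_sensor_angle, solar_elevation,
                          angular_correction=1.0, direct_to_diffuse_ratio=6.0):
    # Same arithmetic as the sun-sensor branch of compute_irradiance above;
    # angular_correction is assumed precomputed (e.g. from a Fresnel model).
    percent_diffuse = 1.0 / direct_to_diffuse_ratio
    sensor_irradiance = spectral_irradiance / angular_correction
    # Direct irradiance in the plane normal to the sun
    untilted_direct_irr = sensor_irradiance / (percent_diffuse + np.cos(sun_sensor_angle))
    scattered_irradiance = untilted_direct_irr * percent_diffuse
    # Irradiance on the ground, using the solar elevation angle
    return untilted_direct_irr * np.sin(solar_elevation) + scattered_irradiance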
def get_photos_by_band(multi_camera, user_band_name):
band_name = get_primary_band_name(multi_camera, user_band_name)
for band in multi_camera:
if band['name'] == band_name:
return band['photos']
if band["name"] == band_name:
return band["photos"]
def get_primary_band_name(multi_camera, user_band_name):
if len(multi_camera) < 1:
raise Exception("Invalid multi_camera list")
# Pick RGB, or Green, or Blue, in this order, if available, otherwise first band
if user_band_name == "auto":
for aliases in [['rgb', 'redgreenblue'], ['green', 'g'], ['blue', 'b']]:
for aliases in [["rgb", "redgreenblue"], ["green", "g"], ["blue", "b"]]:
for band in multi_camera:
if band['name'].lower() in aliases:
return band['name']
return multi_camera[0]['name']
if band["name"].lower() in aliases:
return band["name"]
return multi_camera[0]["name"]
for band in multi_camera:
if band['name'].lower() == user_band_name.lower():
return band['name']
band_name_fallback = multi_camera[0]['name']
if band["name"].lower() == user_band_name.lower():
return band["name"]
log.ODM_WARNING("Cannot find band name \"%s\", will use \"%s\" instead" % (user_band_name, band_name_fallback))
band_name_fallback = multi_camera[0]["name"]
log.ODM_WARNING(
'Cannot find band name "%s", will use "%s" instead'
% (user_band_name, band_name_fallback)
)
return band_name_fallback
def compute_band_maps(multi_camera, primary_band):
"""
Computes maps of:
Computes maps of:
- { photo filename --> associated primary band photo } (s2p)
- { primary band filename --> list of associated secondary band photos } (p2s)
by looking at capture UUID, capture time or filenames as a fallback
@ -207,10 +230,10 @@ def compute_band_maps(multi_camera, primary_band):
band_name = get_primary_band_name(multi_camera, primary_band)
primary_band_photos = None
for band in multi_camera:
if band['name'] == band_name:
primary_band_photos = band['photos']
if band["name"] == band_name:
primary_band_photos = band["photos"]
break
# Try using capture time as the grouping factor
try:
unique_id_map = {}
@ -220,29 +243,36 @@ def compute_band_maps(multi_camera, primary_band):
for p in primary_band_photos:
uuid = p.get_capture_id()
if uuid is None:
raise Exception("Cannot use capture time (no information in %s)" % p.filename)
raise Exception(
"Cannot use capture time (no information in %s)" % p.filename
)
# Should be unique across primary band
if unique_id_map.get(uuid) is not None:
raise Exception("Unreliable UUID/capture time detected (duplicate)")
unique_id_map[uuid] = p
for band in multi_camera:
photos = band['photos']
photos = band["photos"]
for p in photos:
uuid = p.get_capture_id()
if uuid is None:
raise Exception("Cannot use UUID/capture time (no information in %s)" % p.filename)
raise Exception(
"Cannot use UUID/capture time (no information in %s)"
% p.filename
)
# Should match the primary band
if unique_id_map.get(uuid) is None:
raise Exception("Unreliable UUID/capture time detected (no primary band match)")
raise Exception(
"Unreliable UUID/capture time detected (no primary band match)"
)
s2p[p.filename] = unique_id_map[uuid]
if band['name'] != band_name:
if band["name"] != band_name:
p2s.setdefault(unique_id_map[uuid].filename, []).append(p)
return s2p, p2s
@ -260,38 +290,58 @@ def compute_band_maps(multi_camera, primary_band):
# Quick check
if filename_without_band == p.filename:
raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly." % p.filename)
raise Exception(
"Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly."
% p.filename
)
filename_map[filename_without_band] = p
for band in multi_camera:
photos = band['photos']
photos = band["photos"]
for p in photos:
filename_without_band = re.sub(file_regex, "\\1\\2", p.filename)
# Quick check
if filename_without_band == p.filename:
raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly." % p.filename)
raise Exception(
"Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly."
% p.filename
)
if not filename_without_band in filename_map:
raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly, check that your images have the appropriate CaptureUUID XMP tag and that no images are missing." % p.filename)
raise Exception(
"Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly, check that your images have the appropriate CaptureUUID XMP tag and that no images are missing."
% p.filename
)
s2p[p.filename] = filename_map[filename_without_band]
if band['name'] != band_name:
p2s.setdefault(filename_map[filename_without_band].filename, []).append(p)
if band["name"] != band_name:
p2s.setdefault(
filename_map[filename_without_band].filename, []
).append(p)
return s2p, p2s
def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p, p2s, max_concurrency=1, max_samples=30):
def compute_alignment_matrices(
multi_camera,
primary_band_name,
images_path,
s2p,
p2s,
max_concurrency=1,
max_samples=30,
):
log.ODM_INFO("Computing band alignment")
alignment_info = {}
# For each secondary band
for band in multi_camera:
if band['name'] != primary_band_name:
if band["name"] != primary_band_name:
matrices = []
def parallel_compute_homography(p):
@ -301,53 +351,80 @@ def compute_alignment_matrices(multi_camera, primary_band_name, images_path, s2p
return
# Find good matrix candidates for alignment
primary_band_photo = s2p.get(p['filename'])
primary_band_photo = s2p.get(p["filename"])
if primary_band_photo is None:
log.ODM_WARNING("Cannot find primary band photo for %s" % p['filename'])
log.ODM_WARNING(
"Cannot find primary band photo for %s" % p["filename"]
)
return
warp_matrix, dimension, algo = compute_homography(os.path.join(images_path, p['filename']),
os.path.join(images_path, primary_band_photo.filename))
warp_matrix, dimension, algo = compute_homography(
os.path.join(images_path, p["filename"]),
os.path.join(images_path, primary_band_photo.filename),
)
if warp_matrix is not None:
log.ODM_INFO("%s --> %s good match" % (p['filename'], primary_band_photo.filename))
log.ODM_INFO(
"%s --> %s good match"
% (p["filename"], primary_band_photo.filename)
)
matrices.append({
'warp_matrix': warp_matrix,
'eigvals': np.linalg.eigvals(warp_matrix),
'dimension': dimension,
'algo': algo
})
matrices.append(
{
"warp_matrix": warp_matrix,
"eigvals": np.linalg.eigvals(warp_matrix),
"dimension": dimension,
"algo": algo,
}
)
else:
log.ODM_INFO("%s --> %s cannot be matched" % (p['filename'], primary_band_photo.filename))
log.ODM_INFO(
"%s --> %s cannot be matched"
% (p["filename"], primary_band_photo.filename)
)
except Exception as e:
log.ODM_WARNING("Failed to compute homography for %s: %s" % (p['filename'], str(e)))
log.ODM_WARNING(
"Failed to compute homography for %s: %s"
% (p["filename"], str(e))
)
parallel_map(parallel_compute_homography, [{'filename': p.filename} for p in band['photos']], max_concurrency, single_thread_fallback=False)
parallel_map(
parallel_compute_homography,
[{"filename": p.filename} for p in band["photos"]],
max_concurrency,
single_thread_fallback=False,
)
# Find the matrix that has the most common eigvals
# among all matrices. That should be the "best" alignment.
for m1 in matrices:
acc = np.array([0.0,0.0,0.0])
e = m1['eigvals']
acc = np.array([0.0, 0.0, 0.0])
e = m1["eigvals"]
for m2 in matrices:
acc += abs(e - m2['eigvals'])
acc += abs(e - m2["eigvals"])
m1["score"] = acc.sum()
m1['score'] = acc.sum()
# Sort
matrices.sort(key=lambda x: x['score'], reverse=False)
matrices.sort(key=lambda x: x["score"], reverse=False)
if len(matrices) > 0:
alignment_info[band['name']] = matrices[0]
log.ODM_INFO("%s band will be aligned using warp matrix %s (score: %s)" % (band['name'], matrices[0]['warp_matrix'], matrices[0]['score']))
alignment_info[band["name"]] = matrices[0]
log.ODM_INFO(
"%s band will be aligned using warp matrix %s (score: %s)"
% (band["name"], matrices[0]["warp_matrix"], matrices[0]["score"])
)
else:
log.ODM_WARNING("Cannot find alignment matrix for band %s, The band might end up misaligned!" % band['name'])
log.ODM_WARNING(
"Cannot find alignment matrix for band %s, The band might end up misaligned!"
% band["name"]
)
return alignment_info
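The scoring loop above ranks candidate warp matrices by how closely their eigenvalues agree with every other candidate's; the matrix with the smallest aggregate distance is taken as the consensus alignment. The same selection on a bare list of 3x3 matrices, as a minimal sketch:

import numpy as np

def most_consistent_matrix(warp_matrices):
    candidates = [{"warp_matrix": m, "eigvals": np.linalg.eigvals(m)} for m in warp_matrices]
    for m1 in candidates:
        acc = np.array([0.0, 0.0, 0.0])
        for m2 in candidates:
            acc += abs(m1["eigvals"] - m2["eigvals"])
        m1["score"] = acc.sum()
    # Lowest total eigenvalue distance wins
    return min(candidates, key=lambda c: c["score"])["warp_matrix"]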
def compute_homography(image_filename, align_image_filename):
try:
# Convert images to grayscale if needed
@ -355,17 +432,20 @@ def compute_homography(image_filename, align_image_filename):
if image.shape[2] == 3:
image_gray = to_8bit(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
else:
image_gray = to_8bit(image[:,:,0])
image_gray = to_8bit(image[:, :, 0])
max_dim = max(image_gray.shape)
if max_dim <= 320:
log.ODM_WARNING("Small image for band alignment (%sx%s), this might be tough to compute." % (image_gray.shape[1], image_gray.shape[0]))
log.ODM_WARNING(
"Small image for band alignment (%sx%s), this might be tough to compute."
% (image_gray.shape[1], image_gray.shape[0])
)
align_image = imread(align_image_filename, unchanged=True, anydepth=True)
if align_image.shape[2] == 3:
align_image_gray = to_8bit(cv2.cvtColor(align_image, cv2.COLOR_BGR2GRAY))
else:
align_image_gray = to_8bit(align_image[:,:,0])
align_image_gray = to_8bit(align_image[:, :, 0])
def compute_using(algorithm):
try:
@ -378,7 +458,7 @@ def compute_homography(image_filename, align_image_filename):
return None, (None, None)
det = np.linalg.det(h)
# Check #1 homography's determinant will not be close to zero
if abs(det) < 0.25:
return None, (None, None)
@ -387,35 +467,37 @@ def compute_homography(image_filename, align_image_filename):
svd = np.linalg.svd(h, compute_uv=False)
if svd[-1] == 0:
return None, (None, None)
ratio = svd[0] / svd[-1]
if ratio > 100000:
return None, (None, None)
return h, (align_image_gray.shape[1], align_image_gray.shape[0])
warp_matrix = None
dimension = None
algo = None
if max_dim > 320:
algo = 'feat'
algo = "feat"
result = compute_using(find_features_homography)
if result[0] is None:
algo = 'ecc'
log.ODM_INFO("Can't use features matching, will use ECC (this might take a bit)")
algo = "ecc"
log.ODM_INFO(
"Can't use features matching, will use ECC (this might take a bit)"
)
result = compute_using(find_ecc_homography)
if result[0] is None:
algo = None
else: # ECC only for low resolution images
algo = 'ecc'
else: # ECC only for low resolution images
algo = "ecc"
log.ODM_INFO("Using ECC (this might take a bit)")
result = compute_using(find_ecc_homography)
if result[0] is None:
algo = None
warp_matrix, dimension = result
return warp_matrix, dimension, algo
@ -423,9 +505,16 @@ def compute_homography(image_filename, align_image_filename):
log.ODM_WARNING("Compute homography: %s" % str(e))
return None, (None, None), None
def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, termination_eps=1e-8, start_eps=1e-4):
def find_ecc_homography(
image_gray,
align_image_gray,
number_of_iterations=1000,
termination_eps=1e-8,
start_eps=1e-4,
):
pyramid_levels = 0
h,w = image_gray.shape
h, w = image_gray.shape
max_dim = max(h, w)
downscale = 0
@ -435,29 +524,36 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
if downscale > 0:
f = 1 / (2**downscale)
image_gray = cv2.resize(image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA)
h,w = image_gray.shape
image_gray = cv2.resize(
image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA
)
h, w = image_gray.shape
min_dim = min(h, w)
while min_dim > 300:
min_dim /= 2.0
pyramid_levels += 1
log.ODM_INFO("Pyramid levels: %s" % pyramid_levels)
# Quick check on size
if align_image_gray.shape[0] != image_gray.shape[0]:
align_image_gray = to_8bit(align_image_gray)
image_gray = to_8bit(image_gray)
fx = image_gray.shape[1]/align_image_gray.shape[1]
fy = image_gray.shape[0]/align_image_gray.shape[0]
fx = image_gray.shape[1] / align_image_gray.shape[1]
fy = image_gray.shape[0] / align_image_gray.shape[0]
align_image_gray = cv2.resize(align_image_gray, None,
fx=fx,
fy=fy,
interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4))
align_image_gray = cv2.resize(
align_image_gray,
None,
fx=fx,
fy=fy,
interpolation=(
cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4
),
)
# Build pyramids
image_gray_pyr = [image_gray]
@ -465,16 +561,32 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
for level in range(pyramid_levels):
image_gray_pyr[0] = to_8bit(image_gray_pyr[0], force_normalize=True)
image_gray_pyr.insert(0, cv2.resize(image_gray_pyr[0], None, fx=1/2, fy=1/2,
interpolation=cv2.INTER_AREA))
image_gray_pyr.insert(
0,
cv2.resize(
image_gray_pyr[0],
None,
fx=1 / 2,
fy=1 / 2,
interpolation=cv2.INTER_AREA,
),
)
align_image_pyr[0] = to_8bit(align_image_pyr[0], force_normalize=True)
align_image_pyr.insert(0, cv2.resize(align_image_pyr[0], None, fx=1/2, fy=1/2,
interpolation=cv2.INTER_AREA))
align_image_pyr.insert(
0,
cv2.resize(
align_image_pyr[0],
None,
fx=1 / 2,
fy=1 / 2,
interpolation=cv2.INTER_AREA,
),
)
# Define the motion model, scale the initial warp matrix to smallest level
warp_matrix = np.eye(3, 3, dtype=np.float32)
for level in range(pyramid_levels+1):
for level in range(pyramid_levels + 1):
ig = gradient(gaussian(image_gray_pyr[level]))
aig = gradient(gaussian(align_image_pyr[level]))
@ -482,56 +594,84 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
eps = termination_eps
else:
eps = start_eps - ((start_eps - termination_eps) / (pyramid_levels)) * level
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
number_of_iterations, eps)
criteria = (
cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
number_of_iterations,
eps,
)
try:
log.ODM_INFO("Computing ECC pyramid level %s" % level)
_, warp_matrix = cv2.findTransformECC(ig, aig, warp_matrix, cv2.MOTION_HOMOGRAPHY, criteria, inputMask=None, gaussFiltSize=9)
_, warp_matrix = cv2.findTransformECC(
ig,
aig,
warp_matrix,
cv2.MOTION_HOMOGRAPHY,
criteria,
inputMask=None,
gaussFiltSize=9,
)
except Exception as e:
if level != pyramid_levels:
log.ODM_INFO("Could not compute ECC warp_matrix at pyramid level %s, resetting matrix" % level)
log.ODM_INFO(
"Could not compute ECC warp_matrix at pyramid level %s, resetting matrix"
% level
)
warp_matrix = np.eye(3, 3, dtype=np.float32)
else:
raise e
if level != pyramid_levels:
warp_matrix = warp_matrix * np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32)
if level != pyramid_levels:
warp_matrix = warp_matrix * np.array(
[[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32
)
if downscale > 0:
return warp_matrix * (np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32) ** downscale)
return warp_matrix * (
np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32)
** downscale
)
else:
return warp_matrix
def find_features_homography(image_gray, align_image_gray, feature_retention=0.7, min_match_count=10):
def find_features_homography(
image_gray, align_image_gray, feature_retention=0.7, min_match_count=10
):
# Detect SIFT features and compute descriptors.
detector = cv2.SIFT_create(edgeThreshold=10, contrastThreshold=0.1)
h,w = image_gray.shape
h, w = image_gray.shape
max_dim = max(h, w)
downscale = 0
max_size = 4096
while max_dim / (2**downscale) > max_size:
downscale += 1
if downscale > 0:
f = 1 / (2**downscale)
image_gray = cv2.resize(image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA)
h,w = image_gray.shape
image_gray = cv2.resize(
image_gray, None, fx=f, fy=f, interpolation=cv2.INTER_AREA
)
h, w = image_gray.shape
if align_image_gray.shape[0] != image_gray.shape[0]:
fx = image_gray.shape[1]/align_image_gray.shape[1]
fy = image_gray.shape[0]/align_image_gray.shape[0]
fx = image_gray.shape[1] / align_image_gray.shape[1]
fy = image_gray.shape[0] / align_image_gray.shape[0]
align_image_gray = cv2.resize(align_image_gray, None,
fx=fx,
fy=fy,
interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4))
align_image_gray = cv2.resize(
align_image_gray,
None,
fx=fx,
fy=fy,
interpolation=(
cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4
),
)
kp_image, desc_image = detector.detectAndCompute(image_gray, None)
kp_align_image, desc_align_image = detector.detectAndCompute(align_image_gray, None)
@ -574,22 +714,27 @@ def find_features_homography(image_gray, align_image_gray, feature_retention=0.7
h, _ = cv2.findHomography(points_image, points_align_image, cv2.RANSAC)
if h is None:
return None
if downscale > 0:
return h * (np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32) ** downscale)
return h * (
np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32)
** downscale
)
else:
return h
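Both functions estimate the homography on images downscaled by powers of two and then rescale it back with the element-wise factor [[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], applied once per halving. That factor is the conjugation H_full = S @ H_small @ inv(S) with S = diag(2, 2, 1) written element-wise: the translation column doubles and the perspective row halves. A quick check of the identity (H_small is a hypothetical half-resolution homography):

import numpy as np

S = np.diag([2.0, 2.0, 1.0])  # maps half-resolution pixel coordinates to full resolution
H_small = np.array([[1.10, 0.02, 5.0],
                    [0.01, 0.98, -3.0],
                    [1e-4, 2e-4, 1.0]])

H_full = S @ H_small @ np.linalg.inv(S)
scale = np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32)
assert np.allclose(H_full, H_small * scale)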
def gradient(im, ksize=5):
im = local_normalize(im)
grad_x = cv2.Sobel(im,cv2.CV_32F,1,0,ksize=ksize)
grad_y = cv2.Sobel(im,cv2.CV_32F,0,1,ksize=ksize)
grad_x = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=ksize)
grad_y = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=ksize)
grad = cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)
return grad
def local_normalize(im):
width, _ = im.shape
disksize = int(width/5)
disksize = int(width / 5)
if disksize % 2 == 0:
disksize = disksize + 1
selem = disk(disksize)
@ -636,11 +781,16 @@ def resize_match(image, dimension):
mw, mh = dimension
if w != mw or h != mh:
fx = mw/w
fy = mh/h
image = cv2.resize(image, None,
fx=fx,
fy=fx,
interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4))
fx = mw / w
fy = mh / h
image = cv2.resize(
image,
None,
fx=fx,
fy=fx,
interpolation=(
cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4
),
)
return image

View file

@ -2,6 +2,7 @@ import requests
import math
import os
import time
try:
import queue
except ImportError:
@ -11,7 +12,15 @@ from pyodm.utils import AtomicCounter
from pyodm.exceptions import RangeNotAvailableError, OdmError
from urllib3.exceptions import ReadTimeoutError
def download(url, destination, progress_callback=None, parallel_downloads=16, parallel_chunks_size=10, timeout=30):
def download(
url,
destination,
progress_callback=None,
parallel_downloads=16,
parallel_chunks_size=10,
timeout=30,
):
"""Download files in parallel (download accelerator)
Args:
@ -31,19 +40,25 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
download_stream = requests.get(url, timeout=timeout, stream=True)
headers = download_stream.headers
output_path = os.path.join(destination, os.path.basename(url))
# Keep track of download progress (if possible)
content_length = download_stream.headers.get('content-length')
content_length = download_stream.headers.get("content-length")
total_length = int(content_length) if content_length is not None else None
downloaded = 0
chunk_size = int(parallel_chunks_size * 1024 * 1024)
use_fallback = False
accept_ranges = headers.get('accept-ranges')
accept_ranges = headers.get("accept-ranges")
# Can we do parallel downloads?
if accept_ranges is not None and accept_ranges.lower() == 'bytes' and total_length is not None and total_length > chunk_size and parallel_downloads > 1:
if (
accept_ranges is not None
and accept_ranges.lower() == "bytes"
and total_length is not None
and total_length > chunk_size
and parallel_downloads > 1
):
num_chunks = int(math.ceil(total_length / float(chunk_size)))
num_workers = parallel_downloads
@ -63,7 +78,7 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
out_file.write(fd.read())
os.unlink(chunk_file)
current_chunk += 1
else:
time.sleep(0.1)
@ -78,17 +93,29 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
try:
# Download chunk
res = requests.get(url, stream=True, timeout=timeout, headers={'Range': 'bytes=%s-%s' % bytes_range})
res = requests.get(
url,
stream=True,
timeout=timeout,
headers={"Range": "bytes=%s-%s" % bytes_range},
)
if res.status_code == 206:
with open("%s.part%s" % (output_path, part_num), 'wb') as fd:
with open(
"%s.part%s" % (output_path, part_num), "wb"
) as fd:
bytes_written = 0
try:
for chunk in res.iter_content(4096):
bytes_written += fd.write(chunk)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
except (
requests.exceptions.Timeout,
requests.exceptions.ConnectionError,
) as e:
raise OdmError(str(e))
if bytes_written != (bytes_range[1] - bytes_range[0] + 1):
if bytes_written != (
bytes_range[1] - bytes_range[0] + 1
):
# Process again
q.put((part_num, bytes_range))
return
@ -97,8 +124,12 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
nonloc.completed_chunks.value += 1
if progress_callback is not None:
progress_callback(100.0 * nonloc.completed_chunks.value / num_chunks)
progress_callback(
100.0
* nonloc.completed_chunks.value
/ num_chunks
)
nonloc.merge_chunks[part_num] = True
else:
nonloc.error = RangeNotAvailableError()
@ -136,7 +167,7 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
q.put((-1, None))
for t in threads:
t.join()
merge_thread.join()
if nonloc.error is not None:
@ -149,7 +180,7 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
if use_fallback:
# Single connection, boring download
with open(output_path, 'wb') as fd:
with open(output_path, "wb") as fd:
for chunk in download_stream.iter_content(4096):
downloaded += len(chunk)
@ -157,8 +188,12 @@ def download(url, destination, progress_callback=None, parallel_downloads=16, pa
progress_callback((100.0 * float(downloaded) / total_length))
fd.write(chunk)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, ReadTimeoutError) as e:
except (
requests.exceptions.Timeout,
requests.exceptions.ConnectionError,
ReadTimeoutError,
) as e:
raise OdmError(e)
return output_path
return output_path
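A minimal usage sketch of the accelerator above, assuming a server that reports Content-Length and Accept-Ranges: bytes (the URL and callback are illustrative only); when range requests are not supported it falls back to a single-connection download:

def print_progress(pct):
    print("Downloaded %.1f%%" % pct)

output_path = download(
    "https://example.com/results/all.zip",  # hypothetical URL
    "/tmp",
    progress_callback=print_progress,
    parallel_downloads=8,
    parallel_chunks_size=10,
)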

View file

@ -1,6 +1,7 @@
import os
from opendm import log
def replace_nvm_images(src_nvm_file, img_map, dst_nvm_file):
"""
Create a new NVM file from an existing NVM file
@ -11,15 +12,15 @@ def replace_nvm_images(src_nvm_file, img_map, dst_nvm_file):
with open(src_nvm_file) as f:
lines = list(map(str.strip, f.read().split("\n")))
# Quick check
if len(lines) < 3 or lines[0] != "NVM_V3" or lines[1].strip() != "":
raise Exception("%s does not seem to be a valid NVM file" % src_nvm_file)
num_images = int(lines[2])
entries = []
for l in lines[3:3+num_images]:
for l in lines[3 : 3 + num_images]:
image_path, *p = l.split(" ")
dir_name = os.path.dirname(image_path)
@ -27,15 +28,20 @@ def replace_nvm_images(src_nvm_file, img_map, dst_nvm_file):
new_filename = img_map.get(file_name)
if new_filename is not None:
entries.append("%s %s" % (os.path.join(dir_name, new_filename), " ".join(p)))
entries.append(
"%s %s" % (os.path.join(dir_name, new_filename), " ".join(p))
)
else:
log.ODM_WARNING("Cannot find %s in image map for %s" % (file_name, dst_nvm_file))
log.ODM_WARNING(
"Cannot find %s in image map for %s" % (file_name, dst_nvm_file)
)
if num_images != len(entries):
raise Exception("Cannot write %s, not all band images have been matched" % dst_nvm_file)
raise Exception(
"Cannot write %s, not all band images have been matched" % dst_nvm_file
)
with open(dst_nvm_file, "w") as f:
f.write("NVM_V3\n\n%s\n" % len(entries))
f.write("\n".join(entries))
f.write("\n\n0\n0\n\n0")

View file

@ -1 +1 @@
from .objpacker import obj_pack
from .objpacker import obj_pack

View file

@ -29,8 +29,10 @@ import math
# Based off of the great writeup, demo and code at:
# http://codeincomplete.com/posts/2011/5/7/bin_packing/
class Block():
class Block:
"""A rectangular block, to be packed"""
def __init__(self, w, h, data=None, padding=0):
self.w = w
self.h = h
@ -38,15 +40,17 @@ class Block():
self.y = None
self.fit = None
self.data = data
self.padding = padding # not implemented yet
self.padding = padding # not implemented yet
def __str__(self):
return "({x},{y}) ({w}x{h}): {data}".format(
x=self.x,y=self.y, w=self.w,h=self.h, data=self.data)
x=self.x, y=self.y, w=self.w, h=self.h, data=self.data
)
class _BlockNode():
class _BlockNode:
"""A BlockPacker node"""
def __init__(self, x, y, w, h, used=False, right=None, down=None):
self.x = x
self.y = y
@ -57,20 +61,21 @@ class _BlockNode():
self.down = down
def __repr__(self):
return "({x},{y}) ({w}x{h})".format(x=self.x,y=self.y,w=self.w,h=self.h)
return "({x},{y}) ({w}x{h})".format(x=self.x, y=self.y, w=self.w, h=self.h)
class BlockPacker():
class BlockPacker:
"""Packs blocks of varying sizes into a single, larger block"""
def __init__(self):
self.root = None
def fit(self, blocks):
nblocks = len(blocks)
w = blocks[0].w# if nblocks > 0 else 0
h = blocks[0].h# if nblocks > 0 else 0
w = blocks[0].w # if nblocks > 0 else 0
h = blocks[0].h # if nblocks > 0 else 0
self.root = _BlockNode(0,0, w,h)
self.root = _BlockNode(0, 0, w, h)
for block in blocks:
node = self.find_node(self.root, block.w, block.h)
@ -99,14 +104,8 @@ class BlockPacker():
def split_node(self, node, w, h):
node.used = True
node.down = _BlockNode(
node.x, node.y + h,
node.w, node.h - h
)
node.right = _BlockNode(
node.x + w, node.y,
node.w - w, h
)
node.down = _BlockNode(node.x, node.y + h, node.w, node.h - h)
node.right = _BlockNode(node.x + w, node.y, node.w - w, h)
return node
def grow_node(self, w, h):
@ -131,11 +130,13 @@ class BlockPacker():
def grow_right(self, w, h):
old_root = self.root
self.root = _BlockNode(
0, 0,
old_root.w + w, old_root.h,
0,
0,
old_root.w + w,
old_root.h,
down=old_root,
right=_BlockNode(self.root.w, 0, w, self.root.h),
used=True
used=True,
)
node = self.find_node(self.root, w, h)
@ -147,11 +148,13 @@ class BlockPacker():
def grow_down(self, w, h):
old_root = self.root
self.root = _BlockNode(
0, 0,
old_root.w, old_root.h + h,
0,
0,
old_root.w,
old_root.h + h,
down=_BlockNode(0, self.root.h, self.root.w, h),
right=old_root,
used=True
used=True,
)
node = self.find_node(self.root, w, h)
@ -162,14 +165,14 @@ class BlockPacker():
def crop_by_extents(image, extent):
if min(extent.min_x,extent.min_y) < 0 or max(extent.max_x,extent.max_y) > 1:
if min(extent.min_x, extent.min_y) < 0 or max(extent.max_x, extent.max_y) > 1:
print("\tWARNING! UV Coordinates lying outside of [0:1] space!")
_, h, w = image.shape
minx = max(math.floor(extent.min_x*w), 0)
miny = max(math.floor(extent.min_y*h), 0)
maxx = min(math.ceil(extent.max_x*w), w)
maxy = min(math.ceil(extent.max_y*h), h)
minx = max(math.floor(extent.min_x * w), 0)
miny = max(math.floor(extent.min_y * h), 0)
maxx = min(math.ceil(extent.max_x * w), w)
maxy = min(math.ceil(extent.max_y * h), h)
image = image[:, miny:maxy, minx:maxx]
delta_w = maxx - minx
@ -180,15 +183,16 @@ def crop_by_extents(image, extent):
return (image, changes)
def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
def pack(obj, background=(0, 0, 0, 0), format="PNG", extents=None):
blocks = []
image_name_map = {}
profile = None
for mat in obj['materials']:
filename = obj['materials'][mat]
for mat in obj["materials"]:
filename = obj["materials"][mat]
with rasterio.open(filename, 'r') as f:
with rasterio.open(filename, "r") as f:
profile = f.profile
image = f.read()
@ -197,7 +201,7 @@ def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
changes = None
if extents and extents[mat]:
image, changes = crop_by_extents(image, extents[mat])
image_name_map[filename] = image
_, h, w = image.shape
@ -211,7 +215,9 @@ def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
packer.fit(blocks)
# output_image = Image.new("RGBA", (packer.root.w, packer.root.h))
output_image = np.zeros((profile['count'], packer.root.h, packer.root.w), dtype=profile['dtype'])
output_image = np.zeros(
(profile["count"], packer.root.h, packer.root.w), dtype=profile["dtype"]
)
uv_changes = {}
for block in blocks:
@ -222,18 +228,17 @@ def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
uv_changes[mat] = {
"offset": (
# should be in [0, 1] range
(block.x - (changes[0] if changes else 0))/output_image.shape[2],
(block.x - (changes[0] if changes else 0)) / output_image.shape[2],
# UV origin is bottom left, PIL assumes top left!
(block.y - (changes[1] if changes else 0))/output_image.shape[1]
(block.y - (changes[1] if changes else 0)) / output_image.shape[1],
),
"aspect": (
((1/changes[2]) if changes else 1) * (im_w/output_image.shape[2]),
((1/changes[3]) if changes else 1) * (im_h/output_image.shape[1])
((1 / changes[2]) if changes else 1) * (im_w / output_image.shape[2]),
((1 / changes[3]) if changes else 1) * (im_h / output_image.shape[1]),
),
}
output_image[:, block.y:block.y + im_h, block.x:block.x + im_w] = image
output_image[:, block.y : block.y + im_h, block.x : block.x + im_w] = image
output_image = np.flip(output_image, axis=1)
return output_image, uv_changes, profile
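The offset/aspect pairs returned by pack() describe, per material, an affine remap of the original UV coordinates into the packed atlas; under the convention suggested by the comments above (offsets already normalized to [0, 1] atlas space), the remapped coordinate is the old coordinate times aspect plus offset, per axis. A small sketch under that assumption, with hypothetical values:

import numpy as np

uvs = np.array([[0.25, 0.75],
                [0.50, 0.50]], dtype=np.float32)         # original UVs of one material
change = {"offset": (0.10, 0.40), "aspect": (0.5, 0.5)}  # one entry of uv_changes

offset = np.array(change["offset"], dtype=np.float32)
aspect = np.array(change["aspect"], dtype=np.float32)
packed_uvs = uvs * aspect + offset                       # coordinates inside the atlas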

View file

@ -22,14 +22,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class AABB():
class AABB:
def __init__(self, min_x=None, min_y=None, max_x=None, max_y=None):
self.min_x = min_x
self.min_y = min_y
self.max_x = max_x
self.max_y = max_y
def add(self, x,y):
def add(self, x, y):
self.min_x = min(self.min_x, x) if self.min_x is not None else x
self.min_y = min(self.min_y, y) if self.min_y is not None else y
self.max_x = max(self.max_x, x) if self.max_x is not None else x
@ -45,9 +46,4 @@ class AABB():
return None
def __repr__(self):
return "({},{}) ({},{})".format(
self.min_x,
self.min_y,
self.max_x,
self.max_y
)
return "({},{}) ({},{})".format(self.min_x, self.min_y, self.max_x, self.max_y)

View file

@ -2,6 +2,7 @@ import os
import rasterio
import warnings
import numpy as np
try:
from .imagepacker.utils import AABB
from .imagepacker import pack
@ -11,16 +12,17 @@ except ImportError:
warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)
def load_obj(obj_path, _info=print):
if not os.path.isfile(obj_path):
raise IOError("Cannot open %s" % obj_path)
obj_base_path = os.path.dirname(os.path.abspath(obj_path))
obj = {
'filename': os.path.basename(obj_path),
'root_dir': os.path.dirname(os.path.abspath(obj_path)),
'mtl_filenames': [],
'materials': {},
"filename": os.path.basename(obj_path),
"root_dir": os.path.dirname(os.path.abspath(obj_path)),
"mtl_filenames": [],
"materials": {},
}
uvs = []
@ -34,8 +36,8 @@ def load_obj(obj_path, _info=print):
if line.startswith("mtllib "):
# Materials
mtl_file = "".join(line.split()[1:]).strip()
obj['materials'].update(load_mtl(mtl_file, obj_base_path, _info=_info))
obj['mtl_filenames'].append(mtl_file)
obj["materials"].update(load_mtl(mtl_file, obj_base_path, _info=_info))
obj["mtl_filenames"].append(mtl_file)
# elif line.startswith("v "):
# # Vertices
# vertices.append(list(map(float, line.split()[1:4])))
@ -46,7 +48,7 @@ def load_obj(obj_path, _info=print):
# normals.append(list(map(float, line.split()[1:4])))
elif line.startswith("usemtl "):
mtl_name = "".join(line.split()[1:]).strip()
if not mtl_name in obj['materials']:
if not mtl_name in obj["materials"]:
raise Exception("%s material is missing" % mtl_name)
current_material = mtl_name
@ -54,17 +56,18 @@ def load_obj(obj_path, _info=print):
if current_material not in faces:
faces[current_material] = []
a,b,c = line.split()[1:]
a, b, c = line.split()[1:]
at = int(a.split("/")[1])
bt = int(b.split("/")[1])
ct = int(c.split("/")[1])
faces[current_material].append((at - 1, bt - 1, ct - 1))
faces[current_material].append((at - 1, bt - 1, ct - 1))
obj['uvs'] = np.array(uvs, dtype=np.float32)
obj['faces'] = faces
obj["uvs"] = np.array(uvs, dtype=np.float32)
obj["faces"] = faces
return obj
def load_mtl(mtl_file, obj_base_path, _info=print):
mtl_file = os.path.join(obj_base_path, mtl_file)
@ -88,10 +91,12 @@ def load_mtl(mtl_file, obj_base_path, _info=print):
return mats
def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=print):
def write_obj_changes(
obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=print
):
with open(obj_file) as f:
obj_lines = f.readlines()
out_lines = []
uv_lines = []
current_material = None
@ -122,7 +127,7 @@ def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _i
for v in line[2:].split():
parts = v.split("/")
if len(parts) >= 2 and parts[1]:
uv_idx = int(parts[1]) - 1 # uv indexes start from 1
uv_idx = int(parts[1]) - 1 # uv indexes start from 1
uv_line_idx = uv_lines[uv_idx]
uv_line = obj_lines[uv_line_idx][3:]
uv = [float(uv.strip()) for uv in uv_line.split()]
@ -139,28 +144,30 @@ def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _i
out_file = os.path.join(output_dir, os.path.basename(obj_file))
_info("Writing %s" % out_file)
with open(out_file, 'w') as f:
with open(out_file, "w") as f:
f.writelines(out_lines)
def write_output_tex(img, profile, path, _info=print):
_, w, h = img.shape
profile['width'] = w
profile['height'] = h
profile["width"] = w
profile["height"] = h
if 'tiled' in profile:
profile['tiled'] = False
if "tiled" in profile:
profile["tiled"] = False
_info("Writing %s (%sx%s pixels)" % (path, w, h))
with rasterio.open(path, 'w', **profile) as dst:
with rasterio.open(path, "w", **profile) as dst:
for b in range(1, img.shape[0] + 1):
dst.write(img[b - 1], b)
sidecar = path + '.aux.xml'
sidecar = path + ".aux.xml"
if os.path.isfile(sidecar):
os.unlink(sidecar)
def write_output_mtl(src_mtl, mat_file, dst_mtl):
with open(src_mtl, 'r') as src:
with open(src_mtl, "r") as src:
lines = src.readlines()
out = []
@ -176,8 +183,8 @@ def write_output_mtl(src_mtl, mat_file, dst_mtl):
break
else:
out.append(l)
with open(dst_mtl, 'w') as dst:
with open(dst_mtl, "w") as dst:
dst.write("".join(out))
if single_mat is None:
@ -185,51 +192,68 @@ def write_output_mtl(src_mtl, mat_file, dst_mtl):
return single_mat
def obj_pack(obj_file, output_dir=None, _info=print):
if not output_dir:
output_dir = os.path.join(os.path.dirname(os.path.abspath(obj_file)), "packed")
obj = load_obj(obj_file, _info=_info)
if not obj['mtl_filenames']:
if not obj["mtl_filenames"]:
raise Exception("No MTL files found, nothing to do")
if os.path.abspath(obj_file) == os.path.abspath(os.path.join(output_dir, os.path.basename(obj_file))):
raise Exception("This will overwrite %s. Choose a different output directory" % obj_file)
if len(obj['mtl_filenames']) <= 1 and len(obj['materials']) <= 1:
if os.path.abspath(obj_file) == os.path.abspath(
os.path.join(output_dir, os.path.basename(obj_file))
):
raise Exception(
"This will overwrite %s. Choose a different output directory" % obj_file
)
if len(obj["mtl_filenames"]) <= 1 and len(obj["materials"]) <= 1:
raise Exception("File already has a single material, nothing to do")
# Compute AABB for UVs
_info("Computing texture bounds")
extents = {}
for material in obj['materials']:
for material in obj["materials"]:
bounds = AABB()
faces = obj['faces'][material]
faces = obj["faces"][material]
for f in faces:
for uv_idx in f:
uv = obj['uvs'][uv_idx]
uv = obj["uvs"][uv_idx]
bounds.add(uv[0], uv[1])
extents[material] = bounds
_info("Binary packing...")
output_image, uv_changes, profile = pack(obj, extents=extents)
mtl_file = obj['mtl_filenames'][0]
mat_file = os.path.basename(obj['materials'][next(iter(obj['materials']))])
mtl_file = obj["mtl_filenames"][0]
mat_file = os.path.basename(obj["materials"][next(iter(obj["materials"]))])
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
write_output_tex(output_image, profile, os.path.join(output_dir, mat_file), _info=_info)
single_mat = write_output_mtl(os.path.join(obj['root_dir'], mtl_file), mat_file, os.path.join(output_dir, mtl_file))
write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=_info)
if __name__ == '__main__':
write_output_tex(
output_image, profile, os.path.join(output_dir, mat_file), _info=_info
)
single_mat = write_output_mtl(
os.path.join(obj["root_dir"], mtl_file),
mat_file,
os.path.join(output_dir, mtl_file),
)
write_obj_changes(
obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=_info
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Packs textured .OBJ Wavefront files into a single materials")
parser = argparse.ArgumentParser(
description="Packs textured .OBJ Wavefront files into a single materials"
)
parser.add_argument("obj", help="Path to the .OBJ file")
parser.add_argument("-o","--output-dir", help="Output directory")
parser.add_argument("-o", "--output-dir", help="Output directory")
args = parser.parse_args()
obj_pack(args.obj, args.output_dir)
obj_pack(args.obj, args.output_dir)
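Besides the command-line entry point, obj_pack can be called directly; a minimal sketch, assuming a textured model with several materials (paths are hypothetical):

# Writes the packed texture, rewritten .mtl and .obj into the output directory
obj_pack("odm_texturing/odm_textured_model_geo.obj", "odm_texturing/packed")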

View file

@ -11,7 +11,10 @@ from opendm.entwine import build_entwine
import fiona
from shapely.geometry import shape
def build_textured_model(input_obj, output_path, reference_lla = None, model_bounds_file=None, rerun=False):
def build_textured_model(
input_obj, output_path, reference_lla=None, model_bounds_file=None, rerun=False
):
if not os.path.isfile(input_obj):
log.ODM_WARNING("No input OBJ file to process")
return
@ -22,27 +25,27 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bou
log.ODM_INFO("Generating OGC 3D Tiles textured model")
lat = lon = alt = 0
# Read reference_lla.json (if provided)
if reference_lla is not None and os.path.isfile(reference_lla):
try:
with open(reference_lla) as f:
reference_lla = json.loads(f.read())
lat = reference_lla['latitude']
lon = reference_lla['longitude']
alt = reference_lla['altitude']
lat = reference_lla["latitude"]
lon = reference_lla["longitude"]
alt = reference_lla["altitude"]
except Exception as e:
log.ODM_WARNING("Cannot read %s: %s" % (reference_lla, str(e)))
# Read model bounds (if provided)
divisions = 1 # default
DIV_THRESHOLD = 10000 # m^2 (this is somewhat arbitrary)
divisions = 1 # default
DIV_THRESHOLD = 10000 # m^2 (this is somewhat arbitrary)
if model_bounds_file is not None and os.path.isfile(model_bounds_file):
try:
with fiona.open(model_bounds_file, 'r') as f:
with fiona.open(model_bounds_file, "r") as f:
if len(f) == 1:
poly = shape(f[1]['geometry'])
poly = shape(f[1]["geometry"])
area = poly.area
log.ODM_INFO("Approximate area: %s m^2" % round(area, 2))
@ -57,18 +60,23 @@ def build_textured_model(input_obj, output_path, reference_lla = None, model_bou
try:
kwargs = {
'input': input_obj,
'output': output_path,
'divisions': divisions,
'lat': lat,
'lon': lon,
'alt': alt,
"input": input_obj,
"output": output_path,
"divisions": divisions,
"lat": lat,
"lon": lon,
"alt": alt,
}
system.run('Obj2Tiles "{input}" "{output}" --divisions {divisions} --lat {lat} --lon {lon} --alt {alt} '.format(**kwargs))
system.run(
'Obj2Tiles "{input}" "{output}" --divisions {divisions} --lat {lat} --lon {lon} --alt {alt} '.format(
**kwargs
)
)
except Exception as e:
log.ODM_WARNING("Cannot build 3D tiles textured model: %s" % str(e))
def build_pointcloud(input_pointcloud, output_path, max_concurrency, rerun=False):
if not os.path.isfile(input_pointcloud):
log.ODM_WARNING("No input point cloud file to process")
@ -79,19 +87,21 @@ def build_pointcloud(input_pointcloud, output_path, max_concurrency, rerun=False
shutil.rmtree(output_path)
log.ODM_INFO("Generating OGC 3D Tiles point cloud")
try:
if not os.path.isdir(output_path):
os.mkdir(output_path)
tmpdir = os.path.join(output_path, "tmp")
entwine_output = os.path.join(output_path, "entwine")
build_entwine([input_pointcloud], tmpdir, entwine_output, max_concurrency, "EPSG:4978")
build_entwine(
[input_pointcloud], tmpdir, entwine_output, max_concurrency, "EPSG:4978"
)
kwargs = {
'input': entwine_output,
'output': output_path,
"input": entwine_output,
"output": output_path,
}
system.run('entwine convert -i "{input}" -o "{output}"'.format(**kwargs))
@ -109,27 +119,36 @@ def build_3dtiles(args, tree, reconstruction, rerun=False):
if rerun and os.path.exists(tiles_output_path):
shutil.rmtree(tiles_output_path)
if not os.path.isdir(tiles_output_path):
os.mkdir(tiles_output_path)
# Model
# Model
if not os.path.isdir(model_output_path) or rerun:
reference_lla = os.path.join(tree.opensfm, "reference_lla.json")
model_bounds_file = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
model_bounds_file = os.path.join(
tree.odm_georeferencing, "odm_georeferenced_model.bounds.gpkg"
)
input_obj = os.path.join(tree.odm_texturing, tree.odm_textured_model_obj)
if not os.path.isfile(input_obj):
input_obj = os.path.join(tree.odm_25dtexturing, tree.odm_textured_model_obj)
build_textured_model(input_obj, model_output_path, reference_lla, model_bounds_file, rerun)
build_textured_model(
input_obj, model_output_path, reference_lla, model_bounds_file, rerun
)
else:
log.ODM_WARNING("OGC 3D Tiles model %s already generated" % model_output_path)
# Point cloud
if not os.path.isdir(pointcloud_output_path) or rerun:
build_pointcloud(tree.odm_georeferencing_model_laz, pointcloud_output_path, args.max_concurrency, rerun)
build_pointcloud(
tree.odm_georeferencing_model_laz,
pointcloud_output_path,
args.max_concurrency,
rerun,
)
else:
log.ODM_WARNING("OGC 3D Tiles model %s already generated" % model_output_path)
log.ODM_WARNING("OGC 3D Tiles model %s already generated" % model_output_path)

View file

@ -4,28 +4,36 @@ from opendm import log
from opendm.system import run
from opendm import io
def classify(point_cloud, max_threads=8):
tmp_output = io.related_file_path(point_cloud, postfix=".classified")
if os.path.isfile(tmp_output):
os.remove(tmp_output)
try:
model = get_model("openpointclass",
"https://github.com/uav4geo/OpenPointClass/releases/download/v1.1.3/vehicles-vegetation-buildings.zip",
model = get_model(
"openpointclass",
"https://github.com/uav4geo/OpenPointClass/releases/download/v1.1.3/vehicles-vegetation-buildings.zip",
"v1.0.0",
name="model.bin")
name="model.bin",
)
if model is not None:
run('pcclassify "%s" "%s" "%s" -u -s 2,64' % (point_cloud, tmp_output, model), env_vars={'OMP_NUM_THREADS': max_threads})
run(
'pcclassify "%s" "%s" "%s" -u -s 2,64'
% (point_cloud, tmp_output, model),
env_vars={"OMP_NUM_THREADS": max_threads},
)
if os.path.isfile(tmp_output):
os.remove(point_cloud)
os.rename(tmp_output, point_cloud)
else:
log.ODM_WARNING("Cannot classify using OpenPointClass (no output generated)")
log.ODM_WARNING(
"Cannot classify using OpenPointClass (no output generated)"
)
else:
log.ODM_WARNING("Cannot download/access model from %s" % (model_url))
except Exception as e:
log.ODM_WARNING("Cannot classify using OpenPointClass: %s" % str(e))

View file

@ -19,39 +19,43 @@ from osgeo import ogr
def get_orthophoto_vars(args):
return {
'TILED': 'NO' if args.orthophoto_no_tiled else 'YES',
'COMPRESS': args.orthophoto_compression,
'PREDICTOR': '2' if args.orthophoto_compression in ['LZW', 'DEFLATE'] else '1',
'BIGTIFF': 'IF_SAFER',
'BLOCKXSIZE': 512,
'BLOCKYSIZE': 512,
'NUM_THREADS': args.max_concurrency
"TILED": "NO" if args.orthophoto_no_tiled else "YES",
"COMPRESS": args.orthophoto_compression,
"PREDICTOR": "2" if args.orthophoto_compression in ["LZW", "DEFLATE"] else "1",
"BIGTIFF": "IF_SAFER",
"BLOCKXSIZE": 512,
"BLOCKYSIZE": 512,
"NUM_THREADS": args.max_concurrency,
}
def build_overviews(orthophoto_file):
log.ODM_INFO("Building Overviews")
kwargs = {'orthophoto': orthophoto_file}
kwargs = {"orthophoto": orthophoto_file}
# Run gdaladdo
system.run('gdaladdo -r average '
'--config BIGTIFF_OVERVIEW IF_SAFER '
'--config COMPRESS_OVERVIEW JPEG '
'{orthophoto} 2 4 8 16'.format(**kwargs))
system.run(
"gdaladdo -r average "
"--config BIGTIFF_OVERVIEW IF_SAFER "
"--config COMPRESS_OVERVIEW JPEG "
"{orthophoto} 2 4 8 16".format(**kwargs)
)
def generate_png(orthophoto_file, output_file=None, outsize=None):
if output_file is None:
base, ext = os.path.splitext(orthophoto_file)
output_file = base + '.png'
output_file = base + ".png"
# See if we need to select top three bands
params = []
try:
gtif = gdal.Open(orthophoto_file)
bands = []
for idx in range(1, gtif.RasterCount+1):
for idx in range(1, gtif.RasterCount + 1):
bands.append(gtif.GetRasterBand(idx).GetColorInterpretation())
bands = dict(zip(bands, range(1, len(bands)+1)))
bands = dict(zip(bands, range(1, len(bands) + 1)))
if gtif.RasterCount >= 3:
red = bands.get(gdal.GCI_RedBand)
@ -60,10 +64,10 @@ def generate_png(orthophoto_file, output_file=None, outsize=None):
if red is None or green is None or blue is None:
params.append("-b 1 -b 2 -b 3")
else:
params.append("-b %s -b %s -b %s" % (red, green, blue))
params.append("-b %s -b %s -b %s" % (red, green, blue))
elif gtif.RasterCount <= 2:
params.append("-b 1")
alpha = bands.get(gdal.GCI_AlphaBand)
if alpha is not None:
params.append("-b %s" % alpha)
@ -77,31 +81,41 @@ def generate_png(orthophoto_file, output_file=None, outsize=None):
params.append("-scale_1 -scale_2 -scale_3")
elif gtif.RasterCount <= 2:
params.append("-scale_1")
gtif = None
except Exception as e:
log.ODM_WARNING("Cannot read orthophoto information for PNG generation: %s" % str(e))
log.ODM_WARNING(
"Cannot read orthophoto information for PNG generation: %s" % str(e)
)
if outsize is not None:
params.append("-outsize %s 0" % outsize)
system.run('gdal_translate -of png "%s" "%s" %s '
'-co WORLDFILE=YES '
'--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, " ".join(params), get_max_memory()))
system.run(
'gdal_translate -of png "%s" "%s" %s '
"-co WORLDFILE=YES "
"--config GDAL_CACHEMAX %s%% "
% (orthophoto_file, output_file, " ".join(params), get_max_memory())
)
def generate_kmz(orthophoto_file, output_file=None, outsize=None):
if output_file is None:
base, ext = os.path.splitext(orthophoto_file)
output_file = base + '.kmz'
output_file = base + ".kmz"
# See if we need to select top three bands
bandparam = ""
gtif = gdal.Open(orthophoto_file)
if gtif.RasterCount > 4:
bandparam = "-b 1 -b 2 -b 3 -a_nodata 0"
system.run('gdal_translate -of KMLSUPEROVERLAY -co FORMAT=PNG "%s" "%s" %s '
'--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, bandparam, get_max_memory()))
system.run(
'gdal_translate -of KMLSUPEROVERLAY -co FORMAT=PNG "%s" "%s" %s '
"--config GDAL_CACHEMAX %s%% "
% (orthophoto_file, output_file, bandparam, get_max_memory())
)
def generate_extent_polygon(orthophoto_file):
"""Function to return the orthophoto extent as a polygon into a gpkg file
@ -110,11 +124,11 @@ def generate_extent_polygon(orthophoto_file):
orthophoto_file (str): the path to orthophoto file
"""
base, ext = os.path.splitext(orthophoto_file)
output_file = base + '_extent.dxf'
output_file = base + "_extent.dxf"
try:
gtif = gdal.Open(orthophoto_file)
srs = gtif.GetSpatialRef()
srs = gtif.GetSpatialRef()
geoTransform = gtif.GetGeoTransform()
# calculate the coordinates
@ -122,10 +136,21 @@ def generate_extent_polygon(orthophoto_file):
maxy = geoTransform[3]
maxx = minx + geoTransform[1] * gtif.RasterXSize
miny = maxy + geoTransform[5] * gtif.RasterYSize
# create polygon in wkt format
poly_wkt = "POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
poly_wkt = "POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % (
minx,
miny,
minx,
maxy,
maxx,
maxy,
maxx,
miny,
minx,
miny,
)
# create vector file
# just the DXF to support AutoCAD users
# to load the geotiff raster correctly.
@ -147,59 +172,84 @@ def generate_extent_polygon(orthophoto_file):
gtif = None
log.ODM_INFO("Wrote %s" % output_file)
except Exception as e:
log.ODM_WARNING("Cannot create extent layer for %s: %s" % (orthophoto_file, str(e)))
log.ODM_WARNING(
"Cannot create extent layer for %s: %s" % (orthophoto_file, str(e))
)
def generate_tfw(orthophoto_file):
base, ext = os.path.splitext(orthophoto_file)
tfw_file = base + '.tfw'
tfw_file = base + ".tfw"
try:
with rasterio.open(orthophoto_file) as ds:
t = ds.transform
with open(tfw_file, 'w') as f:
with open(tfw_file, "w") as f:
# rasterio affine values taken by
# https://mharty3.github.io/til/GIS/raster-affine-transforms/
f.write("\n".join([str(v) for v in [t.a, t.d, t.b, t.e, t.c, t.f]]) + "\n")
f.write(
"\n".join([str(v) for v in [t.a, t.d, t.b, t.e, t.c, t.f]]) + "\n"
)
log.ODM_INFO("Wrote %s" % tfw_file)
except Exception as e:
log.ODM_WARNING("Cannot create .tfw for %s: %s" % (orthophoto_file, str(e)))
def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution):
def post_orthophoto_steps(
args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution
):
if args.crop > 0 or args.boundary:
Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha'])
Cropper.crop(
bounds_file_path,
orthophoto_file,
get_orthophoto_vars(args),
keep_original=not args.optimize_disk_space,
warp_options=["-dstalpha"],
)
if args.build_overviews and not args.cog:
build_overviews(orthophoto_file)
if args.orthophoto_png:
generate_png(orthophoto_file)
if args.orthophoto_kmz:
generate_kmz(orthophoto_file)
if args.tiles:
generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency, resolution)
generate_orthophoto_tiles(
orthophoto_file, orthophoto_tiles_dir, args.max_concurrency, resolution
)
if args.cog:
convert_to_cogeo(orthophoto_file, max_workers=args.max_concurrency, compression=args.orthophoto_compression)
convert_to_cogeo(
orthophoto_file,
max_workers=args.max_concurrency,
compression=args.orthophoto_compression,
)
generate_extent_polygon(orthophoto_file)
generate_tfw(orthophoto_file)
def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance=20, only_max_coords_feature=False):
def compute_mask_raster(
input_raster,
vector_mask,
output_raster,
blend_distance=20,
only_max_coords_feature=False,
):
if not os.path.exists(input_raster):
log.ODM_WARNING("Cannot mask raster, %s does not exist" % input_raster)
return
if not os.path.exists(vector_mask):
log.ODM_WARNING("Cannot mask raster, %s does not exist" % vector_mask)
return
log.ODM_INFO("Computing mask raster: %s" % output_raster)
with rasterio.open(input_raster, 'r') as rast:
with rasterio.open(input_raster, "r") as rast:
with fiona.open(vector_mask) as src:
burn_features = src
@ -209,12 +259,17 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance
for feature in src:
if feature is not None:
# No complex shapes
if len(feature['geometry']['coordinates'][0]) > max_coords_count:
max_coords_count = len(feature['geometry']['coordinates'][0])
if (
len(feature["geometry"]["coordinates"][0])
> max_coords_count
):
max_coords_count = len(
feature["geometry"]["coordinates"][0]
)
max_coords_feature = feature
if max_coords_feature is not None:
burn_features = [max_coords_feature]
shapes = [feature["geometry"] for feature in burn_features]
out_image, out_transform = mask(rast, shapes, nodata=0)
@ -227,22 +282,28 @@ def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance
dist_t[dist_t > blend_distance] = 1
np.multiply(alpha_band, dist_t, out=alpha_band, casting="unsafe")
else:
log.ODM_WARNING("%s does not have an alpha band, cannot blend cutline!" % input_raster)
log.ODM_WARNING(
"%s does not have an alpha band, cannot blend cutline!"
% input_raster
)
with rasterio.open(output_raster, 'w', BIGTIFF="IF_SAFER", **rast.profile) as dst:
with rasterio.open(
output_raster, "w", BIGTIFF="IF_SAFER", **rast.profile
) as dst:
dst.colorinterp = rast.colorinterp
dst.write(out_image)
return output_raster
def feather_raster(input_raster, output_raster, blend_distance=20):
if not os.path.exists(input_raster):
log.ODM_WARNING("Cannot feather raster, %s does not exist" % input_raster)
return
log.ODM_INFO("Computing feather raster: %s" % output_raster)
with rasterio.open(input_raster, 'r') as rast:
with rasterio.open(input_raster, "r") as rast:
out_image = rast.read()
if blend_distance > 0:
if out_image.shape[0] >= 4:
@ -252,22 +313,28 @@ def feather_raster(input_raster, output_raster, blend_distance=20):
dist_t[dist_t > blend_distance] = 1
np.multiply(alpha_band, dist_t, out=alpha_band, casting="unsafe")
else:
log.ODM_WARNING("%s does not have an alpha band, cannot feather raster!" % input_raster)
log.ODM_WARNING(
"%s does not have an alpha band, cannot feather raster!"
% input_raster
)
with rasterio.open(output_raster, 'w', BIGTIFF="IF_SAFER", **rast.profile) as dst:
with rasterio.open(
output_raster, "w", BIGTIFF="IF_SAFER", **rast.profile
) as dst:
dst.colorinterp = rast.colorinterp
dst.write(out_image)
return output_raster
def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
"""
Based on https://github.com/mapbox/rio-merge-rgba/
Merge orthophotos around cutlines using a blend buffer.
"""
inputs = []
bounds=None
precision=7
bounds = None
precision = 7
for o, c in input_ortho_and_ortho_cuts:
if not io.file_exists(o):
@ -286,11 +353,11 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
res = first.res
dtype = first.dtypes[0]
profile = first.profile
num_bands = first.meta['count'] - 1 # minus alpha
num_bands = first.meta["count"] - 1 # minus alpha
colorinterp = first.colorinterp
log.ODM_INFO("%s valid orthophoto rasters to merge" % len(inputs))
sources = [(rasterio.open(o), rasterio.open(c)) for o,c in inputs]
sources = [(rasterio.open(o), rasterio.open(c)) for o, c in inputs]
# scan input files.
# while we're at it, validate assumptions about inputs
@ -321,12 +388,12 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
profile["transform"] = output_transform
profile["height"] = output_height
profile["width"] = output_width
profile["tiled"] = orthophoto_vars.get('TILED', 'YES') == 'YES'
profile["blockxsize"] = orthophoto_vars.get('BLOCKXSIZE', 512)
profile["blockysize"] = orthophoto_vars.get('BLOCKYSIZE', 512)
profile["compress"] = orthophoto_vars.get('COMPRESS', 'LZW')
profile["predictor"] = orthophoto_vars.get('PREDICTOR', '2')
profile["bigtiff"] = orthophoto_vars.get('BIGTIFF', 'IF_SAFER')
profile["tiled"] = orthophoto_vars.get("TILED", "YES") == "YES"
profile["blockxsize"] = orthophoto_vars.get("BLOCKXSIZE", 512)
profile["blockysize"] = orthophoto_vars.get("BLOCKYSIZE", 512)
profile["compress"] = orthophoto_vars.get("COMPRESS", "LZW")
profile["predictor"] = orthophoto_vars.get("PREDICTOR", "2")
profile["bigtiff"] = orthophoto_vars.get("BIGTIFF", "IF_SAFER")
profile.update()
# create destination file
@ -346,11 +413,14 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
# First pass, write all rasters naively without blending
for src, _ in sources:
src_window = tuple(zip(rowcol(
src.transform, left, top, op=round, precision=precision
), rowcol(
src.transform, right, bottom, op=round, precision=precision
)))
src_window = tuple(
zip(
rowcol(src.transform, left, top, op=round, precision=precision),
rowcol(
src.transform, right, bottom, op=round, precision=precision
),
)
)
temp = np.zeros(dst_shape, dtype=dtype)
temp = src.read(
@ -370,11 +440,14 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
# Second pass, write all feathered rasters
# blending the edges
for src, _ in sources:
src_window = tuple(zip(rowcol(
src.transform, left, top, op=round, precision=precision
), rowcol(
src.transform, right, bottom, op=round, precision=precision
)))
src_window = tuple(
zip(
rowcol(src.transform, left, top, op=round, precision=precision),
rowcol(
src.transform, right, bottom, op=round, precision=precision
),
)
)
temp = np.zeros(dst_shape, dtype=dtype)
temp = src.read(
@ -383,10 +456,12 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
where = temp[-1] != 0
for b in range(0, num_bands):
blended = temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b]
np.copyto(dstarr[b], blended, casting='unsafe', where=where)
blended = (
temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b]
)
np.copyto(dstarr[b], blended, casting="unsafe", where=where)
dstarr[-1][where] = 255.0
# check if dest has any nodata pixels available
if np.count_nonzero(dstarr[-1]) == blocksize:
break
@ -394,11 +469,14 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
# Third pass, write cut rasters
# blending the cutlines
for _, cut in sources:
src_window = tuple(zip(rowcol(
cut.transform, left, top, op=round, precision=precision
), rowcol(
cut.transform, right, bottom, op=round, precision=precision
)))
src_window = tuple(
zip(
rowcol(cut.transform, left, top, op=round, precision=precision),
rowcol(
cut.transform, right, bottom, op=round, precision=precision
),
)
)
temp = np.zeros(dst_shape, dtype=dtype)
temp = cut.read(
@ -408,8 +486,10 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
# For each band, average alpha values between
# destination raster and cut raster
for b in range(0, num_bands):
blended = temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b]
np.copyto(dstarr[b], blended, casting='unsafe', where=temp[-1]!=0)
blended = (
temp[-1] / 255.0 * temp[b] + (1 - temp[-1] / 255.0) * dstarr[b]
)
np.copyto(dstarr[b], blended, casting="unsafe", where=temp[-1] != 0)
dstrast.write(dstarr, window=dst_window)
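To make the feathered blending in the second and third passes concrete, here is a minimal NumPy sketch of the per-band alpha-weighted blend (not ODM's function; it assumes a uint8 alpha band scaled 0-255, as in the merged rasters above, and made-up pixel values):

import numpy as np

def feather_blend(src_band, src_alpha, dst_band):
    # Weight 0 keeps the destination pixel, weight 1 takes the source pixel
    w = src_alpha.astype(np.float64) / 255.0
    blended = w * src_band + (1.0 - w) * dst_band
    out = dst_band.copy()
    # Only overwrite pixels actually covered by the source (alpha != 0)
    np.copyto(out, blended, casting="unsafe", where=src_alpha != 0)
    return out

# 1x2 example: one fully opaque pixel, one half-feathered pixel
src = np.array([[200, 200]], dtype=np.uint8)
alpha = np.array([[255, 128]], dtype=np.uint8)
dst = np.array([[100, 100]], dtype=np.uint8)
print(feather_blend(src, alpha, dst))  # [[200 150]]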

View file

@ -25,58 +25,72 @@ from opendm.gpu import has_popsift_and_can_handle_texsize, has_gpu
from opensfm import multiview, exif
from opensfm.actions.export_geocoords import _transform
class OSFMContext:
def __init__(self, opensfm_project_path):
self.opensfm_project_path = opensfm_project_path
def run(self, command):
osfm_bin = os.path.join(context.opensfm_path, 'bin', 'opensfm')
system.run('"%s" %s "%s"' %
(osfm_bin, command, self.opensfm_project_path))
osfm_bin = os.path.join(context.opensfm_path, "bin", "opensfm")
system.run('"%s" %s "%s"' % (osfm_bin, command, self.opensfm_project_path))
def is_reconstruction_done(self):
tracks_file = os.path.join(self.opensfm_project_path, 'tracks.csv')
reconstruction_file = os.path.join(self.opensfm_project_path, 'reconstruction.json')
tracks_file = os.path.join(self.opensfm_project_path, "tracks.csv")
reconstruction_file = os.path.join(
self.opensfm_project_path, "reconstruction.json"
)
return io.file_exists(tracks_file) and io.file_exists(reconstruction_file)
def create_tracks(self, rerun=False):
tracks_file = os.path.join(self.opensfm_project_path, 'tracks.csv')
rs_file = self.path('rs_done.txt')
tracks_file = os.path.join(self.opensfm_project_path, "tracks.csv")
rs_file = self.path("rs_done.txt")
if not io.file_exists(tracks_file) or rerun:
self.run('create_tracks')
self.run("create_tracks")
else:
log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' % tracks_file)
log.ODM_WARNING("Found a valid OpenSfM tracks file in: %s" % tracks_file)
def reconstruct(self, rolling_shutter_correct=False, merge_partial=False, rerun=False):
reconstruction_file = os.path.join(self.opensfm_project_path, 'reconstruction.json')
def reconstruct(
self, rolling_shutter_correct=False, merge_partial=False, rerun=False
):
reconstruction_file = os.path.join(
self.opensfm_project_path, "reconstruction.json"
)
if not io.file_exists(reconstruction_file) or rerun:
self.run('reconstruct')
self.run("reconstruct")
if merge_partial:
self.check_merge_partial_reconstructions()
else:
log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' % reconstruction_file)
log.ODM_WARNING(
"Found a valid OpenSfM reconstruction file in: %s" % reconstruction_file
)
# Check that a reconstruction file has been created
if not self.reconstructed():
raise system.ExitException("The program could not process this dataset using the current settings. "
"Check that the images have enough overlap, "
"that there are enough recognizable features "
"and that the images are in focus. "
"The program will now exit.")
raise system.ExitException(
"The program could not process this dataset using the current settings. "
"Check that the images have enough overlap, "
"that there are enough recognizable features "
"and that the images are in focus. "
"The program will now exit."
)
if rolling_shutter_correct:
rs_file = self.path('rs_done.txt')
rs_file = self.path("rs_done.txt")
if not io.file_exists(rs_file) or rerun:
self.run('rs_correct')
self.run("rs_correct")
log.ODM_INFO("Re-running the reconstruction pipeline")
self.match_features(True)
self.create_tracks(True)
self.reconstruct(rolling_shutter_correct=False, merge_partial=merge_partial, rerun=True)
self.reconstruct(
rolling_shutter_correct=False,
merge_partial=merge_partial,
rerun=True,
)
self.touch(rs_file)
else:
@ -89,7 +103,10 @@ class OSFMContext:
tracks_manager = data.load_tracks_manager()
if len(reconstructions) > 1:
log.ODM_WARNING("Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap" % len(reconstructions))
log.ODM_WARNING(
"Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap"
% len(reconstructions)
)
log.ODM_INFO("Attempting merge")
merged = Reconstruction()
@ -110,7 +127,9 @@ class OSFMContext:
new_point = merged.create_point(point.id, point.coordinates)
new_point.color = point.color
except RuntimeError as e:
log.ODM_WARNING("Cannot merge shot id %s (%s)" % (shot.id, str(e)))
log.ODM_WARNING(
"Cannot merge shot id %s (%s)" % (shot.id, str(e))
)
continue
for shot in rec.shots.values():
@ -118,7 +137,9 @@ class OSFMContext:
try:
obsdict = tracks_manager.get_shot_observations(shot.id)
except RuntimeError:
log.ODM_WARNING("Shot id %s missing from tracks_manager!" % shot.id)
log.ODM_WARNING(
"Shot id %s missing from tracks_manager!" % shot.id
)
continue
for track_id, obs in obsdict.items():
if track_id in merged.points:
@ -126,7 +147,7 @@ class OSFMContext:
data.save_reconstruction([merged])
def setup(self, args, images_path, reconstruction, append_config = [], rerun=False):
def setup(self, args, images_path, reconstruction, append_config=[], rerun=False):
"""
Setup a OpenSfM project
"""
@ -136,14 +157,22 @@ class OSFMContext:
if not io.dir_exists(self.opensfm_project_path):
system.mkdir_p(self.opensfm_project_path)
list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
list_path = os.path.join(self.opensfm_project_path, "image_list.txt")
if not io.file_exists(list_path) or rerun:
if reconstruction.multi_camera:
photos = get_photos_by_band(reconstruction.multi_camera, args.primary_band)
photos = get_photos_by_band(
reconstruction.multi_camera, args.primary_band
)
if len(photos) < 1:
raise Exception("Not enough images in selected band %s" % args.primary_band.lower())
log.ODM_INFO("Reconstruction will use %s images from %s band" % (len(photos), args.primary_band.lower()))
raise Exception(
"Not enough images in selected band %s"
% args.primary_band.lower()
)
log.ODM_INFO(
"Reconstruction will use %s images from %s band"
% (len(photos), args.primary_band.lower())
)
else:
photos = reconstruction.photos
@ -151,7 +180,7 @@ class OSFMContext:
num_zero_alt = 0
has_alt = True
has_gps = False
with open(list_path, 'w') as fout:
with open(list_path, "w") as fout:
for photo in photos:
if photo.altitude is None:
has_alt = False
@ -160,54 +189,69 @@ class OSFMContext:
if photo.latitude is not None and photo.longitude is not None:
has_gps = True
fout.write('%s\n' % os.path.join(images_path, photo.filename))
fout.write("%s\n" % os.path.join(images_path, photo.filename))
# check 0 altitude images percentage when has_alt is True
if has_alt and num_zero_alt / len(photos) > 0.05:
log.ODM_WARNING("More than 5% of images have zero altitude, this might be an indicator that the images have no altitude information")
log.ODM_WARNING(
"More than 5% of images have zero altitude, this might be an indicator that the images have no altitude information"
)
has_alt = False
# check for image_groups.txt (split-merge)
image_groups_file = os.path.join(args.project_path, "image_groups.txt")
if 'split_image_groups_is_set' in args:
if "split_image_groups_is_set" in args:
image_groups_file = os.path.abspath(args.split_image_groups)
if io.file_exists(image_groups_file):
dst_groups_file = os.path.join(self.opensfm_project_path, "image_groups.txt")
dst_groups_file = os.path.join(
self.opensfm_project_path, "image_groups.txt"
)
io.copy(image_groups_file, dst_groups_file)
log.ODM_INFO("Copied %s to %s" % (image_groups_file, dst_groups_file))
# check for cameras
if args.cameras:
try:
camera_overrides = camera.get_opensfm_camera_models(args.cameras)
with open(os.path.join(self.opensfm_project_path, "camera_models_overrides.json"), 'w') as f:
with open(
os.path.join(
self.opensfm_project_path, "camera_models_overrides.json"
),
"w",
) as f:
f.write(json.dumps(camera_overrides))
log.ODM_INFO("Wrote camera_models_overrides.json to OpenSfM directory")
log.ODM_INFO(
"Wrote camera_models_overrides.json to OpenSfM directory"
)
except Exception as e:
log.ODM_WARNING("Cannot set camera_models_overrides.json: %s" % str(e))
log.ODM_WARNING(
"Cannot set camera_models_overrides.json: %s" % str(e)
)
# Check image masks
masks = []
for p in photos:
if p.mask is not None:
masks.append((p.filename, os.path.join(images_path, p.mask)))
if masks:
log.ODM_INFO("Found %s image masks" % len(masks))
with open(os.path.join(self.opensfm_project_path, "mask_list.txt"), 'w') as f:
with open(
os.path.join(self.opensfm_project_path, "mask_list.txt"), "w"
) as f:
for fname, mask in masks:
f.write("{} {}\n".format(fname, mask))
# Compute feature_process_size
feature_process_size = 2048 # default
feature_process_size = 2048 # default
feature_quality_scale = {
'ultra': 1,
'high': 0.5,
'medium': 0.25,
'low': 0.125,
'lowest': 0.0675,
"ultra": 1,
"high": 0.5,
"medium": 0.25,
"low": 0.125,
"lowest": 0.0675,
}
max_dims = find_largest_photo_dims(photos)
@ -221,17 +265,26 @@ class OSFMContext:
upper_limit = 4480
megapixels = (w * h) / 1e6
multiplier = 1
if megapixels < 2:
multiplier = 2
elif megapixels > 42:
multiplier = 0.5
factor = min(1, feature_quality_scale[args.feature_quality] * multiplier)
feature_process_size = min(upper_limit, max(lower_limit, int(max_dim * factor)))
log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size)
factor = min(
1, feature_quality_scale[args.feature_quality] * multiplier
)
feature_process_size = min(
upper_limit, max(lower_limit, int(max_dim * factor))
)
log.ODM_INFO(
"Photo dimensions for feature extraction: %ipx"
% feature_process_size
)
else:
log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")
log.ODM_WARNING(
"Cannot compute max image dimensions, going with defaults"
)
# create config file for OpenSfM
if args.matcher_neighbors > 0:
@ -240,7 +293,7 @@ class OSFMContext:
else:
matcher_graph_rounds = 50
matcher_neighbors = 0
# Always use matcher-neighbors if fewer than 4 pictures
if len(photos) <= 3:
matcher_graph_rounds = 0
@ -248,14 +301,15 @@ class OSFMContext:
config = [
"use_exif_size: no",
"flann_algorithm: KDTREE", # more stable, faster than KMEANS
"flann_algorithm: KDTREE", # more stable, faster than KMEANS
"feature_process_size: %s" % feature_process_size,
"feature_min_frames: %s" % args.min_num_features,
"processes: %s" % args.max_concurrency,
"matching_gps_neighbors: %s" % matcher_neighbors,
"matching_gps_distance: 0",
"matching_graph_rounds: %s" % matcher_graph_rounds,
"optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params else 'yes'),
"optimize_camera_parameters: %s"
% ("no" if args.use_fixed_camera_params else "yes"),
"reconstruction_algorithm: %s" % (args.sfm_algorithm),
"undistorted_image_format: tif",
"bundle_outlier_filtering_type: AUTO",
@ -264,14 +318,16 @@ class OSFMContext:
"triangulation_type: ROBUST",
"retriangulation_ratio: 2",
]
if args.matcher_order > 0:
if not reconstruction.is_georeferenced():
config.append("matching_order_neighbors: %s" % args.matcher_order)
else:
log.ODM_WARNING("Georeferenced reconstruction, ignoring --matcher-order")
log.ODM_WARNING(
"Georeferenced reconstruction, ignoring --matcher-order"
)
if args.camera_lens != 'auto':
if args.camera_lens != "auto":
config.append("camera_projection_type: %s" % args.camera_lens.upper())
matcher_type = args.matcher_type
@ -280,19 +336,23 @@ class OSFMContext:
osfm_matchers = {
"bow": "WORDS",
"flann": "FLANN",
"bruteforce": "BRUTEFORCE"
"bruteforce": "BRUTEFORCE",
}
if not has_gps and not 'matcher_type_is_set' in args:
log.ODM_INFO("No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)")
if not has_gps and not "matcher_type_is_set" in args:
log.ODM_INFO(
"No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)"
)
matcher_type = "bow"
if matcher_type == "bow":
# Cannot use anything other than HAHOG with BOW
if feature_type != "HAHOG":
log.ODM_WARNING("Using BOW matching, will use HAHOG feature type, not SIFT")
log.ODM_WARNING(
"Using BOW matching, will use HAHOG feature type, not SIFT"
)
feature_type = "HAHOG"
config.append("matcher_type: %s" % osfm_matchers[matcher_type])
# GPU acceleration?
@ -309,7 +369,7 @@ class OSFMContext:
log.ODM_INFO("Using GPU for extracting SIFT features")
feature_type = "SIFT_GPU"
self.gpu_sift_feature_extraction = True
config.append("feature_type: %s" % feature_type)
if has_alt:
@ -321,71 +381,87 @@ class OSFMContext:
config.append("align_method: auto")
else:
config.append("align_method: orientation_prior")
if args.use_hybrid_bundle_adjustment:
log.ODM_INFO("Enabling hybrid bundle adjustment")
config.append("bundle_interval: 100") # Bundle after adding 'bundle_interval' cameras
config.append("bundle_new_points_ratio: 1.2") # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
config.append("local_bundle_radius: 1") # Max image graph distance for images to be included in local bundle adjustment
config.append(
"bundle_interval: 100"
) # Bundle after adding 'bundle_interval' cameras
config.append(
"bundle_new_points_ratio: 1.2"
) # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
config.append(
"local_bundle_radius: 1"
) # Max image graph distance for images to be included in local bundle adjustment
else:
config.append("local_bundle_radius: 0")
if gcp_path:
config.append("bundle_use_gcp: yes")
if not args.force_gps:
config.append("bundle_use_gps: no")
else:
config.append("bundle_compensate_gps_bias: yes")
io.copy(gcp_path, self.path("gcp_list.txt"))
config = config + append_config
# write config file
log.ODM_INFO(config)
config_filename = self.get_config_file_path()
with open(config_filename, 'w') as fout:
with open(config_filename, "w") as fout:
fout.write("\n".join(config))
# We impose our own reference_lla
if reconstruction.is_georeferenced():
self.write_reference_lla(reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset, reconstruction.georef.proj4())
self.write_reference_lla(
reconstruction.georef.utm_east_offset,
reconstruction.georef.utm_north_offset,
reconstruction.georef.proj4(),
)
else:
log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
log.ODM_WARNING(
"%s already exists, not rerunning OpenSfM setup" % list_path
)
def get_config_file_path(self):
return os.path.join(self.opensfm_project_path, 'config.yaml')
return os.path.join(self.opensfm_project_path, "config.yaml")
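For reference, the config list assembled in setup() is written to this file as flat key/value lines that OpenSfM reads as YAML; the excerpt below is hypothetical (actual values depend on the dataset and CLI flags):

import yaml

# Hypothetical excerpt of an ODM-generated config.yaml (not from a real run)
example = """\
use_exif_size: no
flann_algorithm: KDTREE
feature_process_size: 2048
feature_min_frames: 10000
matching_gps_neighbors: 0
matching_graph_rounds: 50
feature_type: SIFT
align_method: auto
"""

cfg = yaml.safe_load(example)
print(cfg["feature_process_size"])  # 2048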
def reconstructed(self):
if not io.file_exists(self.path("reconstruction.json")):
return False
with open(self.path("reconstruction.json"), 'r') as f:
with open(self.path("reconstruction.json"), "r") as f:
return f.readline().strip() != "[]"
def extract_metadata(self, rerun=False):
metadata_dir = self.path("exif")
if not io.dir_exists(metadata_dir) or rerun:
self.run('extract_metadata')
def photos_to_metadata(self, photos, rolling_shutter, rolling_shutter_readout, rerun=False):
self.run("extract_metadata")
def photos_to_metadata(
self, photos, rolling_shutter, rolling_shutter_readout, rerun=False
):
metadata_dir = self.path("exif")
if io.dir_exists(metadata_dir) and not rerun:
log.ODM_WARNING("%s already exists, not rerunning photo to metadata" % metadata_dir)
log.ODM_WARNING(
"%s already exists, not rerunning photo to metadata" % metadata_dir
)
return
if io.dir_exists(metadata_dir):
shutil.rmtree(metadata_dir)
os.makedirs(metadata_dir, exist_ok=True)
camera_models = {}
data = DataSet(self.opensfm_project_path)
for p in photos:
d = p.to_opensfm_exif(rolling_shutter, rolling_shutter_readout)
with open(os.path.join(metadata_dir, "%s.exif" % p.filename), 'w') as f:
with open(os.path.join(metadata_dir, "%s.exif" % p.filename), "w") as f:
f.write(json.dumps(d, indent=4))
camera_id = p.camera_id()
@ -413,51 +489,55 @@ class OSFMContext:
def feature_matching(self, rerun=False):
features_dir = self.path("features")
if not io.dir_exists(features_dir) or rerun:
try:
self.run('detect_features')
self.run("detect_features")
except system.SubprocessException as e:
# Sometimes feature extraction by GPU can fail
# for various reasons, so before giving up
# we try to fallback to CPU
if hasattr(self, 'gpu_sift_feature_extraction'):
log.ODM_WARNING("GPU SIFT extraction failed, maybe the graphics card is not supported? Attempting fallback to CPU")
self.update_config({'feature_type': "SIFT"})
if hasattr(self, "gpu_sift_feature_extraction"):
log.ODM_WARNING(
"GPU SIFT extraction failed, maybe the graphics card is not supported? Attempting fallback to CPU"
)
self.update_config({"feature_type": "SIFT"})
if os.path.exists(features_dir):
shutil.rmtree(features_dir)
self.run('detect_features')
self.run("detect_features")
else:
raise e
else:
log.ODM_WARNING('Detect features already done: %s exists' % features_dir)
log.ODM_WARNING("Detect features already done: %s exists" % features_dir)
self.match_features(rerun)
def match_features(self, rerun=False):
matches_dir = self.path("matches")
if not io.dir_exists(matches_dir) or rerun:
self.run('match_features')
self.run("match_features")
else:
log.ODM_WARNING('Match features already done: %s exists' % matches_dir)
log.ODM_WARNING("Match features already done: %s exists" % matches_dir)
def align_reconstructions(self, rerun):
alignment_file = self.path('alignment_done.txt')
alignment_file = self.path("alignment_done.txt")
if not io.file_exists(alignment_file) or rerun:
log.ODM_INFO("Aligning submodels...")
meta_data = metadataset.MetaDataSet(self.opensfm_project_path)
reconstruction_shots = tools.load_reconstruction_shots(meta_data)
transformations = tools.align_reconstructions(reconstruction_shots,
tools.partial_reconstruction_name,
False)
transformations = tools.align_reconstructions(
reconstruction_shots, tools.partial_reconstruction_name, False
)
tools.apply_transformations(transformations)
self.touch(alignment_file)
else:
log.ODM_WARNING('Found an alignment done progress file in: %s' % alignment_file)
log.ODM_WARNING(
"Found an alignment done progress file in: %s" % alignment_file
)
def touch(self, file):
with open(file, 'w') as fout:
with open(file, "w") as fout:
fout.write("Done!\n")
def path(self, *paths):
@ -467,14 +547,21 @@ class OSFMContext:
if not os.path.exists(output) or rerun:
try:
reconstruction_file = self.path("reconstruction.json")
with open(output, 'w') as fout:
fout.write(json.dumps(camera.get_cameras_from_opensfm(reconstruction_file), indent=4))
with open(output, "w") as fout:
fout.write(
json.dumps(
camera.get_cameras_from_opensfm(reconstruction_file),
indent=4,
)
)
except Exception as e:
log.ODM_WARNING("Cannot export cameras to %s. %s." % (output, str(e)))
else:
log.ODM_INFO("Already extracted cameras")
def convert_and_undistort(self, rerun=False, imageFilter=None, image_list=None, runId="nominal"):
def convert_and_undistort(
self, rerun=False, imageFilter=None, image_list=None, runId="nominal"
):
log.ODM_INFO("Undistorting %s ..." % self.opensfm_project_path)
done_flag_file = self.path("undistorted", "%s_done.txt" % runId)
@ -484,9 +571,10 @@ class OSFMContext:
if image_list is not None:
ds._set_image_list(image_list)
undistort.run_dataset(ds, "reconstruction.json",
0, None, "undistorted", imageFilter)
undistort.run_dataset(
ds, "reconstruction.json", 0, None, "undistorted", imageFilter
)
self.touch(done_flag_file)
else:
log.ODM_WARNING("Already undistorted (%s)" % runId)
@ -503,13 +591,13 @@ class OSFMContext:
def backup_reconstruction(self):
if os.path.exists(self.recon_backup_file()):
os.remove(self.recon_backup_file())
log.ODM_INFO("Backing up reconstruction")
shutil.copyfile(self.recon_file(), self.recon_backup_file())
def recon_backup_file(self):
return self.path("reconstruction.backup.json")
def recon_file(self):
return self.path("reconstruction.json")
@ -519,9 +607,9 @@ class OSFMContext:
# Augment reconstruction.json
for recon in reconstruction:
shots = recon['shots']
shots = recon["shots"]
sids = list(shots)
for shot_id in sids:
secondary_photos = p2s.get(shot_id)
if secondary_photos is None:
@ -531,10 +619,9 @@ class OSFMContext:
for p in secondary_photos:
shots[p.filename] = shots[shot_id]
with open(self.recon_file(), 'w') as f:
with open(self.recon_file(), "w") as f:
f.write(json.dumps(reconstruction))
def update_config(self, cfg_dict):
cfg_file = self.get_config_file_path()
log.ODM_INFO("Updating %s" % cfg_file)
@ -545,12 +632,16 @@ class OSFMContext:
for k, v in cfg_dict.items():
cfg[k] = v
log.ODM_INFO("%s: %s" % (k, v))
with open(cfg_file, 'w') as fout:
with open(cfg_file, "w") as fout:
fout.write(yaml.dump(cfg, default_flow_style=False))
except Exception as e:
log.ODM_WARNING("Cannot update configuration file %s: %s" % (cfg_file, str(e)))
log.ODM_WARNING(
"Cannot update configuration file %s: %s" % (cfg_file, str(e))
)
else:
log.ODM_WARNING("Tried to update configuration, but %s does not exist." % cfg_file)
log.ODM_WARNING(
"Tried to update configuration, but %s does not exist." % cfg_file
)
def export_stats(self, rerun=False):
log.ODM_INFO("Export reconstruction stats")
@ -569,7 +660,7 @@ class OSFMContext:
pdf_report = report.Report(data, odm_stats)
pdf_report.generate_report()
pdf_report.save_report("report.pdf")
if os.path.exists(osfm_report_path):
if os.path.exists(report_path):
os.unlink(report_path)
@ -578,20 +669,22 @@ class OSFMContext:
log.ODM_WARNING("Report could not be generated")
else:
log.ODM_WARNING("Report %s already exported" % report_path)
def write_reference_lla(self, offset_x, offset_y, proj4):
reference_lla = self.path("reference_lla.json")
longlat = CRS.from_epsg("4326")
lon, lat = location.transform2(CRS.from_proj4(proj4), longlat, offset_x, offset_y)
lon, lat = location.transform2(
CRS.from_proj4(proj4), longlat, offset_x, offset_y
)
with open(reference_lla, "w") as f:
f.write(
json.dumps(
{"latitude": lat, "longitude": lon, "altitude": 0.0}, indent=4
)
)
with open(reference_lla, 'w') as f:
f.write(json.dumps({
'latitude': lat,
'longitude': lon,
'altitude': 0.0
}, indent=4))
log.ODM_INFO("Wrote reference_lla.json")
def ground_control_points(self, proj4):
@ -602,7 +695,7 @@ class OSFMContext:
if not io.file_exists(gcp_stats_file):
return []
gcps_stats = {}
try:
with open(gcp_stats_file) as f:
@ -612,35 +705,37 @@ class OSFMContext:
if not gcps_stats:
return []
ds = DataSet(self.opensfm_project_path)
reference = ds.load_reference()
projection = pyproj.Proj(proj4)
result = []
for gcp in gcps_stats:
geocoords = _transform(gcp['coordinates'], reference, projection)
result.append({
'id': gcp['id'],
'observations': gcp['observations'],
'coordinates': geocoords,
'error': gcp['error']
})
geocoords = _transform(gcp["coordinates"], reference, projection)
result.append(
{
"id": gcp["id"],
"observations": gcp["observations"],
"coordinates": geocoords,
"error": gcp["error"],
}
)
return result
def name(self):
return os.path.basename(os.path.abspath(self.path("..")))
def get_submodel_argv(args, submodels_path = None, submodel_name = None):
def get_submodel_argv(args, submodels_path=None, submodel_name=None):
"""
Gets argv for a submodel starting from the args passed to the application startup.
Additionally, if project_name, submodels_path and submodel_name are passed, the function
handles the <project name> value and --project-path detection / override.
When all arguments are set to None, --project-path and project name are always removed.
:return the same as argv, but removing references to --split,
:return the same as argv, but removing references to --split,
setting/replacing --project-path and name
removing --rerun-from, --rerun, --rerun-all, --sm-cluster
removing --pc-las, --pc-csv, --pc-ept, --tiles flags (processing these is wasteful)
@ -652,9 +747,29 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
reading the contents of --cameras
reading the contents of --boundary
"""
assure_always = ['orthophoto_cutline', 'dem_euclidean_map', 'skip_3dmodel', 'skip_report']
remove_always = ['split', 'split_overlap', 'rerun_from', 'rerun', 'gcp', 'end_with', 'sm_cluster', 'rerun_all', 'pc_csv', 'pc_las', 'pc_ept', 'tiles', 'copy-to', 'cog']
read_json_always = ['cameras', 'boundary']
assure_always = [
"orthophoto_cutline",
"dem_euclidean_map",
"skip_3dmodel",
"skip_report",
]
remove_always = [
"split",
"split_overlap",
"rerun_from",
"rerun",
"gcp",
"end_with",
"sm_cluster",
"rerun_all",
"pc_csv",
"pc_las",
"pc_ept",
"tiles",
"copy-to",
"cog",
]
read_json_always = ["cameras", "boundary"]
argv = sys.argv
@ -662,14 +777,14 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
startup_script = argv[0]
# On Windows, make sure we always invoke the "run.bat" file
if sys.platform == 'win32':
if sys.platform == "win32":
startup_script_dir = os.path.dirname(startup_script)
startup_script = os.path.join(startup_script_dir, "run")
result = [startup_script]
result = [startup_script]
args_dict = vars(args).copy()
set_keys = [k[:-len("_is_set")] for k in args_dict.keys() if k.endswith("_is_set")]
set_keys = [k[: -len("_is_set")] for k in args_dict.keys() if k.endswith("_is_set")]
# Handle project name and project path (special case)
if "name" in set_keys:
@ -688,7 +803,7 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
if not k in set_keys:
set_keys.append(k)
args_dict[k] = True
# Read JSON always
for k in read_json_always:
if k in set_keys:
@ -710,13 +825,13 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
# Populate result
for k in set_keys:
result.append("--%s" % k.replace("_", "-"))
# No second value for booleans
if isinstance(args_dict[k], bool) and args_dict[k] == True:
continue
result.append(str(args_dict[k]))
if submodels_path:
result.append("--project-path")
result.append(submodels_path)
@ -726,6 +841,7 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
return result
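To make the argv rewriting described in the docstring above concrete, here is a simplified stand-in (not ODM's implementation) showing the kind of transformation it performs; the paths, flags and submodel name below are made up:

def strip_and_retarget(argv, submodels_path, submodel_name,
                       remove=("--split", "--split-overlap", "--rerun-from", "--sm-cluster")):
    out, skip_next = [argv[0]], False
    for a in argv[1:]:
        if skip_next:                  # drop the value that followed a removed flag
            skip_next = False
            continue
        if a in remove or a == "--project-path":
            skip_next = True           # the flag and its value are both dropped
            continue
        out.append(a)
    # Point the submodel at its own project path and name
    return out + ["--project-path", submodels_path, submodel_name]

argv = ["run", "--project-path", "/datasets", "--split", "400", "--dsm"]
print(strip_and_retarget(argv, "/datasets/brighton/submodels", "submodel_0000"))
# ['run', '--dsm', '--project-path', '/datasets/brighton/submodels', 'submodel_0000']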
def get_submodel_args_dict(args):
submodel_argv = get_submodel_argv(args)
result = {}
@ -757,8 +873,8 @@ def get_submodel_paths(submodels_path, *paths):
return result
for f in os.listdir(submodels_path):
if f.startswith('submodel'):
p = os.path.join(submodels_path, f, *paths)
if f.startswith("submodel"):
p = os.path.join(submodels_path, f, *paths)
if os.path.exists(p):
result.append(p)
else:
@ -766,6 +882,7 @@ def get_submodel_paths(submodels_path, *paths):
return result
def get_all_submodel_paths(submodels_path, *all_paths):
"""
:return Existing, multiple paths for all submodels as a nested list (all or nothing for each submodel)
@ -780,11 +897,11 @@ def get_all_submodel_paths(submodels_path, *all_paths):
return result
for f in os.listdir(submodels_path):
if f.startswith('submodel'):
if f.startswith("submodel"):
all_found = True
for ap in all_paths:
p = os.path.join(submodels_path, f, ap)
p = os.path.join(submodels_path, f, ap)
if not os.path.exists(p):
log.ODM_WARNING("Missing %s from submodel %s" % (p, f))
all_found = False
@ -794,11 +911,16 @@ def get_all_submodel_paths(submodels_path, *all_paths):
return result
def is_submodel(opensfm_root):
# A bit hackish, but works without introducing additional markers / flags
# Look at the path of the opensfm directory and see if "submodel_" is part of it
parts = os.path.abspath(opensfm_root).split(os.path.sep)
return (len(parts) >= 2 and parts[-2][:9] == "submodel_") or \
os.path.isfile(os.path.join(opensfm_root, "split_merge_stop_at_reconstruction.txt")) or \
os.path.isfile(os.path.join(opensfm_root, "features", "empty"))
return (
(len(parts) >= 2 and parts[-2][:9] == "submodel_")
or os.path.isfile(
os.path.join(opensfm_root, "split_merge_stop_at_reconstruction.txt")
)
or os.path.isfile(os.path.join(opensfm_root, "features", "empty"))
)
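A quick illustration of the path-based part of this check, using hypothetical paths (the real function additionally accepts the two marker files as evidence):

import os

def parent_is_submodel(opensfm_root):
    # Same test as above: the directory one level above "opensfm" starts with "submodel_"
    parts = os.path.abspath(opensfm_root).split(os.path.sep)
    return len(parts) >= 2 and parts[-2].startswith("submodel_")

print(parent_is_submodel("/data/brighton/submodels/submodel_0002/opensfm"))  # True
print(parent_is_submodel("/data/brighton/opensfm"))                          # False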

Diff file is too large. Load Diff

View file

@ -12,6 +12,7 @@ from opendm.dem.pdal import run_pipeline
from opendm.opc import classify
from opendm.dem import commands
def ply_info(input_ply):
if not os.path.exists(input_ply):
raise IOError("%s does not exist" % input_ply)
@ -21,7 +22,7 @@ def ply_info(input_ply):
has_views = False
vertex_count = 0
with open(input_ply, 'r', errors='ignore') as f:
with open(input_ply, "r", errors="ignore") as f:
line = f.readline().strip().lower()
i = 0
while line != "end_header":
@ -37,35 +38,48 @@ def ply_info(input_ply):
i += 1
if i > 100:
raise IOError("Cannot find end_header field. Invalid PLY?")
return {
'has_normals': has_normals,
'vertex_count': vertex_count,
'has_views': has_views,
'header_lines': i + 1
"has_normals": has_normals,
"vertex_count": vertex_count,
"has_views": has_views,
"header_lines": i + 1,
}
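For context, only the ASCII header is scanned; below is a made-up header of the kind ply_info() expects, together with the values the scan above would derive from it:

EXAMPLE_HEADER = [
    "ply",
    "format binary_little_endian 1.0",
    "element vertex 152983",
    "property float x", "property float y", "property float z",
    "property float nx", "property float ny", "property float nz",
    "property uchar views",
    "end_header",
]

vertex_count = int(next(l for l in EXAMPLE_HEADER
                        if l.startswith("element vertex ")).split(" ")[-1])
has_normals = any(l.split(" ")[-1] in ("nx", "ny", "nz")
                  for l in EXAMPLE_HEADER if l.startswith("property"))
has_views = any(l.split(" ")[-1] == "views"
                for l in EXAMPLE_HEADER if l.startswith("property"))
print(vertex_count, has_normals, has_views)  # 152983 True True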
def split(input_point_cloud, outdir, filename_template, capacity, dims=None):
log.ODM_INFO("Splitting point cloud filtering in chunks of {} vertices".format(capacity))
log.ODM_INFO(
"Splitting point cloud filtering in chunks of {} vertices".format(capacity)
)
if not os.path.exists(input_point_cloud):
log.ODM_ERROR("{} does not exist, cannot split point cloud. The program will now exit.".format(input_point_cloud))
log.ODM_ERROR(
"{} does not exist, cannot split point cloud. The program will now exit.".format(
input_point_cloud
)
)
sys.exit(1)
if not os.path.exists(outdir):
system.mkdir_p(outdir)
if len(os.listdir(outdir)) != 0:
log.ODM_ERROR("%s already contains some files. The program will now exit.".format(outdir))
log.ODM_ERROR(
"%s already contains some files. The program will now exit.".format(outdir)
)
sys.exit(1)
cmd = 'pdal split -i "%s" -o "%s" --capacity %s ' % (input_point_cloud, os.path.join(outdir, filename_template), capacity)
cmd = 'pdal split -i "%s" -o "%s" --capacity %s ' % (
input_point_cloud,
os.path.join(outdir, filename_template),
capacity,
)
if filename_template.endswith(".ply"):
cmd += ("--writers.ply.sized_types=false "
"--writers.ply.storage_mode=\"little endian\" ")
cmd += (
"--writers.ply.sized_types=false "
'--writers.ply.storage_mode="little endian" '
)
if dims is not None:
cmd += '--writers.ply.dims="%s"' % dims
system.run(cmd)
@ -73,53 +87,71 @@ def split(input_point_cloud, outdir, filename_template, capacity, dims=None):
return [os.path.join(outdir, f) for f in os.listdir(outdir)]
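With hypothetical paths and capacity, the command assembled above comes out roughly as follows (the pdal flags are the ones from the code; only the values are made up):

import os

input_point_cloud = "/tmp/odm/odm_filterpoints/point_cloud.ply"   # hypothetical
outdir, filename_template, capacity = "/tmp/odm/split", "part.ply", 1000000

cmd = 'pdal split -i "%s" -o "%s" --capacity %s ' % (
    input_point_cloud, os.path.join(outdir, filename_template), capacity)
cmd += ('--writers.ply.sized_types=false '
        '--writers.ply.storage_mode="little endian" ')
print(cmd)
# pdal split -i "/tmp/odm/odm_filterpoints/point_cloud.ply" -o "/tmp/odm/split/part.ply" --capacity 1000000 ...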
def filter(input_point_cloud, output_point_cloud, output_stats, standard_deviation=2.5, sample_radius=0, boundary=None, max_concurrency=1):
def filter(
input_point_cloud,
output_point_cloud,
output_stats,
standard_deviation=2.5,
sample_radius=0,
boundary=None,
max_concurrency=1,
):
"""
Filters a point cloud
"""
if not os.path.exists(input_point_cloud):
log.ODM_ERROR("{} does not exist. The program will now exit.".format(input_point_cloud))
log.ODM_ERROR(
"{} does not exist. The program will now exit.".format(input_point_cloud)
)
sys.exit(1)
args = [
'--input "%s"' % input_point_cloud,
'--output "%s"' % output_point_cloud,
'--concurrency %s' % max_concurrency
"--concurrency %s" % max_concurrency,
]
if sample_radius > 0:
log.ODM_INFO("Sampling points around a %sm radius" % sample_radius)
args.append('--radius %s' % sample_radius)
args.append("--radius %s" % sample_radius)
meank = 16
log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation))
args.append('--meank %s' % meank)
args.append('--std %s' % standard_deviation)
log.ODM_INFO(
"Filtering {} (statistical, meanK {}, standard deviation {})".format(
input_point_cloud, meank, standard_deviation
)
)
args.append("--meank %s" % meank)
args.append("--std %s" % standard_deviation)
args.append('--stats "%s"' % output_stats)
if boundary is not None:
log.ODM_INFO("Boundary {}".format(boundary))
fd, boundary_json_file = tempfile.mkstemp(suffix='.boundary.json')
fd, boundary_json_file = tempfile.mkstemp(suffix=".boundary.json")
os.close(fd)
with open(boundary_json_file, 'w') as f:
with open(boundary_json_file, "w") as f:
f.write(as_geojson(boundary))
args.append('--boundary "%s"' % boundary_json_file)
system.run('"%s" %s' % (context.fpcfilter_path, " ".join(args)))
if not os.path.exists(output_point_cloud):
log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))
log.ODM_WARNING(
"{} not found, filtering has failed.".format(output_point_cloud)
)
def get_spacing(stats_file, resolution_fallback=5.0):
def fallback():
log.ODM_WARNING("Cannot read %s, falling back to resolution estimate" % stats_file)
log.ODM_WARNING(
"Cannot read %s, falling back to resolution estimate" % stats_file
)
return (resolution_fallback / 100.0) / 2.0
if not os.path.isfile(stats_file):
return fallback()
with open(stats_file, 'r') as f:
with open(stats_file, "r") as f:
j = json.loads(f.read())
if "spacing" in j:
d = j["spacing"]
@ -130,17 +162,25 @@ def get_spacing(stats_file, resolution_fallback=5.0):
else:
return fallback()
def export_info_json(pointcloud_path, info_file_path):
system.run('pdal info --dimensions "X,Y,Z" "{0}" > "{1}"'.format(pointcloud_path, info_file_path))
system.run(
'pdal info --dimensions "X,Y,Z" "{0}" > "{1}"'.format(
pointcloud_path, info_file_path
)
)
def export_summary_json(pointcloud_path, summary_file_path):
system.run('pdal info --summary "{0}" > "{1}"'.format(pointcloud_path, summary_file_path))
system.run(
'pdal info --summary "{0}" > "{1}"'.format(pointcloud_path, summary_file_path)
)
def get_extent(input_point_cloud):
fd, json_file = tempfile.mkstemp(suffix='.json')
fd, json_file = tempfile.mkstemp(suffix=".json")
os.close(fd)
# Get point cloud extent
fallback = False
@ -151,38 +191,64 @@ def get_extent(input_point_cloud):
try:
if not fallback:
run('pdal info --summary "{0}" > "{1}"'.format(input_point_cloud, json_file))
run(
'pdal info --summary "{0}" > "{1}"'.format(input_point_cloud, json_file)
)
except:
fallback = True
run('pdal info "{0}" > "{1}"'.format(input_point_cloud, json_file))
bounds = {}
with open(json_file, 'r') as f:
with open(json_file, "r") as f:
result = json.loads(f.read())
if not fallback:
summary = result.get('summary')
if summary is None: raise Exception("Cannot compute summary for %s (summary key missing)" % input_point_cloud)
bounds = summary.get('bounds')
else:
stats = result.get('stats')
if stats is None: raise Exception("Cannot compute bounds for %s (stats key missing)" % input_point_cloud)
bbox = stats.get('bbox')
if bbox is None: raise Exception("Cannot compute bounds for %s (bbox key missing)" % input_point_cloud)
native = bbox.get('native')
if native is None: raise Exception("Cannot compute bounds for %s (native key missing)" % input_point_cloud)
bounds = native.get('bbox')
if bounds is None: raise Exception("Cannot compute bounds for %s (bounds key missing)" % input_point_cloud)
if bounds.get('maxx', None) is None or \
bounds.get('minx', None) is None or \
bounds.get('maxy', None) is None or \
bounds.get('miny', None) is None or \
bounds.get('maxz', None) is None or \
bounds.get('minz', None) is None:
raise Exception("Cannot compute bounds for %s (invalid keys) %s" % (input_point_cloud, str(bounds)))
if not fallback:
summary = result.get("summary")
if summary is None:
raise Exception(
"Cannot compute summary for %s (summary key missing)"
% input_point_cloud
)
bounds = summary.get("bounds")
else:
stats = result.get("stats")
if stats is None:
raise Exception(
"Cannot compute bounds for %s (stats key missing)"
% input_point_cloud
)
bbox = stats.get("bbox")
if bbox is None:
raise Exception(
"Cannot compute bounds for %s (bbox key missing)"
% input_point_cloud
)
native = bbox.get("native")
if native is None:
raise Exception(
"Cannot compute bounds for %s (native key missing)"
% input_point_cloud
)
bounds = native.get("bbox")
if bounds is None:
raise Exception(
"Cannot compute bounds for %s (bounds key missing)" % input_point_cloud
)
if (
bounds.get("maxx", None) is None
or bounds.get("minx", None) is None
or bounds.get("maxy", None) is None
or bounds.get("miny", None) is None
or bounds.get("maxz", None) is None
or bounds.get("minz", None) is None
):
raise Exception(
"Cannot compute bounds for %s (invalid keys) %s"
% (input_point_cloud, str(bounds))
)
os.remove(json_file)
return bounds
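To clarify the two JSON shapes handled above, here is a hedged sketch of what `pdal info --summary` (fast path) and plain `pdal info` (fallback) produce; all numbers are made up:

# Fast path: pdal info --summary
summary_result = {
    "summary": {
        "bounds": {"minx": 322263.0, "maxx": 322511.0,
                   "miny": 5157582.0, "maxy": 5157798.0,
                   "minz": 140.2, "maxz": 212.9}
    }
}

# Fallback path: plain pdal info
stats_result = {
    "stats": {"bbox": {"native": {"bbox": {
        "minx": 322263.0, "maxx": 322511.0,
        "miny": 5157582.0, "maxy": 5157798.0,
        "minz": 140.2, "maxz": 212.9}}}}
}

# Both paths end up with the same six min/max keys in `bounds`
assert summary_result["summary"]["bounds"] == stats_result["stats"]["bbox"]["native"]["bbox"]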
@ -198,12 +264,12 @@ def merge(input_point_cloud_files, output_file, rerun=False):
os.remove(output_file)
kwargs = {
'all_inputs': " ".join(map(double_quote, input_point_cloud_files)),
'output': output_file
"all_inputs": " ".join(map(double_quote, input_point_cloud_files)),
"output": output_file,
}
system.run('lasmerge -i {all_inputs} -o "{output}"'.format(**kwargs))
def fast_merge_ply(input_point_cloud_files, output_file):
# Assumes that all input files share the same header/content format
@ -213,33 +279,35 @@ def fast_merge_ply(input_point_cloud_files, output_file):
if num_files == 0:
log.ODM_WARNING("No input point cloud files to process")
return
if io.file_exists(output_file):
log.ODM_WARNING("Removing previous point cloud: %s" % output_file)
os.remove(output_file)
vertex_count = sum([ply_info(pcf)['vertex_count'] for pcf in input_point_cloud_files])
vertex_count = sum(
[ply_info(pcf)["vertex_count"] for pcf in input_point_cloud_files]
)
master_file = input_point_cloud_files[0]
with open(output_file, "wb") as out:
with open(master_file, "r", errors="ignore") as fhead:
# Copy header
line = fhead.readline()
out.write(line.encode('utf8'))
out.write(line.encode("utf8"))
i = 0
while line.strip().lower() != "end_header":
line = fhead.readline()
# Intercept element vertex field
if line.lower().startswith("element vertex "):
out.write(("element vertex %s\n" % vertex_count).encode('utf8'))
out.write(("element vertex %s\n" % vertex_count).encode("utf8"))
else:
out.write(line.encode('utf8'))
out.write(line.encode("utf8"))
i += 1
if i > 100:
raise IOError("Cannot find end_header field. Invalid PLY?")
for ipc in input_point_cloud_files:
i = 0
with open(ipc, "rb") as fin:
@ -251,10 +319,10 @@ def fast_merge_ply(input_point_cloud_files, output_file):
i += 1
if i > 100:
raise IOError("Cannot find end_header field. Invalid PLY?")
# Write fields
out.write(fin.read())
return output_file
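The only header field rewritten in the merged file is the vertex count; a tiny sketch of that arithmetic with made-up counts for two inputs:

counts = [152983, 98417]                 # vertex_count from ply_info() for each input (made up)
merged_line = "element vertex %s\n" % sum(counts)
print(merged_line)                       # element vertex 251400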
@ -265,78 +333,107 @@ def merge_ply(input_point_cloud_files, output_file, dims=None):
return
cmd = [
'pdal',
'merge',
'--writers.ply.sized_types=false',
"pdal",
"merge",
"--writers.ply.sized_types=false",
'--writers.ply.storage_mode="little endian"',
('--writers.ply.dims="%s"' % dims) if dims is not None else '',
' '.join(map(double_quote, input_point_cloud_files + [output_file])),
('--writers.ply.dims="%s"' % dims) if dims is not None else "",
" ".join(map(double_quote, input_point_cloud_files + [output_file])),
]
system.run(' '.join(cmd))
system.run(" ".join(cmd))
def post_point_cloud_steps(args, tree, rerun=False):
# Classify and rectify before generating derivate files
if args.pc_classify:
pc_classify_marker = os.path.join(tree.odm_georeferencing, 'pc_classify_done.txt')
pc_classify_marker = os.path.join(
tree.odm_georeferencing, "pc_classify_done.txt"
)
if not io.file_exists(pc_classify_marker) or rerun:
log.ODM_INFO("Classifying {} using Simple Morphological Filter (1/2)".format(tree.odm_georeferencing_model_laz))
commands.classify(tree.odm_georeferencing_model_laz,
args.smrf_scalar,
args.smrf_slope,
args.smrf_threshold,
args.smrf_window
)
log.ODM_INFO(
"Classifying {} using Simple Morphological Filter (1/2)".format(
tree.odm_georeferencing_model_laz
)
)
commands.classify(
tree.odm_georeferencing_model_laz,
args.smrf_scalar,
args.smrf_slope,
args.smrf_threshold,
args.smrf_window,
)
log.ODM_INFO("Classifying {} using OpenPointClass (2/2)".format(tree.odm_georeferencing_model_laz))
log.ODM_INFO(
"Classifying {} using OpenPointClass (2/2)".format(
tree.odm_georeferencing_model_laz
)
)
classify(tree.odm_georeferencing_model_laz, args.max_concurrency)
with open(pc_classify_marker, 'w') as f:
f.write('Classify: smrf\n')
f.write('Scalar: {}\n'.format(args.smrf_scalar))
f.write('Slope: {}\n'.format(args.smrf_slope))
f.write('Threshold: {}\n'.format(args.smrf_threshold))
f.write('Window: {}\n'.format(args.smrf_window))
with open(pc_classify_marker, "w") as f:
f.write("Classify: smrf\n")
f.write("Scalar: {}\n".format(args.smrf_scalar))
f.write("Slope: {}\n".format(args.smrf_slope))
f.write("Threshold: {}\n".format(args.smrf_threshold))
f.write("Window: {}\n".format(args.smrf_window))
if args.pc_rectify:
commands.rectify(tree.odm_georeferencing_model_laz)
# XYZ point cloud output
if args.pc_csv:
log.ODM_INFO("Creating CSV file (XYZ format)")
if not io.file_exists(tree.odm_georeferencing_xyz_file) or rerun:
system.run("pdal translate -i \"{}\" "
"-o \"{}\" "
system.run(
'pdal translate -i "{}" '
'-o "{}" '
"--writers.text.format=csv "
"--writers.text.order=\"X,Y,Z\" "
'--writers.text.order="X,Y,Z" '
"--writers.text.keep_unspecified=false ".format(
tree.odm_georeferencing_model_laz,
tree.odm_georeferencing_xyz_file))
tree.odm_georeferencing_model_laz, tree.odm_georeferencing_xyz_file
)
)
else:
log.ODM_WARNING("Found existing CSV file %s" % tree.odm_georeferencing_xyz_file)
log.ODM_WARNING(
"Found existing CSV file %s" % tree.odm_georeferencing_xyz_file
)
# LAS point cloud output
if args.pc_las:
log.ODM_INFO("Creating LAS file")
if not io.file_exists(tree.odm_georeferencing_model_las) or rerun:
system.run("pdal translate -i \"{}\" "
"-o \"{}\" ".format(
tree.odm_georeferencing_model_laz,
tree.odm_georeferencing_model_las))
system.run(
'pdal translate -i "{}" '
'-o "{}" '.format(
tree.odm_georeferencing_model_laz, tree.odm_georeferencing_model_las
)
)
else:
log.ODM_WARNING("Found existing LAS file %s" % tree.odm_georeferencing_model_las)
log.ODM_WARNING(
"Found existing LAS file %s" % tree.odm_georeferencing_model_las
)
# EPT point cloud output
if args.pc_ept:
log.ODM_INFO("Creating Entwine Point Tile output")
entwine.build([tree.odm_georeferencing_model_laz], tree.entwine_pointcloud, max_concurrency=args.max_concurrency, rerun=rerun)
entwine.build(
[tree.odm_georeferencing_model_laz],
tree.entwine_pointcloud,
max_concurrency=args.max_concurrency,
rerun=rerun,
)
# COPC point clouds
if args.pc_copc:
log.ODM_INFO("Creating Cloud Optimized Point Cloud (COPC)")
copc_output = io.related_file_path(tree.odm_georeferencing_model_laz, postfix=".copc")
entwine.build_copc([tree.odm_georeferencing_model_laz], copc_output, convert_rgb_8_to_16=True)
copc_output = io.related_file_path(
tree.odm_georeferencing_model_laz, postfix=".copc"
)
entwine.build_copc(
[tree.odm_georeferencing_model_laz], copc_output, convert_rgb_8_to_16=True
)

View file

@ -2,13 +2,14 @@ import socket
import os
from opendm import log
PROGRESS_BROADCAST_PORT = 6367 #ODMR
PROGRESS_BROADCAST_PORT = 6367 # ODMR
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except:
log.ODM_WARNING("Cannot create UDP socket, progress reporting will be disabled.")
sock = None
class Broadcaster:
def __init__(self, port):
self.port = port
@ -28,13 +29,24 @@ class Broadcaster:
UDP_IP = "127.0.0.1"
if global_progress > 100:
log.ODM_WARNING("Global progress is > 100 (%s), please contact the developers." % global_progress)
log.ODM_WARNING(
"Global progress is > 100 (%s), please contact the developers."
% global_progress
)
global_progress = 100
try:
sock.sendto("PGUP/{}/{}/{}".format(self.pid, self.project_name, float(global_progress)).encode('utf8'),
(UDP_IP, self.port))
sock.sendto(
"PGUP/{}/{}/{}".format(
self.pid, self.project_name, float(global_progress)
).encode("utf8"),
(UDP_IP, self.port),
)
except Exception as e:
log.ODM_WARNING("Failed to broadcast progress update on UDP port %s (%s)" % (str(self.port), str(e)))
log.ODM_WARNING(
"Failed to broadcast progress update on UDP port %s (%s)"
% (str(self.port), str(e))
)
progressbc = Broadcaster(PROGRESS_BROADCAST_PORT)
progressbc = Broadcaster(PROGRESS_BROADCAST_PORT)
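The datagrams sent above are plain text of the form PGUP/<pid>/<project name>/<progress>; a minimal listener sketch (not part of ODM) that decodes them on the same machine, assuming the default port 6367:

import socket

listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
listener.bind(("127.0.0.1", 6367))
while True:
    data, _ = listener.recvfrom(1024)
    tag, pid, project, progress = data.decode("utf8").split("/", 3)
    if tag == "PGUP":
        print("project %s (pid %s) at %s%%" % (project, pid, float(progress)))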

View file

@ -4,11 +4,14 @@ from osgeo.gdalconst import GA_Update
from opendm import io
from opendm import log
def get_pseudogeo_utm():
return '+proj=utm +zone=30 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
return "+proj=utm +zone=30 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
def get_pseudogeo_scale():
return 0.1 # Arbitrarily chosen
return 0.1 # Arbitrarily chosen
def add_pseudo_georeferencing(geotiff):
if not io.file_exists(geotiff):
@ -16,15 +19,23 @@ def add_pseudo_georeferencing(geotiff):
return
try:
log.ODM_INFO("Adding pseudo georeferencing (raster should show up at the equator) to %s" % geotiff)
log.ODM_INFO(
"Adding pseudo georeferencing (raster should show up at the equator) to %s"
% geotiff
)
dst_ds = gdal.Open(geotiff, GA_Update)
srs = osr.SpatialReference()
srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
srs.ImportFromProj4(get_pseudogeo_utm())
dst_ds.SetProjection( srs.ExportToWkt() )
dst_ds.SetGeoTransform( [ 0.0, get_pseudogeo_scale(), 0.0, 0.0, 0.0, -get_pseudogeo_scale() ] )
dst_ds.SetProjection(srs.ExportToWkt())
dst_ds.SetGeoTransform(
[0.0, get_pseudogeo_scale(), 0.0, 0.0, 0.0, -get_pseudogeo_scale()]
)
dst_ds = None
except Exception as e:
log.ODM_WARNING("Cannot add pseudo georeferencing to %s (%s), skipping..." % (geotiff, str(e)))
log.ODM_WARNING(
"Cannot add pseudo georeferencing to %s (%s), skipping..."
% (geotiff, str(e))
)
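For reference, the GDAL geotransform set above maps pixel (column, row) to projected metres as x = 0.1 * column and y = -0.1 * row, which is what places the raster near the equator of the fake UTM zone at 10 cm per pixel; a short check with made-up pixel indices:

def apply_geotransform(gt, col, row):
    # GDAL convention: (origin_x, pixel_width, row_rotation, origin_y, col_rotation, pixel_height)
    x = gt[0] + col * gt[1] + row * gt[2]
    y = gt[3] + col * gt[4] + row * gt[5]
    return x, y

gt = [0.0, 0.1, 0.0, 0.0, 0.0, -0.1]     # same values as above
print(apply_geotransform(gt, 250, 400))   # (25.0, -40.0)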

View file

@ -20,33 +20,46 @@ try:
except ImportError:
import Queue as queue
class LocalRemoteExecutor:
"""
A class for performing OpenSfM reconstructions and full ODM pipeline executions
using a mix of local and remote processing. Tasks are executed locally one at a time
and remotely until a node runs out of available slots for processing. This allows us
to use the processing power of the current machine as well as offloading tasks to a
to use the processing power of the current machine as well as offloading tasks to a
network node.
"""
def __init__(self, nodeUrl, rolling_shutter = False, rerun = False):
def __init__(self, nodeUrl, rolling_shutter=False, rerun=False):
self.node = Node.from_url(nodeUrl)
self.params = {
'tasks': [],
'threads': [],
'rolling_shutter': rolling_shutter,
'rerun': rerun
"tasks": [],
"threads": [],
"rolling_shutter": rolling_shutter,
"rerun": rerun,
}
self.node_online = True
log.ODM_INFO("LRE: Initializing using cluster node %s:%s" % (self.node.host, self.node.port))
log.ODM_INFO(
"LRE: Initializing using cluster node %s:%s"
% (self.node.host, self.node.port)
)
try:
info = self.node.info()
log.ODM_INFO("LRE: Node is online and running %s version %s" % (info.engine, info.engine_version))
log.ODM_INFO(
"LRE: Node is online and running %s version %s"
% (info.engine, info.engine_version)
)
except exceptions.NodeConnectionError:
log.ODM_WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.")
log.ODM_WARNING(
"LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally."
)
self.node_online = False
except Exception as e:
raise system.ExitException("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
raise system.ExitException(
"LRE: An unexpected problem happened while opening the node connection: %s"
% str(e)
)
def set_projects(self, paths):
self.project_paths = paths
@ -66,7 +79,7 @@ class LocalRemoteExecutor:
error = None
local_processing = False
max_remote_tasks = None
calculate_task_limit_lock = threading.Lock()
finished_tasks = AtomicCounter(0)
remote_running_tasks = AtomicCounter(0)
@ -83,28 +96,37 @@ class LocalRemoteExecutor:
except exceptions.OdmError:
removed = False
return removed
def cleanup_remote_tasks():
if self.params['tasks']:
if self.params["tasks"]:
log.ODM_WARNING("LRE: Attempting to cleanup remote tasks")
else:
log.ODM_INFO("LRE: No remote tasks left to cleanup")
for task in self.params['tasks']:
log.ODM_INFO("LRE: Removing remote task %s... %s" % (task.uuid, 'OK' if remove_task_safe(task) else 'NO'))
for task in self.params["tasks"]:
log.ODM_INFO(
"LRE: Removing remote task %s... %s"
% (task.uuid, "OK" if remove_task_safe(task) else "NO")
)
def handle_result(task, local, error = None, partial=False):
def handle_result(task, local, error=None, partial=False):
def cleanup_remote():
if not partial and task.remote_task:
log.ODM_INFO("LRE: Cleaning up remote task (%s)... %s" % (task.remote_task.uuid, 'OK' if remove_task_safe(task.remote_task) else 'NO'))
self.params['tasks'].remove(task.remote_task)
log.ODM_INFO(
"LRE: Cleaning up remote task (%s)... %s"
% (
task.remote_task.uuid,
"OK" if remove_task_safe(task.remote_task) else "NO",
)
)
self.params["tasks"].remove(task.remote_task)
task.remote_task = None
if error:
log.ODM_WARNING("LRE: %s failed with: %s" % (task, str(error)))
# Special case in which the error is caused by a SIGTERM signal
# this means a local processing was terminated either by CTRL+C or
# this means a local processing was terminated either by CTRL+C or
# by canceling the task.
if str(error) == "Child was terminated by signal 15":
system.exit_gracefully()
@ -116,46 +138,62 @@ class LocalRemoteExecutor:
with calculate_task_limit_lock:
if nonloc.max_remote_tasks is None:
node_task_limit = 0
for t in self.params['tasks']:
for t in self.params["tasks"]:
try:
info = t.info(with_output=-3)
if info.status == TaskStatus.RUNNING and info.processing_time >= 0 and len(info.output) >= 3:
if (
info.status == TaskStatus.RUNNING
and info.processing_time >= 0
and len(info.output) >= 3
):
node_task_limit += 1
except exceptions.OdmError:
pass
nonloc.max_remote_tasks = max(1, node_task_limit)
log.ODM_INFO("LRE: Node task limit reached. Setting max remote tasks to %s" % node_task_limit)
log.ODM_INFO(
"LRE: Node task limit reached. Setting max remote tasks to %s"
% node_task_limit
)
# Retry, but only if the error is not related to a task failure
if task.retries < task.max_retries and not isinstance(error, exceptions.TaskFailedError):
if task.retries < task.max_retries and not isinstance(
error, exceptions.TaskFailedError
):
# Put task back in queue
# Don't increment the retry counter if this task simply reached the task
# limit count.
if not task_limit_reached:
task.retries += 1
task.wait_until = datetime.datetime.now() + datetime.timedelta(seconds=task.retries * task.retry_timeout)
task.wait_until = datetime.datetime.now() + datetime.timedelta(
seconds=task.retries * task.retry_timeout
)
cleanup_remote()
q.task_done()
log.ODM_INFO("LRE: Re-queueing %s (retries: %s)" % (task, task.retries))
log.ODM_INFO(
"LRE: Re-queueing %s (retries: %s)" % (task, task.retries)
)
q.put(task)
if not local: remote_running_tasks.increment(-1)
if not local:
remote_running_tasks.increment(-1)
return
else:
nonloc.error = error
finished_tasks.increment()
if not local: remote_running_tasks.increment(-1)
if not local:
remote_running_tasks.increment(-1)
else:
if not partial:
log.ODM_INFO("LRE: %s finished successfully" % task)
finished_tasks.increment()
if not local: remote_running_tasks.increment(-1)
if not local:
remote_running_tasks.increment(-1)
cleanup_remote()
if not partial: q.task_done()
if not partial:
q.task_done()
def local_worker():
while True:
# Block until a new queue item is available
@ -174,7 +212,6 @@ class LocalRemoteExecutor:
finally:
nonloc.local_processing = False
def remote_worker():
while True:
# Block until a new queue item is available
@ -183,10 +220,13 @@ class LocalRemoteExecutor:
if task is None or nonloc.error is not None:
q.task_done()
break
# Yield to local processing
if not nonloc.local_processing:
log.ODM_INFO("LRE: Yielding to local processing, sending %s back to the queue" % task)
log.ODM_INFO(
"LRE: Yielding to local processing, sending %s back to the queue"
% task
)
q.put(task)
q.task_done()
time.sleep(0.05)
@ -194,7 +234,10 @@ class LocalRemoteExecutor:
# If we've found an estimate of the limit on the maximum number of tasks
# a node can process, we block until some tasks have completed
if nonloc.max_remote_tasks is not None and remote_running_tasks.value >= nonloc.max_remote_tasks:
if (
nonloc.max_remote_tasks is not None
and remote_running_tasks.value >= nonloc.max_remote_tasks
):
q.put(task)
q.task_done()
time.sleep(2)
@ -206,7 +249,7 @@ class LocalRemoteExecutor:
task.process(False, handle_result)
except Exception as e:
handle_result(task, False, e)
# Create queue thread
local_thread = threading.Thread(target=local_worker)
if self.node_online:
@ -221,12 +264,14 @@ class LocalRemoteExecutor:
# block until all tasks are done (or CTRL+C)
try:
while finished_tasks.value < len(self.project_paths) and nonloc.error is None:
while (
finished_tasks.value < len(self.project_paths) and nonloc.error is None
):
time.sleep(0.5)
except KeyboardInterrupt:
log.ODM_WARNING("LRE: CTRL+C")
system.exit_gracefully()
# stop workers
q.put(None)
if self.node_online:
@ -238,73 +283,86 @@ class LocalRemoteExecutor:
remote_thread.join()
# Wait for all remains threads
for thrds in self.params['threads']:
for thrds in self.params["threads"]:
thrds.join()
system.remove_cleanup_callback(cleanup_remote_tasks)
cleanup_remote_tasks()
if nonloc.error is not None:
# Try not to leak access token
if isinstance(nonloc.error, exceptions.NodeConnectionError):
raise exceptions.NodeConnectionError("A connection error happened. Check the connection to the processing node and try again.")
raise exceptions.NodeConnectionError(
"A connection error happened. Check the connection to the processing node and try again."
)
else:
raise nonloc.error
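A hedged usage sketch of the executor; the node URL, project paths, and the run(...) call with a Task subclass are assumptions based on how ODM's split stage wires this up, not a verbatim excerpt:

# Hypothetical wiring (node URL, paths and task class are made up for illustration;
# ODM's split stage passes its own Task subclasses here)
lre = LocalRemoteExecutor("http://192.168.1.20:3000", rolling_shutter=False, rerun=False)
lre.set_projects([
    "/datasets/brighton/submodels/submodel_0000",
    "/datasets/brighton/submodels/submodel_0001",
])
lre.run(SomeTaskSubclass)  # assumed API: run() takes the Task subclass to execute per project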
class NodeTaskLimitReachedException(Exception):
pass
class Task:
def __init__(self, project_path, node, params, max_retries=5, retry_timeout=10):
self.project_path = project_path
self.node = node
self.params = params
self.wait_until = datetime.datetime.now() # Don't run this task until a certain time
self.wait_until = (
datetime.datetime.now()
) # Don't run this task until a certain time
self.max_retries = max_retries
self.retries = 0
self.retry_timeout = retry_timeout
self.remote_task = None
def process(self, local, done):
def handle_result(error = None, partial=False):
def handle_result(error=None, partial=False):
done(self, local, error, partial)
log.ODM_INFO("LRE: About to process %s %s" % (self, 'locally' if local else 'remotely'))
log.ODM_INFO(
"LRE: About to process %s %s" % (self, "locally" if local else "remotely")
)
if local:
self._process_local(handle_result) # Block until complete
self._process_local(handle_result) # Block until complete
else:
now = datetime.datetime.now()
if self.wait_until > now:
wait_for = (self.wait_until - now).seconds + 1
log.ODM_INFO("LRE: Waiting %s seconds before processing %s" % (wait_for, self))
log.ODM_INFO(
"LRE: Waiting %s seconds before processing %s" % (wait_for, self)
)
time.sleep(wait_for)
# TODO: we could consider uploading multiple tasks
# in parallel. But since we are using the same node
# perhaps this wouldn't be a big speedup.
self._process_remote(handle_result) # Block until upload is complete
self._process_remote(handle_result) # Block until upload is complete
def path(self, *paths):
return os.path.join(self.project_path, *paths)
def touch(self, file):
with open(file, 'w') as fout:
with open(file, "w") as fout:
fout.write("Done!\n")
def create_seed_payload(self, paths, touch_files=[]):
paths = filter(os.path.exists, map(lambda p: self.path(p), paths))
outfile = self.path("seed.zip")
with zipfile.ZipFile(outfile, "w", compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zf:
with zipfile.ZipFile(
outfile, "w", compression=zipfile.ZIP_DEFLATED, allowZip64=True
) as zf:
for p in paths:
if os.path.isdir(p):
for root, _, filenames in os.walk(p):
for filename in filenames:
filename = os.path.join(root, filename)
filename = os.path.normpath(filename)
zf.write(filename, os.path.relpath(filename, self.project_path))
zf.write(
filename, os.path.relpath(filename, self.project_path)
)
else:
zf.write(p, os.path.relpath(p, self.project_path))
@ -319,33 +377,41 @@ class Task:
done()
except Exception as e:
done(e)
def _process_remote(self, done):
try:
self.process_remote(done)
done(error=None, partial=True) # Upload is completed, but processing is not (partial)
done(
error=None, partial=True
) # Upload is completed, but processing is not (partial)
except Exception as e:
done(e)
def execute_remote_task(self, done, seed_files = [], seed_touch_files = [], outputs = [], ):
def execute_remote_task(
self,
done,
seed_files=[],
seed_touch_files=[],
outputs=[],
):
"""
Run a task by creating a seed file with all files in seed_files, optionally
creating empty files (for flag checks) specified in seed_touch_files
and returning the results specified in outputs. Yeah it's pretty cool!
"""
seed_file = self.create_seed_payload(seed_files, touch_files=seed_touch_files)
# Find all images
images = glob.glob(self.path("images/**"))
# Add GCP (optional)
if os.path.exists(self.path("gcp_list.txt")):
images.append(self.path("gcp_list.txt"))
# Add GEO (optional)
if os.path.exists(self.path("geo.txt")):
images.append(self.path("geo.txt"))
# Add seed file
images.append(seed_file)
@ -358,48 +424,67 @@ class Task:
nonloc.last_update = time.time()
# Upload task
task = self.node.create_task(images,
get_submodel_args_dict(config.config()),
progress_callback=print_progress,
skip_post_processing=True,
outputs=outputs)
task = self.node.create_task(
images,
get_submodel_args_dict(config.config()),
progress_callback=print_progress,
skip_post_processing=True,
outputs=outputs,
)
self.remote_task = task
# Cleanup seed file
os.remove(seed_file)
# Keep track of tasks for cleanup
self.params['tasks'].append(task)
self.params["tasks"].append(task)
# Check status
info = task.info()
if info.status in [TaskStatus.RUNNING, TaskStatus.COMPLETED]:
def monitor():
class nonloc:
status_callback_calls = 0
last_update = 0
def status_callback(info):
# If a task switches from RUNNING to QUEUED, then we need to
# If a task switches from RUNNING to QUEUED, then we need to
# stop the process and re-add the task to the queue.
if info.status == TaskStatus.QUEUED:
log.ODM_WARNING("LRE: %s (%s) turned from RUNNING to QUEUED. Re-adding to back of the queue." % (self, task.uuid))
raise NodeTaskLimitReachedException("Delayed task limit reached")
log.ODM_WARNING(
"LRE: %s (%s) turned from RUNNING to QUEUED. Re-adding to back of the queue."
% (self, task.uuid)
)
raise NodeTaskLimitReachedException(
"Delayed task limit reached"
)
elif info.status == TaskStatus.RUNNING:
# Print a status message once in a while
nonloc.status_callback_calls += 1
if nonloc.status_callback_calls > 30:
log.ODM_INFO("LRE: %s (%s) is still running" % (self, task.uuid))
log.ODM_INFO(
"LRE: %s (%s) is still running" % (self, task.uuid)
)
nonloc.status_callback_calls = 0
try:
def print_progress(percentage):
if (time.time() - nonloc.last_update >= 2) or int(percentage) == 100:
log.ODM_INFO("LRE: Download of %s at [%s%%]" % (self, int(percentage)))
if (time.time() - nonloc.last_update >= 2) or int(
percentage
) == 100:
log.ODM_INFO(
"LRE: Download of %s at [%s%%]"
% (self, int(percentage))
)
nonloc.last_update = time.time()
task.wait_for_completion(status_callback=status_callback)
log.ODM_INFO("LRE: Downloading assets for %s" % self)
task.download_assets(self.project_path, progress_callback=print_progress)
task.download_assets(
self.project_path, progress_callback=print_progress
)
log.ODM_INFO("LRE: Downloaded and extracted assets for %s" % self)
done()
except exceptions.TaskFailedError as e:
@ -409,30 +494,37 @@ class Task:
# Save to file
error_log_path = self.path("error.log")
with open(error_log_path, 'w') as f:
f.write('\n'.join(output_lines) + '\n')
with open(error_log_path, "w") as f:
f.write("\n".join(output_lines) + "\n")
msg = "(%s) failed with task output: %s\nFull log saved at %s" % (task.uuid, "\n".join(output_lines[-10:]), error_log_path)
msg = (
"(%s) failed with task output: %s\nFull log saved at %s"
% (task.uuid, "\n".join(output_lines[-10:]), error_log_path)
)
done(exceptions.TaskFailedError(msg))
except:
log.ODM_WARNING("LRE: Could not retrieve task output for %s (%s)" % (self, task.uuid))
log.ODM_WARNING(
"LRE: Could not retrieve task output for %s (%s)"
% (self, task.uuid)
)
done(e)
except Exception as e:
done(e)
# Launch monitor thread and return
t = threading.Thread(target=monitor)
self.params['threads'].append(t)
self.params["threads"].append(t)
t.start()
elif info.status == TaskStatus.QUEUED:
raise NodeTaskLimitReachedException("Task limit reached")
else:
raise Exception("Could not send task to node, task status is %s" % str(info.status))
raise Exception(
"Could not send task to node, task status is %s" % str(info.status)
)
def process_local(self):
raise NotImplementedError()
def process_remote(self, done):
raise NotImplementedError()
@ -446,31 +538,47 @@ class ReconstructionTask(Task):
log.ODM_INFO("==================================")
log.ODM_INFO("Local Reconstruction %s" % octx.name())
log.ODM_INFO("==================================")
octx.feature_matching(self.params['rerun'])
octx.create_tracks(self.params['rerun'])
octx.reconstruct(self.params['rolling_shutter'], True, self.params['rerun'])
octx.feature_matching(self.params["rerun"])
octx.create_tracks(self.params["rerun"])
octx.reconstruct(self.params["rolling_shutter"], True, self.params["rerun"])
def process_remote(self, done):
octx = OSFMContext(self.path("opensfm"))
if not octx.is_feature_matching_done() or not octx.is_reconstruction_done() or self.params['rerun']:
self.execute_remote_task(done, seed_files=["opensfm/exif",
"opensfm/camera_models.json",
"opensfm/reference_lla.json"],
seed_touch_files=["opensfm/split_merge_stop_at_reconstruction.txt"],
outputs=["opensfm/matches", "opensfm/features",
"opensfm/reconstruction.json",
"opensfm/tracks.csv",
"cameras.json"])
if (
not octx.is_feature_matching_done()
or not octx.is_reconstruction_done()
or self.params["rerun"]
):
self.execute_remote_task(
done,
seed_files=[
"opensfm/exif",
"opensfm/camera_models.json",
"opensfm/reference_lla.json",
],
seed_touch_files=["opensfm/split_merge_stop_at_reconstruction.txt"],
outputs=[
"opensfm/matches",
"opensfm/features",
"opensfm/reconstruction.json",
"opensfm/tracks.csv",
"cameras.json",
],
)
else:
log.ODM_INFO("Already processed feature matching and reconstruction for %s" % octx.name())
log.ODM_INFO(
"Already processed feature matching and reconstruction for %s"
% octx.name()
)
done()
class ToolchainTask(Task):
def process_local(self):
completed_file = self.path("toolchain_completed.txt")
submodel_name = os.path.basename(self.project_path)
if not os.path.exists(completed_file) or self.params['rerun']:
if not os.path.exists(completed_file) or self.params["rerun"]:
log.ODM_INFO("=============================")
log.ODM_INFO("Local Toolchain %s" % self)
log.ODM_INFO("=============================")
@ -479,37 +587,48 @@ class ToolchainTask(Task):
argv = get_submodel_argv(config.config(), submodels_path, submodel_name)
# Re-run the ODM toolchain on the submodel
system.run(" ".join(map(double_quote, map(str, argv))), env_vars=os.environ.copy())
system.run(
" ".join(map(double_quote, map(str, argv))), env_vars=os.environ.copy()
)
# This will only get executed if the command above succeeds
self.touch(completed_file)
else:
log.ODM_INFO("Already processed toolchain for %s" % submodel_name)
def process_remote(self, done):
completed_file = self.path("toolchain_completed.txt")
submodel_name = os.path.basename(self.project_path)
def handle_result(error = None):
def handle_result(error=None):
# Mark task as completed if no error
if error is None:
self.touch(completed_file)
done(error=error)
if not os.path.exists(completed_file) or self.params['rerun']:
self.execute_remote_task(handle_result, seed_files=["opensfm/camera_models.json",
"opensfm/reference_lla.json",
"opensfm/reconstruction.json",
"opensfm/tracks.csv"],
seed_touch_files=["opensfm/features/empty",
"opensfm/matches/empty",
"opensfm/exif/empty"],
outputs=["odm_orthophoto/cutline.gpkg",
"odm_orthophoto/odm_orthophoto_cut.tif",
"odm_orthophoto/odm_orthophoto_feathered.tif",
"odm_dem",
"odm_report",
"odm_georeferencing"])
if not os.path.exists(completed_file) or self.params["rerun"]:
self.execute_remote_task(
handle_result,
seed_files=[
"opensfm/camera_models.json",
"opensfm/reference_lla.json",
"opensfm/reconstruction.json",
"opensfm/tracks.csv",
],
seed_touch_files=[
"opensfm/features/empty",
"opensfm/matches/empty",
"opensfm/exif/empty",
],
outputs=[
"odm_orthophoto/cutline.gpkg",
"odm_orthophoto/odm_orthophoto_cut.tif",
"odm_orthophoto/odm_orthophoto_feathered.tif",
"odm_dem",
"odm_report",
"odm_georeferencing",
],
)
else:
log.ODM_INFO("Already processed toolchain for %s" % submodel_name)
handle_result()
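
The nested callbacks above (status_callback, print_progress) share mutable counters through a small `class nonloc:` holder rather than `nonlocal`. A minimal, self-contained sketch of that pattern, independent of the ODM/NodeODM APIs:

import time

def make_progress_printer(interval=2):
    # The class body acts as a mutable namespace shared by the nested closure,
    # the same trick the monitor() callbacks above use via `class nonloc:`.
    class nonloc:
        last_update = 0

    def print_progress(percentage):
        # Log at most once per `interval` seconds, or when the task hits 100%
        if (time.time() - nonloc.last_update >= interval) or int(percentage) == 100:
            print("Download at [%s%%]" % int(percentage))
            nonloc.last_update = time.time()

    return print_progress

printer = make_progress_printer()
for pct in (10, 50, 100):
    printer(pct)
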


@ -2,60 +2,53 @@ from opendm import log
# Make Model (lowercase) --> readout time (ms)
RS_DATABASE = {
'autel robotics xt701': 25, # Autel Evo II 8k
'dji phantom vision fc200': 74, # Phantom 2
'dji fc300s': 33, # Phantom 3 Advanced
'dji fc300c': 33, # Phantom 3 Standard
'dji fc300x': 33, # Phantom 3 Professional
'dji fc330': 33, # Phantom 4
'dji fc6310': 33, # Phantom 4 Professional
'dji fc7203': lambda p: 19 if p.get_capture_megapixels() < 10 else 25, # DJI Mavic Mini v1 (at 16:9 => 9MP 19ms, at 4:3 => 12MP 25ms)
'dji fc2103': 32, # DJI Mavic Air 1
'dji fc3170': 27, # DJI Mavic Air 2
'dji fc3411': 32, # DJI Mavic Air 2S
'dji fc220': 64, # DJI Mavic Pro (Platinum)
'hasselblad l1d-20c': lambda p: 47 if p.get_capture_megapixels() < 17 else 56, # DJI Mavic 2 Pro (at 16:10 => 16.8MP 47ms, at 3:2 => 19.9MP 56ms. 4:3 has 17.7MP with same image height as 3:2 which can be concluded as same sensor readout)
'hasselblad l2d-20c': 16.6, # DJI Mavic 3 (not enterprise version)
'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
'dji fc350': 30, # Inspire 1
'dji mavic2-enterprise-advanced': 31, # DJI Mavic 2 Enterprise Advanced
'dji zenmuse z30': 8, # DJI Zenmuse Z30
'yuneec e90': 44, # Yuneec E90
'gopro hero4 black': 30, # GoPro Hero 4 Black
'gopro hero8 black': 17, # GoPro Hero 8 Black
'teracube teracube one': 32, # TeraCube TeraCube_One TR1907Q Mobile Phone
'fujifilm x-a5': 186, # FUJIFILM X-A5 Mirrorless Interchangeable Lens Camera
'fujifilm x-t2': 35, # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera
'autel robotics xl724': 29, # Autel Nano+
'parrot anafi': 39, # Parrot Anafi
'autel robotics xt705': 30, # Autel EVO II pro
# Help us add more!
"autel robotics xt701": 25, # Autel Evo II 8k
"dji phantom vision fc200": 74, # Phantom 2
"dji fc300s": 33, # Phantom 3 Advanced
"dji fc300c": 33, # Phantom 3 Standard
"dji fc300x": 33, # Phantom 3 Professional
"dji fc330": 33, # Phantom 4
"dji fc6310": 33, # Phantom 4 Professional
"dji fc7203": lambda p: (
19 if p.get_capture_megapixels() < 10 else 25
), # DJI Mavic Mini v1 (at 16:9 => 9MP 19ms, at 4:3 => 12MP 25ms)
"dji fc2103": 32, # DJI Mavic Air 1
"dji fc3170": 27, # DJI Mavic Air 2
"dji fc3411": 32, # DJI Mavic Air 2S
"dji fc220": 64, # DJI Mavic Pro (Platinum)
"hasselblad l1d-20c": lambda p: (
47 if p.get_capture_megapixels() < 17 else 56
), # DJI Mavic 2 Pro (at 16:10 => 16.8MP 47ms, at 3:2 => 19.9MP 56ms. 4:3 has 17.7MP with same image height as 3:2 which can be concluded as same sensor readout)
"hasselblad l2d-20c": 16.6, # DJI Mavic 3 (not enterprise version)
"dji fc3582": lambda p: (
26 if p.get_capture_megapixels() < 48 else 60
), # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
"dji fc350": 30, # Inspire 1
"dji mavic2-enterprise-advanced": 31, # DJI Mavic 2 Enterprise Advanced
"dji zenmuse z30": 8, # DJI Zenmuse Z30
"yuneec e90": 44, # Yuneec E90
"gopro hero4 black": 30, # GoPro Hero 4 Black
"gopro hero8 black": 17, # GoPro Hero 8 Black
"teracube teracube one": 32, # TeraCube TeraCube_One TR1907Q Mobile Phone
"fujifilm x-a5": 186, # FUJIFILM X-A5 Mirrorless Interchangeable Lens Camera
"fujifilm x-t2": 35, # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera
"autel robotics xl724": 29, # Autel Nano+
"parrot anafi": 39, # Parrot Anafi
"autel robotics xt705": 30, # Autel EVO II pro
# Help us add more!
# See: https://github.com/OpenDroneMap/RSCalibration for instructions
}
DEFAULT_RS_READOUT = 30 # Just a guess
DEFAULT_RS_READOUT = 30 # Just a guess
def make_model_key(make, model):
return ("%s %s" % (make.strip(), model.strip())).lower().strip()
warn_db_missing = {}
info_db_found = {}
def get_rolling_shutter_readout(photo, override_value=0):
global warn_db_missing
global info_db_found
@ -64,7 +57,7 @@ def get_rolling_shutter_readout(photo, override_value=0):
if override_value > 0:
return override_value
key = make_model_key(make, model)
if key in RS_DATABASE:
rsd = RS_DATABASE[key]
@ -75,17 +68,25 @@ def get_rolling_shutter_readout(photo, override_value=0):
elif callable(rsd):
val = float(rsd(photo))
else:
log.ODM_WARNING("Invalid rolling shutter calibration entry, returning default of %sms" % DEFAULT_RS_READOUT)
log.ODM_WARNING(
"Invalid rolling shutter calibration entry, returning default of %sms"
% DEFAULT_RS_READOUT
)
if not key in info_db_found:
log.ODM_INFO("Rolling shutter profile for \"%s %s\" selected, using %sms as --rolling-shutter-readout." % (make, model, val))
log.ODM_INFO(
'Rolling shutter profile for "%s %s" selected, using %sms as --rolling-shutter-readout.'
% (make, model, val)
)
info_db_found[key] = True
return val
else:
# Warn once
if not key in warn_db_missing:
log.ODM_WARNING("Rolling shutter readout time for \"%s %s\" is not in our database, using default of %sms which might be incorrect. Use --rolling-shutter-readout to set an actual value (see https://github.com/OpenDroneMap/RSCalibration for instructions on how to calculate this value)" % (make, model, DEFAULT_RS_READOUT))
log.ODM_WARNING(
'Rolling shutter readout time for "%s %s" is not in our database, using default of %sms which might be incorrect. Use --rolling-shutter-readout to set an actual value (see https://github.com/OpenDroneMap/RSCalibration for instructions on how to calculate this value)'
% (make, model, DEFAULT_RS_READOUT)
)
warn_db_missing[key] = True
return float(DEFAULT_RS_READOUT)
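
The database above mixes plain numbers with callables that inspect the photo, so the lookup has to dispatch on the entry type. A minimal sketch of that dispatch; `FakePhoto` is a hypothetical stand-in for the real photo object and is not part of ODM:

DB = {
    "dji fc7203": lambda p: 19 if p.get_capture_megapixels() < 10 else 25,
    "dji fc330": 33,
}

class FakePhoto:
    def __init__(self, mp):
        self.mp = mp
    def get_capture_megapixels(self):
        return self.mp

def readout_ms(key, photo, default=30):
    rsd = DB.get(key)
    if isinstance(rsd, (int, float)):
        return float(rsd)
    elif callable(rsd):
        return float(rsd(photo))
    return float(default)  # unknown make/model: fall back to the default guess

print(readout_ms("dji fc7203", FakePhoto(12)))  # 25.0
print(readout_ms("unknown cam", FakePhoto(12)))  # 30.0
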


@ -7,10 +7,12 @@ from osgeo import gdal
import numpy as np
import cv2
def get_rotation_matrix(rotation):
"""Get rotation as a 3x3 matrix."""
return cv2.Rodrigues(rotation)[0]
def matrix_to_rotation(rotation_matrix):
R = np.array(rotation_matrix, dtype=float)
# if not np.isclose(np.linalg.det(R), 1):
@ -19,11 +21,21 @@ def matrix_to_rotation(rotation_matrix):
# raise ValueError("Not orthogonal")
return cv2.Rodrigues(R)[0].ravel()
def get_origin(shot):
"""The origin of the pose in world coordinates."""
return -get_rotation_matrix(np.array(shot['rotation'])).T.dot(np.array(shot['translation']))
return -get_rotation_matrix(np.array(shot["rotation"])).T.dot(
np.array(shot["translation"])
)
def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None, a_matrix=None):
def get_geojson_shots_from_opensfm(
reconstruction_file,
utm_srs=None,
utm_offset=None,
pseudo_geotiff=None,
a_matrix=None,
):
"""
Extract shots from OpenSfM's reconstruction.json
"""
@ -36,17 +48,21 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset
# the pseudo-georeferencing CRS UL corner is at 0,0
# but our shot coordinates aren't, so we need to offset them
raster = gdal.Open(pseudo_geotiff)
ulx, xres, _, uly, _, yres = raster.GetGeoTransform()
ulx, xres, _, uly, _, yres = raster.GetGeoTransform()
lrx = ulx + (raster.RasterXSize * xres)
lry = uly + (raster.RasterYSize * yres)
pseudo_geocoords = np.array([[1.0 / get_pseudogeo_scale() ** 2, 0, 0, ulx + lrx / 2.0],
[0, 1.0 / get_pseudogeo_scale() ** 2, 0, uly + lry / 2.0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
pseudo_geocoords = np.array(
[
[1.0 / get_pseudogeo_scale() ** 2, 0, 0, ulx + lrx / 2.0],
[0, 1.0 / get_pseudogeo_scale() ** 2, 0, uly + lry / 2.0],
[0, 0, 1, 0],
[0, 0, 0, 1],
]
)
raster = None
pseudo = True
# Couldn't get a SRS?
if utm_srs is None:
return None
@ -54,17 +70,17 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset
crstrans = transformer(CRS.from_proj4(utm_srs), CRS.from_epsg("4326"))
if os.path.exists(reconstruction_file):
with open(reconstruction_file, 'r') as fin:
with open(reconstruction_file, "r") as fin:
reconstructions = json.loads(fin.read())
feats = []
added_shots = {}
for recon in reconstructions:
cameras = recon.get('cameras', {})
cameras = recon.get("cameras", {})
for filename in recon.get('shots', {}):
shot = recon['shots'][filename]
cam_id = shot.get('camera')
for filename in recon.get("shots", {}):
shot = recon["shots"][filename]
cam_id = shot.get("camera")
if (not cam_id in cameras) or (filename in added_shots):
continue
@ -76,57 +92,69 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset
# Translation
utm_coords = np.dot(Rs, origin) + T
trans_coords = crstrans.TransformPoint(utm_coords[0], utm_coords[1], utm_coords[2])
trans_coords = crstrans.TransformPoint(
utm_coords[0], utm_coords[1], utm_coords[2]
)
# Rotation
rotation_matrix = get_rotation_matrix(np.array(shot['rotation']))
rotation_matrix = get_rotation_matrix(
np.array(shot["rotation"])
)
rotation = matrix_to_rotation(np.dot(rotation_matrix, Rs1))
translation = origin
else:
rotation = shot['rotation']
rotation = shot["rotation"]
# Just add UTM offset
origin = get_origin(shot)
utm_coords = [origin[0] + utm_offset[0],
origin[1] + utm_offset[1],
origin[2]]
utm_coords = [
origin[0] + utm_offset[0],
origin[1] + utm_offset[1],
origin[2],
]
if a_matrix is not None:
rotation = list(np.array(rotation).dot(a_matrix[:3,:3]))
utm_coords = list(a_matrix.dot(np.hstack((np.array(utm_coords), 1)))[:-1])
translation = utm_coords
trans_coords = crstrans.TransformPoint(utm_coords[0], utm_coords[1], utm_coords[2])
rotation = list(np.array(rotation).dot(a_matrix[:3, :3]))
utm_coords = list(
a_matrix.dot(np.hstack((np.array(utm_coords), 1)))[:-1]
)
feats.append({
'type': 'Feature',
'properties': {
'filename': filename,
'camera': cam_id,
'focal': cam.get('focal', cam.get('focal_x')), # Focal ratio = focal length (mm) / max(sensor_width, sensor_height) (mm)
'width': cam.get('width', 0),
'height': cam.get('height', 0),
'capture_time': shot.get('capture_time', 0),
'translation': list(translation),
'rotation': list(rotation)
},
'geometry':{
'type': 'Point',
'coordinates': list(trans_coords)
translation = utm_coords
trans_coords = crstrans.TransformPoint(
utm_coords[0], utm_coords[1], utm_coords[2]
)
feats.append(
{
"type": "Feature",
"properties": {
"filename": filename,
"camera": cam_id,
"focal": cam.get(
"focal", cam.get("focal_x")
), # Focal ratio = focal length (mm) / max(sensor_width, sensor_height) (mm)
"width": cam.get("width", 0),
"height": cam.get("height", 0),
"capture_time": shot.get("capture_time", 0),
"translation": list(translation),
"rotation": list(rotation),
},
"geometry": {
"type": "Point",
"coordinates": list(trans_coords),
},
}
})
)
added_shots[filename] = True
return {
'type': 'FeatureCollection',
'features': feats
}
return {"type": "FeatureCollection", "features": feats}
else:
raise RuntimeError("%s does not exist." % reconstruction_file)
def merge_geojson_shots(geojson_shots_files, output_geojson_file):
result = {}
added_files = {}
@ -135,29 +163,30 @@ def merge_geojson_shots(geojson_shots_files, output_geojson_file):
shots = json.loads(f.read())
if len(result) == 0:
for feat in shots.get('features', []):
added_files[feat['properties']['filename']] = True
for feat in shots.get("features", []):
added_files[feat["properties"]["filename"]] = True
# Use first file as base
result = shots
else:
# Append features if filename not already added
for feat in shots.get('features', []):
if not feat['properties']['filename'] in added_files:
result['features'].append(feat)
for feat in shots.get("features", []):
if not feat["properties"]["filename"] in added_files:
result["features"].append(feat)
with open(output_geojson_file, "w") as f:
f.write(json.dumps(result))
def merge_cameras(cameras_json_files, output_cameras_file):
result = {}
for cameras_file in cameras_json_files:
with open(cameras_file, "r") as f:
cameras = json.loads(f.read())
for cam_id in cameras:
if not cam_id in result:
result[cam_id] = cameras[cam_id]
with open(output_cameras_file, "w") as f:
f.write(json.dumps(result))
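
A compact sketch of the deduplication performed by merge_geojson_shots above, operating on in-memory FeatureCollections instead of files (file I/O omitted for brevity):

def merge_shots(collections):
    result = {"type": "FeatureCollection", "features": []}
    added = set()
    for shots in collections:
        for feat in shots.get("features", []):
            name = feat["properties"]["filename"]
            if name not in added:
                result["features"].append(feat)
                added.add(name)
    return result

a = {"type": "FeatureCollection", "features": [{"properties": {"filename": "1.jpg"}}]}
b = {"type": "FeatureCollection", "features": [{"properties": {"filename": "1.jpg"}},
                                               {"properties": {"filename": "2.jpg"}}]}
print(len(merge_shots([a, b])["features"]))  # 2 (duplicate 1.jpg kept only once)
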


@ -4,21 +4,32 @@ import numpy as np
# Kaiming He, Jian Sun
# https://arxiv.org/abs/1505.00996
def box(img, radius):
dst = np.zeros_like(img)
(r, c) = img.shape
s = [radius, 1]
c_sum = np.cumsum(img, 0)
dst[0:radius+1, :, ...] = c_sum[radius:2*radius+1, :, ...]
dst[radius+1:r-radius, :, ...] = c_sum[2*radius+1:r, :, ...] - c_sum[0:r-2*radius-1, :, ...]
dst[r-radius:r, :, ...] = np.tile(c_sum[r-1:r, :, ...], s) - c_sum[r-2*radius-1:r-radius-1, :, ...]
dst[0 : radius + 1, :, ...] = c_sum[radius : 2 * radius + 1, :, ...]
dst[radius + 1 : r - radius, :, ...] = (
c_sum[2 * radius + 1 : r, :, ...] - c_sum[0 : r - 2 * radius - 1, :, ...]
)
dst[r - radius : r, :, ...] = (
np.tile(c_sum[r - 1 : r, :, ...], s)
- c_sum[r - 2 * radius - 1 : r - radius - 1, :, ...]
)
s = [1, radius]
c_sum = np.cumsum(dst, 1)
dst[:, 0:radius+1, ...] = c_sum[:, radius:2*radius+1, ...]
dst[:, radius+1:c-radius, ...] = c_sum[:, 2*radius+1 : c, ...] - c_sum[:, 0 : c-2*radius-1, ...]
dst[:, c-radius: c, ...] = np.tile(c_sum[:, c-1:c, ...], s) - c_sum[:, c-2*radius-1 : c-radius-1, ...]
dst[:, 0 : radius + 1, ...] = c_sum[:, radius : 2 * radius + 1, ...]
dst[:, radius + 1 : c - radius, ...] = (
c_sum[:, 2 * radius + 1 : c, ...] - c_sum[:, 0 : c - 2 * radius - 1, ...]
)
dst[:, c - radius : c, ...] = (
np.tile(c_sum[:, c - 1 : c, ...], s)
- c_sum[:, c - 2 * radius - 1 : c - radius - 1, ...]
)
return dst
@ -31,7 +42,9 @@ def guided_filter(img, guide, radius, eps):
mean_img = box(img, radius) / CNT
mean_guide = box(guide, radius) / CNT
a = ((box(img * guide, radius) / CNT) - mean_img * mean_guide) / (((box(img * img, radius) / CNT) - mean_img * mean_img) + eps)
a = ((box(img * guide, radius) / CNT) - mean_img * mean_guide) / (
((box(img * img, radius) / CNT) - mean_img * mean_img) + eps
)
b = mean_guide - a * mean_img
return (box(a, radius) / CNT) * img + (box(b, radius) / CNT)
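
For reference, the same guided-filter math (He et al.) written with cv2.blur as the box mean. This is a sketch for illustration only; its border handling differs slightly from the cumsum-based box() above. Note that in the function above the first argument (img) appears to play the role of the guide image and the second (guide) is the signal being refined, which matches how SkyFilter.refine calls it.

import numpy as np
import cv2

def guided_filter_ref(I, p, radius, eps):
    # Standard guided filter: q = mean(a) * I + mean(b), with
    # a = cov(I, p) / (var(I) + eps) and b = mean(p) - a * mean(I).
    ksize = (2 * radius + 1, 2 * radius + 1)
    mean = lambda x: cv2.blur(x, ksize)
    mean_I, mean_p = mean(I), mean(p)
    cov_Ip = mean(I * p) - mean_I * mean_p
    var_I = mean(I * I) - mean_I * mean_I
    a = cov_Ip / (var_I + eps)
    b = mean_p - a * mean_I
    return mean(a) * I + mean(b)

I = np.random.rand(64, 64).astype(np.float32)  # guide (e.g. an image channel)
p = np.random.rand(64, 64).astype(np.float32)  # signal to refine (e.g. a sky mask)
q = guided_filter_ref(I, p, radius=20, eps=0.01)
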


@ -1,4 +1,3 @@
import time
import numpy as np
import cv2
@ -12,30 +11,35 @@ from threading import Lock
mutex = Lock()
# Use GPU if it is available, otherwise CPU
provider = "CUDAExecutionProvider" if "CUDAExecutionProvider" in ort.get_available_providers() else "CPUExecutionProvider"
provider = (
"CUDAExecutionProvider"
if "CUDAExecutionProvider" in ort.get_available_providers()
else "CPUExecutionProvider"
)
class SkyFilter():
def __init__(self, model, width = 384, height = 384):
class SkyFilter:
def __init__(self, model, width=384, height=384):
self.model = model
self.width, self.height = width, height
log.ODM_INFO(' ?> Using provider %s' % provider)
log.ODM_INFO(" ?> Using provider %s" % provider)
self.load_model()
def load_model(self):
log.ODM_INFO(' -> Loading the model')
self.session = ort.InferenceSession(self.model, providers=[provider])
log.ODM_INFO(" -> Loading the model")
self.session = ort.InferenceSession(self.model, providers=[provider])
def get_mask(self, img):
height, width, c = img.shape
# Resize image to fit the model input
new_img = cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_AREA)
new_img = cv2.resize(
img, (self.width, self.height), interpolation=cv2.INTER_AREA
)
new_img = np.array(new_img, dtype=np.float32)
# Input vector for onnx model
@ -55,34 +59,34 @@ class SkyFilter():
return self.refine(output, img)
def refine(self, pred, img):
guided_filter_radius, guided_filter_eps = 20, 0.01
refined = guided_filter(img[:,:,2], pred[:,:,0], guided_filter_radius, guided_filter_eps)
refined = guided_filter(
img[:, :, 2], pred[:, :, 0], guided_filter_radius, guided_filter_eps
)
res = np.clip(refined, a_min=0, a_max=1)
# Convert res to CV_8UC1
res = np.array(res * 255., dtype=np.uint8)
res = np.array(res * 255.0, dtype=np.uint8)
# Thresholding
res = cv2.threshold(res, 127, 255, cv2.THRESH_BINARY_INV)[1]
return res
def run_img(self, img_path, dest):
img = read_image(img_path)
img = np.array(img / 255., dtype=np.float32)
img = np.array(img / 255.0, dtype=np.float32)
mask = self.get_mask(img)
mask = self.get_mask(img)
img_name = os.path.basename(img_path)
fpath = os.path.join(dest, img_name)
fname, _ = os.path.splitext(fpath)
mask_name = fname + '_mask.png'
mask_name = fname + "_mask.png"
cv2.imwrite(mask_name, mask)
return mask_name


@ -13,34 +13,44 @@ from collections import deque
from opendm import context
from opendm import log
class SubprocessException(Exception):
def __init__(self, msg, errorCode):
super().__init__(msg)
self.errorCode = errorCode
class ExitException(Exception):
pass
def get_ccd_widths():
"""Return the CCD Width of the camera listed in the JSON defs file."""
with open(context.ccd_widths_path) as f:
sensor_data = json.loads(f.read())
return dict(zip(map(string.lower, sensor_data.keys()), sensor_data.values()))
running_subprocesses = []
cleanup_callbacks = []
def add_cleanup_callback(func):
global cleanup_callbacks
cleanup_callbacks.append(func)
def remove_cleanup_callback(func):
global cleanup_callbacks
try:
cleanup_callbacks.remove(func)
except ValueError as e:
log.ODM_EXCEPTION("Tried to remove %s from cleanup_callbacks but got: %s" % (str(func), str(e)))
log.ODM_EXCEPTION(
"Tried to remove %s from cleanup_callbacks but got: %s"
% (str(func), str(e))
)
def exit_gracefully():
global running_subprocesses
@ -53,44 +63,63 @@ def exit_gracefully():
for sp in running_subprocesses:
log.ODM_WARNING("Sending TERM signal to PID %s..." % sp.pid)
if sys.platform == 'win32':
if sys.platform == "win32":
os.kill(sp.pid, signal.CTRL_C_EVENT)
else:
os.killpg(os.getpgid(sp.pid), signal.SIGTERM)
os._exit(1)
def sighandler(signum, frame):
exit_gracefully()
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGTERM, sighandler)
def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_paths=context.python_packages_paths, quiet=False):
def run(
cmd,
env_paths=[context.superbuild_bin_path],
env_vars={},
packages_paths=context.python_packages_paths,
quiet=False,
):
"""Run a system command"""
global running_subprocesses
if not quiet:
log.ODM_INFO('running %s' % cmd)
log.ODM_INFO("running %s" % cmd)
env = os.environ.copy()
sep = ":"
if sys.platform == 'win32':
if sys.platform == "win32":
sep = ";"
if len(env_paths) > 0:
env["PATH"] = env["PATH"] + sep + sep.join(env_paths)
if len(packages_paths) > 0:
env["PYTHONPATH"] = env.get("PYTHONPATH", "") + sep + sep.join(packages_paths)
if sys.platform == 'darwin':
env["PYTHONPATH"] = env.get("PYTHONPATH", "") + sep + sep.join(packages_paths)
if sys.platform == "darwin":
# Propagate DYLD_LIBRARY_PATH
cmd = "export DYLD_LIBRARY_PATH=\"%s\" && %s" % (env.get("DYLD_LIBRARY_PATH", ""), cmd)
cmd = 'export DYLD_LIBRARY_PATH="%s" && %s' % (
env.get("DYLD_LIBRARY_PATH", ""),
cmd,
)
for k in env_vars:
env[k] = str(env_vars[k])
p = subprocess.Popen(cmd, shell=True, env=env, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p = subprocess.Popen(
cmd,
shell=True,
env=env,
start_new_session=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
running_subprocesses.append(p)
lines = deque()
for line in io.TextIOWrapper(p.stdout):
@ -107,14 +136,16 @@ def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}, packages_path
running_subprocesses.remove(p)
if retcode < 0:
raise SubprocessException("Child was terminated by signal {}".format(-retcode), -retcode)
raise SubprocessException(
"Child was terminated by signal {}".format(-retcode), -retcode
)
elif retcode > 0:
raise SubprocessException("Child returned {}".format(retcode), retcode)
def now():
"""Return the current time"""
return datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Z %Y')
return datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Z %Y")
def now_raw():
@ -128,35 +159,43 @@ def benchmark(start, benchmarking_file, process):
"""
# Write to benchmark file
delta = (datetime.datetime.now() - start).total_seconds()
with open(benchmarking_file, 'a') as b:
b.write('%s runtime: %s seconds\n' % (process, delta))
with open(benchmarking_file, "a") as b:
b.write("%s runtime: %s seconds\n" % (process, delta))
def mkdir_p(path):
"""Make a directory including parent directories.
"""
"""Make a directory including parent directories."""
try:
os.makedirs(path)
except os.error as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
# Python2 shutil.which
def which(program):
path=os.getenv('PATH')
path = os.getenv("PATH")
for p in path.split(os.path.pathsep):
p=os.path.join(p,program)
if os.path.exists(p) and os.access(p,os.X_OK):
p = os.path.join(p, program)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
def link_file(src, dst):
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if not os.path.isfile(dst):
if sys.platform == 'win32':
if sys.platform == "win32":
os.link(src, dst)
else:
os.symlink(os.path.relpath(os.path.abspath(src), os.path.dirname(os.path.abspath(dst))), dst)
os.symlink(
os.path.relpath(
os.path.abspath(src), os.path.dirname(os.path.abspath(dst))
),
dst,
)
def move_files(src, dst):
if not os.path.isdir(dst):
@ -166,6 +205,7 @@ def move_files(src, dst):
if os.path.isfile(os.path.join(src, f)):
shutil.move(os.path.join(src, f), dst)
def delete_files(folder, exclude=()):
if not os.path.isdir(folder):
return
@ -173,4 +213,4 @@ def delete_files(folder, exclude=()):
for f in os.listdir(folder):
if os.path.isfile(os.path.join(folder, f)):
if not exclude or not f.endswith(exclude):
os.unlink(os.path.join(folder, f))
os.unlink(os.path.join(folder, f))
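
run() above appends extra search paths to PATH and PYTHONPATH with a platform-dependent separator before spawning the child process. A stripped-down sketch of that environment handling; the example path is illustrative only:

import os
import subprocess
import sys

def run_with_paths(cmd, env_paths=(), packages_paths=()):
    env = os.environ.copy()
    sep = ";" if sys.platform == "win32" else ":"  # equivalent to os.pathsep
    if env_paths:
        env["PATH"] = env["PATH"] + sep + sep.join(env_paths)
    if packages_paths:
        env["PYTHONPATH"] = env.get("PYTHONPATH", "") + sep + sep.join(packages_paths)
    return subprocess.call(cmd, shell=True, env=env)

run_with_paths("python --version", env_paths=["/opt/odm/SuperBuild/install/bin"])
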


@ -5,7 +5,8 @@ from opendm.thermal_tools import dji_unpack
from opendm.exiftool import extract_raw_thermal_image_data
from opendm.thermal_tools.thermal_utils import sensor_vals_to_temp
def resize_to_match(image, match_photo = None):
def resize_to_match(image, match_photo=None):
"""
Resize images to match the dimension of another photo
:param image numpy array containing image data to resize
@ -15,12 +16,16 @@ def resize_to_match(image, match_photo = None):
if match_photo is not None:
h, w, _ = image.shape
if w != match_photo.width or h != match_photo.height:
image = cv2.resize(image, None,
fx=match_photo.width/w,
fy=match_photo.height/h,
interpolation=cv2.INTER_LANCZOS4)
image = cv2.resize(
image,
None,
fx=match_photo.width / w,
fy=match_photo.height / h,
interpolation=cv2.INTER_LANCZOS4,
)
return image
def dn_to_temperature(photo, image, images_path):
"""
Convert Digital Number values to temperature (C) values
@ -37,34 +42,45 @@ def dn_to_temperature(photo, image, images_path):
# but not necessarily for others
if photo.camera_make == "MicaSense" and photo.camera_model[:5] == "Altum":
image = image.astype("float32")
image -= (273.15 * 100.0) # Convert Kelvin to Celsius
image -= 273.15 * 100.0 # Convert Kelvin to Celsius
image *= 0.01
return image
elif photo.camera_make == "DJI" and photo.camera_model == "ZH20T":
filename, file_extension = os.path.splitext(photo.filename)
# DJI H20T high gain mode supports measurement of -40~150 celsius degrees
if file_extension.lower() in [".tif", ".tiff"] and image.min() >= 23315: # Calibrated grayscale tif
if (
file_extension.lower() in [".tif", ".tiff"] and image.min() >= 23315
): # Calibrated grayscale tif
image = image.astype("float32")
image -= (273.15 * 100.0) # Convert Kelvin to Celsius
image -= 273.15 * 100.0 # Convert Kelvin to Celsius
image *= 0.01
return image
else:
return image
elif photo.camera_make == "DJI" and photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
elif (
photo.camera_make == "DJI"
and photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED"
):
image = dji_unpack.extract_temperatures_dji(photo, image, images_path)
image = image.astype("float32")
return image
else:
try:
params, image = extract_raw_thermal_image_data(os.path.join(images_path, photo.filename))
params, image = extract_raw_thermal_image_data(
os.path.join(images_path, photo.filename)
)
image = sensor_vals_to_temp(image, **params)
except Exception as e:
log.ODM_WARNING("Cannot radiometrically calibrate %s: %s" % (photo.filename, str(e)))
log.ODM_WARNING(
"Cannot radiometrically calibrate %s: %s" % (photo.filename, str(e))
)
image = image.astype("float32")
return image
else:
image = image.astype("float32")
log.ODM_WARNING("Tried to radiometrically calibrate a non-thermal image with temperature values (%s)" % photo.filename)
log.ODM_WARNING(
"Tried to radiometrically calibrate a non-thermal image with temperature values (%s)"
% photo.filename
)
return image


@ -5,46 +5,53 @@ from opendm import log
from opendm.thermal_tools.thermal_utils import sensor_vals_to_temp
def extract_temperatures_dji(photo, image, dataset_tree):
"""Extracts the DJI-encoded thermal image as 2D floating-point numpy array with temperatures in degC.
The raw sensor values are obtained using the sample binaries provided in the official Thermal SDK by DJI.
The executable file is run and generates a 16 bit unsigned RAW image with Little Endian byte order.
Link to DJI Forum post: https://forum.dji.com/forum.php?mod=redirect&goto=findpost&ptid=230321&pid=2389016
"""
# Hardcoded metadata for mean of values
# This is added to support the possibility of extracting RJPEG from DJI M2EA
meta = {
"Emissivity": 0.95,
"ObjectDistance": 50, #This is mean value of flights for better results. Need to be changed later, or improved by bypassing options from task broker
"AtmosphericTemperature": 20,
"ReflectedApparentTemperature": 30,
"IRWindowTemperature": 20,
"IRWindowTransmission": 1,
"RelativeHumidity": 40,
"PlanckR1": 21106.77,
"PlanckB": 1501,
"PlanckF": 1,
"PlanckO": -7340,
"PlanckR2": 0.012545258,
}
if photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
# Adding support for MAVIC2-ENTERPRISE-ADVANCED Camera images
im = Image.open(f"{dataset_tree}/{photo.filename}")
# concatenate APP3 chunks of data
a = im.applist[3][1]
for i in range(4, 14):
a += im.applist[i][1]
# create image from bytes
try:
img = Image.frombytes("I;16L", (640, 512), a)
except ValueError as e:
log.ODM_ERROR("Error during extracting temperature values for file %s : %s" % photo.filename, e)
else:
log.ODM_WARNING("Only DJI M2EA currently supported, please wait for new updates")
return image
# Extract raw sensor values from generated image into numpy array
raw_sensor_np = np.array(img)
## extracting the temperatures from thermal images
thermal_np = sensor_vals_to_temp(raw_sensor_np, **meta)
return thermal_np
def extract_temperatures_dji(photo, image, dataset_tree):
"""Extracts the DJI-encoded thermal image as 2D floating-point numpy array with temperatures in degC.
The raw sensor values are obtained using the sample binaries provided in the official Thermal SDK by DJI.
The executable file is run and generates a 16 bit unsigned RAW image with Little Endian byte order.
Link to DJI Forum post: https://forum.dji.com/forum.php?mod=redirect&goto=findpost&ptid=230321&pid=2389016
"""
# Hardcoded metadata for mean of values
# This is added to support the possibility of extracting RJPEG from DJI M2EA
meta = {
"Emissivity": 0.95,
"ObjectDistance": 50, # This is mean value of flights for better results. Need to be changed later, or improved by bypassing options from task broker
"AtmosphericTemperature": 20,
"ReflectedApparentTemperature": 30,
"IRWindowTemperature": 20,
"IRWindowTransmission": 1,
"RelativeHumidity": 40,
"PlanckR1": 21106.77,
"PlanckB": 1501,
"PlanckF": 1,
"PlanckO": -7340,
"PlanckR2": 0.012545258,
}
if photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
# Adding support for MAVIC2-ENTERPRISE-ADVANCED Camera images
im = Image.open(f"{dataset_tree}/{photo.filename}")
# concatenate APP3 chunks of data
a = im.applist[3][1]
for i in range(4, 14):
a += im.applist[i][1]
# create image from bytes
try:
img = Image.frombytes("I;16L", (640, 512), a)
except ValueError as e:
log.ODM_ERROR(
"Error during extracting temperature values for file %s : %s"
% (photo.filename, e)
)
else:
log.ODM_WARNING(
"Only DJI M2EA currently supported, please wait for new updates"
)
return image
# Extract raw sensor values from generated image into numpy array
raw_sensor_np = np.array(img)
## extracting the temperatures from thermal images
thermal_np = sensor_vals_to_temp(raw_sensor_np, **meta)
return thermal_np


@ -1,7 +1,9 @@
"""Thermal Image manipulation utilities."""
"""Based on https://github.com/detecttechnologies/thermal_base"""
import numpy as np
def sensor_vals_to_temp(
raw,
Emissivity=1.0,
@ -16,7 +18,8 @@ def sensor_vals_to_temp(
PlanckF=1,
PlanckO=-7340,
PlanckR2=0.012545258,
**kwargs,):
**kwargs,
):
"""Convert raw values from the thermographic sensor sensor to temperatures in °C. Tested for Flir and DJI cams."""
# this calculation has been ported to python from https://github.com/gtatters/Thermimage/blob/master/R/raw2temp.R
# a detailed explanation of what is going on here can be found there
@ -39,46 +42,60 @@ def sensor_vals_to_temp(
- 0.00027816 * (AtmosphericTemperature) ** 2
+ 0.00000068455 * (AtmosphericTemperature) ** 3
)
tau1 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (1 - ATX) * np.exp(
-np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o))
)
tau2 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (1 - ATX) * np.exp(
-np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o))
)
tau1 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (
1 - ATX
) * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o)))
tau2 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (
1 - ATX
) * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o)))
# radiance from the environment
raw_refl1 = PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)) - PlanckO
raw_refl1 = (
PlanckR1
/ (
PlanckR2
* (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)
)
- PlanckO
)
# Reflected component
raw_refl1_attn = (1 - Emissivity) / Emissivity * raw_refl1
raw_refl1_attn = (1 - Emissivity) / Emissivity * raw_refl1
# Emission from atmosphere 1
raw_atm1 = (
PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF)) - PlanckO
)
PlanckR1
/ (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF))
- PlanckO
)
# attenuation for atmospheric 1 emission
raw_atm1_attn = (1 - tau1) / Emissivity / tau1 * raw_atm1
raw_atm1_attn = (1 - tau1) / Emissivity / tau1 * raw_atm1
# Emission from window due to its own temp
raw_wind = (
PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (IRWindowTemperature + 273.15)) - PlanckF)) - PlanckO
)
PlanckR1
/ (PlanckR2 * (np.exp(PlanckB / (IRWindowTemperature + 273.15)) - PlanckF))
- PlanckO
)
# Component due to window emissivity
raw_wind_attn = (
emiss_wind / Emissivity / tau1 / IRWindowTransmission * raw_wind
)
raw_wind_attn = emiss_wind / Emissivity / tau1 / IRWindowTransmission * raw_wind
# Reflection from window due to external objects
raw_refl2 = (
PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)) - PlanckO
)
PlanckR1
/ (
PlanckR2
* (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)
)
- PlanckO
)
# component due to window reflectivity
raw_refl2_attn = (
refl_wind / Emissivity / tau1 / IRWindowTransmission * raw_refl2
)
raw_refl2_attn = refl_wind / Emissivity / tau1 / IRWindowTransmission * raw_refl2
# Emission from atmosphere 2
raw_atm2 = (
PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF)) - PlanckO
)
PlanckR1
/ (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF))
- PlanckO
)
# attenuation for atmospheric 2 emission
raw_atm2_attn = (
(1 - tau2) / Emissivity / tau1 / IRWindowTransmission / tau2 * raw_atm2
@ -114,6 +131,7 @@ def normalize_temp_matrix(thermal_np):
thermal_np = num / den
return thermal_np
def clip_temp_to_roi(thermal_np, thermal_roi_values):
"""
Given an RoI within a temperature matrix, this function clips the temperature values in the entire thermal.
@ -136,4 +154,4 @@ def clip_temp_to_roi(thermal_np, thermal_roi_values):
def scale_with_roi(thermal_np, thermal_roi_values):
"""Alias for clip_temp_to_roi, to be deprecated in the future."""
return clip_temp_to_roi(thermal_np, thermal_roi_values)
return clip_temp_to_roi(thermal_np, thermal_roi_values)
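
The reflected, atmospheric, and window radiance terms above all use the same Planck relation between raw sensor counts and temperature. A small sketch of that relation and its inverse, using the default constants from the function signature:

import numpy as np

R1, R2, B, F, O = 21106.77, 0.012545258, 1501, 1, -7340

def temp_to_raw(t_celsius):
    # raw = R1 / (R2 * (exp(B / (T + 273.15)) - F)) - O
    return R1 / (R2 * (np.exp(B / (t_celsius + 273.15)) - F)) - O

def raw_to_temp(raw):
    # Inverse: T = B / ln(R1 / (R2 * (raw + O)) + F) - 273.15
    return B / np.log(R1 / (R2 * (raw + O)) + F) - 273.15

t = 35.0
print(round(raw_to_temp(temp_to_raw(t)), 6))  # 35.0 (round trip)
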

[Diff too large to display]

@ -1,5 +1,5 @@
#!/usr/bin/env python
#******************************************************************************
# ******************************************************************************
# $Id$
#
# Project: GDAL Python Interface
@ -8,7 +8,7 @@
# Author: Frank Warmerdam, warmerdam@pobox.com
# Trent Hare (USGS)
#
#******************************************************************************
# ******************************************************************************
# Copyright (c) 2009, Frank Warmerdam
# Copyright (c) 2010, Even Rouault <even dot rouault at mines-paris dot org>
#
@ -29,13 +29,14 @@
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
# ******************************************************************************
import sys
import numpy
from osgeo import gdal
# =============================================================================
# rgb_to_hsv()
#
@ -43,88 +44,95 @@ from osgeo import gdal
# hsv values will be with hue and saturation in the range [0,1] and value
# in the range [0,255]
#
def rgb_to_hsv( r,g,b ):
def rgb_to_hsv(r, g, b):
maxc = numpy.maximum(r,numpy.maximum(g,b))
minc = numpy.minimum(r,numpy.minimum(g,b))
maxc = numpy.maximum(r, numpy.maximum(g, b))
minc = numpy.minimum(r, numpy.minimum(g, b))
v = maxc
minc_eq_maxc = numpy.equal(minc,maxc)
minc_eq_maxc = numpy.equal(minc, maxc)
# compute the difference, but reset zeros to ones to avoid divide by zeros later.
ones = numpy.ones((r.shape[0],r.shape[1]))
maxc_minus_minc = numpy.choose( minc_eq_maxc, (maxc-minc,ones) )
ones = numpy.ones((r.shape[0], r.shape[1]))
maxc_minus_minc = numpy.choose(minc_eq_maxc, (maxc - minc, ones))
s = (maxc-minc) / numpy.maximum(ones,maxc)
rc = (maxc-r) / maxc_minus_minc
gc = (maxc-g) / maxc_minus_minc
bc = (maxc-b) / maxc_minus_minc
s = (maxc - minc) / numpy.maximum(ones, maxc)
rc = (maxc - r) / maxc_minus_minc
gc = (maxc - g) / maxc_minus_minc
bc = (maxc - b) / maxc_minus_minc
maxc_is_r = numpy.equal(maxc,r)
maxc_is_g = numpy.equal(maxc,g)
maxc_is_b = numpy.equal(maxc,b)
maxc_is_r = numpy.equal(maxc, r)
maxc_is_g = numpy.equal(maxc, g)
maxc_is_b = numpy.equal(maxc, b)
h = numpy.zeros((r.shape[0],r.shape[1]))
h = numpy.choose( maxc_is_b, (h,4.0+gc-rc) )
h = numpy.choose( maxc_is_g, (h,2.0+rc-bc) )
h = numpy.choose( maxc_is_r, (h,bc-gc) )
h = numpy.zeros((r.shape[0], r.shape[1]))
h = numpy.choose(maxc_is_b, (h, 4.0 + gc - rc))
h = numpy.choose(maxc_is_g, (h, 2.0 + rc - bc))
h = numpy.choose(maxc_is_r, (h, bc - gc))
h = numpy.mod(h/6.0,1.0)
h = numpy.mod(h / 6.0, 1.0)
hsv = numpy.asarray([h,s,v])
hsv = numpy.asarray([h, s, v])
return hsv
# =============================================================================
# hsv_to_rgb()
#
# hsv comes in as [h,s,v] with hue and saturation in the range [0,1],
# but value in the range [0,255].
def hsv_to_rgb( hsv ):
def hsv_to_rgb(hsv):
h = hsv[0]
s = hsv[1]
v = hsv[2]
#if s == 0.0: return v, v, v
i = (h*6.0).astype(int)
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
# if s == 0.0: return v, v, v
i = (h * 6.0).astype(int)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
r = i.choose( v, q, p, p, t, v )
g = i.choose( t, v, v, q, p, p )
b = i.choose( p, p, t, v, v, q )
r = i.choose(v, q, p, p, t, v)
g = i.choose(t, v, v, q, p, p)
b = i.choose(p, p, t, v, v, q)
rgb = numpy.asarray([r,g,b]).astype(numpy.uint8)
rgb = numpy.asarray([r, g, b]).astype(numpy.uint8)
return rgb
# =============================================================================
# Usage()
def Usage():
print("""Usage: hsv_merge.py [-q] [-of format] src_color src_greyscale dst_color
print(
"""Usage: hsv_merge.py [-q] [-of format] src_color src_greyscale dst_color
where src_color is a RGB or RGBA dataset,
src_greyscale is a greyscale dataset (e.g. the result of gdaldem hillshade)
dst_color will be a RGB or RGBA dataset using the greyscale as the
intensity for the color dataset.
""")
"""
)
sys.exit(1)
# =============================================================================
# Mainline
# =============================================================================
argv = gdal.GeneralCmdLineProcessor( sys.argv )
argv = gdal.GeneralCmdLineProcessor(sys.argv)
if argv is None:
sys.exit( 0 )
sys.exit(0)
format = 'GTiff'
format = "GTiff"
src_color_filename = None
src_greyscale_filename = None
dst_color_filename = None
@ -135,11 +143,11 @@ i = 1
while i < len(argv):
arg = argv[i]
if arg == '-of':
if arg == "-of":
i = i + 1
format = argv[i]
elif arg == '-q' or arg == '-quiet':
elif arg == "-q" or arg == "-quiet":
quiet = True
elif src_color_filename is None:
@ -160,22 +168,27 @@ if dst_color_filename is None:
datatype = gdal.GDT_Byte
hilldataset = gdal.Open( src_greyscale_filename, gdal.GA_ReadOnly )
colordataset = gdal.Open( src_color_filename, gdal.GA_ReadOnly )
hilldataset = gdal.Open(src_greyscale_filename, gdal.GA_ReadOnly)
colordataset = gdal.Open(src_color_filename, gdal.GA_ReadOnly)
#check for 3 or 4 bands in the color file
if (colordataset.RasterCount != 3 and colordataset.RasterCount != 4):
print('Source image does not appear to have three or four bands as required.')
# check for 3 or 4 bands in the color file
if colordataset.RasterCount != 3 and colordataset.RasterCount != 4:
print("Source image does not appear to have three or four bands as required.")
sys.exit(1)
#define output format, name, size, type and set projection
# define output format, name, size, type and set projection
out_driver = gdal.GetDriverByName(format)
outdataset = out_driver.Create(dst_color_filename, colordataset.RasterXSize, \
colordataset.RasterYSize, colordataset.RasterCount, datatype)
outdataset = out_driver.Create(
dst_color_filename,
colordataset.RasterXSize,
colordataset.RasterYSize,
colordataset.RasterCount,
datatype,
)
outdataset.SetProjection(hilldataset.GetProjection())
outdataset.SetGeoTransform(hilldataset.GetGeoTransform())
#assign RGB and hillshade bands
# assign RGB and hillshade bands
rBand = colordataset.GetRasterBand(1)
gBand = colordataset.GetRasterBand(2)
bBand = colordataset.GetRasterBand(3)
@ -187,37 +200,37 @@ else:
hillband = hilldataset.GetRasterBand(1)
hillbandnodatavalue = hillband.GetNoDataValue()
#check for same file size
if ((rBand.YSize != hillband.YSize) or (rBand.XSize != hillband.XSize)):
print('Color and hillshade must be the same size in pixels.')
# check for same file size
if (rBand.YSize != hillband.YSize) or (rBand.XSize != hillband.XSize):
print("Color and hillshade must be the same size in pixels.")
sys.exit(1)
#loop over lines to apply hillshade
# loop over lines to apply hillshade
for i in range(hillband.YSize):
#load RGB and Hillshade arrays
# load RGB and Hillshade arrays
rScanline = rBand.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)
gScanline = gBand.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)
bScanline = bBand.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)
hillScanline = hillband.ReadAsArray(0, i, hillband.XSize, 1, hillband.XSize, 1)
#convert to HSV
hsv = rgb_to_hsv( rScanline, gScanline, bScanline )
# convert to HSV
hsv = rgb_to_hsv(rScanline, gScanline, bScanline)
# if there's nodata on the hillband, use the v value from the color
# dataset instead of the hillshade value.
if hillbandnodatavalue is not None:
equal_to_nodata = numpy.equal(hillScanline, hillbandnodatavalue)
v = numpy.choose(equal_to_nodata,(hillScanline,hsv[2]))
v = numpy.choose(equal_to_nodata, (hillScanline, hsv[2]))
else:
v = hillScanline
#replace v with hillshade
hsv_adjusted = numpy.asarray( [hsv[0], hsv[1], v] )
# replace v with hillshade
hsv_adjusted = numpy.asarray([hsv[0], hsv[1], v])
#convert back to RGB
dst_color = hsv_to_rgb( hsv_adjusted )
# convert back to RGB
dst_color = hsv_to_rgb(hsv_adjusted)
#write out new RGB bands to output one band at a time
# write out new RGB bands to output one band at a time
outband = outdataset.GetRasterBand(1)
outband.WriteArray(dst_color[0], 0, i)
outband = outdataset.GetRasterBand(2)
@ -229,6 +242,6 @@ for i in range(hillband.YSize):
outband = outdataset.GetRasterBand(4)
outband.WriteArray(aScanline, 0, i)
#update progress line
# update progress line
if not quiet:
gdal.TermProgress_nocb( (float(i+1) / hillband.YSize) )
gdal.TermProgress_nocb((float(i + 1) / hillband.YSize))
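
A per-pixel illustration of what the scanline loop above does: keep hue and saturation from the color raster and take the value (brightness) from the hillshade. colorsys from the standard library is used here only for illustration; the script itself works on whole scanlines with NumPy for speed.

import colorsys

r, g, b = 120, 180, 90  # pixel from the color dataset
hill = 200              # pixel from the hillshade (0-255 intensity)

h, s, _ = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
nr, ng, nb = (int(round(c * 255)) for c in colorsys.hsv_to_rgb(h, s, hill / 255.0))
print(nr, ng, nb)  # same hue/saturation, brightness taken from the hillshade
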


@ -5,17 +5,30 @@ from opendm import log
from opendm import system
from opendm import io
def generate_tiles(geotiff, output_dir, max_concurrency, resolution):
circumference_earth_cm = 2*math.pi*637_813_700
circumference_earth_cm = 2 * math.pi * 637_813_700
px_per_tile = 256
resolution_equator_cm = circumference_earth_cm/px_per_tile
zoom = math.ceil(math.log(resolution_equator_cm/resolution, 2))
resolution_equator_cm = circumference_earth_cm / px_per_tile
zoom = math.ceil(math.log(resolution_equator_cm / resolution, 2))
min_zoom = 5 # 4.89 km/px
max_zoom = min(zoom, 23) # No deeper zoom than 23 (1.86 cm/px at equator)
gdal2tiles = os.path.join(os.path.dirname(__file__), "gdal2tiles.py")
system.run('%s "%s" --processes %s -z %s-%s -n -w none "%s" "%s"' % (sys.executable, gdal2tiles, max_concurrency, min_zoom, max_zoom, geotiff, output_dir))
system.run(
'%s "%s" --processes %s -z %s-%s -n -w none "%s" "%s"'
% (
sys.executable,
gdal2tiles,
max_concurrency,
min_zoom,
max_zoom,
geotiff,
output_dir,
)
)
def generate_orthophoto_tiles(geotiff, output_dir, max_concurrency, resolution):
try:
@ -23,6 +36,7 @@ def generate_orthophoto_tiles(geotiff, output_dir, max_concurrency, resolution):
except Exception as e:
log.ODM_WARNING("Cannot generate orthophoto tiles: %s" % str(e))
def generate_colored_hillshade(geotiff):
relief_file = os.path.join(os.path.dirname(__file__), "color_relief.txt")
hsv_merge_script = os.path.join(os.path.dirname(__file__), "hsv_merge.py")
@ -37,18 +51,36 @@ def generate_colored_hillshade(geotiff):
if os.path.isfile(f):
os.remove(f)
system.run('gdaldem color-relief "%s" "%s" "%s" -alpha -co ALPHA=YES' % (geotiff, relief_file, colored_dem))
system.run('gdaldem hillshade "%s" "%s" -z 1.0 -s 1.0 -az 315.0 -alt 45.0' % (geotiff, hillshade_dem))
system.run('%s "%s" "%s" "%s" "%s"' % (sys.executable, hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))
system.run(
'gdaldem color-relief "%s" "%s" "%s" -alpha -co ALPHA=YES'
% (geotiff, relief_file, colored_dem)
)
system.run(
'gdaldem hillshade "%s" "%s" -z 1.0 -s 1.0 -az 315.0 -alt 45.0'
% (geotiff, hillshade_dem)
)
system.run(
'%s "%s" "%s" "%s" "%s"'
% (
sys.executable,
hsv_merge_script,
colored_dem,
hillshade_dem,
colored_hillshade_dem,
)
)
return outputs
except Exception as e:
log.ODM_WARNING("Cannot generate colored hillshade: %s" % str(e))
return (None, None, None)
def generate_dem_tiles(geotiff, output_dir, max_concurrency, resolution):
try:
colored_dem, hillshade_dem, colored_hillshade_dem = generate_colored_hillshade(geotiff)
colored_dem, hillshade_dem, colored_hillshade_dem = generate_colored_hillshade(
geotiff
)
generate_tiles(colored_hillshade_dem, output_dir, max_concurrency, resolution)
# Cleanup
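
The zoom range in generate_tiles above is derived from the requested ground resolution: the deepest zoom is the one whose per-pixel resolution at the equator still matches the orthophoto, capped at 23, with a fixed minimum zoom of 5. A small sketch of that computation:

import math

def tile_zoom_range(resolution_cm_px, px_per_tile=256):
    # Deepest zoom whose equatorial ground resolution is still at least as fine
    # as the orthophoto's resolution (cm/px), capped at 23 as in generate_tiles().
    circumference_earth_cm = 2 * math.pi * 637_813_700
    resolution_equator_cm = circumference_earth_cm / px_per_tile
    max_zoom = min(math.ceil(math.log(resolution_equator_cm / resolution_cm_px, 2)), 23)
    return 5, max_zoom  # min_zoom fixed at 5 (~4.89 km/px)

print(tile_zoom_range(5.0))  # a 5 cm/px orthophoto -> (5, 22)
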


@ -21,6 +21,7 @@ from opendm.photo import ODM_Photo
# Ignore warnings about proj information being lost
warnings.filterwarnings("ignore")
class ODM_Reconstruction(object):
def __init__(self, photos):
self.photos = photos
@ -28,7 +29,7 @@ class ODM_Reconstruction(object):
self.gcp = None
self.multi_camera = self.detect_multi_camera()
self.filter_photos()
def detect_multi_camera(self):
"""
Looks at the reconstruction photos and determines if this
@ -44,9 +45,9 @@ class ODM_Reconstruction(object):
band_indexes[p.band_name] = str(p.band_index)
band_photos[p.band_name].append(p)
bands_count = len(band_photos)
# Band name with the minimum number of photos
max_band_name = None
max_photos = -1
@ -59,17 +60,25 @@ class ODM_Reconstruction(object):
# Validate that all bands have the same number of images,
# otherwise this is not a multi-camera setup
img_per_band = len(band_photos[max_band_name])
mc = []
for band_name in band_indexes:
mc.append({'name': band_name, 'photos': band_photos[band_name]})
mc.append({"name": band_name, "photos": band_photos[band_name]})
filter_missing = False
for band in band_photos:
if len(band_photos[band]) < img_per_band:
log.ODM_WARNING("Multi-camera setup detected, but band \"%s\" (identified from \"%s\") has only %s images (instead of %s), perhaps images are missing or are corrupted." % (band, band_photos[band][0].filename, len(band_photos[band]), len(band_photos[max_band_name])))
log.ODM_WARNING(
'Multi-camera setup detected, but band "%s" (identified from "%s") has only %s images (instead of %s), perhaps images are missing or are corrupted.'
% (
band,
band_photos[band][0].filename,
len(band_photos[band]),
len(band_photos[max_band_name]),
)
)
filter_missing = True
if filter_missing:
# Calculate files to ignore
_, p2s = multispectral.compute_band_maps(mc, max_band_name)
@ -81,149 +90,198 @@ class ODM_Reconstruction(object):
for filename in p2s:
if len(p2s[filename]) < max_files_per_band:
photos_to_remove = p2s[filename] + [p for p in self.photos if p.filename == filename]
photos_to_remove = p2s[filename] + [
p for p in self.photos if p.filename == filename
]
for photo in photos_to_remove:
log.ODM_WARNING("Excluding %s" % photo.filename)
self.photos = [p for p in self.photos if p != photo]
for i in range(len(mc)):
mc[i]['photos'] = [p for p in mc[i]['photos'] if p != photo]
mc[i]["photos"] = [
p for p in mc[i]["photos"] if p != photo
]
log.ODM_INFO("New image count: %s" % len(self.photos))
# We enforce a normalized band order for all bands that we can identify
# and rely on the manufacturer's band_indexes as a fallback for all others
normalized_band_order = {
'RGB': '0',
'REDGREENBLUE': '0',
'RED': '1',
'R': '1',
'GREEN': '2',
'G': '2',
'BLUE': '3',
'B': '3',
'NIR': '4',
'N': '4',
'REDEDGE': '5',
'RE': '5',
'PANCHRO': '6',
'LWIR': '7',
'L': '7',
"RGB": "0",
"REDGREENBLUE": "0",
"RED": "1",
"R": "1",
"GREEN": "2",
"G": "2",
"BLUE": "3",
"B": "3",
"NIR": "4",
"N": "4",
"REDEDGE": "5",
"RE": "5",
"PANCHRO": "6",
"LWIR": "7",
"L": "7",
}
for band_name in band_indexes:
if band_name.upper() not in normalized_band_order:
log.ODM_WARNING(f"Cannot identify order for {band_name} band, using manufacturer suggested index instead")
log.ODM_WARNING(
f"Cannot identify order for {band_name} band, using manufacturer suggested index instead"
)
# Sort
mc.sort(key=lambda x: normalized_band_order.get(x['name'].upper(), '9' + band_indexes[x['name']]))
mc.sort(
key=lambda x: normalized_band_order.get(
x["name"].upper(), "9" + band_indexes[x["name"]]
)
)
for c, d in enumerate(mc):
log.ODM_INFO(f"Band {c + 1}: {d['name']}")
return mc
return None
def filter_photos(self):
if not self.multi_camera:
return # Nothing to do, use all images
return # Nothing to do, use all images
else:
# Sometimes people might try process both RGB + Blue/Red/Green bands
# because these are the contents of the SD card from a drone (e.g. DJI P4 Multispectral)
# But we don't want to process both, so we discard the RGB files in favor
bands = {}
for b in self.multi_camera:
bands[b['name'].lower()] = b['name']
bands[b["name"].lower()] = b["name"]
bands_to_remove = []
if 'rgb' in bands or 'redgreenblue' in bands:
if 'red' in bands and 'green' in bands and 'blue' in bands:
bands_to_remove.append(bands['rgb'] if 'rgb' in bands else bands['redgreenblue'])
if "rgb" in bands or "redgreenblue" in bands:
if "red" in bands and "green" in bands and "blue" in bands:
bands_to_remove.append(
bands["rgb"] if "rgb" in bands else bands["redgreenblue"]
)
# Mavic 3M's RGB camera lens are too different than the multispectral ones
# so we drop the RGB channel instead
elif self.photos[0].is_make_model("DJI", "M3M") and 'red' in bands and 'green' in bands:
bands_to_remove.append(bands['rgb'] if 'rgb' in bands else bands['redgreenblue'])
elif (
self.photos[0].is_make_model("DJI", "M3M")
and "red" in bands
and "green" in bands
):
bands_to_remove.append(
bands["rgb"] if "rgb" in bands else bands["redgreenblue"]
)
else:
for b in ['red', 'green', 'blue']:
for b in ["red", "green", "blue"]:
if b in bands:
bands_to_remove.append(bands[b])
if len(bands_to_remove) > 0:
log.ODM_WARNING("Redundant bands detected, probably because RGB images are mixed with single band images. We will trim some bands as needed")
log.ODM_WARNING(
"Redundant bands detected, probably because RGB images are mixed with single band images. We will trim some bands as needed"
)
for band_to_remove in bands_to_remove:
self.multi_camera = [b for b in self.multi_camera if b['name'] != band_to_remove]
self.multi_camera = [
b for b in self.multi_camera if b["name"] != band_to_remove
]
photos_before = len(self.photos)
self.photos = [p for p in self.photos if p.band_name != band_to_remove]
self.photos = [
p for p in self.photos if p.band_name != band_to_remove
]
photos_after = len(self.photos)
log.ODM_WARNING("Skipping %s band (%s images)" % (band_to_remove, photos_before - photos_after))
log.ODM_WARNING(
"Skipping %s band (%s images)"
% (band_to_remove, photos_before - photos_after)
)
def is_georeferenced(self):
return self.georef is not None
def has_gcp(self):
return self.is_georeferenced() and self.gcp is not None and self.gcp.exists()
def has_geotagged_photos(self):
for photo in self.photos:
if photo.latitude is None and photo.longitude is None:
return False
return True
return True
def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, output_model_txt_geo, rerun=False):
if not io.file_exists(output_coords_file) or not io.file_exists(output_gcp_file) or rerun:
def georeference_with_gcp(
self,
gcp_file,
output_coords_file,
output_gcp_file,
output_model_txt_geo,
rerun=False,
):
if (
not io.file_exists(output_coords_file)
or not io.file_exists(output_gcp_file)
or rerun
):
gcp = GCPFile(gcp_file)
if gcp.exists():
if gcp.entries_count() == 0:
raise RuntimeError("This GCP file does not have any entries. Are the entries entered in the proper format?")
raise RuntimeError(
"This GCP file does not have any entries. Are the entries entered in the proper format?"
)
gcp.check_entries()
# Convert GCP file to a UTM projection since the rest of the pipeline
# does not handle other SRS well.
rejected_entries = []
utm_gcp = GCPFile(gcp.create_utm_copy(output_gcp_file, filenames=[p.filename for p in self.photos], rejected_entries=rejected_entries, include_extras=True))
utm_gcp = GCPFile(
gcp.create_utm_copy(
output_gcp_file,
filenames=[p.filename for p in self.photos],
rejected_entries=rejected_entries,
include_extras=True,
)
)
if not utm_gcp.exists():
raise RuntimeError("Could not project GCP file to UTM. Please double check your GCP file for mistakes.")
raise RuntimeError(
"Could not project GCP file to UTM. Please double check your GCP file for mistakes."
)
for re in rejected_entries:
log.ODM_WARNING("GCP line ignored (image not found): %s" % str(re))
if utm_gcp.entries_count() > 0:
log.ODM_INFO("%s GCP points will be used for georeferencing" % utm_gcp.entries_count())
log.ODM_INFO(
"%s GCP points will be used for georeferencing"
% utm_gcp.entries_count()
)
else:
raise RuntimeError("A GCP file was provided, but no valid GCP entries could be used. Note that the GCP file is case sensitive (\".JPG\" is not the same as \".jpg\").")
raise RuntimeError(
'A GCP file was provided, but no valid GCP entries could be used. Note that the GCP file is case sensitive (".JPG" is not the same as ".jpg").'
)
self.gcp = utm_gcp
# Compute RTC offsets from GCP points
x_pos = [p.x for p in utm_gcp.iter_entries()]
y_pos = [p.y for p in utm_gcp.iter_entries()]
x_off, y_off = int(np.round(np.mean(x_pos))), int(np.round(np.mean(y_pos)))
x_off, y_off = int(np.round(np.mean(x_pos))), int(
np.round(np.mean(y_pos))
)
# Create coords file, we'll be using this later
# during georeferencing
with open(output_coords_file, 'w') as f:
with open(output_coords_file, "w") as f:
coords_header = gcp.wgs84_utm_zone()
f.write(coords_header + "\n")
f.write("{} {}\n".format(x_off, y_off))
log.ODM_INFO("Generated coords file from GCP: %s" % coords_header)
# Deprecated: This is mostly for backward compatibility and should
# be removed at some point
shutil.copyfile(output_coords_file, output_model_txt_geo)
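The generated coords file is a two-line text file: a CRS header followed by the x/y offsets. A sketch with assumed values (the real header comes from gcp.wgs84_utm_zone() and the offsets from the mean of the GCP coordinates):

coords_header = "WGS84 UTM 17N"  # assumed zone
x_off, y_off = 368000, 3067000   # assumed offsets
with open("coords.txt", "w") as f:
    f.write(coords_header + "\n")             # line 1: CRS header
    f.write("{} {}\n".format(x_off, y_off))   # line 2: x/y offset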
@ -235,45 +293,51 @@ class ODM_Reconstruction(object):
log.ODM_INFO("Coordinates file already exist: %s" % output_coords_file)
log.ODM_INFO("GCP file already exist: %s" % output_gcp_file)
self.gcp = GCPFile(output_gcp_file)
self.georef = ODM_GeoRef.FromCoordsFile(output_coords_file)
return self.georef
def georeference_with_gps(self, images_path, output_coords_file, output_model_txt_geo, rerun=False):
def georeference_with_gps(
self, images_path, output_coords_file, output_model_txt_geo, rerun=False
):
try:
if not io.file_exists(output_coords_file) or rerun:
location.extract_utm_coords(self.photos, images_path, output_coords_file)
location.extract_utm_coords(
self.photos, images_path, output_coords_file
)
else:
log.ODM_INFO("Coordinates file already exist: %s" % output_coords_file)
# Deprecated: This is mostly for backward compatibility and should
# be removed at some point
if not io.file_exists(output_model_txt_geo) or rerun:
with open(output_coords_file, 'r') as f:
with open(output_model_txt_geo, 'w+') as w:
w.write(f.readline()) # CRS
w.write(f.readline()) # Offset
with open(output_coords_file, "r") as f:
with open(output_model_txt_geo, "w+") as w:
w.write(f.readline()) # CRS
w.write(f.readline()) # Offset
else:
log.ODM_INFO("Model geo file already exist: %s" % output_model_txt_geo)
self.georef = ODM_GeoRef.FromCoordsFile(output_coords_file)
except:
log.ODM_WARNING('Could not generate coordinates file. The orthophoto will not be georeferenced.')
log.ODM_WARNING(
"Could not generate coordinates file. The orthophoto will not be georeferenced."
)
self.gcp = GCPFile(None)
return self.georef
def save_proj_srs(self, file):
# Save proj to file for future use (unless this
# Save proj to file for future use (unless this
# dataset is not georeferenced)
if self.is_georeferenced():
with open(file, 'w') as f:
with open(file, "w") as f:
f.write(self.get_proj_srs())
def get_proj_srs(self):
if self.is_georeferenced():
return self.georef.proj4()
def get_proj_offset(self):
if self.is_georeferenced():
return (self.georef.utm_east_offset, self.georef.utm_north_offset)
@ -284,13 +348,14 @@ class ODM_Reconstruction(object):
for p in self.photos:
if p.filename == filename:
return p
class ODM_GeoRef(object):
@staticmethod
def FromCoordsFile(coords_file):
# check for coordinate file existence
if not io.file_exists(coords_file):
log.ODM_WARNING('Could not find file %s' % coords_file)
log.ODM_WARNING("Could not find file %s" % coords_file)
return
srs = None
@ -318,95 +383,134 @@ class ODM_GeoRef(object):
def proj4(self):
return self.srs.to_proj4()
def utm_offset(self):
return (self.utm_east_offset, self.utm_north_offset)
class ODM_Tree(object):
def __init__(self, root_path, gcp_file = None, geo_file = None, align_file = None):
def __init__(self, root_path, gcp_file=None, geo_file=None, align_file=None):
# root path to the project
self.root_path = io.absolute_path_file(root_path)
self.input_images = os.path.join(self.root_path, 'images')
self.input_images = os.path.join(self.root_path, "images")
# modules paths
# here is defined where all modules should be located in
# order to keep track of all files and directories during the
# whole reconstruction process.
self.dataset_raw = os.path.join(self.root_path, 'images')
self.opensfm = os.path.join(self.root_path, 'opensfm')
self.openmvs = os.path.join(self.opensfm, 'undistorted', 'openmvs')
self.odm_meshing = os.path.join(self.root_path, 'odm_meshing')
self.odm_texturing = os.path.join(self.root_path, 'odm_texturing')
self.odm_25dtexturing = os.path.join(self.root_path, 'odm_texturing_25d')
self.odm_georeferencing = os.path.join(self.root_path, 'odm_georeferencing')
self.odm_filterpoints = os.path.join(self.root_path, 'odm_filterpoints')
self.odm_orthophoto = os.path.join(self.root_path, 'odm_orthophoto')
self.odm_report = os.path.join(self.root_path, 'odm_report')
self.dataset_raw = os.path.join(self.root_path, "images")
self.opensfm = os.path.join(self.root_path, "opensfm")
self.openmvs = os.path.join(self.opensfm, "undistorted", "openmvs")
self.odm_meshing = os.path.join(self.root_path, "odm_meshing")
self.odm_texturing = os.path.join(self.root_path, "odm_texturing")
self.odm_25dtexturing = os.path.join(self.root_path, "odm_texturing_25d")
self.odm_georeferencing = os.path.join(self.root_path, "odm_georeferencing")
self.odm_filterpoints = os.path.join(self.root_path, "odm_filterpoints")
self.odm_orthophoto = os.path.join(self.root_path, "odm_orthophoto")
self.odm_report = os.path.join(self.root_path, "odm_report")
# important files paths
# benchmarking
self.benchmarking = os.path.join(self.root_path, 'benchmark.txt')
self.dataset_list = os.path.join(self.root_path, 'img_list.txt')
self.benchmarking = os.path.join(self.root_path, "benchmark.txt")
self.dataset_list = os.path.join(self.root_path, "img_list.txt")
# opensfm
self.opensfm_image_list = os.path.join(self.opensfm, 'image_list.txt')
self.opensfm_reconstruction = os.path.join(self.opensfm, 'reconstruction.json')
self.opensfm_reconstruction_nvm = os.path.join(self.opensfm, 'undistorted/reconstruction.nvm')
self.opensfm_geocoords_reconstruction = os.path.join(self.opensfm, 'reconstruction.geocoords.json')
self.opensfm_topocentric_reconstruction = os.path.join(self.opensfm, 'reconstruction.topocentric.json')
self.opensfm_image_list = os.path.join(self.opensfm, "image_list.txt")
self.opensfm_reconstruction = os.path.join(self.opensfm, "reconstruction.json")
self.opensfm_reconstruction_nvm = os.path.join(
self.opensfm, "undistorted/reconstruction.nvm"
)
self.opensfm_geocoords_reconstruction = os.path.join(
self.opensfm, "reconstruction.geocoords.json"
)
self.opensfm_topocentric_reconstruction = os.path.join(
self.opensfm, "reconstruction.topocentric.json"
)
# OpenMVS
self.openmvs_model = os.path.join(self.openmvs, 'scene_dense_dense_filtered.ply')
self.openmvs_model = os.path.join(
self.openmvs, "scene_dense_dense_filtered.ply"
)
# filter points
self.filtered_point_cloud = os.path.join(self.odm_filterpoints, "point_cloud.ply")
self.filtered_point_cloud_stats = os.path.join(self.odm_filterpoints, "point_cloud_stats.json")
self.filtered_point_cloud = os.path.join(
self.odm_filterpoints, "point_cloud.ply"
)
self.filtered_point_cloud_stats = os.path.join(
self.odm_filterpoints, "point_cloud_stats.json"
)
# odm_meshing
self.odm_mesh = os.path.join(self.odm_meshing, 'odm_mesh.ply')
self.odm_meshing_log = os.path.join(self.odm_meshing, 'odm_meshing_log.txt')
self.odm_25dmesh = os.path.join(self.odm_meshing, 'odm_25dmesh.ply')
self.odm_25dmeshing_log = os.path.join(self.odm_meshing, 'odm_25dmeshing_log.txt')
self.odm_mesh = os.path.join(self.odm_meshing, "odm_mesh.ply")
self.odm_meshing_log = os.path.join(self.odm_meshing, "odm_meshing_log.txt")
self.odm_25dmesh = os.path.join(self.odm_meshing, "odm_25dmesh.ply")
self.odm_25dmeshing_log = os.path.join(
self.odm_meshing, "odm_25dmeshing_log.txt"
)
# texturing
self.odm_textured_model_obj = 'odm_textured_model_geo.obj'
self.odm_textured_model_glb = 'odm_textured_model_geo.glb'
self.odm_textured_model_obj = "odm_textured_model_geo.obj"
self.odm_textured_model_glb = "odm_textured_model_geo.glb"
# odm_georeferencing
self.odm_georeferencing_coords = os.path.join(
self.odm_georeferencing, 'coords.txt')
self.odm_georeferencing_gcp = gcp_file or io.find('gcp_list.txt', self.root_path)
self.odm_georeferencing_gcp_utm = os.path.join(self.odm_georeferencing, 'gcp_list_utm.txt')
self.odm_geo_file = geo_file or io.find('geo.txt', self.root_path)
self.odm_align_file = align_file or io.find('align.laz', self.root_path) or io.find('align.las', self.root_path) or io.find('align.tif', self.root_path)
self.odm_georeferencing_proj = 'proj.txt'
self.odm_georeferencing, "coords.txt"
)
self.odm_georeferencing_gcp = gcp_file or io.find(
"gcp_list.txt", self.root_path
)
self.odm_georeferencing_gcp_utm = os.path.join(
self.odm_georeferencing, "gcp_list_utm.txt"
)
self.odm_geo_file = geo_file or io.find("geo.txt", self.root_path)
self.odm_align_file = (
align_file
or io.find("align.laz", self.root_path)
or io.find("align.las", self.root_path)
or io.find("align.tif", self.root_path)
)
self.odm_georeferencing_proj = "proj.txt"
self.odm_georeferencing_model_txt_geo = os.path.join(
self.odm_georeferencing, 'odm_georeferencing_model_geo.txt')
self.odm_georeferencing, "odm_georeferencing_model_geo.txt"
)
self.odm_georeferencing_xyz_file = os.path.join(
self.odm_georeferencing, 'odm_georeferenced_model.csv')
self.odm_georeferencing, "odm_georeferenced_model.csv"
)
self.odm_georeferencing_model_laz = os.path.join(
self.odm_georeferencing, 'odm_georeferenced_model.laz')
self.odm_georeferencing, "odm_georeferenced_model.laz"
)
self.odm_georeferencing_model_las = os.path.join(
self.odm_georeferencing, 'odm_georeferenced_model.las')
self.odm_georeferencing, "odm_georeferenced_model.las"
)
self.odm_georeferencing_alignment_matrix = os.path.join(
self.odm_georeferencing, 'alignment_matrix.json'
self.odm_georeferencing, "alignment_matrix.json"
)
# odm_orthophoto
self.odm_orthophoto_render = os.path.join(self.odm_orthophoto, 'odm_orthophoto_render.tif')
self.odm_orthophoto_tif = os.path.join(self.odm_orthophoto, 'odm_orthophoto.tif')
self.odm_orthophoto_corners = os.path.join(self.odm_orthophoto, 'odm_orthophoto_corners.txt')
self.odm_orthophoto_log = os.path.join(self.odm_orthophoto, 'odm_orthophoto_log.txt')
self.odm_orthophoto_tif_log = os.path.join(self.odm_orthophoto, 'gdal_translate_log.txt')
self.odm_orthophoto_render = os.path.join(
self.odm_orthophoto, "odm_orthophoto_render.tif"
)
self.odm_orthophoto_tif = os.path.join(
self.odm_orthophoto, "odm_orthophoto.tif"
)
self.odm_orthophoto_corners = os.path.join(
self.odm_orthophoto, "odm_orthophoto_corners.txt"
)
self.odm_orthophoto_log = os.path.join(
self.odm_orthophoto, "odm_orthophoto_log.txt"
)
self.odm_orthophoto_tif_log = os.path.join(
self.odm_orthophoto, "gdal_translate_log.txt"
)
# tiles
self.orthophoto_tiles = os.path.join(self.root_path, "orthophoto_tiles")
# Split-merge
self.submodels_path = os.path.join(self.root_path, 'submodels')
# Split-merge
self.submodels_path = os.path.join(self.root_path, "submodels")
# Tiles
self.entwine_pointcloud = self.path("entwine_pointcloud")
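As a rough usage sketch of the layout above: the project path is hypothetical and this assumes the class is importable inside an ODM install.

tree = ODM_Tree("/datasets/project")  # hypothetical project root
print(tree.opensfm)             # /datasets/project/opensfm
print(tree.openmvs)             # /datasets/project/opensfm/undistorted/openmvs
print(tree.odm_orthophoto_tif)  # /datasets/project/odm_orthophoto/odm_orthophoto.tif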
@ -436,28 +540,32 @@ class ODM_Stage:
"""
Does this stage need to be rerun?
"""
return (self.args.rerun is not None and self.args.rerun == self.name) or \
(self.args.rerun_all) or \
(self.args.rerun_from is not None and self.name in self.args.rerun_from)
def run(self, outputs = {}):
return (
(self.args.rerun is not None and self.args.rerun == self.name)
or (self.args.rerun_all)
or (self.args.rerun_from is not None and self.name in self.args.rerun_from)
)
def run(self, outputs={}):
start_time = system.now_raw()
log.logger.log_json_stage_run(self.name, start_time)
log.ODM_INFO('Running %s stage' % self.name)
log.ODM_INFO("Running %s stage" % self.name)
self.process(self.args, outputs)
# The tree variable should always be populated at this point
if outputs.get('tree') is None:
raise Exception("Assert violation: tree variable is missing from outputs dictionary.")
if outputs.get("tree") is None:
raise Exception(
"Assert violation: tree variable is missing from outputs dictionary."
)
try:
system.benchmark(start_time, outputs['tree'].benchmarking, self.name)
system.benchmark(start_time, outputs["tree"].benchmarking, self.name)
except Exception as e:
log.ODM_WARNING("Cannot write benchmark file: %s" % str(e))
log.ODM_INFO('Finished %s stage' % self.name)
log.ODM_INFO("Finished %s stage" % self.name)
self.update_progress_end()
# Last stage?
@ -474,7 +582,7 @@ class ODM_Stage:
return max(0.0, self.progress - self.prev_stage.progress)
else:
return max(0.0, self.progress)
def previous_stages_progress(self):
if self.prev_stage:
return max(0.0, self.prev_stage.progress)
@ -486,16 +594,16 @@ class ODM_Stage:
def update_progress(self, progress):
progress = max(0.0, min(100.0, progress))
progressbc.send_update(self.previous_stages_progress() +
(self.delta_progress() / 100.0) * float(progress))
progressbc.send_update(
self.previous_stages_progress()
+ (self.delta_progress() / 100.0) * float(progress)
)
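A worked example of the arithmetic above, with assumed numbers: if earlier stages account for 40% of the pipeline and this stage spans 20% of it, reporting 50% stage progress maps to 50% overall.

previous_stages = 40.0  # assumed: progress accumulated by earlier stages
delta = 20.0            # assumed: share of the pipeline owned by this stage
stage_progress = 50.0   # halfway through the current stage
print(previous_stages + (delta / 100.0) * stage_progress)  # 50.0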
def last_stage(self):
if self.next_stage:
return self.next_stage.last_stage()
else:
return self
def process(self, args, outputs):
raise NotImplementedError

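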

@ -6,6 +6,7 @@ from opendm.photo import find_largest_photo_dims
from osgeo import gdal
from opendm.arghelpers import double_quote
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
@ -15,7 +16,7 @@ class NumpyEncoder(json.JSONEncoder):
def get_depthmap_resolution(args, photos):
max_dims = find_largest_photo_dims(photos)
min_dim = 320 # Never go lower than this
min_dim = 320 # Never go lower than this
if max_dims is not None:
w, h = max_dims
@ -23,24 +24,29 @@ def get_depthmap_resolution(args, photos):
megapixels = (w * h) / 1e6
multiplier = 1
if megapixels < 6:
multiplier = 2
elif megapixels > 42:
multiplier = 0.5
pc_quality_scale = {
'ultra': 0.5,
'high': 0.25,
'medium': 0.125,
'low': 0.0675,
'lowest': 0.03375
"ultra": 0.5,
"high": 0.25,
"medium": 0.125,
"low": 0.0675,
"lowest": 0.03375,
}
return max(min_dim, int(max_dim * pc_quality_scale[args.pc_quality] * multiplier))
return max(
min_dim, int(max_dim * pc_quality_scale[args.pc_quality] * multiplier)
)
else:
log.ODM_WARNING("Cannot compute max image dimensions, going with default depthmap_resolution of 640")
return 640 # Sensible default
log.ODM_WARNING(
"Cannot compute max image dimensions, going with default depthmap_resolution of 640"
)
return 640 # Sensible default
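A worked example of the sizing rule above, assuming a 5472x3648 (~20 MP) input and --pc-quality medium:

w, h = 5472, 3648                     # assumed image size (~20 MP)
max_dim, min_dim = max(w, h), 320
multiplier = 1                        # 6 MP < 19.96 MP < 42 MP, so unchanged
scale = 0.125                         # 'medium'
print(max(min_dim, int(max_dim * scale * multiplier)))  # 684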
def get_raster_stats(geotiff):
stats = []
@ -49,15 +55,11 @@ def get_raster_stats(geotiff):
srcband = gtif.GetRasterBand(b + 1)
s = srcband.GetStatistics(True, True)
stats.append({
'min': s[0],
'max': s[1],
'mean': s[2],
'stddev': s[3]
})
stats.append({"min": s[0], "max": s[1], "mean": s[2], "stddev": s[3]})
return stats
def get_processing_results_paths():
return [
"odm_georeferencing",
@ -75,6 +77,7 @@ def get_processing_results_paths():
"log.json",
]
def copy_paths(paths, destination, rerun):
if not os.path.isdir(destination):
os.makedirs(destination)
@ -90,7 +93,9 @@ def copy_paths(paths, destination, rerun):
elif os.path.isdir(dst_path):
shutil.rmtree(dst_path)
except Exception as e:
log.ODM_WARNING("Cannot remove file %s: %s, skipping..." % (dst_path, str(e)))
log.ODM_WARNING(
"Cannot remove file %s: %s, skipping..." % (dst_path, str(e))
)
if not os.path.exists(dst_path):
if os.path.isfile(p):
@ -100,6 +105,7 @@ def copy_paths(paths, destination, rerun):
shutil.copytree(p, dst_path)
log.ODM_INFO("Copying %s --> %s" % (p, dst_path))
def rm_r(path):
try:
if os.path.isdir(path) and not os.path.islink(path):
@ -109,8 +115,10 @@ def rm_r(path):
except:
log.ODM_WARNING("Cannot remove %s" % path)
def np_to_json(arr):
return json.dumps(arr, cls=NumpyEncoder)
def np_from_json(json_dump):
return np.asarray(json.loads(json_dump))
return np.asarray(json.loads(json_dump))
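A round-trip sketch of the two helpers above, assuming they are in scope:

import numpy as np

arr = np.array([[1.0, 2.0], [3.0, 4.0]])
dumped = np_to_json(arr)                           # e.g. '[[1.0, 2.0], [3.0, 4.0]]'
print(np.array_equal(np_from_json(dumped), arr))   # True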


@ -37,16 +37,19 @@ from osgeo import gdal
def CopyBand(srcband, dstband):
for line in range(srcband.YSize):
line_data = srcband.ReadRaster(0, line, srcband.XSize, 1)
dstband.WriteRaster(0, line, srcband.XSize, 1, line_data,
buf_type=srcband.DataType)
dstband.WriteRaster(
0, line, srcband.XSize, 1, line_data, buf_type=srcband.DataType
)
def Usage():
print("""
print(
"""
gdal_fillnodata [-q] [-md max_distance] [-si smooth_iterations]
[-o name=value] [-b band]
srcfile [-nomask] [-mask filename] [-of format] [-co name=value]* [dstfile]
""")
"""
)
sys.exit(1)
@ -59,10 +62,10 @@ def main(argv):
src_band = 1
dst_filename = None
frmt = 'GTiff'
frmt = "GTiff"
creation_options = []
mask = 'default'
mask = "default"
gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor(argv)
@ -74,41 +77,41 @@ def main(argv):
while i < len(argv):
arg = argv[i]
if arg == '-of' or arg == '-f':
if arg == "-of" or arg == "-f":
i = i + 1
frmt = argv[i]
elif arg == '-co':
elif arg == "-co":
i = i + 1
creation_options.append(argv[i])
elif arg == '-q' or arg == '-quiet':
elif arg == "-q" or arg == "-quiet":
quiet_flag = 1
elif arg == '-si':
elif arg == "-si":
i = i + 1
smoothing_iterations = int(argv[i])
elif arg == '-b':
elif arg == "-b":
i = i + 1
src_band = int(argv[i])
elif arg == '-md':
elif arg == "-md":
i = i + 1
max_distance = float(argv[i])
elif arg == '-nomask':
mask = 'none'
elif arg == "-nomask":
mask = "none"
elif arg == '-mask':
elif arg == "-mask":
i = i + 1
mask = argv[i]
elif arg == '-mask':
elif arg == "-mask":
i = i + 1
mask = argv[i]
elif arg[:2] == '-h':
elif arg[:2] == "-h":
Usage()
elif src_filename is None:
@ -131,10 +134,10 @@ def main(argv):
try:
gdal.FillNodata
except AttributeError:
print('')
print("")
print('gdal.FillNodata() not available. You are likely using "old gen"')
print('bindings or an older version of the next gen bindings.')
print('')
print("bindings or an older version of the next gen bindings.")
print("")
sys.exit(1)
# =============================================================================
@ -147,14 +150,14 @@ def main(argv):
src_ds = gdal.Open(src_filename, gdal.GA_ReadOnly)
if src_ds is None:
print('Unable to open %s' % src_filename)
print("Unable to open %s" % src_filename)
sys.exit(1)
srcband = src_ds.GetRasterBand(src_band)
if mask == 'default':
if mask == "default":
maskband = srcband.GetMaskBand()
elif mask == 'none':
elif mask == "none":
maskband = None
else:
mask_ds = gdal.Open(mask)
@ -167,10 +170,16 @@ def main(argv):
if dst_filename is not None:
drv = gdal.GetDriverByName(frmt)
dst_ds = drv.Create(dst_filename, src_ds.RasterXSize, src_ds.RasterYSize, 1,
srcband.DataType, creation_options)
dst_ds = drv.Create(
dst_filename,
src_ds.RasterXSize,
src_ds.RasterYSize,
1,
srcband.DataType,
creation_options,
)
wkt = src_ds.GetProjection()
if wkt != '':
if wkt != "":
dst_ds.SetProjection(wkt)
gt = src_ds.GetGeoTransform(can_return_null=True)
if gt:
@ -200,10 +209,14 @@ def main(argv):
else:
prog_func = gdal.TermProgress_nocb
result = gdal.FillNodata(dstband, maskband,
max_distance, smoothing_iterations, options,
callback=prog_func)
result = gdal.FillNodata(
dstband,
maskband,
max_distance,
smoothing_iterations,
options,
callback=prog_func,
)
src_ds = None
dst_ds = None
@ -212,5 +225,5 @@ def main(argv):
return result
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main(sys.argv))
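A typical invocation of this helper from Python, with assumed filenames (equivalent to running the script with the same arguments on the command line):

# Fill nodata holes in band 1, searching up to 100 px, writing a new GTiff copy.
main(["gdal_fillnodata", "-md", "100", "-b", "1", "-of", "GTiff", "dsm.tif", "dsm_filled.tif"])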


@ -1,6 +1,7 @@
import cv2
import numpy as np
class ThresholdBlurChecker:
def __init__(self, threshold):
self.threshold = threshold
@ -15,6 +16,7 @@ class ThresholdBlurChecker:
var = cv2.Laplacian(image_bw, cv2.CV_64F).var()
return var, var < self.threshold
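A small sketch of the blur test above on synthetic frames; the threshold value is assumed, and IsBlur is the method used later by video2dataset.

import cv2
import numpy as np

checker = ThresholdBlurChecker(100)
flat = np.full((480, 640), 128, dtype=np.uint8)             # no detail at all
noisy = (np.random.rand(480, 640) * 255).astype(np.uint8)   # plenty of detail

print(checker.IsBlur(flat, 0))      # (0.0, True)  -> flagged as blurry
print(checker.IsBlur(noisy, 1)[1])  # False        -> kept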
class SimilarityChecker:
def __init__(self, threshold, max_features=500):
self.threshold = threshold
@ -28,11 +30,15 @@ class SimilarityChecker:
if self.last_image is None:
self.last_image = image_bw
self.last_image_id = id
self.last_image_features = cv2.goodFeaturesToTrack(image_bw, self.max_features, 0.01, 10)
self.last_image_features = cv2.goodFeaturesToTrack(
image_bw, self.max_features, 0.01, 10
)
return 0, False, None
# Detect features
features, status, _ = cv2.calcOpticalFlowPyrLK(self.last_image, image_bw, self.last_image_features, None)
features, status, _ = cv2.calcOpticalFlowPyrLK(
self.last_image, image_bw, self.last_image_features, None
)
# Filter out the "bad" features (i.e. those that are not tracked successfully)
good_features = features[status == 1]
@ -43,10 +49,12 @@ class SimilarityChecker:
res = distance < self.threshold
if (not res):
if not res:
self.last_image = image_bw
self.last_image_id = id
self.last_image_features = cv2.goodFeaturesToTrack(image_bw, self.max_features, 0.01, 10)
self.last_image_features = cv2.goodFeaturesToTrack(
image_bw, self.max_features, 0.01, 10
)
return distance, res, self.last_image_id
@ -67,7 +75,9 @@ class NaiveBlackFrameChecker:
class BlackFrameChecker:
def __init__(self, picture_black_ratio_th=0.98, pixel_black_th=0.30):
self.picture_black_ratio_th = picture_black_ratio_th if picture_black_ratio_th is not None else 0.98
self.picture_black_ratio_th = (
picture_black_ratio_th if picture_black_ratio_th is not None else 0.98
)
self.pixel_black_th = pixel_black_th if pixel_black_th is not None else 0.30
self.luminance_minimum_value = None
self.luminance_range_size = None
@ -93,7 +103,7 @@ class BlackFrameChecker:
frame_index = start_frame if start_frame is not None else 0
# Read and process frames from video file
while (cap.isOpened() and (end_frame is None or frame_index <= end_frame)):
while cap.isOpened() and (end_frame is None or frame_index <= end_frame):
ret, frame = cap.read()
if not ret:
@ -105,13 +115,20 @@ class BlackFrameChecker:
gray_frame_max = gray_frame.max()
# Update luminance range size and minimum value
self.luminance_range_size = max(self.luminance_range_size, gray_frame_max - gray_frame_min)
self.luminance_minimum_value = min(self.luminance_minimum_value, gray_frame_min)
self.luminance_range_size = max(
self.luminance_range_size, gray_frame_max - gray_frame_min
)
self.luminance_minimum_value = min(
self.luminance_minimum_value, gray_frame_min
)
frame_index += 1
# Calculate absolute threshold for considering a pixel "black"
self.absolute_threshold = self.luminance_minimum_value + self.pixel_black_th * self.luminance_range_size
self.absolute_threshold = (
self.luminance_minimum_value
+ self.pixel_black_th * self.luminance_range_size
)
# Close video file
cap.release()
@ -125,4 +142,4 @@ class BlackFrameChecker:
ratio_black_pixels = nb_black_pixels / (image_bw.shape[0] * image_bw.shape[1])
# Check if ratio of black pixels is above threshold
return ratio_black_pixels >= self.picture_black_ratio_th
return ratio_black_pixels >= self.picture_black_ratio_th
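A worked example of the absolute threshold above, with assumed luminance statistics:

luminance_minimum_value = 4    # darkest pixel observed in the video (assumed)
luminance_range_size = 200     # spread between darkest and brightest (assumed)
pixel_black_th = 0.30
print(luminance_minimum_value + pixel_black_th * luminance_range_size)  # 64.0
# Pixels darker than this value count towards the black-pixel ratio.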


@ -1,8 +1,8 @@
import argparse
import datetime
import os
class Parameters:
def __init__(self, args):


@ -6,11 +6,11 @@ import re
def match_single(regexes, line, dtype=int):
if isinstance(regexes, str):
regexes = [(regexes, dtype)]
for i in range(len(regexes)):
if isinstance(regexes[i], str):
regexes[i] = (regexes[i], dtype)
try:
for r, transform in regexes:
match = re.search(r, line)
@ -18,10 +18,11 @@ def match_single(regexes, line, dtype=int):
res = match.group(1)
return transform(res)
except Exception as e:
log.ODM_WARNING("Cannot parse SRT line \"%s\": %s", (line, str(e)))
log.ODM_WARNING('Cannot parse SRT line "%s": %s', (line, str(e)))
return None
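A quick sketch of match_single on an assumed SRT line:

line = "[iso : 3200] [shutter : 1/60.0] [fnum : 280]"
print(match_single("iso : (\d+)", line))                                    # 3200
print(match_single([("fnum : (\d+)", lambda v: float(v) / 100.0)], line))   # 2.8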
class SrtFileParser:
def __init__(self, filename):
self.filename = filename
@ -35,7 +36,10 @@ class SrtFileParser:
self.parse()
# check min and max
if timestamp < self.data[0]["start"] or timestamp > self.data[len(self.data) - 1]["end"]:
if (
timestamp < self.data[0]["start"]
or timestamp > self.data[len(self.data) - 1]["end"]
):
return None
for entry in self.data:
@ -47,30 +51,37 @@ class SrtFileParser:
def get_gps(self, timestamp):
if not self.data:
self.parse()
# Initialize on first call
prev_coords = None
if not self.gps_data:
i = 0
for d in self.data:
lat, lon, alt = d.get('latitude'), d.get('longitude'), d.get('altitude')
lat, lon, alt = d.get("latitude"), d.get("longitude"), d.get("altitude")
if alt is None:
alt = 0
tm = d.get('start')
tm = d.get("start")
if lat is not None and lon is not None:
if self.ll_to_utm is None:
self.ll_to_utm, self.utm_to_ll = location.utm_transformers_from_ll(lon, lat)
self.ll_to_utm, self.utm_to_ll = (
location.utm_transformers_from_ll(lon, lat)
)
coords = self.ll_to_utm.TransformPoint(lon, lat, alt)
# First or new (in X/Y only) or last
add = (not len(self.gps_data)) or (coords[0], coords[1]) != (self.gps_data[-1][1][0], self.gps_data[-1][1][1]) or i == len(self.data) - 1
add = (
(not len(self.gps_data))
or (coords[0], coords[1])
!= (self.gps_data[-1][1][0], self.gps_data[-1][1][1])
or i == len(self.data) - 1
)
if add:
self.gps_data.append((tm, coords))
i += 1
# No data available
if not len(self.gps_data) or self.gps_data[0][0] > timestamp:
return None
@ -94,7 +105,7 @@ class SrtFileParser:
gd_e = self.gps_data[end]
sx, sy, sz = gd_s[1]
ex, ey, ez = gd_e[1]
dt = (gd_e[0] - gd_s[0]).total_seconds()
if dt == 0:
return None
@ -105,9 +116,7 @@ class SrtFileParser:
t = (timestamp - gd_s[0]).total_seconds()
return self.utm_to_ll.TransformPoint(
sx + dx * t,
sy + dy * t,
sz + dz * t
sx + dx * t, sy + dy * t, sz + dz * t
)
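A worked example of the interpolation above, with two assumed UTM samples two seconds apart; dx, dy, dz are taken to be the per-second deltas between them.

sx, sy, sz = 368000.0, 3067000.0, 100.0   # sample A (assumed)
ex, ey, ez = 368010.0, 3067020.0, 102.0   # sample B, 2 s later (assumed)
dt = 2.0
dx, dy, dz = (ex - sx) / dt, (ey - sy) / dt, (ez - sz) / dt
t = 0.5  # seconds after sample A
print(sx + dx * t, sy + dy * t, sz + dz * t)  # 368002.5 3067005.0 100.5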
def parse(self):
@ -120,12 +129,12 @@ class SrtFileParser:
# <font size="36">SrtCnt : 1, DiffTime : 16ms
# 2023-01-06 18:56:48,380,821
# [iso : 3200] [shutter : 1/60.0] [fnum : 280] [ev : 0] [ct : 3925] [color_md : default] [focal_len : 240] [latitude: 0.000000] [longitude: 0.000000] [altitude: 0.000000] </font>
# </font>
# </font>
# DJI Mavic Mini
# 1
# 00:00:00,000 --> 00:00:01,000
# F/2.8, SS 206.14, ISO 150, EV 0, GPS (-82.6669, 27.7716, 10), D 2.80m, H 0.00m, H.S 0.00m/s, V.S 0.00m/s
# F/2.8, SS 206.14, ISO 150, EV 0, GPS (-82.6669, 27.7716, 10), D 2.80m, H 0.00m, H.S 0.00m/s, V.S 0.00m/s
# DJI Phantom4 RTK
# 36
@ -149,10 +158,9 @@ class SrtFileParser:
# DJI Unknown Model #2
# 1
# 00:00:00,000 --> 00:00:00,033
# No:1, F/2.8, SS 155.55, ISO 100, EV 0, M.M AE_METER_CENTER, A.T (126,109), Luma 106, Coef(1.000000, 1.000000, 1.000000), FaceDetectTag (0), FaceDetectRect (0,0,0,0,), Gain (1.000000,4096), Index (Ev:10085,Nf:0), E.M 0, AERect(n/a), AeAdvScene (GR:91.000000,GWR:1.000000,LLR:0.196683,RR:0.870551), LeCurve(64) (1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,128,), AfSpd 0/0, Af Rect(X:0, Y:0, W:0, H:0), AfPos 0, AwbMode WB_AUTOMATIC, Awb Gain(R:8206, G:4096, B:7058), ColorTemp 5241, B.L (-1020, -1020, -1020, -1020), IQS (39253, 208), ToneInfo (0,16,33,51,68,85,102,119,136,152,169,185,202,218,234,250,266,282,298,314,330,346,362,378,394,410,425,441,457,473,488,500,514,532,550,567,584,602,619,637,654,671,688,705,721,738,754,770,786,801,817,832,847,862,877,892,907,922,937,951,966,981,995,1011,0,64,134,205,274,342,410,477,544,611,677,743,809,873,937,1002,1066,1130,1194,1258,1322,1385,1449,1512,1576,1640,1703,1766,1829,1893,1952,2003,2058,2130,2201,2270,2339,2410,2479,2548,2616,2685,2753,2820,2886,2952,3016,3080,3144,3207,3270,3329,3391,3451,3511,3571,3630,3688,3748,3807,3866,3924,3983,4044,), Isp Info (PIPE 1,ADJ 0,De 0) GPS (-2.5927, 52.0035, 15), D 0.61m, H 1.00m, H.S 0.00m/s, V.S 0.00m/s
# No:1, F/2.8, SS 155.55, ISO 100, EV 0, M.M AE_METER_CENTER, A.T (126,109), Luma 106, Coef(1.000000, 1.000000, 1.000000), FaceDetectTag (0), FaceDetectRect (0,0,0,0,), Gain (1.000000,4096), Index (Ev:10085,Nf:0), E.M 0, AERect(n/a), AeAdvScene (GR:91.000000,GWR:1.000000,LLR:0.196683,RR:0.870551), LeCurve(64) (1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,1024,128,), AfSpd 0/0, Af Rect(X:0, Y:0, W:0, H:0), AfPos 0, AwbMode WB_AUTOMATIC, Awb Gain(R:8206, G:4096, B:7058), ColorTemp 5241, B.L (-1020, -1020, -1020, -1020), IQS (39253, 208), ToneInfo (0,16,33,51,68,85,102,119,136,152,169,185,202,218,234,250,266,282,298,314,330,346,362,378,394,410,425,441,457,473,488,500,514,532,550,567,584,602,619,637,654,671,688,705,721,738,754,770,786,801,817,832,847,862,877,892,907,922,937,951,966,981,995,1011,0,64,134,205,274,342,410,477,544,611,677,743,809,873,937,1002,1066,1130,1194,1258,1322,1385,1449,1512,1576,1640,1703,1766,1829,1893,1952,2003,2058,2130,2201,2270,2339,2410,2479,2548,2616,2685,2753,2820,2886,2952,3016,3080,3144,3207,3270,3329,3391,3451,3511,3571,3630,3688,3748,3807,3866,3924,3983,4044,), Isp Info (PIPE 1,ADJ 0,De 0) GPS (-2.5927, 52.0035, 15), D 0.61m, H 1.00m, H.S 0.00m/s, V.S 0.00m/s
with open(self.filename, 'r') as f:
with open(self.filename, "r") as f:
iso = None
shutter = None
@ -166,21 +174,23 @@ class SrtFileParser:
for line in f:
# Remove html tags, spaces
line = re.sub('<[^<]+?>', '', line).strip()
line = re.sub("<[^<]+?>", "", line).strip()
if not line:
if start is not None:
self.data.append({
"start": start,
"end": end,
"iso": iso,
"shutter": shutter,
"fnum": fnum,
"focal_len": focal_len,
"latitude": latitude,
"longitude": longitude,
"altitude": altitude
})
self.data.append(
{
"start": start,
"end": end,
"iso": iso,
"shutter": shutter,
"fnum": fnum,
"focal_len": focal_len,
"latitude": latitude,
"longitude": longitude,
"altitude": altitude,
}
)
iso = None
shutter = None
@ -196,45 +206,91 @@ class SrtFileParser:
continue
# Search this "00:00:00,000 --> 00:00:00,016"
match = re.search("(\d{2}:\d{2}:\d{2},\d+) --> (\d{2}:\d{2}:\d{2},\d+)", line)
match = re.search(
"(\d{2}:\d{2}:\d{2},\d+) --> (\d{2}:\d{2}:\d{2},\d+)", line
)
if match:
start = datetime.strptime(match.group(1), "%H:%M:%S,%f")
end = datetime.strptime(match.group(2), "%H:%M:%S,%f")
iso = match_single([
"iso : (\d+)",
"ISO (\d+)"
], line)
iso = match_single(["iso : (\d+)", "ISO (\d+)"], line)
shutter = match_single([
"shutter : \d+/(\d+\.?\d*)"
"SS (\d+\.?\d*)"
], line)
shutter = match_single(
["shutter : \d+/(\d+\.?\d*)" "SS (\d+\.?\d*)"], line
)
fnum = match_single([
("fnum : (\d+)", lambda v: float(v)/100.0),
("F/([\d\.]+)", float),
], line)
fnum = match_single(
[
("fnum : (\d+)", lambda v: float(v) / 100.0),
("F/([\d\.]+)", float),
],
line,
)
focal_len = match_single("focal_len : (\d+)", line)
latitude = match_single([
("latitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
("latitude : ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
("GPS \([\d\.\-]+,? ([\d\.\-]+),? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None),
("RTK \([-+]?\d+\.\d+, (-?\d+\.\d+), -?\d+\)", lambda v: float(v) if v != 0 else None),
], line)
longitude = match_single([
("longitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
("longtitude : ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
("GPS \(([\d\.\-]+),? [\d\.\-]+,? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None),
("RTK \((-?\d+\.\d+), [-+]?\d+\.\d+, -?\d+\)", lambda v: float(v) if v != 0 else None),
], line)
altitude = match_single([
("altitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
("GPS \([\d\.\-]+,? [\d\.\-]+,? ([\d\.\-]+)\)", lambda v: float(v) if v != 0 else None),
("RTK \([-+]?\d+\.\d+, [-+]?\d+\.\d+, (-?\d+)\)", lambda v: float(v) if v != 0 else None),
("abs_alt: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
], line)
latitude = match_single(
[
(
"latitude: ([\d\.\-]+)",
lambda v: float(v) if v != 0 else None,
),
(
"latitude : ([\d\.\-]+)",
lambda v: float(v) if v != 0 else None,
),
(
"GPS \([\d\.\-]+,? ([\d\.\-]+),? [\d\.\-]+\)",
lambda v: float(v) if v != 0 else None,
),
(
"RTK \([-+]?\d+\.\d+, (-?\d+\.\d+), -?\d+\)",
lambda v: float(v) if v != 0 else None,
),
],
line,
)
longitude = match_single(
[
(
"longitude: ([\d\.\-]+)",
lambda v: float(v) if v != 0 else None,
),
(
"longtitude : ([\d\.\-]+)",
lambda v: float(v) if v != 0 else None,
),
(
"GPS \(([\d\.\-]+),? [\d\.\-]+,? [\d\.\-]+\)",
lambda v: float(v) if v != 0 else None,
),
(
"RTK \((-?\d+\.\d+), [-+]?\d+\.\d+, -?\d+\)",
lambda v: float(v) if v != 0 else None,
),
],
line,
)
altitude = match_single(
[
(
"altitude: ([\d\.\-]+)",
lambda v: float(v) if v != 0 else None,
),
(
"GPS \([\d\.\-]+,? [\d\.\-]+,? ([\d\.\-]+)\)",
lambda v: float(v) if v != 0 else None,
),
(
"RTK \([-+]?\d+\.\d+, [-+]?\d+\.\d+, (-?\d+)\)",
lambda v: float(v) if v != 0 else None,
),
(
"abs_alt: ([\d\.\-]+)",
lambda v: float(v) if v != 0 else None,
),
],
line,
)


@ -12,33 +12,54 @@ import piexif
from opendm import log
from opendm.video.srtparser import SrtFileParser
from opendm.video.parameters import Parameters
from opendm.video.checkers import BlackFrameChecker, SimilarityChecker, ThresholdBlurChecker
from opendm.video.checkers import (
BlackFrameChecker,
SimilarityChecker,
ThresholdBlurChecker,
)
class Video2Dataset:
def __init__(self, parameters : Parameters):
def __init__(self, parameters: Parameters):
self.parameters = parameters
self.blur_checker = ThresholdBlurChecker(parameters.blur_threshold) if parameters.blur_threshold is not None else None
self.similarity_checker = SimilarityChecker(parameters.distance_threshold) if parameters.distance_threshold is not None else None
self.black_checker = BlackFrameChecker(parameters.black_ratio_threshold, parameters.pixel_black_threshold) if parameters.black_ratio_threshold is not None or parameters.pixel_black_threshold is not None else None
self.blur_checker = (
ThresholdBlurChecker(parameters.blur_threshold)
if parameters.blur_threshold is not None
else None
)
self.similarity_checker = (
SimilarityChecker(parameters.distance_threshold)
if parameters.distance_threshold is not None
else None
)
self.black_checker = (
BlackFrameChecker(
parameters.black_ratio_threshold, parameters.pixel_black_threshold
)
if parameters.black_ratio_threshold is not None
or parameters.pixel_black_threshold is not None
else None
)
self.frame_index = parameters.start
self.f = None
def ProcessVideo(self):
self.date_now = None
start = time.time()
if (self.parameters.stats_file is not None):
if self.parameters.stats_file is not None:
self.f = open(self.parameters.stats_file, "w")
self.f.write("global_idx;file_name;frame_index;blur_score;is_blurry;is_black;last_frame_index;similarity_score;is_similar;written\n")
self.f.write(
"global_idx;file_name;frame_index;blur_score;is_blurry;is_black;last_frame_index;similarity_score;is_similar;written\n"
)
self.global_idx = 0
output_file_paths = []
# foreach input file
for input_file in self.parameters.input:
# get file name
@ -52,12 +73,16 @@ class Video2Dataset:
# Set pseudo start time
if self.date_now is None:
try:
self.date_now = datetime.datetime.fromtimestamp(os.path.getmtime(input_file))
self.date_now = datetime.datetime.fromtimestamp(
os.path.getmtime(input_file)
)
except:
self.date_now = datetime.datetime.now()
else:
self.date_now += datetime.timedelta(seconds=video_info.total_frames / video_info.frame_rate)
self.date_now += datetime.timedelta(
seconds=video_info.total_frames / video_info.frame_rate
)
log.ODM_INFO("Use pseudo start time: %s" % self.date_now)
if self.parameters.use_srt:
@ -80,46 +105,79 @@ class Video2Dataset:
else:
srt_parser = None
if (self.black_checker is not None and self.black_checker.NeedPreProcess()):
if self.black_checker is not None and self.black_checker.NeedPreProcess():
start2 = time.time()
log.ODM_INFO("Preprocessing for black frame checker... this might take a bit")
self.black_checker.PreProcess(input_file, self.parameters.start, self.parameters.end)
log.ODM_INFO(
"Preprocessing for black frame checker... this might take a bit"
)
self.black_checker.PreProcess(
input_file, self.parameters.start, self.parameters.end
)
end = time.time()
log.ODM_INFO("Preprocessing time: {:.2f}s".format(end - start2))
log.ODM_INFO("Calculated luminance_range_size is {}".format(self.black_checker.luminance_range_size))
log.ODM_INFO("Calculated luminance_minimum_value is {}".format(self.black_checker.luminance_minimum_value))
log.ODM_INFO("Calculated absolute_threshold is {}".format(self.black_checker.absolute_threshold))
log.ODM_INFO(
"Calculated luminance_range_size is {}".format(
self.black_checker.luminance_range_size
)
)
log.ODM_INFO(
"Calculated luminance_minimum_value is {}".format(
self.black_checker.luminance_minimum_value
)
)
log.ODM_INFO(
"Calculated absolute_threshold is {}".format(
self.black_checker.absolute_threshold
)
)
# open video file
cap = cv2.VideoCapture(input_file)
if (not cap.isOpened()):
if not cap.isOpened():
log.ODM_INFO("Error opening video stream or file")
return
if (self.parameters.start is not None):
if self.parameters.start is not None:
cap.set(cv2.CAP_PROP_POS_FRAMES, self.parameters.start)
self.frame_index = self.parameters.start
start_frame = self.parameters.start
else:
start_frame = 0
frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else video_info.total_frames - start_frame
frames_to_process = (
self.parameters.end - start_frame + 1
if (self.parameters.end is not None)
else video_info.total_frames - start_frame
)
progress = 0
while (cap.isOpened()):
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if (self.parameters.end is not None and self.frame_index > self.parameters.end):
if (
self.parameters.end is not None
and self.frame_index > self.parameters.end
):
break
# Calculate progress percentage
prev_progress = progress
progress = floor((self.frame_index - start_frame + 1) / frames_to_process * 100)
progress = floor(
(self.frame_index - start_frame + 1) / frames_to_process * 100
)
if progress != prev_progress:
print("[{}][{:3d}%] Processing frame {}/{}: ".format(file_name, progress, self.frame_index - start_frame + 1, frames_to_process), end="\r")
print(
"[{}][{:3d}%] Processing frame {}/{}: ".format(
file_name,
progress,
self.frame_index - start_frame + 1,
frames_to_process,
),
end="\r",
)
stats = self.ProcessFrame(frame, video_info, srt_parser)
@ -135,15 +193,22 @@ class Video2Dataset:
if self.f is not None:
self.f.close()
if self.parameters.limit is not None and self.parameters.limit > 0 and self.global_idx >= self.parameters.limit:
log.ODM_INFO("Limit of {} frames reached, trimming dataset".format(self.parameters.limit))
if (
self.parameters.limit is not None
and self.parameters.limit > 0
and self.global_idx >= self.parameters.limit
):
log.ODM_INFO(
"Limit of {} frames reached, trimming dataset".format(
self.parameters.limit
)
)
output_file_paths = limit_files(output_file_paths, self.parameters.limit)
end = time.time()
log.ODM_INFO("Total processing time: {:.2f}s".format(end - start))
return output_file_paths
def ProcessFrame(self, frame, video_info, srt_parser):
res = {"frame_index": self.frame_index, "global_idx": self.global_idx}
@ -155,9 +220,13 @@ class Video2Dataset:
if resolution < w or resolution < h:
m = max(w, h)
factor = resolution / m
frame_bw = cv2.resize(frame_bw, (int(ceil(w * factor)), int(ceil(h * factor))), interpolation=cv2.INTER_NEAREST)
frame_bw = cv2.resize(
frame_bw,
(int(ceil(w * factor)), int(ceil(h * factor))),
interpolation=cv2.INTER_NEAREST,
)
if (self.blur_checker is not None):
if self.blur_checker is not None:
blur_score, is_blurry = self.blur_checker.IsBlur(frame_bw, self.frame_index)
res["blur_score"] = blur_score
res["is_blurry"] = is_blurry
@ -167,7 +236,7 @@ class Video2Dataset:
self.frame_index += 1
return res
if (self.black_checker is not None):
if self.black_checker is not None:
is_black = self.black_checker.IsBlack(frame_bw, self.frame_index)
res["is_black"] = is_black
@ -176,8 +245,10 @@ class Video2Dataset:
self.frame_index += 1
return res
if (self.similarity_checker is not None):
similarity_score, is_similar, last_frame_index = self.similarity_checker.IsSimilar(frame_bw, self.frame_index)
if self.similarity_checker is not None:
similarity_score, is_similar, last_frame_index = (
self.similarity_checker.IsSimilar(frame_bw, self.frame_index)
)
res["similarity_score"] = similarity_score
res["is_similar"] = is_similar
res["last_frame_index"] = last_frame_index
@ -202,24 +273,35 @@ class Video2Dataset:
if max_dim < w or max_dim < h:
m = max(w, h)
factor = max_dim / m
frame = cv2.resize(frame, (int(ceil(w * factor)), int(ceil(h * factor))), interpolation=cv2.INTER_AREA)
frame = cv2.resize(
frame,
(int(ceil(w * factor)), int(ceil(h * factor))),
interpolation=cv2.INTER_AREA,
)
path = os.path.join(self.parameters.output,
"{}_{}_{}.{}".format(video_info.basename, self.global_idx, self.frame_index, self.parameters.frame_format))
path = os.path.join(
self.parameters.output,
"{}_{}_{}.{}".format(
video_info.basename,
self.global_idx,
self.frame_index,
self.parameters.frame_format,
),
)
_, buf = cv2.imencode('.' + self.parameters.frame_format, frame)
_, buf = cv2.imencode("." + self.parameters.frame_format, frame)
delta = datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate))
elapsed_time = datetime.datetime(1900, 1, 1) + delta
img = Image.open(io.BytesIO(buf))
entry = gps_coords = None
if srt_parser is not None:
entry = srt_parser.get_entry(elapsed_time)
gps_coords = srt_parser.get_gps(elapsed_time)
exif_time = (elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1)))
exif_time = elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1))
elapsed_time_str = exif_time.strftime("%Y:%m:%d %H:%M:%S")
subsec_time_str = exif_time.strftime("%f")
@ -231,8 +313,12 @@ class Video2Dataset:
piexif.ImageIFD.DateTime: elapsed_time_str,
piexif.ImageIFD.XResolution: (frame.shape[1], 1),
piexif.ImageIFD.YResolution: (frame.shape[0], 1),
piexif.ImageIFD.Make: "DJI" if video_info.basename.lower().startswith("dji") else "Unknown",
piexif.ImageIFD.Model: "Unknown"
piexif.ImageIFD.Make: (
"DJI"
if video_info.basename.lower().startswith("dji")
else "Unknown"
),
piexif.ImageIFD.Model: "Unknown",
},
"Exif": {
piexif.ExifIFD.DateTimeOriginal: elapsed_time_str,
@ -240,39 +326,52 @@ class Video2Dataset:
piexif.ExifIFD.SubSecTime: subsec_time_str,
piexif.ExifIFD.PixelXDimension: frame.shape[1],
piexif.ExifIFD.PixelYDimension: frame.shape[0],
}}
},
}
if entry is not None:
if entry["shutter"] is not None:
exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (1, int(entry["shutter"]))
exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (
1,
int(entry["shutter"]),
)
if entry["focal_len"] is not None:
exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (entry["focal_len"], 100)
exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (
entry["focal_len"],
100,
)
if entry["fnum"] is not None:
exif_dict["Exif"][piexif.ExifIFD.FNumber] = float_to_rational(entry["fnum"])
exif_dict["Exif"][piexif.ExifIFD.FNumber] = float_to_rational(
entry["fnum"]
)
if entry["iso"] is not None:
exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = entry["iso"]
if gps_coords is not None:
exif_dict["GPS"] = get_gps_location(elapsed_time, gps_coords[1], gps_coords[0], gps_coords[2])
exif_dict["GPS"] = get_gps_location(
elapsed_time, gps_coords[1], gps_coords[0], gps_coords[2]
)
exif_bytes = piexif.dump(exif_dict)
img.save(path, exif=exif_bytes, quality=95)
return path
def WriteStats(self, input_file, stats):
self.f.write("{};{};{};{};{};{};{};{};{};{}\n".format(
stats["global_idx"],
input_file,
stats["frame_index"],
stats["blur_score"] if "blur_score" in stats else "",
stats["is_blurry"] if "is_blurry" in stats else "",
stats["is_black"] if "is_black" in stats else "",
stats["last_frame_index"] if "last_frame_index" in stats else "",
stats["similarity_score"] if "similarity_score" in stats else "",
stats["is_similar"] if "is_similar" in stats else "",
stats["written"] if "written" in stats else "").replace(".", ","))
self.f.write(
"{};{};{};{};{};{};{};{};{};{}\n".format(
stats["global_idx"],
input_file,
stats["frame_index"],
stats["blur_score"] if "blur_score" in stats else "",
stats["is_blurry"] if "is_blurry" in stats else "",
stats["is_black"] if "is_black" in stats else "",
stats["last_frame_index"] if "last_frame_index" in stats else "",
stats["similarity_score"] if "similarity_score" in stats else "",
stats["is_similar"] if "is_similar" in stats else "",
stats["written"] if "written" in stats else "",
).replace(".", ",")
)
def get_video_info(input_file):
@ -285,16 +384,20 @@ def get_video_info(input_file):
video.release()
return collections.namedtuple("VideoInfo", ["total_frames", "frame_rate", "basename"])(total_frames, frame_rate, basename)
return collections.namedtuple(
"VideoInfo", ["total_frames", "frame_rate", "basename"]
)(total_frames, frame_rate, basename)
def float_to_rational(f):
f = Fraction(f).limit_denominator()
return (f.numerator, f.denominator)
def limit_files(paths, limit):
if len(paths) <= limit:
return paths
to_keep = []
all_idxes = np.arange(0, len(paths))
keep_idxes = np.linspace(0, len(paths) - 1, limit, dtype=int)
@ -308,6 +411,7 @@ def limit_files(paths, limit):
return to_keep
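The trimming keeps evenly spaced frames; a quick sketch with ten assumed paths and a limit of four:

import numpy as np

paths = ["frame_%02d.jpg" % i for i in range(10)]
keep_idxes = np.linspace(0, len(paths) - 1, 4, dtype=int)
print(keep_idxes.tolist())  # [0, 3, 6, 9] -- these indexes are kept, the rest dropped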
def to_deg(value, loc):
"""convert decimal coordinates into degrees, munutes and seconds tuple
Keyword arguments: value is float gps-value, loc is direction list ["S", "N"] or ["W", "E"]
@ -320,23 +424,32 @@ def to_deg(value, loc):
else:
loc_value = ""
abs_value = abs(value)
deg = int(abs_value)
t1 = (abs_value-deg)*60
deg = int(abs_value)
t1 = (abs_value - deg) * 60
min = int(t1)
sec = round((t1 - min)* 60, 5)
sec = round((t1 - min) * 60, 5)
return (deg, min, sec, loc_value)
def get_gps_location(elapsed_time, lat, lng, altitude):
lat_deg = to_deg(lat, ["S", "N"])
lng_deg = to_deg(lng, ["W", "E"])
exiv_lat = (float_to_rational(lat_deg[0]), float_to_rational(lat_deg[1]), float_to_rational(lat_deg[2]))
exiv_lng = (float_to_rational(lng_deg[0]), float_to_rational(lng_deg[1]), float_to_rational(lng_deg[2]))
exiv_lat = (
float_to_rational(lat_deg[0]),
float_to_rational(lat_deg[1]),
float_to_rational(lat_deg[2]),
)
exiv_lng = (
float_to_rational(lng_deg[0]),
float_to_rational(lng_deg[1]),
float_to_rational(lng_deg[2]),
)
gps_ifd = {
piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0),
piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d')
piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime("%Y:%m:%d"),
}
if lat is not None and lng is not None:
@ -348,4 +461,4 @@ def get_gps_location(elapsed_time, lat, lng, altitude):
gps_ifd[piexif.GPSIFD.GPSAltitudeRef] = 0
gps_ifd[piexif.GPSIFD.GPSAltitude] = float_to_rational(round(altitude))
return gps_ifd
return gps_ifd
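A worked example of the conversion helpers above (the coordinate is assumed, not from a real flight):

print(to_deg(27.7716, ["S", "N"]))   # (27, 46, 17.76, 'N')
print(float_to_rational(2.8))        # (14, 5) -- the rational form EXIF expects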

run.py

@ -2,8 +2,11 @@
# Basic check
import sys
if sys.version_info.major < 3:
print("Ups! ODM needs to run with Python 3. It seems you launched it with Python 2. Try using: python3 run.py ... ")
print(
"Ups! ODM needs to run with Python 3. It seems you launched it with Python 2. Try using: python3 run.py ... "
)
sys.exit(1)
import os
@ -17,6 +20,7 @@ from opendm.arghelpers import args_to_dict, save_opts, compare_args, find_rerun_
from stages.odm_app import ODMApp
def odm_version():
try:
with open("VERSION") as f:
@ -24,41 +28,48 @@ def odm_version():
except:
return "?"
if __name__ == '__main__':
if __name__ == "__main__":
args = config.config()
log.ODM_INFO('Initializing ODM %s - %s' % (odm_version(), system.now()))
log.ODM_INFO("Initializing ODM %s - %s" % (odm_version(), system.now()))
progressbc.set_project_name(args.name)
args.project_path = os.path.join(args.project_path, args.name)
if not io.dir_exists(args.project_path):
log.ODM_ERROR('Directory %s does not exist.' % args.name)
log.ODM_ERROR("Directory %s does not exist." % args.name)
exit(1)
opts_json = os.path.join(args.project_path, "options.json")
auto_rerun_stage, opts_diff = find_rerun_stage(opts_json, args, config.rerun_stages, config.processopts)
auto_rerun_stage, opts_diff = find_rerun_stage(
opts_json, args, config.rerun_stages, config.processopts
)
if auto_rerun_stage is not None and len(auto_rerun_stage) > 0:
log.ODM_INFO("Rerunning from: %s" % auto_rerun_stage[0])
args.rerun_from = auto_rerun_stage
# Print args
args_dict = args_to_dict(args)
log.ODM_INFO('==============')
log.ODM_INFO("==============")
for k in args_dict.keys():
log.ODM_INFO('%s: %s%s' % (k, args_dict[k], ' [changed]' if k in opts_diff else ''))
log.ODM_INFO('==============')
log.ODM_INFO(
"%s: %s%s" % (k, args_dict[k], " [changed]" if k in opts_diff else "")
)
log.ODM_INFO("==============")
# If user asks to rerun everything, delete all of the existing progress directories.
if args.rerun_all:
log.ODM_INFO("Rerun all -- Removing old data")
for d in [os.path.join(args.project_path, p) for p in get_processing_results_paths()] + [
os.path.join(args.project_path, "odm_meshing"),
os.path.join(args.project_path, "opensfm"),
os.path.join(args.project_path, "odm_texturing_25d"),
os.path.join(args.project_path, "odm_filterpoints"),
os.path.join(args.project_path, "submodels")]:
for d in [
os.path.join(args.project_path, p) for p in get_processing_results_paths()
] + [
os.path.join(args.project_path, "odm_meshing"),
os.path.join(args.project_path, "opensfm"),
os.path.join(args.project_path, "odm_texturing_25d"),
os.path.join(args.project_path, "odm_filterpoints"),
os.path.join(args.project_path, "submodels"),
]:
rm_r(d)
app = ODMApp(args)
@ -66,50 +77,50 @@ if __name__ == '__main__':
if retcode == 0:
save_opts(opts_json, args)
# Do not show ASCII art for local submodels runs
if retcode == 0 and not "submodels" in args.project_path:
log.ODM_INFO('MMMMMMMMMMMNNNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNNNMMMMMMMMMMM')
log.ODM_INFO('MMMMMMdo:..---../sNMMMMMMMMMMMMMMMMMMMMMMMMMMNs/..---..:odMMMMMM')
log.ODM_INFO('MMMMy-.odNMMMMMNy/`/mMMMMMMMMMMMMMMMMMMMMMMm/`/hNMMMMMNdo.-yMMMM')
log.ODM_INFO('MMN/`sMMMMMMMMMNNMm/`yMMMMMMMMMMMMMMMMMMMMy`/mMNNMMMMMMMMNs`/MMM')
log.ODM_INFO('MM/ hMMMMMMMMNs.+MMM/ dMMMMMMMMMMMMMMMMMMh +MMM+.sNMMMMMMMMh +MM')
log.ODM_INFO('MN /MMMMMMNo/./mMMMMN :MMMMMMMMMMMMMMMMMM: NMMMMm/./oNMMMMMM: NM')
log.ODM_INFO('Mm +MMMMMN+ `/MMMMMMM`-MMMMMMMMMMMMMMMMMM-`MMMMMMM:` oNMMMMM+ mM')
log.ODM_INFO('MM..NMMNs./mNMMMMMMMy sMMMMMMMMMMMMMMMMMMo hMMMMMMMNm/.sNMMN`-MM')
log.ODM_INFO('MMd`:mMNomMMMMMMMMMy`:MMMMMMMNmmmmNMMMMMMN:`hMMMMMMMMMdoNMm-`dMM')
log.ODM_INFO('MMMm:.omMMMMMMMMNh/ sdmmho/.`..`-``-/sddh+ /hNMMMMMMMMdo.:mMMM')
log.ODM_INFO('MMMMMd+--/osss+:-:/` ```:- .ym+ hmo``:-` `+:-:ossso/-:+dMMMMM')
log.ODM_INFO('MMMMMMMNmhysosydmNMo /ds`/NMM+ hMMd..dh. sMNmdysosyhmNMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMMMMMs .:-:``hmmN+ yNmds -:.:`-NMMMMMMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMMMMN.-mNm- //:::. -:://: +mMd`-NMMMMMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMMMM+ dMMN -MMNNN+ yNNNMN :MMMs sMMMMMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMMMM`.mmmy /mmmmm/ smmmmm``mmmh :MMMMMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMMMM``:::- ./////. -:::::` :::: -MMMMMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMMMM:`mNNd /NNNNN+ hNNNNN .NNNy +MMMMMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMMMMd`/MMM.`ys+//. -/+oso +MMN.`mMMMMMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMMMMMy /o:- `oyhd/ shys+ `-:s-`hMMMMMMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMNmdhhhdmNMMM` +d+ sMMM+ hMMN:`hh- sMMNmdhhhdmNMMMMMMMM')
log.ODM_INFO('MMMMMms:::/++//::+ho .+- /dM+ hNh- +/` -h+:://++/::/smMMMMM')
log.ODM_INFO('MMMN+./hmMMMMMMNds- ./oso:.``:. :-``.:os+- -sdNMMMMMMmy:.oNMMM')
log.ODM_INFO('MMm-.hMNhNMMMMMMMMNo`/MMMMMNdhyyyyhhdNMMMM+`oNMMMMMMMMNhNMh.-mMM')
log.ODM_INFO('MM:`mMMN/-sNNMMMMMMMo yMMMMMMMMMMMMMMMMMMy sMMMMMMMNNs-/NMMm`:MM')
log.ODM_INFO('Mm /MMMMMd/.-oMMMMMMN :MMMMMMMMMMMMMMMMMM-`MMMMMMMo-./dMMMMM/ NM')
log.ODM_INFO('Mm /MMMMMMm:-`sNMMMMN :MMMMMMMMMMMMMMMMMM-`MMMMMNs`-/NMMMMMM/ NM')
log.ODM_INFO('MM:`mMMMMMMMMd/-sMMMo yMMMMMMMMMMMMMMMMMMy sMMMs-/dMMMMMMMMd`:MM')
log.ODM_INFO('MMm-.hMMMMMMMMMdhMNo`+MMMMMMMMMMMMMMMMMMMM+`oNMhdMMMMMMMMMh.-mMM')
log.ODM_INFO('MMMNo./hmNMMMMMNms--yMMMMMMMMMMMMMMMMMMMMMMy--smNMMMMMNmy/.oNMMM')
log.ODM_INFO('MMMMMms:-:/+++/:-+hMMMMMMMMMMMMMMMMMMMMMMMMMNh+-:/+++/:-:smMMMMM')
log.ODM_INFO('MMMMMMMMNdhhyhdmMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMmdhyhhmNMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMNNNNNMMMMMMNNNNNNMMMMMMMMNNMMMMMMMNNMMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMh/-...-+dMMMm......:+hMMMMs../MMMMMo..sMMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMM/ /yhy- sMMm -hhy/ :NMM+ oMMMy /MMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMy /MMMMN` NMm /MMMMo +MM: .` yMd``` :MMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMM+ sMMMMM: hMm /MMMMd -MM- /s `h.`d- -MMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMs +MMMMM. mMm /MMMMy /MM. +M/ yM: `MMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMN- smNm/ +MMm :NNdo` .mMM` oMM+/yMM/ MMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMNo- `:yMMMm `:sNMMM` sMMMMMMM+ NMMMMMMMMMMM')
log.ODM_INFO('MMMMMMMMMMMMMMMNmmNMMMMMMMNmmmmNMMMMMMMNNMMMMMMMMMNNMMMMMMMMMMMM')
log.ODM_INFO('ODM app finished - %s' % system.now())
log.ODM_INFO("MMMMMMMMMMMNNNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNNNMMMMMMMMMMM")
log.ODM_INFO("MMMMMMdo:..---../sNMMMMMMMMMMMMMMMMMMMMMMMMMMNs/..---..:odMMMMMM")
log.ODM_INFO("MMMMy-.odNMMMMMNy/`/mMMMMMMMMMMMMMMMMMMMMMMm/`/hNMMMMMNdo.-yMMMM")
log.ODM_INFO("MMN/`sMMMMMMMMMNNMm/`yMMMMMMMMMMMMMMMMMMMMy`/mMNNMMMMMMMMNs`/MMM")
log.ODM_INFO("MM/ hMMMMMMMMNs.+MMM/ dMMMMMMMMMMMMMMMMMMh +MMM+.sNMMMMMMMMh +MM")
log.ODM_INFO("MN /MMMMMMNo/./mMMMMN :MMMMMMMMMMMMMMMMMM: NMMMMm/./oNMMMMMM: NM")
log.ODM_INFO("Mm +MMMMMN+ `/MMMMMMM`-MMMMMMMMMMMMMMMMMM-`MMMMMMM:` oNMMMMM+ mM")
log.ODM_INFO("MM..NMMNs./mNMMMMMMMy sMMMMMMMMMMMMMMMMMMo hMMMMMMMNm/.sNMMN`-MM")
log.ODM_INFO("MMd`:mMNomMMMMMMMMMy`:MMMMMMMNmmmmNMMMMMMN:`hMMMMMMMMMdoNMm-`dMM")
log.ODM_INFO("MMMm:.omMMMMMMMMNh/ sdmmho/.`..`-``-/sddh+ /hNMMMMMMMMdo.:mMMM")
log.ODM_INFO("MMMMMd+--/osss+:-:/` ```:- .ym+ hmo``:-` `+:-:ossso/-:+dMMMMM")
log.ODM_INFO("MMMMMMMNmhysosydmNMo /ds`/NMM+ hMMd..dh. sMNmdysosyhmNMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMMMMMs .:-:``hmmN+ yNmds -:.:`-NMMMMMMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMMMMN.-mNm- //:::. -:://: +mMd`-NMMMMMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMMMM+ dMMN -MMNNN+ yNNNMN :MMMs sMMMMMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMMMM`.mmmy /mmmmm/ smmmmm``mmmh :MMMMMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMMMM``:::- ./////. -:::::` :::: -MMMMMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMMMM:`mNNd /NNNNN+ hNNNNN .NNNy +MMMMMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMMMMd`/MMM.`ys+//. -/+oso +MMN.`mMMMMMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMMMMMy /o:- `oyhd/ shys+ `-:s-`hMMMMMMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMNmdhhhdmNMMM` +d+ sMMM+ hMMN:`hh- sMMNmdhhhdmNMMMMMMMM")
log.ODM_INFO("MMMMMms:::/++//::+ho .+- /dM+ hNh- +/` -h+:://++/::/smMMMMM")
log.ODM_INFO("MMMN+./hmMMMMMMNds- ./oso:.``:. :-``.:os+- -sdNMMMMMMmy:.oNMMM")
log.ODM_INFO("MMm-.hMNhNMMMMMMMMNo`/MMMMMNdhyyyyhhdNMMMM+`oNMMMMMMMMNhNMh.-mMM")
log.ODM_INFO("MM:`mMMN/-sNNMMMMMMMo yMMMMMMMMMMMMMMMMMMy sMMMMMMMNNs-/NMMm`:MM")
log.ODM_INFO("Mm /MMMMMd/.-oMMMMMMN :MMMMMMMMMMMMMMMMMM-`MMMMMMMo-./dMMMMM/ NM")
log.ODM_INFO("Mm /MMMMMMm:-`sNMMMMN :MMMMMMMMMMMMMMMMMM-`MMMMMNs`-/NMMMMMM/ NM")
log.ODM_INFO("MM:`mMMMMMMMMd/-sMMMo yMMMMMMMMMMMMMMMMMMy sMMMs-/dMMMMMMMMd`:MM")
log.ODM_INFO("MMm-.hMMMMMMMMMdhMNo`+MMMMMMMMMMMMMMMMMMMM+`oNMhdMMMMMMMMMh.-mMM")
log.ODM_INFO("MMMNo./hmNMMMMMNms--yMMMMMMMMMMMMMMMMMMMMMMy--smNMMMMMNmy/.oNMMM")
log.ODM_INFO("MMMMMms:-:/+++/:-+hMMMMMMMMMMMMMMMMMMMMMMMMMNh+-:/+++/:-:smMMMMM")
log.ODM_INFO("MMMMMMMMNdhhyhdmMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMmdhyhhmNMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMNNNNNMMMMMMNNNNNNMMMMMMMMNNMMMMMMMNNMMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMh/-...-+dMMMm......:+hMMMMs../MMMMMo..sMMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMM/ /yhy- sMMm -hhy/ :NMM+ oMMMy /MMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMy /MMMMN` NMm /MMMMo +MM: .` yMd``` :MMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMM+ sMMMMM: hMm /MMMMd -MM- /s `h.`d- -MMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMs +MMMMM. mMm /MMMMy /MM. +M/ yM: `MMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMN- smNm/ +MMm :NNdo` .mMM` oMM+/yMM/ MMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMNo- `:yMMMm `:sNMMM` sMMMMMMM+ NMMMMMMMMMMM")
log.ODM_INFO("MMMMMMMMMMMMMMMNmmNMMMMMMMNmmmmNMMMMMMMNNMMMMMMMMMNNMMMMMMMMMMMM")
log.ODM_INFO("ODM app finished - %s" % system.now())
else:
exit(retcode)
exit(retcode)
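
Most hunks in this diff are mechanical: black normalizes string quotes to double quotes and wraps calls that exceed its line-length limit. A minimal sketch of how the formatting and type checks behind this change could be reproduced locally, assuming black and mypy are installed and no project-specific configuration overrides their defaults:

# Sketch only: re-run the formatter and type checker over the repository.
# Assumes `black` and `mypy` are available in the active environment.
import subprocess
import sys


def run_checks(paths=(".",)):
    """Run black in check mode, then mypy, returning the first failure code."""
    for cmd in (["black", "--check", "--diff", *paths], ["mypy", *paths]):
        print("+ " + " ".join(cmd))
        retcode = subprocess.run(cmd).returncode
        if retcode != 0:
            return retcode
    return 0


if __name__ == "__main__":
    sys.exit(run_checks())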


@ -17,12 +17,14 @@ from opendm.bgfilter import BgFilter
from opendm.concurrency import parallel_map
from opendm.video.video2dataset import Parameters, Video2Dataset
def save_images_database(photos, database_file):
with open(database_file, 'w') as f:
with open(database_file, "w") as f:
f.write(json.dumps([p.__dict__ for p in photos]))
log.ODM_INFO("Wrote images database: %s" % database_file)
def load_images_database(database_file):
# Empty is used to create types.ODM_Photo class
# instances without calling __init__
@ -33,7 +35,7 @@ def load_images_database(database_file):
log.ODM_INFO("Loading images database: %s" % database_file)
with open(database_file, 'r') as f:
with open(database_file, "r") as f:
photos_json = json.load(f)
for photo_json in photos_json:
p = Empty()
@ -44,18 +46,22 @@ def load_images_database(database_file):
return result
class ODMLoadDatasetStage(types.ODM_Stage):
def process(self, args, outputs):
outputs['start_time'] = system.now_raw()
outputs["start_time"] = system.now_raw()
tree = types.ODM_Tree(args.project_path, args.gcp, args.geo, args.align)
outputs['tree'] = tree
outputs["tree"] = tree
if io.file_exists(tree.benchmarking):
# Delete the previously made file
try:
os.remove(tree.benchmarking)
with open(tree.benchmarking, 'a') as b:
b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))
with open(tree.benchmarking, "a") as b:
b.write(
"ODM Benchmarking file created %s\nNumber of Cores: %s\n\n"
% (system.now(), context.num_cores)
)
except Exception as e:
log.ODM_WARNING("Cannot write benchmark file: %s" % str(e))
@ -76,21 +82,27 @@ class ODMLoadDatasetStage(types.ODM_Stage):
def search_video_files(in_dir):
entries = os.listdir(in_dir)
return [os.path.join(in_dir, f) for f in entries if valid_filename(f, context.supported_video_extensions)]
return [
os.path.join(in_dir, f)
for f in entries
if valid_filename(f, context.supported_video_extensions)
]
def find_mask(photo_path, masks):
(pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
k = "{}_mask".format(pathfn)
mask = masks.get(k)
if mask:
# Spaces are not supported due to OpenSfM's mask_list.txt format reqs
if not " " in mask:
return mask
else:
log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))
log.ODM_WARNING(
"Image mask {} has a space. Spaces are currently not supported for image masks.".format(
mask
)
)
# get images directory
images_dir = tree.dataset_raw
@ -98,55 +110,67 @@ class ODMLoadDatasetStage(types.ODM_Stage):
# define paths and create working directories
system.mkdir_p(tree.odm_georeferencing)
log.ODM_INFO('Loading dataset from: %s' % images_dir)
log.ODM_INFO("Loading dataset from: %s" % images_dir)
# check if we rerun cell or not
images_database_file = os.path.join(tree.root_path, 'images.json')
images_database_file = os.path.join(tree.root_path, "images.json")
if not io.file_exists(images_database_file) or self.rerun():
if not os.path.exists(images_dir):
raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))
raise system.ExitException(
"There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s"
% (images_dir, args.project_path)
)
# Check if we need to extract video frames
frames_db_file = os.path.join(images_dir, 'frames.json')
frames_db_file = os.path.join(images_dir, "frames.json")
if not os.path.exists(frames_db_file) or self.rerun():
video_files = search_video_files(images_dir)
# If we're re-running the pipeline, and frames have been extracted during a previous run
# we need to remove those before re-extracting them
if len(video_files) > 0 and os.path.exists(frames_db_file) and self.rerun():
if (
len(video_files) > 0
and os.path.exists(frames_db_file)
and self.rerun()
):
log.ODM_INFO("Re-run, removing previously extracted video frames")
frames = []
try:
with open(frames_db_file, 'r') as f:
with open(frames_db_file, "r") as f:
frames = json.loads(f.read())
except Exception as e:
log.ODM_WARNING("Cannot check previous video extraction: %s" % str(e))
log.ODM_WARNING(
"Cannot check previous video extraction: %s" % str(e)
)
for f in frames:
fp = os.path.join(images_dir, f)
if os.path.isfile(fp):
os.remove(fp)
if len(video_files) > 0:
log.ODM_INFO("Found video files (%s), extracting frames" % len(video_files))
log.ODM_INFO(
"Found video files (%s), extracting frames" % len(video_files)
)
try:
params = Parameters({
"input": video_files,
"output": images_dir,
"blur_threshold": 200,
"distance_threshold": 10,
"black_ratio_threshold": 0.98,
"pixel_black_threshold": 0.30,
"use_srt": True,
"max_dimension": args.video_resolution,
"limit": args.video_limit,
})
params = Parameters(
{
"input": video_files,
"output": images_dir,
"blur_threshold": 200,
"distance_threshold": 10,
"black_ratio_threshold": 0.98,
"pixel_black_threshold": 0.30,
"use_srt": True,
"max_dimension": args.video_resolution,
"limit": args.video_limit,
}
)
v2d = Video2Dataset(params)
frames = v2d.ProcessVideo()
with open(frames_db_file, 'w') as f:
with open(frames_db_file, "w") as f:
f.write(json.dumps([os.path.basename(f) for f in frames]))
except Exception as e:
log.ODM_WARNING("Could not extract video frames: %s" % str(e))
@ -160,20 +184,26 @@ class ODMLoadDatasetStage(types.ODM_Stage):
masks = {}
for r in rejects:
(p, ext) = os.path.splitext(r)
if p[-5:] == "_mask" and ext.lower() in context.supported_extensions:
if (
p[-5:] == "_mask"
and ext.lower() in context.supported_extensions
):
masks[p] = r
photos = []
with open(tree.dataset_list, 'w') as dataset_list:
with open(tree.dataset_list, "w") as dataset_list:
log.ODM_INFO("Loading %s images" % len(path_files))
for f in path_files:
try:
p = types.ODM_Photo(f)
p.set_mask(find_mask(f, masks))
photos.append(p)
dataset_list.write(photos[-1].filename + '\n')
dataset_list.write(photos[-1].filename + "\n")
except PhotoCorruptedException:
log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f))
log.ODM_WARNING(
"%s seems corrupted and will not be used"
% os.path.basename(f)
)
# Check if a geo file is available
if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file):
@ -188,19 +218,27 @@ class ODMLoadDatasetStage(types.ODM_Stage):
updated += 1
log.ODM_INFO("Updated %s image positions" % updated)
# Warn if a file path is specified but it does not exist
elif tree.odm_geo_file is not None and not os.path.isfile(tree.odm_geo_file):
log.ODM_WARNING("Image geolocation file %s does not exist" % tree.odm_geo_file)
elif tree.odm_geo_file is not None and not os.path.isfile(
tree.odm_geo_file
):
log.ODM_WARNING(
"Image geolocation file %s does not exist" % tree.odm_geo_file
)
# GPSDOP override if we have GPS accuracy information (such as RTK)
if 'gps_accuracy_is_set' in args:
log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
if "gps_accuracy_is_set" in args:
log.ODM_INFO(
"Forcing GPS DOP to %s for all images" % args.gps_accuracy
)
for p in photos:
p.override_gps_dop(args.gps_accuracy)
# Override projection type
if args.camera_lens != "auto":
log.ODM_INFO("Setting camera lens to %s for all images" % args.camera_lens)
log.ODM_INFO(
"Setting camera lens to %s for all images" % args.camera_lens
)
for p in photos:
p.override_camera_projection(args.camera_lens)
@ -212,39 +250,73 @@ class ODMLoadDatasetStage(types.ODM_Stage):
# - Is not nadir (or if orientation info is missing, or if camera lens is fisheye), AND
# - There are no spaces in the image filename (OpenSfM requirement)
# Automatically generate a sky mask
# Generate list of sky images
sky_images = []
for p in photos:
if p.mask is None and (args.camera_lens in ['fisheye', 'spherical'] or p.pitch is None or (abs(p.pitch) > 20)) and (not " " in p.filename):
sky_images.append({'file': os.path.join(images_dir, p.filename), 'p': p})
if (
p.mask is None
and (
args.camera_lens in ["fisheye", "spherical"]
or p.pitch is None
or (abs(p.pitch) > 20)
)
and (not " " in p.filename)
):
sky_images.append(
{"file": os.path.join(images_dir, p.filename), "p": p}
)
if len(sky_images) > 0:
log.ODM_INFO("Automatically generating sky masks for %s images" % len(sky_images))
model = ai.get_model("skyremoval", "https://github.com/OpenDroneMap/SkyRemoval/releases/download/v1.0.5/model.zip", "v1.0.5")
log.ODM_INFO(
"Automatically generating sky masks for %s images"
% len(sky_images)
)
model = ai.get_model(
"skyremoval",
"https://github.com/OpenDroneMap/SkyRemoval/releases/download/v1.0.5/model.zip",
"v1.0.5",
)
if model is not None:
sf = SkyFilter(model=model)
def parallel_sky_filter(item):
try:
mask_file = sf.run_img(item['file'], images_dir)
mask_file = sf.run_img(item["file"], images_dir)
# Check and set
if mask_file is not None and os.path.isfile(mask_file):
item['p'].set_mask(os.path.basename(mask_file))
log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
if mask_file is not None and os.path.isfile(
mask_file
):
item["p"].set_mask(os.path.basename(mask_file))
log.ODM_INFO(
"Wrote %s" % os.path.basename(mask_file)
)
else:
log.ODM_WARNING("Cannot generate mask for %s" % item['file'])
log.ODM_WARNING(
"Cannot generate mask for %s" % item["file"]
)
except Exception as e:
log.ODM_WARNING("Cannot generate mask for %s: %s" % (item['file'], str(e)))
log.ODM_WARNING(
"Cannot generate mask for %s: %s"
% (item["file"], str(e))
)
parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency)
parallel_map(
parallel_sky_filter,
sky_images,
max_workers=args.max_concurrency,
)
log.ODM_INFO("Sky masks generation completed!")
else:
log.ODM_WARNING("Cannot load AI model (you might need to be connected to the internet?)")
log.ODM_WARNING(
"Cannot load AI model (you might need to be connected to the internet?)"
)
else:
log.ODM_INFO("No sky masks will be generated (masks already provided, or images are nadir)")
log.ODM_INFO(
"No sky masks will be generated (masks already provided, or images are nadir)"
)
# End sky removal
@ -253,88 +325,131 @@ class ODMLoadDatasetStage(types.ODM_Stage):
# For each image that:
# - Doesn't already have a mask, AND
# - There are no spaces in the image filename (OpenSfM requirement)
# Generate list of background images
bg_images = []
for p in photos:
if p.mask is None and (not " " in p.filename):
bg_images.append({'file': os.path.join(images_dir, p.filename), 'p': p})
bg_images.append(
{"file": os.path.join(images_dir, p.filename), "p": p}
)
if len(bg_images) > 0:
log.ODM_INFO("Automatically generating background masks for %s images" % len(bg_images))
model = ai.get_model("bgremoval", "https://github.com/OpenDroneMap/ODM/releases/download/v2.9.0/u2net.zip", "v2.9.0")
log.ODM_INFO(
"Automatically generating background masks for %s images"
% len(bg_images)
)
model = ai.get_model(
"bgremoval",
"https://github.com/OpenDroneMap/ODM/releases/download/v2.9.0/u2net.zip",
"v2.9.0",
)
if model is not None:
bg = BgFilter(model=model)
def parallel_bg_filter(item):
try:
mask_file = bg.run_img(item['file'], images_dir)
mask_file = bg.run_img(item["file"], images_dir)
# Check and set
if mask_file is not None and os.path.isfile(mask_file):
item['p'].set_mask(os.path.basename(mask_file))
log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
if mask_file is not None and os.path.isfile(
mask_file
):
item["p"].set_mask(os.path.basename(mask_file))
log.ODM_INFO(
"Wrote %s" % os.path.basename(mask_file)
)
else:
log.ODM_WARNING("Cannot generate mask for %s" % img)
                            log.ODM_WARNING(
                                "Cannot generate mask for %s" % item["file"]
                            )
except Exception as e:
log.ODM_WARNING("Cannot generate mask for %s: %s" % (img, str(e)))
                        log.ODM_WARNING(
                            "Cannot generate mask for %s: %s"
                            % (item["file"], str(e))
                        )
parallel_map(parallel_bg_filter, bg_images, max_workers=args.max_concurrency)
parallel_map(
parallel_bg_filter,
bg_images,
max_workers=args.max_concurrency,
)
log.ODM_INFO("Background masks generation completed!")
else:
log.ODM_WARNING("Cannot load AI model (you might need to be connected to the internet?)")
log.ODM_WARNING(
"Cannot load AI model (you might need to be connected to the internet?)"
)
else:
log.ODM_INFO("No background masks will be generated (masks already provided)")
log.ODM_INFO(
"No background masks will be generated (masks already provided)"
)
# End bg removal
# Save image database for faster restart
save_images_database(photos, images_database_file)
else:
raise system.ExitException('Not enough supported images in %s' % images_dir)
raise system.ExitException(
"Not enough supported images in %s" % images_dir
)
else:
# We have an images database, just load it
photos = load_images_database(images_database_file)
log.ODM_INFO('Found %s usable images' % len(photos))
log.ODM_INFO("Found %s usable images" % len(photos))
log.logger.log_json_images(len(photos))
# Create reconstruction object
reconstruction = types.ODM_Reconstruction(photos)
if tree.odm_georeferencing_gcp and not args.use_exif:
reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
tree.odm_georeferencing_coords,
tree.odm_georeferencing_gcp_utm,
tree.odm_georeferencing_model_txt_geo,
rerun=self.rerun())
reconstruction.georeference_with_gcp(
tree.odm_georeferencing_gcp,
tree.odm_georeferencing_coords,
tree.odm_georeferencing_gcp_utm,
tree.odm_georeferencing_model_txt_geo,
rerun=self.rerun(),
)
else:
reconstruction.georeference_with_gps(tree.dataset_raw,
tree.odm_georeferencing_coords,
tree.odm_georeferencing_model_txt_geo,
rerun=self.rerun())
reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj))
outputs['reconstruction'] = reconstruction
reconstruction.georeference_with_gps(
tree.dataset_raw,
tree.odm_georeferencing_coords,
tree.odm_georeferencing_model_txt_geo,
rerun=self.rerun(),
)
reconstruction.save_proj_srs(
os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj)
)
outputs["reconstruction"] = reconstruction
# Try to load boundaries
if args.boundary:
if reconstruction.is_georeferenced():
outputs['boundary'] = boundary.load_boundary(args.boundary, reconstruction.get_proj_srs())
outputs["boundary"] = boundary.load_boundary(
args.boundary, reconstruction.get_proj_srs()
)
else:
args.boundary = None
log.ODM_WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)")
log.ODM_WARNING(
"Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)"
)
# If sfm-algorithm is triangulation, check if photos have OPK
if args.sfm_algorithm == 'triangulation':
if args.sfm_algorithm == "triangulation":
for p in photos:
if not p.has_opk():
log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename)
args.sfm_algorithm = 'incremental'
log.ODM_WARNING(
"No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental"
% p.filename
)
args.sfm_algorithm = "incremental"
break
# Rolling shutter cannot be done in non-georeferenced datasets
if args.rolling_shutter and not reconstruction.is_georeferenced():
log.ODM_WARNING("Reconstruction is not georeferenced, disabling rolling shutter correction")
log.ODM_WARNING(
"Reconstruction is not georeferenced, disabling rolling shutter correction"
)
args.rolling_shutter = False
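
The sky-mask block above reduces to a per-photo predicate: generate a mask only when none exists, the shot is not clearly nadir (pitch missing, pitch beyond 20 degrees, or a fisheye/spherical lens), and the filename has no spaces, which OpenSfM's mask list cannot represent. A standalone sketch of that check; the attribute names mirror the ones used above, but this is an illustration, not code taken from the stage:

def is_sky_mask_candidate(photo, camera_lens):
    """Return True when a photo should receive an auto-generated sky mask."""
    if photo.mask is not None:
        return False  # a mask was already provided
    if " " in photo.filename:
        return False  # OpenSfM's mask_list.txt cannot represent spaces
    return (
        camera_lens in ("fisheye", "spherical")
        or photo.pitch is None
        or abs(photo.pitch) > 20
    )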


@ -10,16 +10,20 @@ from opendm.photo import find_largest_photo_dim
from opendm.objpacker import obj_pack
from opendm.gltf import obj2glb
class ODMMvsTexStage(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
reconstruction = outputs['reconstruction']
tree = outputs["tree"]
reconstruction = outputs["reconstruction"]
max_dim = find_largest_photo_dim(reconstruction.photos)
max_texture_size = 8 * 1024 # default
max_texture_size = 8 * 1024 # default
if max_dim > 8000:
log.ODM_INFO("Large input images (%s pixels), increasing maximum texture size." % max_dim)
log.ODM_INFO(
"Large input images (%s pixels), increasing maximum texture size."
% max_dim
)
max_texture_size *= 3
class nonloc:
@ -29,52 +33,81 @@ class ODMMvsTexStage(types.ODM_Stage):
subdir = ""
if not primary and band is not None:
subdir = band
if not args.skip_3dmodel and (primary or args.use_3dmesh):
nonloc.runs += [{
'out_dir': os.path.join(tree.odm_texturing, subdir),
'model': tree.odm_mesh,
'nadir': False,
'primary': primary,
'nvm_file': nvm_file,
'labeling_file': os.path.join(tree.odm_texturing, "odm_textured_model_geo_labeling.vec") if subdir else None
}]
nonloc.runs += [
{
"out_dir": os.path.join(tree.odm_texturing, subdir),
"model": tree.odm_mesh,
"nadir": False,
"primary": primary,
"nvm_file": nvm_file,
"labeling_file": (
os.path.join(
tree.odm_texturing,
"odm_textured_model_geo_labeling.vec",
)
if subdir
else None
),
}
]
if not args.use_3dmesh:
nonloc.runs += [{
'out_dir': os.path.join(tree.odm_25dtexturing, subdir),
'model': tree.odm_25dmesh,
'nadir': True,
'primary': primary,
'nvm_file': nvm_file,
'labeling_file': os.path.join(tree.odm_25dtexturing, "odm_textured_model_geo_labeling.vec") if subdir else None
}]
nonloc.runs += [
{
"out_dir": os.path.join(tree.odm_25dtexturing, subdir),
"model": tree.odm_25dmesh,
"nadir": True,
"primary": primary,
"nvm_file": nvm_file,
"labeling_file": (
os.path.join(
tree.odm_25dtexturing,
"odm_textured_model_geo_labeling.vec",
)
if subdir
else None
),
}
]
if reconstruction.multi_camera:
for band in reconstruction.multi_camera:
primary = band['name'] == get_primary_band_name(reconstruction.multi_camera, args.primary_band)
nvm_file = os.path.join(tree.opensfm, "undistorted", "reconstruction_%s.nvm" % band['name'].lower())
add_run(nvm_file, primary, band['name'].lower())
primary = band["name"] == get_primary_band_name(
reconstruction.multi_camera, args.primary_band
)
nvm_file = os.path.join(
tree.opensfm,
"undistorted",
"reconstruction_%s.nvm" % band["name"].lower(),
)
add_run(nvm_file, primary, band["name"].lower())
# Sort to make sure primary band is processed first
nonloc.runs.sort(key=lambda r: r['primary'], reverse=True)
nonloc.runs.sort(key=lambda r: r["primary"], reverse=True)
else:
add_run(tree.opensfm_reconstruction_nvm)
progress_per_run = 100.0 / len(nonloc.runs)
progress = 0.0
for r in nonloc.runs:
if not io.dir_exists(r['out_dir']):
system.mkdir_p(r['out_dir'])
if not io.dir_exists(r["out_dir"]):
system.mkdir_p(r["out_dir"])
odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj)
unaligned_obj = io.related_file_path(odm_textured_model_obj, postfix="_unaligned")
odm_textured_model_obj = os.path.join(
r["out_dir"], tree.odm_textured_model_obj
)
unaligned_obj = io.related_file_path(
odm_textured_model_obj, postfix="_unaligned"
)
if not io.file_exists(odm_textured_model_obj) or self.rerun():
log.ODM_INFO('Writing MVS Textured file in: %s'
% odm_textured_model_obj)
log.ODM_INFO(
"Writing MVS Textured file in: %s" % odm_textured_model_obj
)
if os.path.isfile(unaligned_obj):
os.unlink(unaligned_obj)
@ -88,27 +121,33 @@ class ODMMvsTexStage(types.ODM_Stage):
skipGlobalSeamLeveling = "--skip_global_seam_leveling"
if args.texturing_keep_unseen_faces:
keepUnseenFaces = "--keep_unseen_faces"
if (r['nadir']):
nadir = '--nadir_mode'
if r["nadir"]:
nadir = "--nadir_mode"
# mvstex definitions
kwargs = {
'bin': context.mvstex_path,
'out_dir': os.path.join(r['out_dir'], "odm_textured_model_geo"),
'model': r['model'],
'dataTerm': 'gmi',
'outlierRemovalType': 'gauss_clamping',
'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
'keepUnseenFaces': keepUnseenFaces,
'toneMapping': 'none',
'nadirMode': nadir,
'maxTextureSize': '--max_texture_size=%s' % max_texture_size,
'nvm_file': r['nvm_file'],
'intermediate': '--no_intermediate_results' if (r['labeling_file'] or not reconstruction.multi_camera) else '',
'labelingFile': '-L "%s"' % r['labeling_file'] if r['labeling_file'] else ''
"bin": context.mvstex_path,
"out_dir": os.path.join(r["out_dir"], "odm_textured_model_geo"),
"model": r["model"],
"dataTerm": "gmi",
"outlierRemovalType": "gauss_clamping",
"skipGlobalSeamLeveling": skipGlobalSeamLeveling,
"keepUnseenFaces": keepUnseenFaces,
"toneMapping": "none",
"nadirMode": nadir,
"maxTextureSize": "--max_texture_size=%s" % max_texture_size,
"nvm_file": r["nvm_file"],
"intermediate": (
"--no_intermediate_results"
if (r["labeling_file"] or not reconstruction.multi_camera)
else ""
),
"labelingFile": (
'-L "%s"' % r["labeling_file"] if r["labeling_file"] else ""
),
}
mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')
mvs_tmp_dir = os.path.join(r["out_dir"], "tmp")
# mvstex creates a tmp directory, so make sure it is empty
if io.dir_exists(mvs_tmp_dir):
@ -116,24 +155,33 @@ class ODMMvsTexStage(types.ODM_Stage):
shutil.rmtree(mvs_tmp_dir)
# run texturing binary
system.run('"{bin}" "{nvm_file}" "{model}" "{out_dir}" '
'-d {dataTerm} -o {outlierRemovalType} '
'-t {toneMapping} '
'{intermediate} '
'{skipGlobalSeamLeveling} '
'{keepUnseenFaces} '
'{nadirMode} '
'{labelingFile} '
'{maxTextureSize} '.format(**kwargs))
system.run(
'"{bin}" "{nvm_file}" "{model}" "{out_dir}" '
"-d {dataTerm} -o {outlierRemovalType} "
"-t {toneMapping} "
"{intermediate} "
"{skipGlobalSeamLeveling} "
"{keepUnseenFaces} "
"{nadirMode} "
"{labelingFile} "
"{maxTextureSize} ".format(**kwargs)
)
if r['primary'] and (not r['nadir'] or args.skip_3dmodel):
if r["primary"] and (not r["nadir"] or args.skip_3dmodel):
# GlTF?
if args.gltf:
log.ODM_INFO("Generating glTF Binary")
odm_textured_model_glb = os.path.join(r['out_dir'], tree.odm_textured_model_glb)
odm_textured_model_glb = os.path.join(
r["out_dir"], tree.odm_textured_model_glb
)
try:
obj2glb(odm_textured_model_obj, odm_textured_model_glb, rtc=reconstruction.get_proj_offset(), _info=log.ODM_INFO)
obj2glb(
odm_textured_model_obj,
odm_textured_model_glb,
rtc=reconstruction.get_proj_offset(),
_info=log.ODM_INFO,
)
except Exception as e:
log.ODM_WARNING(str(e))
@ -141,44 +189,51 @@ class ODMMvsTexStage(types.ODM_Stage):
if args.texturing_single_material:
log.ODM_INFO("Packing to single material")
packed_dir = os.path.join(r['out_dir'], 'packed')
packed_dir = os.path.join(r["out_dir"], "packed")
if io.dir_exists(packed_dir):
log.ODM_INFO("Removing old packed directory {}".format(packed_dir))
log.ODM_INFO(
"Removing old packed directory {}".format(packed_dir)
)
shutil.rmtree(packed_dir)
try:
obj_pack(os.path.join(r['out_dir'], tree.odm_textured_model_obj), packed_dir, _info=log.ODM_INFO)
obj_pack(
os.path.join(r["out_dir"], tree.odm_textured_model_obj),
packed_dir,
_info=log.ODM_INFO,
)
# Move packed/* into texturing folder
system.delete_files(r['out_dir'], (".vec", ))
system.move_files(packed_dir, r['out_dir'])
system.delete_files(r["out_dir"], (".vec",))
system.move_files(packed_dir, r["out_dir"])
if os.path.isdir(packed_dir):
os.rmdir(packed_dir)
except Exception as e:
log.ODM_WARNING(str(e))
# Backward compatibility: copy odm_textured_model_geo.mtl to odm_textured_model.mtl
                    # for certain older WebODM clients which expect an odm_textured_model.mtl
# to be present for visualization
# We should remove this at some point in the future
geo_mtl = os.path.join(r['out_dir'], 'odm_textured_model_geo.mtl')
geo_mtl = os.path.join(r["out_dir"], "odm_textured_model_geo.mtl")
if io.file_exists(geo_mtl):
nongeo_mtl = os.path.join(r['out_dir'], 'odm_textured_model.mtl')
nongeo_mtl = os.path.join(r["out_dir"], "odm_textured_model.mtl")
shutil.copy(geo_mtl, nongeo_mtl)
progress += progress_per_run
self.update_progress(progress)
else:
log.ODM_WARNING('Found a valid ODM Texture file in: %s'
% odm_textured_model_obj)
log.ODM_WARNING(
"Found a valid ODM Texture file in: %s" % odm_textured_model_obj
)
if args.optimize_disk_space:
for r in nonloc.runs:
if io.file_exists(r['model']):
os.remove(r['model'])
undistorted_images_path = os.path.join(tree.opensfm, "undistorted", "images")
if io.file_exists(r["model"]):
os.remove(r["model"])
undistorted_images_path = os.path.join(
tree.opensfm, "undistorted", "images"
)
if io.dir_exists(undistorted_images_path):
shutil.rmtree(undistorted_images_path)
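
The system.run call in this stage builds the texrecon command line from adjacent string literals (which Python joins at compile time) filled in with str.format(**kwargs). A small sketch of that pattern with placeholder values, kept separate from the real argument set above:

# Placeholder values; the real arguments are assembled in the stage above.
kwargs = {
    "bin": "/path/to/texrecon",
    "nvm_file": "reconstruction.nvm",
    "model": "odm_mesh.ply",
    "out_dir": "odm_texturing/odm_textured_model_geo",
    "dataTerm": "gmi",
    "maxTextureSize": "--max_texture_size=8192",
}

# Adjacent literals concatenate into a single template string.
cmd = (
    '"{bin}" "{nvm_file}" "{model}" "{out_dir}" '
    "-d {dataTerm} "
    "{maxTextureSize} "
).format(**kwargs)

print(cmd)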


@ -32,51 +32,48 @@ class ODMApp:
json_log_paths.append(args.copy_to)
log.logger.init_json_output(json_log_paths, args)
dataset = ODMLoadDatasetStage('dataset', args, progress=5.0)
split = ODMSplitStage('split', args, progress=75.0)
merge = ODMMergeStage('merge', args, progress=100.0)
opensfm = ODMOpenSfMStage('opensfm', args, progress=25.0)
openmvs = ODMOpenMVSStage('openmvs', args, progress=50.0)
filterpoints = ODMFilterPoints('odm_filterpoints', args, progress=52.0)
meshing = ODMeshingStage('odm_meshing', args, progress=60.0,
max_vertex=args.mesh_size,
oct_tree=max(1, min(14, args.mesh_octree_depth)),
samples=1.0,
point_weight=4.0,
max_concurrency=args.max_concurrency)
texturing = ODMMvsTexStage('mvs_texturing', args, progress=70.0)
georeferencing = ODMGeoreferencingStage('odm_georeferencing', args, progress=80.0,
gcp_file=args.gcp)
dem = ODMDEMStage('odm_dem', args, progress=90.0,
max_concurrency=args.max_concurrency)
orthophoto = ODMOrthoPhotoStage('odm_orthophoto', args, progress=98.0)
report = ODMReport('odm_report', args, progress=99.0)
postprocess = ODMPostProcess('odm_postprocess', args, progress=100.0)
dataset = ODMLoadDatasetStage("dataset", args, progress=5.0)
split = ODMSplitStage("split", args, progress=75.0)
merge = ODMMergeStage("merge", args, progress=100.0)
opensfm = ODMOpenSfMStage("opensfm", args, progress=25.0)
openmvs = ODMOpenMVSStage("openmvs", args, progress=50.0)
filterpoints = ODMFilterPoints("odm_filterpoints", args, progress=52.0)
meshing = ODMeshingStage(
"odm_meshing",
args,
progress=60.0,
max_vertex=args.mesh_size,
oct_tree=max(1, min(14, args.mesh_octree_depth)),
samples=1.0,
point_weight=4.0,
max_concurrency=args.max_concurrency,
)
texturing = ODMMvsTexStage("mvs_texturing", args, progress=70.0)
georeferencing = ODMGeoreferencingStage(
"odm_georeferencing", args, progress=80.0, gcp_file=args.gcp
)
dem = ODMDEMStage(
"odm_dem", args, progress=90.0, max_concurrency=args.max_concurrency
)
orthophoto = ODMOrthoPhotoStage("odm_orthophoto", args, progress=98.0)
report = ODMReport("odm_report", args, progress=99.0)
postprocess = ODMPostProcess("odm_postprocess", args, progress=100.0)
# Normal pipeline
self.first_stage = dataset
dataset.connect(split) \
.connect(merge) \
.connect(opensfm)
dataset.connect(split).connect(merge).connect(opensfm)
if args.fast_orthophoto:
opensfm.connect(filterpoints)
else:
opensfm.connect(openmvs) \
.connect(filterpoints)
filterpoints \
.connect(meshing) \
.connect(texturing) \
.connect(georeferencing) \
.connect(dem) \
.connect(orthophoto) \
.connect(report) \
.connect(postprocess)
opensfm.connect(openmvs).connect(filterpoints)
filterpoints.connect(meshing).connect(texturing).connect(
georeferencing
).connect(dem).connect(orthophoto).connect(report).connect(postprocess)
def execute(self):
try:
self.first_stage.run()
@ -84,7 +81,9 @@ class ODMApp:
return 0
except system.SubprocessException as e:
print("")
print("===== Dumping Info for Geeks (developers need this to fix bugs) =====")
print(
"===== Dumping Info for Geeks (developers need this to fix bugs) ====="
)
print(str(e))
stack_trace = traceback.format_exc()
print(stack_trace)
@ -96,15 +95,25 @@ class ODMApp:
if code == 139 or code == 134 or code == 1 or code == 3221225477:
# Segfault
log.ODM_ERROR("Uh oh! Processing stopped because of strange values in the reconstruction. This is often a sign that the input data has some issues or the software cannot deal with it. Have you followed best practices for data acquisition? See https://docs.opendronemap.org/flying/")
log.ODM_ERROR(
"Uh oh! Processing stopped because of strange values in the reconstruction. This is often a sign that the input data has some issues or the software cannot deal with it. Have you followed best practices for data acquisition? See https://docs.opendronemap.org/flying/"
)
elif code == 137 or code == 3221226505:
log.ODM_ERROR("Whoops! You ran out of memory! Add more RAM to your computer, if you're using docker configure it to use more memory, for WSL2 make use of .wslconfig (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig), resize your images, lower the quality settings or process the images using a cloud provider (e.g. https://webodm.net).")
log.ODM_ERROR(
"Whoops! You ran out of memory! Add more RAM to your computer, if you're using docker configure it to use more memory, for WSL2 make use of .wslconfig (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig), resize your images, lower the quality settings or process the images using a cloud provider (e.g. https://webodm.net)."
)
elif code == 132:
log.ODM_ERROR("Oh no! It looks like your CPU is not supported (is it fairly old?). You can still use ODM, but you will need to build your own docker image. See https://github.com/OpenDroneMap/ODM#build-from-source")
log.ODM_ERROR(
"Oh no! It looks like your CPU is not supported (is it fairly old?). You can still use ODM, but you will need to build your own docker image. See https://github.com/OpenDroneMap/ODM#build-from-source"
)
elif code == 3:
log.ODM_ERROR("ODM can't find a program that is required for processing to run! Did you do a custom build of ODM? (cool!) Make sure that all programs required by ODM are in the right place and are built correctly.")
log.ODM_ERROR(
"ODM can't find a program that is required for processing to run! Did you do a custom build of ODM? (cool!) Make sure that all programs required by ODM are in the right place and are built correctly."
)
else:
log.ODM_ERROR("The program exited with a strange error code. Please report it at https://community.opendronemap.org")
log.ODM_ERROR(
"The program exited with a strange error code. Please report it at https://community.opendronemap.org"
)
# TODO: more?
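
The collapsed pipeline wiring above, dataset.connect(split).connect(merge).connect(opensfm), only reads correctly if connect() returns the stage passed to it. A toy sketch of that fluent pattern; the real linkage lives in opendm.types.ODM_Stage, so the return-value behaviour is an assumption made explicit here rather than a copy of that class:

class Stage:
    """Minimal stand-in for a pipeline stage with chainable connect()."""

    def __init__(self, name):
        self.name = name
        self.next_stage = None

    def connect(self, stage):
        # Returning the connected stage is what makes the chained form work.
        self.next_stage = stage
        return stage

    def run(self):
        print("running %s" % self.name)
        if self.next_stage is not None:
            self.next_stage.run()


dataset = Stage("dataset")
split = Stage("split")
merge = Stage("merge")
dataset.connect(split).connect(merge)
dataset.run()  # prints: running dataset, running split, running merge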


@ -13,33 +13,39 @@ from opendm import pseudogeo
from opendm.tiles.tiler import generate_dem_tiles
from opendm.cogeo import convert_to_cogeo
class ODMDEMStage(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
reconstruction = outputs['reconstruction']
tree = outputs["tree"]
reconstruction = outputs["reconstruction"]
dem_input = tree.odm_georeferencing_model_laz
pc_model_found = io.file_exists(dem_input)
ignore_resolution = False
pseudo_georeference = False
if not reconstruction.is_georeferenced():
log.ODM_WARNING("Not georeferenced, using ungeoreferenced point cloud...")
ignore_resolution = True
pseudo_georeference = True
resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction,
gsd_scaling=1.0,
ignore_gsd=args.ignore_gsd,
ignore_resolution=ignore_resolution and args.ignore_gsd,
has_gcp=reconstruction.has_gcp())
resolution = gsd.cap_resolution(
args.dem_resolution,
tree.opensfm_reconstruction,
gsd_scaling=1.0,
ignore_gsd=args.ignore_gsd,
ignore_resolution=ignore_resolution and args.ignore_gsd,
has_gcp=reconstruction.has_gcp(),
)
log.ODM_INFO('Create DSM: ' + str(args.dsm))
log.ODM_INFO('Create DTM: ' + str(args.dtm))
log.ODM_INFO('DEM input file {0} found: {1}'.format(dem_input, str(pc_model_found)))
log.ODM_INFO("Create DSM: " + str(args.dsm))
log.ODM_INFO("Create DTM: " + str(args.dtm))
log.ODM_INFO(
"DEM input file {0} found: {1}".format(dem_input, str(pc_model_found))
)
# define paths and create working directories
odm_dem_root = tree.path('odm_dem')
odm_dem_root = tree.path("odm_dem")
if not io.dir_exists(odm_dem_root):
system.mkdir_p(odm_dem_root)
@ -48,54 +54,80 @@ class ODMDEMStage(types.ODM_Stage):
# Do we need to process anything here?
if (args.dsm or args.dtm) and pc_model_found:
dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')
dtm_output_filename = os.path.join(odm_dem_root, 'dtm.tif')
dsm_output_filename = os.path.join(odm_dem_root, "dsm.tif")
dtm_output_filename = os.path.join(odm_dem_root, "dtm.tif")
if (args.dtm and not io.file_exists(dtm_output_filename)) or \
(args.dsm and not io.file_exists(dsm_output_filename)) or \
self.rerun():
if (
(args.dtm and not io.file_exists(dtm_output_filename))
or (args.dsm and not io.file_exists(dsm_output_filename))
or self.rerun()
):
products = []
if args.dsm or (args.dtm and args.dem_euclidean_map): products.append('dsm')
if args.dtm: products.append('dtm')
if args.dsm or (args.dtm and args.dem_euclidean_map):
products.append("dsm")
if args.dtm:
products.append("dtm")
radius_steps = commands.get_dem_radius_steps(tree.filtered_point_cloud_stats, args.dem_gapfill_steps, resolution)
radius_steps = commands.get_dem_radius_steps(
tree.filtered_point_cloud_stats, args.dem_gapfill_steps, resolution
)
for product in products:
commands.create_dem(
dem_input,
product,
output_type='idw' if product == 'dtm' else 'max',
radiuses=list(map(str, radius_steps)),
gapfill=args.dem_gapfill_steps > 0,
outdir=odm_dem_root,
resolution=resolution / 100.0,
decimation=args.dem_decimation,
max_workers=args.max_concurrency,
with_euclidean_map=args.dem_euclidean_map,
max_tiles=None if reconstruction.has_geotagged_photos() else math.ceil(len(reconstruction.photos) / 2)
)
dem_input,
product,
output_type="idw" if product == "dtm" else "max",
radiuses=list(map(str, radius_steps)),
gapfill=args.dem_gapfill_steps > 0,
outdir=odm_dem_root,
resolution=resolution / 100.0,
decimation=args.dem_decimation,
max_workers=args.max_concurrency,
with_euclidean_map=args.dem_euclidean_map,
max_tiles=(
None
if reconstruction.has_geotagged_photos()
else math.ceil(len(reconstruction.photos) / 2)
),
)
dem_geotiff_path = os.path.join(odm_dem_root, "{}.tif".format(product))
bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
dem_geotiff_path = os.path.join(
odm_dem_root, "{}.tif".format(product)
)
bounds_file_path = os.path.join(
tree.odm_georeferencing, "odm_georeferenced_model.bounds.gpkg"
)
if args.crop > 0 or args.boundary:
# Crop DEM
Cropper.crop(bounds_file_path, dem_geotiff_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)
Cropper.crop(
bounds_file_path,
dem_geotiff_path,
utils.get_dem_vars(args),
keep_original=not args.optimize_disk_space,
)
if pseudo_georeference:
pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)
if args.tiles:
generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency, resolution)
generate_dem_tiles(
dem_geotiff_path,
tree.path("%s_tiles" % product),
args.max_concurrency,
resolution,
)
if args.cog:
convert_to_cogeo(dem_geotiff_path, max_workers=args.max_concurrency)
convert_to_cogeo(
dem_geotiff_path, max_workers=args.max_concurrency
)
progress += 40
self.update_progress(progress)
else:
log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)
log.ODM_WARNING("Found existing outputs in: %s" % odm_dem_root)
else:
log.ODM_WARNING('DEM will not be generated')
log.ODM_WARNING("DEM will not be generated")
