OpenDroneMap-ODM/opendm/point_cloud.py

import os, sys, shutil, tempfile, json, math
from opendm import system
from opendm import log
from opendm import context
from opendm.system import run
from opendm import entwine
from opendm import io
from opendm.concurrency import parallel_map
from pipes import quote


def ply_info(input_ply):
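    """
    Read the header of a PLY file and return a dict with:
      'has_normals': whether the point cloud stores per-vertex normals
      'vertex_count': number of vertices declared in the header
    """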
    if not os.path.exists(input_ply):
        raise IOError("%s does not exist" % input_ply)

    # Read PLY header, check if point cloud has normals
    has_normals = False
    vertex_count = 0

    with open(input_ply, 'r', errors='ignore') as f:
        line = f.readline().strip().lower()
        i = 0
        while line != "end_header":
            line = f.readline().strip().lower()
            props = line.split(" ")
            if len(props) == 3:
                if props[0] == "property" and props[2] in ["nx", "normalx", "normal_x"]:
                    has_normals = True
                elif props[0] == "element" and props[1] == "vertex":
                    vertex_count = int(props[2])
            i += 1
            if i > 100:
                raise IOError("Cannot find end_header field. Invalid PLY?")

    return {
        'has_normals': has_normals,
        'vertex_count': vertex_count,
    }

def split(input_point_cloud, outdir, filename_template, capacity, dims=None):
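    """
    Split a point cloud into chunks of at most `capacity` vertices by shelling
    out to `pdal split`. Returns the list of paths to the generated parts.

    Hypothetical example:
        split("point_cloud.ply", "parts", "part.ply", capacity=250000)
    """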
log.ODM_INFO("Splitting point cloud filtering in chunks of {} vertices".format(capacity))
if not os.path.exists(input_point_cloud):
log.ODM_ERROR("{} does not exist, cannot split point cloud. The program will now exit.".format(input_point_cloud))
sys.exit(1)
if not os.path.exists(outdir):
system.mkdir_p(outdir)
2020-07-07 18:20:57 +00:00
if len(os.listdir(outdir)) != 0:
log.ODM_ERROR("%s already contains some files. The program will now exit.".format(outdir))
sys.exit(1)
cmd = 'pdal split -i "%s" -o "%s" --capacity %s ' % (input_point_cloud, os.path.join(outdir, filename_template), capacity)
if filename_template.endswith(".ply"):
cmd += ("--writers.ply.sized_types=false "
"--writers.ply.storage_mode='little endian' ")
if dims is not None:
cmd += '--writers.ply.dims="%s"' % dims
system.run(cmd)
return [os.path.join(outdir, f) for f in os.listdir(outdir)]
def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, verbose=False, max_concurrency=1):
"""
Filters a point cloud
"""
    if not os.path.exists(input_point_cloud):
        log.ODM_ERROR("{} does not exist. The program will now exit.".format(input_point_cloud))
        sys.exit(1)

    if (standard_deviation <= 0 or meank <= 0) and sample_radius <= 0:
        log.ODM_INFO("Skipping point cloud filtering")
        # When `--pc-filter 0` is used, we simply copy the input point cloud to the output
        shutil.copy(input_point_cloud, output_point_cloud)
        return

    filters = []

    if sample_radius > 0:
        log.ODM_INFO("Sampling points around a %sm radius" % sample_radius)
        filters.append('sample')

    if standard_deviation > 0 and meank > 0:
        log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation))
        filters.append('outlier')

    if len(filters) > 0:
        filters.append('range')

    info = ply_info(input_point_cloud)
    dims = "x=float,y=float,z=float,"
    if info['has_normals']:
        dims += "nx=float,ny=float,nz=float,"
    dims += "red=uchar,blue=uchar,green=uchar"

    if info['vertex_count'] == 0:
        log.ODM_ERROR("Cannot read vertex count for {}".format(input_point_cloud))
        sys.exit(1)

    # Do we need to split this?
    VERTEX_THRESHOLD = 250000
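    # Only worth splitting when multiple threads are available and the cloud
    # would yield at least two full chunks (presumably to avoid the split/merge
    # overhead on small clouds).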
    should_split = max_concurrency > 1 and info['vertex_count'] > VERTEX_THRESHOLD*2

    if should_split:
        partsdir = os.path.join(os.path.dirname(output_point_cloud), "parts")
        if os.path.exists(partsdir):
            log.ODM_WARNING("Removing existing directory %s" % partsdir)
            shutil.rmtree(partsdir)

        point_cloud_submodels = split(input_point_cloud, partsdir, "part.ply", capacity=VERTEX_THRESHOLD, dims=dims)

        def run_filter(pcs):
            # Recurse
            filter(pcs['path'], io.related_file_path(pcs['path'], postfix="_filtered"),
                   standard_deviation=standard_deviation,
                   meank=meank,
                   sample_radius=sample_radius,
                   verbose=verbose,
                   max_concurrency=1)

        # Filter
        parallel_map(run_filter, [{'path': p} for p in point_cloud_submodels], max_concurrency)

        # Merge
        log.ODM_INFO("Merging %s point cloud chunks to %s" % (len(point_cloud_submodels), output_point_cloud))
        filtered_pcs = [io.related_file_path(pcs, postfix="_filtered") for pcs in point_cloud_submodels]
        #merge_ply(filtered_pcs, output_point_cloud, dims)
        fast_merge_ply(filtered_pcs, output_point_cloud)

        if os.path.exists(partsdir):
            shutil.rmtree(partsdir)
    else:
        # Process point cloud (or a point cloud submodel) in a single step
        filterArgs = {
            'inputFile': input_point_cloud,
            'outputFile': output_point_cloud,
            'stages': " ".join(filters),
            'dims': dims
        }

        cmd = ("pdal translate -i \"{inputFile}\" "
               "-o \"{outputFile}\" "
               "{stages} "
               "--writers.ply.sized_types=false "
               "--writers.ply.storage_mode='little endian' "
               "--writers.ply.dims=\"{dims}\" "
               "").format(**filterArgs)

        if 'sample' in filters:
            cmd += "--filters.sample.radius={} ".format(sample_radius)

        if 'outlier' in filters:
            cmd += ("--filters.outlier.method='statistical' "
                    "--filters.outlier.mean_k={} "
                    "--filters.outlier.multiplier={} ").format(meank, standard_deviation)

        if 'range' in filters:
            # Remove outliers
            cmd += "--filters.range.limits='Classification![7:7]' "

        system.run(cmd)

    if not os.path.exists(output_point_cloud):
        log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))

def get_extent(input_point_cloud):
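    """
    Compute the bounding box of a point cloud via `pdal info`, falling back
    to a full (non-summary) info run for formats such as PLY that do not
    support --summary. Returns a dict with minx/maxx, miny/maxy, minz/maxz.
    """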
    fd, json_file = tempfile.mkstemp(suffix='.json')
    os.close(fd)

    # Get point cloud extent
    fallback = False

    # We know PLY files do not have --summary support
    if input_point_cloud.lower().endswith(".ply"):
        fallback = True
        run('pdal info {0} > {1}'.format(input_point_cloud, json_file))

    try:
        if not fallback:
            run('pdal info --summary {0} > {1}'.format(input_point_cloud, json_file))
    except:
        fallback = True
        run('pdal info {0} > {1}'.format(input_point_cloud, json_file))

    bounds = {}
    with open(json_file, 'r') as f:
        result = json.loads(f.read())

        if not fallback:
            summary = result.get('summary')
            if summary is None: raise Exception("Cannot compute summary for %s (summary key missing)" % input_point_cloud)
            bounds = summary.get('bounds')
        else:
            stats = result.get('stats')
            if stats is None: raise Exception("Cannot compute bounds for %s (stats key missing)" % input_point_cloud)
            bbox = stats.get('bbox')
            if bbox is None: raise Exception("Cannot compute bounds for %s (bbox key missing)" % input_point_cloud)
            native = bbox.get('native')
            if native is None: raise Exception("Cannot compute bounds for %s (native key missing)" % input_point_cloud)
            bounds = native.get('bbox')

        if bounds is None: raise Exception("Cannot compute bounds for %s (bounds key missing)" % input_point_cloud)

        if bounds.get('maxx', None) is None or \
           bounds.get('minx', None) is None or \
           bounds.get('maxy', None) is None or \
           bounds.get('miny', None) is None or \
           bounds.get('maxz', None) is None or \
           bounds.get('minz', None) is None:
            raise Exception("Cannot compute bounds for %s (invalid keys) %s" % (input_point_cloud, str(bounds)))

    os.remove(json_file)
    return bounds

def merge(input_point_cloud_files, output_file, rerun=False):
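    """
    Merge multiple point cloud files into a single output using `lasmerge`,
    overwriting the output file if it already exists.
    """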
    num_files = len(input_point_cloud_files)
    if num_files == 0:
        log.ODM_WARNING("No input point cloud files to process")
        return

    if io.file_exists(output_file):
        log.ODM_WARNING("Removing previous point cloud: %s" % output_file)
        os.remove(output_file)

    kwargs = {
        'all_inputs': " ".join(map(quote, input_point_cloud_files)),
        'output': output_file
    }

    system.run('lasmerge -i {all_inputs} -o "{output}"'.format(**kwargs))

def fast_merge_ply(input_point_cloud_files, output_file):
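    """
    Quickly merge PLY files by copying the header of the first file (with an
    updated vertex count) and then appending the vertex data of every input
    as a raw byte stream. All inputs must share the same header layout.
    """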
    # Assumes that all input files share the same header/content format
    # as the merge is a naive byte stream copy

    num_files = len(input_point_cloud_files)
    if num_files == 0:
        log.ODM_WARNING("No input point cloud files to process")
        return

    if io.file_exists(output_file):
        log.ODM_WARNING("Removing previous point cloud: %s" % output_file)
        os.remove(output_file)

    vertex_count = sum([ply_info(pcf)['vertex_count'] for pcf in input_point_cloud_files])
    master_file = input_point_cloud_files[0]
    with open(output_file, "wb") as out:
        with open(master_file, "r", errors="ignore") as fhead:
            # Copy header
            line = fhead.readline()
            out.write(line.encode('utf8'))

            i = 0
            while line.strip().lower() != "end_header":
                line = fhead.readline()

                # Intercept element vertex field
                if line.lower().startswith("element vertex "):
                    out.write(("element vertex %s\n" % vertex_count).encode('utf8'))
                else:
                    out.write(line.encode('utf8'))

                i += 1
                if i > 100:
                    raise IOError("Cannot find end_header field. Invalid PLY?")

        for ipc in input_point_cloud_files:
            i = 0
            with open(ipc, "rb") as fin:
                # Skip header
                line = fin.readline()
                while line.strip().lower() != b"end_header":
                    line = fin.readline()
                    i += 1
                    if i > 100:
                        raise IOError("Cannot find end_header field. Invalid PLY?")

                # Write fields
                out.write(fin.read())

    return output_file

def merge_ply(input_point_cloud_files, output_file, dims=None):
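    """
    Merge point cloud files into a single PLY via `pdal merge`, optionally
    restricting the written dimensions to `dims`.
    """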
    num_files = len(input_point_cloud_files)
    if num_files == 0:
        log.ODM_WARNING("No input point cloud files to process")
        return

    cmd = [
        'pdal',
        'merge',
        '--writers.ply.sized_types=false',
        '--writers.ply.storage_mode="little endian"',
        ('--writers.ply.dims="%s"' % dims) if dims is not None else '',
        ' '.join(map(quote, input_point_cloud_files + [output_file])),
    ]

    system.run(' '.join(cmd))

def post_point_cloud_steps(args, tree):
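    """
    Generate optional point cloud outputs (CSV, LAS, Entwine Point Tile)
    from the georeferenced LAZ point cloud, depending on the flags set in
    `args`.
    """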
    # XYZ point cloud output
    if args.pc_csv:
        log.ODM_INFO("Creating geo-referenced CSV file (XYZ format)")

        system.run("pdal translate -i \"{}\" "
            "-o \"{}\" "
            "--writers.text.format=csv "
            "--writers.text.order=\"X,Y,Z\" "
            "--writers.text.keep_unspecified=false ".format(
                tree.odm_georeferencing_model_laz,
                tree.odm_georeferencing_xyz_file))

    # LAS point cloud output
    if args.pc_las:
        log.ODM_INFO("Creating geo-referenced LAS file")

        system.run("pdal translate -i \"{}\" "
            "-o \"{}\" ".format(
                tree.odm_georeferencing_model_laz,
                tree.odm_georeferencing_model_las))

    # EPT point cloud output
    if args.pc_ept:
        log.ODM_INFO("Creating geo-referenced Entwine Point Tile output")
        entwine.build([tree.odm_georeferencing_model_laz], tree.entwine_pointcloud, max_concurrency=args.max_concurrency, rerun=False)