Misc bug fixes, GCP class, cleanup

Former-commit-id: 8c7b2c2d05
pull/1161/head
Piero Toffanin 2019-05-01 15:11:26 -04:00
parent 7330687977
commit d214b94305
14 changed files with 119 additions and 733 deletions

77
opendm/gcp.py 100644
View file

@@ -0,0 +1,77 @@
import glob
import os
from opendm import log

class GCPFile:
    def __init__(self, gcp_path):
        self.gcp_path = gcp_path
        self.entries = []
        self.srs = ""
        self.read()

    def read(self):
        if self.exists():
            with open(self.gcp_path, 'r') as f:
                contents = f.read().strip()

            lines = map(str.strip, contents.split('\n'))
            if lines:
                self.srs = lines[0]  # SRS
                for line in lines[1:]:
                    if line != "" and line[0] != "#":
                        parts = line.split()
                        if len(parts) >= 6:
                            self.entries.append(line)
                        else:
                            log.ODM_WARNING("Malformed GCP line: %s" % line)

    def entries_dict(self):
        for entry in self.entries:
            parts = entry.split()
            x, y, z, px, py, filename = parts[:6]
            extras = " ".join(parts[6:])
            yield {
                'x': x,
                'y': y,
                'z': z,
                'px': px,
                'py': py,
                'filename': filename,
                'extras': extras
            }

    def entry_dict_to_s(self, entry):
        return "{x} {y} {z} {px} {py} {filename} {extras}".format(**entry).rstrip()

    def exists(self):
        return os.path.exists(self.gcp_path)
    def make_filtered_copy(self, gcp_file_output, images_dir, min_images=3):
        """
        Creates a new GCP file from an existing GCP file that includes
        only the points referencing images that exist in the images_dir directory.
        If fewer than min_images images are referenced, no GCP copy is created.
        :return gcp_file_output if successful, None if no output file was created.
        """
        if not self.exists() or not os.path.exists(images_dir):
            return None

        if os.path.exists(gcp_file_output):
            os.remove(gcp_file_output)

        files = map(os.path.basename, glob.glob(os.path.join(images_dir, "*")))

        output = [self.srs]
        files_found = 0

        for entry in self.entries_dict():
            if entry['filename'] in files:
                output.append(self.entry_dict_to_s(entry))
                files_found += 1

        if files_found >= min_images:
            with open(gcp_file_output, 'w') as f:
                f.write('\n'.join(output) + '\n')

            return gcp_file_output
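As parsed above, a GCP file starts with an SRS line (typically a proj4 string), followed by one `x y z px py filename [extras]` entry per line. A minimal usage sketch of the new class (file contents and paths below are made up):

    # Hypothetical gcp_list.txt:
    #   +proj=utm +zone=16 +ellps=WGS84 +datum=WGS84 +units=m +no_defs
    #   544256.7 5320919.9 5 3044 2622 IMG_0525.jpg
    #   544157.7 5320899.2 5 4193 1552 IMG_0585.jpg
    #   544033.4 5320876.0 5 1606 2763 IMG_0690.jpg
    from opendm.gcp import GCPFile

    gcp = GCPFile("gcp_list.txt")
    if gcp.exists():
        for entry in gcp.entries_dict():
            print("%s -> pixel (%s, %s)" % (entry['filename'], entry['px'], entry['py']))
        # Keep only points whose images exist in ./images; returns None
        # if fewer than min_images referenced images are found there.
        gcp.make_filtered_copy("gcp_filtered.txt", "images", min_images=3)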

View file

@@ -188,19 +188,17 @@ def get_submodel_argv(args, submodels_path, submodel_name):
     adding --orthophoto-cutline
     adding --dem-euclidean-map
     adding --skip-3dmodel (split-merge does not support 3D model merging)
+    removing --gcp (the GCP path if specified is always "gcp_list.txt")
     """
+    assure_always = ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel']
+    remove_always_2 = ['--split', '--rerun-from', '--rerun', '--gcp']
+    remove_always_1 = ['--rerun-all']
+
     argv = sys.argv
     result = [argv[0]]

     i = 1
-    project_path_found = False
-    project_name_added = False
-    orthophoto_cutline_found = False
-    dem_euclidean_map_found = False
-    skip_3dmodel_found = False
-    # TODO: what about GCP paths?
+    found_args = {}

     while i < len(argv):
         arg = argv[i]
@@ -210,55 +208,38 @@ def get_submodel_argv(args, submodels_path, submodel_name):
             # Project name?
             if arg == args.name:
                 result.append(submodel_name)
-                project_name_added = True
+                found_args['project_name'] = True
             else:
                 result.append(arg)
             i += 1
         elif arg == '--project-path':
             result.append(arg)
             result.append(submodels_path)
-            project_path_found = True
+            found_args[arg] = True
             i += 2
-        elif arg == '--orthophoto-cutline':
+        elif arg in assure_always:
             result.append(arg)
-            orthophoto_cutline_found = True
+            found_args[arg] = True
             i += 1
-        elif arg == '--dem-euclidean-map':
-            result.append(arg)
-            dem_euclidean_map_found = True
-            i += 1
-        elif arg == '--skip-3dmodel':
-            result.append(arg)
-            skip_3dmodel_found = True
-            i += 1
-        elif arg == '--split':
+        elif arg in remove_always_2:
             i += 2
-        elif arg == '--rerun-from':
-            i += 2
-        elif arg == '--rerun':
-            i += 2
-        elif arg == '--rerun-all':
+        elif arg in remove_always_1:
             i += 1
         else:
             result.append(arg)
             i += 1

-    if not project_path_found:
+    if not found_args.get('--project-path'):
         result.append('--project-path')
         result.append(submodels_path)

-    if not project_name_added:
+    if not found_args.get('project_name'):
         result.append(submodel_name)

-    if not orthophoto_cutline_found:
-        result.append("--orthophoto-cutline")
-    if not dem_euclidean_map_found:
-        result.append("--dem-euclidean-map")
-    if not skip_3dmodel_found:
-        result.append("--skip-3dmodel")
+    for arg in assure_always:
+        if not found_args.get(arg):
+            result.append(arg)

     return result
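To make the table-driven rewrite concrete, a sketch of the mapping (paths and names are made up; note the project name is only matched when it is the last argument, as the code above expects):

    # Hypothetical original invocation:
    #   ./run.py --project-path /datasets --split 200 --rerun-from dataset project
    # get_submodel_argv(args, "/datasets/project/submodels", "submodel_0000") returns:
    #   ['./run.py', '--project-path', '/datasets/project/submodels', 'submodel_0000',
    #    '--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel']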
@@ -267,6 +248,9 @@ def get_submodel_paths(submodels_path, *paths):
     :return Existing paths for all submodels
     """
     result = []
+    if not os.path.exists(submodels_path):
+        return result
+
     for f in os.listdir(submodels_path):
         if f.startswith('submodel'):
             p = os.path.join(submodels_path, f, *paths)
@@ -287,6 +271,9 @@ def get_all_submodel_paths(submodels_path, *all_paths):
         ["path/submodel_0001/odm_orthophoto.tif", "path/submodel_0001/dem.tif"]]
     """
     result = []
+    if not os.path.exists(submodels_path):
+        return result
+
     for f in os.listdir(submodels_path):
         if f.startswith('submodel'):
             all_found = True
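The two guards added above change failure behavior rather than results; illustratively:

    # get_submodel_paths("/data/project/submodels", "opensfm", "reconstruction.json")
    # -> ["/data/project/submodels/submodel_0000/opensfm/reconstruction.json", ...]
    # If the submodels folder is missing, both helpers now return []
    # instead of raising OSError from os.listdir.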

Binary file not shown.

View file

@@ -1 +0,0 @@
<HTML><HEAD><TITLE> Web Authentication Redirect</TITLE><META http-equiv="Cache-control" content="no-cache"><META http-equiv="Pragma" content="no-cache"><META http-equiv="Expires" content="-1"><META http-equiv="refresh" content="1; URL=http://1.1.1.1/login.html?redirect=google.com/"></HEAD></HTML>

View file

@@ -1,98 +0,0 @@
# Split and merge pipeline for large-scale reconstructions

Large datasets can be slow to process. An option to speed up the reconstruction process is to split them into smaller datasets. We will call each of the small datasets a *submodel*. Smaller datasets run faster because they involve fewer images on each bundle adjustment iteration. Additionally, the reconstruction of the different submodels can be done in parallel.

Since the reconstructions of the submodels are done independently, they will not necessarily be aligned with each other. Only the GPS positions of the images and the ground control points will determine the alignment. When the neighboring reconstructions share cameras or points, it is possible to enforce the alignment of common cameras and points between the different reconstructions.

Here, we describe the OpenDroneMap pipeline for splitting a large dataset and aligning the resulting submodels. The pipeline uses the OpenSfM commands documented [here](http://opensfm.readthedocs.io/en/latest/large.html) and combines them with the rest of the ODM pipeline.

The main workflow is as follows:

- Initial setup
- Run feature detection and matching
- Splitting the dataset
- Running SfM reconstruction on each submodel
- Aligning the reconstructions
- Run dense matching and the rest of the ODM pipeline for each of the aligned reconstructions

The script `run_all.sh` runs all the steps, but it is also possible to run them one by one. In the following we describe what each command does.
## Initial setup

The `setup.py` command initializes the dataset and writes the config file for OpenSfM. The command accepts command line parameters to configure the process.

A first group of parameters are equivalent to the standard ODM parameters and configure the feature extraction and matching: `--resize-to`, `--min-num-features`, `--num-cores`, `--matcher-neighbors`.

A second group of parameters controls the size and overlap of the submodels. They are equivalent to the [OpenSfM parameters](http://opensfm.readthedocs.io/en/latest/large.html#config-parameters) with the same name.

- `submodel_size`: Average number of images per submodel. When splitting a large dataset into smaller submodels, images are grouped into clusters. This value regulates the number of images that each cluster should have on average. The splitting is done via K-means clustering with `k` set to the number of images divided by `submodel_size` (see the sketch after this list).
- `submodel_overlap`: Radius of the overlapping region between submodels, in meters. To be able to align the different submodels, there need to be some common images between neighboring submodels. Any image that is closer to a cluster than `submodel_overlap` is added to that cluster.
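The clustering itself happens inside OpenSfM's `create_submodels`; the sketch below is only an illustration of what the two parameters control, not the actual implementation (it approximates "closer to a cluster" by distance to the cluster center):

    import numpy as np
    from scipy.cluster.vq import kmeans, vq

    def group_images(positions, submodel_size, submodel_overlap):
        # positions: (n, 2) array of image positions in meters
        k = max(1, int(round(len(positions) / float(submodel_size))))
        centers, _ = kmeans(positions.astype(float), k)
        labels, _ = vq(positions.astype(float), centers)
        clusters = [set(np.where(labels == i)[0]) for i in range(len(centers))]
        # Overlap: also add any image within submodel_overlap meters
        # of a cluster center, so neighboring submodels share images.
        for i, center in enumerate(centers):
            dists = np.linalg.norm(positions - center, axis=1)
            clusters[i] |= set(np.where(dists <= submodel_overlap)[0])
        return clusters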
Finally, if you already know how you want to split the dataset, you can provide that information and it will be used instead of the clustering algorithm.

The grouping can be provided by adding a file named `image_groups.txt` in the main dataset folder. The file should have one line per image. Each line should have two words: first the name of the image and second the name of the group it belongs to. For example:

    01.jpg A
    02.jpg A
    03.jpg B
    04.jpg B
    05.jpg C
## Run feature detection and matching
The `run_matching.py` command runs feature extraction and matching for all images in the dataset. These are reused for each submodel.
## Splitting the dataset
The `split.py` command, runs OpenSfM's `create_submodels` command to split the dataset into submodels. It uses the parameters in `dataset/OpenSfM/config.yaml`, which are set by the `setup.py` described above.
The submodels are created with the following directory structure
    dataset/
    |-- image_groups.txt
    |-- images/
    |-- opensfm/
    |   |-- camera_models.json
    |   |-- config.yaml
    |   |-- image_list.txt
    |   |-- exif/
    |   |-- features/
    |   |-- matches/
    |   |-- image_groups.txt -> ../image_groups.txt
    |   |-- profile.log
    |   `-- reference_lla.json
    `-- submodels/
        |-- opensfm/
        |   |-- clusters.npz
        |   |-- clusters_with_neighbors.geojson
        |   |-- clusters_with_neighbors.npz
        |   `-- image_list_with_gps.tsv
        |-- submodel_0000/
        |   |-- images/
        |   |-- opensfm/
        |   |   |-- config.yaml
        |   |   |-- image_list.txt
        |   |   |-- camera_models.json -> ../../../opensfm/camera_models.json
        |   |   |-- exif -> ../../../opensfm/exif
        |   |   |-- features -> ../../../opensfm/features
        |   |   |-- matches -> ../../../opensfm/matches
        |   |   `-- reference_lla.json -> ../../../opensfm/reference_lla.json
        |   |-- odm_meshing/
        |   |-- odm_texturing/
        |   |-- odm_georeferencing/
        |   `-- odm_orthophoto/
        |-- submodel_0001/
        |   `-- ...
        `-- ...
## Running SfM reconstruction on each submodel

The `run_reconstructions.py` command will create an SfM reconstruction for each submodel. This only creates the sparse reconstructions, which are stored in the file `opensfm/reconstruction.json` in each submodel folder.

It will run multiple reconstructions in parallel, with the number of processes specified by the `--num-cores` option in the setup.

## Aligning the reconstructions

Once all submodels have been reconstructed, the `align.py` command will improve their alignment with each other. The result is a sparse reconstruction stored in `opensfm/reconstruction.aligned.json` in each submodel folder.

## Run dense matching, meshing and texturing

Now that each submodel has a sparse reconstruction and they are all aligned, the rest of the ODM pipeline can be run normally. The command `run_dense.py` will run dense matching, meshing and texturing for each submodel independently.

View file

@@ -1,32 +0,0 @@
#!/usr/bin/env python
import argparse
import logging
import os
import subprocess

from opendm import context

logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    level=logging.INFO)

def run_command(args):
    result = subprocess.Popen(args).wait()
    if result != 0:
        logger.error("The command '{}' exited with return value {}".format(
            ' '.join(args), result))

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Align metadataset submodels')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')
    args = parser.parse_args()

    command = os.path.join(context.opensfm_path, 'bin', 'opensfm')
    path = os.path.join(args.dataset, 'opensfm')

    run_command([command, 'align_submodels', path])

View file

@@ -1,152 +0,0 @@
from opendm import io
from opendm import log
from opendm import system
import argparse
from functools import partial
import os
from opensfm.large import metadataset
from scipy.spatial import Voronoi
from shapely.geometry import shape, LineString, Point
import shapely.ops
import numpy as np
import json
import pyproj

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Merge metadataset submodels')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')
    parser.add_argument('--overwrite', '-o',
                        action='store_true',
                        default=False,
                        help='Force overwrite of generated files')
    args = parser.parse_args()

    submodels_path = io.join_paths(args.dataset, 'submodels')
    sfm_path = io.join_paths(args.dataset, 'opensfm')

    meta_data = metadataset.MetaDataSet(sfm_path)
    data = metadataset.DataSet(sfm_path)
    voronoi_file = io.join_paths(meta_data.data_path, 'voronoi.geojson')
    proj_path = io.join_paths(args.dataset, "odm_georeferencing/proj.txt")
    out_tif = io.join_paths(args.dataset, "merged.tif")
    addo_log = io.join_paths(args.dataset, "gdal_addo.log")

    bounds_files = {}
    for folder in os.listdir(io.join_paths(args.dataset, 'submodels')):
        if 'submodel' in folder:
            folder_number = '0' if folder.split('_')[1] == '0000' else folder.split('_')[1].lstrip('0')
            bounds_file = io.join_paths(submodels_path, folder +
                                        "/odm_georeferencing/odm_georeferenced_model.bounds.geojson")
            if io.file_exists(bounds_file):
                bounds_files[folder_number] = bounds_file

    # Do voronoi calcs
    # load clusters
    images, positions, labels, centers = meta_data.load_clusters()
    cluster_proj = pyproj.Proj(init='epsg:4326')
    with open(proj_path, 'r') as fr:
        transform_proj = pyproj.Proj(fr.read())

    # projection transformation
    project = partial(
        pyproj.transform,
        cluster_proj,
        transform_proj)

    # turn this into a list of points
    pos_transformed = [shapely.ops.transform(project, Point(x[1], x[0])) for x in positions]

    # back to ndarray
    positions = np.array([pos_transformed[x].coords[0] for x in range(len(pos_transformed))])
    clust = np.concatenate((images, labels, positions), 1)

    # Run voronoi on the whole cluster
    vor = Voronoi(clust[:, [2, 3]].astype(float))
    lines = [
        LineString(vor.vertices[line])
        for line in vor.ridge_vertices
        if -1 not in line
    ]

    # For each part, build a boundary polygon
    v_poly_dis_intersected = {}
    for subnum in np.unique(clust[:, 1]):
        submodel = clust[clust[:, 1] == subnum]
        polygons = []
        for poly in shapely.ops.polygonize(lines):
            for point in submodel:
                if poly.contains(Point(point[[2, 3]])):
                    polygons.append(poly)  # Todo: this is expensive
                    break

        # Dissolve list of polygons
        voronoi_polygons_dissolved = shapely.ops.unary_union(polygons)

        # intersect with bounds
        with open(bounds_files[subnum]) as f:
            # There should only be one polygon here
            bounds = shape(json.loads(f.read())['features'][0]['geometry'])

        v_poly_dis_intersected[subnum] = voronoi_polygons_dissolved.intersection(bounds)

    features = []
    for submodel in v_poly_dis_intersected:
        features.append({
            "type": "Feature",
            "geometry": shapely.geometry.mapping(v_poly_dis_intersected[submodel]),
            "properties": {
                "submodel": int(submodel)
            }
        })

    polygons_layer = {
        "type": "FeatureCollection",
        "features": features,
        "crs": {"type": "name", "properties": {"name": transform_proj.srs, "type": "proj4"}}
    }

    with open(voronoi_file, "w") as f:
        json.dump(polygons_layer, f)

    ortho_tifs = {}
    for folder in os.listdir(io.join_paths(args.dataset, 'submodels')):
        if 'submodel' in folder:
            folder_number = folder.split('_')[1]  # string extract number
            tif_file = io.join_paths(submodels_path, folder + "/odm_orthophoto/odm_orthophoto.tif")
            if io.file_exists(tif_file):
                ortho_tifs[folder_number] = tif_file

    kwargs = {
        'f_out': out_tif,
        'files': ' '.join(ortho_tifs.values()),
        'clusters': voronoi_file
    }

    if io.file_exists(kwargs['f_out']) and not args.overwrite:
        log.ODM_ERROR("File {f_out} exists, use --overwrite to force overwrite of file.".format(**kwargs))
    else:
        # use bounds as cutlines (blending)
        system.run('gdal_merge.py -o {f_out} '
                   '-createonly '
                   '-co "BIGTIFF=YES" '
                   '-co "BLOCKXSIZE=512" '
                   '-co "BLOCKYSIZE=512" {files}'.format(**kwargs)
                   )

        for tif in ortho_tifs:
            kwargs['name'] = '0' if tif == '0000' else tif.lstrip('0')  # is tif a tuple?
            kwargs['file'] = ortho_tifs[tif]
            system.run('gdalwarp -cutline {clusters} '
                       '-cwhere "submodel = \'{name}\'" '
                       '-r lanczos -multi -wo NUM_THREADS=ALL_CPUS '
                       ' {file} {f_out}'.format(**kwargs)
                       )

        log.ODM_INFO("Building Overviews")
        kwargs = {
            'orthophoto': out_tif,
            'log': addo_log
        }

        # Run gdaladdo
        system.run('gdaladdo -ro -r average '
                   '--config BIGTIFF_OVERVIEW IF_SAFER '
                   '--config COMPRESS_OVERVIEW JPEG '
                   '{orthophoto} 2 4 8 16 > {log}'.format(**kwargs))
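For reference, each pass of the gdalwarp loop above expands to a command of this shape (paths and submodel number illustrative):

    gdalwarp -cutline .../opensfm/voronoi.geojson \
             -cwhere "submodel = '3'" \
             -r lanczos -multi -wo NUM_THREADS=ALL_CPUS \
             .../submodel_0003/odm_orthophoto/odm_orthophoto.tif merged.tif

so each submodel's orthophoto is burned into `merged.tif` only inside its Voronoi cell.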

View file

@@ -1,17 +0,0 @@
#!/usr/bin/env bash
RUNPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/../..
export PYTHONPATH=$RUNPATH:$RUNPATH/SuperBuild/install/lib/python2.7/dist-packages:$RUNPATH/SuperBuild/src/opensfm:$PYTHONPATH
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$RUNPATH/SuperBuild/install/lib
set -e
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
python $DIR/setup.py "$@"
python $DIR/run_matching.py $1
python $DIR/split.py $1
python $DIR/run_reconstructions.py $1
python $DIR/align.py $1
python $DIR/run_dense.py $1
python $DIR/merge.py $1
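Because `setup.py` receives `"$@"` while the later steps receive only `$1`, the dataset path must be the first argument; a typical invocation might look like this (options illustrative):

    ./run_all.sh /data/mydataset --resize-to 2400 --num-cores 8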

View file

@@ -1,83 +0,0 @@
#!/usr/bin/env python
import argparse
import logging
import multiprocessing
import os
import subprocess

from opensfm.large import metadataset
from opendm import context

logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    level=logging.INFO)

def run_command(args):
    result = subprocess.Popen(args).wait()
    if result != 0:
        logger.error("The command '{}' exited with return value {}".format(
            ' '.join(args), result))

class DenseReconstructor:
    def __init__(self, command):
        self.command = command

    def __call__(self, opensfm_submodel_path):
        submodel_path = os.path.dirname(opensfm_submodel_path.rstrip('/'))

        logger.info("=======================================================")
        logger.info("Dense reconstruction submodel {}".format(submodel_path))
        logger.info("=======================================================")

        # Rename reconstruction.aligned.json
        unaligned = os.path.join(opensfm_submodel_path, 'reconstruction.unaligned.json')
        aligned = os.path.join(opensfm_submodel_path, 'reconstruction.aligned.json')
        main = os.path.join(opensfm_submodel_path, 'reconstruction.json')

        if not os.path.isfile(aligned):
            logger.warning("No SfM reconstruction for submodel {}."
                           " Skipping submodel.".format(submodel_path))
            return

        if not os.path.isfile(unaligned):
            os.rename(main, unaligned)
        if not os.path.islink(main):
            os.symlink(aligned, main)

        path, name = os.path.split(submodel_path)
        run_command(['python',
                     self.command,
                     '--project-path', path,
                     name,
                     '--start-with', 'opensfm'])

        logger.info("=======================================================")
        logger.info("Submodel {} reconstructed".format(submodel_path))
        logger.info("=======================================================")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Reconstruct all submodels')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')
    args = parser.parse_args()

    path = os.path.join(args.dataset, 'opensfm')
    meta_data = metadataset.MetaDataSet(path)
    command = os.path.join(context.root_path, 'run.py')

    submodel_paths = meta_data.get_submodel_paths()
    reconstructor = DenseReconstructor(command)

    processes = 1
    if processes == 1:
        for submodel_path in submodel_paths:
            reconstructor(submodel_path)
    else:
        p = multiprocessing.Pool(processes)
        p.map(reconstructor, submodel_paths)
View file

@@ -1,34 +0,0 @@
#!/usr/bin/env python
import argparse
import logging
import os
import subprocess

from opendm import context

logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    level=logging.INFO)

def run_command(args):
    result = subprocess.Popen(args).wait()
    if result != 0:
        logger.error("The command '{}' exited with return value {}".format(
            ' '.join(args), result))

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run matching on a metadataset')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')
    args = parser.parse_args()

    command = os.path.join(context.opensfm_path, 'bin', 'opensfm')
    path = os.path.join(args.dataset, 'opensfm')

    run_command([command, 'extract_metadata', path])
    run_command([command, 'detect_features', path])
    run_command([command, 'match_features', path])

View file

@@ -1,79 +0,0 @@
#!/usr/bin/env python
import argparse
import logging
import multiprocessing
import os
import subprocess

from opensfm.large import metadataset
from opendm import context

logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    level=logging.INFO)

def run_command(args):
    result = subprocess.Popen(args).wait()
    if result != 0:
        logger.error("The command '{}' exited with return value {}".format(
            ' '.join(args), result))

class Reconstructor:
    def __init__(self, command, run_matching):
        self.command = command
        self.run_matching = run_matching

    def __call__(self, submodel_path):
        logger.info("=======================================================")
        logger.info("Reconstructing submodel {}".format(submodel_path))
        logger.info("=======================================================")

        if self.run_matching:
            run_command([self.command, 'extract_metadata', submodel_path])
            run_command([self.command, 'detect_features', submodel_path])
            run_command([self.command, 'match_features', submodel_path])
            self._set_matching_done(submodel_path)

        run_command([self.command, 'create_tracks', submodel_path])
        run_command([self.command, 'reconstruct', submodel_path])

        logger.info("=======================================================")
        logger.info("Submodel {} reconstructed".format(submodel_path))
        logger.info("=======================================================")

    def _set_matching_done(self, submodel_path):
        """Tell ODM's opensfm not to rerun matching."""
        matching_done_file = os.path.join(submodel_path, 'matching_done.txt')
        with open(matching_done_file, 'w') as fout:
            fout.write("Matching done!\n")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Reconstruct all submodels')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')
    parser.add_argument('--run-matching',
                        help='Run matching for each submodel',
                        action='store_true')
    args = parser.parse_args()

    path = os.path.join(args.dataset, 'opensfm')
    meta_data = metadataset.MetaDataSet(path)
    command = os.path.join(context.opensfm_path, 'bin', 'opensfm')

    submodel_paths = meta_data.get_submodel_paths()
    reconstructor = Reconstructor(command, args.run_matching)

    processes = meta_data.config['processes']
    if processes == 1:
        for submodel_path in submodel_paths:
            reconstructor(submodel_path)
    else:
        p = multiprocessing.Pool(processes)
        p.map(reconstructor, submodel_paths)

View file

@@ -1,162 +0,0 @@
#!/usr/bin/env python
"""Setup an ODM metadataset.

A metadataset will be split into multiple submodel folders.
Each submodel is reconstructed independently. Before dense
reconstruction the different submodels are aligned to each
other.
"""

import argparse
import os
import logging
import subprocess

import yaml

from opensfm.io import mkdir_p
from opendm import context

logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    level=logging.INFO)

def run_command(args):
    result = subprocess.Popen(args).wait()
    if result != 0:
        logger.error("The command '{}' exited with return value {}".format(
            ' '.join(args), result))

def resize_images(data_path, args):
    command = os.path.join(context.root_path, 'run.py')
    path, name = os.path.split(data_path.rstrip('/'))
    run_command(['python',
                 command,
                 '--project-path', path,
                 name,
                 '--resize-to', str(args.resize_to),
                 '--end-with', 'dataset',
                 ])

def is_image_file(filename):
    extensions = {'jpg', 'jpeg', 'png', 'tif', 'tiff', 'pgm', 'pnm', 'gif'}
    return filename.split('.')[-1].lower() in extensions

def create_image_list(image_path, opensfm_path):
    image_files = filter(is_image_file, os.listdir(image_path))

    lines = []
    relpath = os.path.relpath(image_path, opensfm_path)
    for image in image_files:
        lines.append(os.path.join(relpath, image))

    with open(os.path.join(opensfm_path, 'image_list.txt'), 'w') as fout:
        fout.write("\n".join(lines))

def create_config(opensfm_path, args):
    config = {
        "submodels_relpath": "../submodels/opensfm",
        "submodel_relpath_template": "../submodels/submodel_%04d/opensfm",
        "submodel_images_relpath_template": "../submodels/submodel_%04d/images",
        "submodel_size": args.submodel_size,
        "submodel_overlap": args.submodel_overlap,
        "feature_process_size": args.resize_to,
        "feature_min_frames": args.min_num_features,
        "processes": args.num_cores,
        "matching_gps_neighbors": args.matcher_neighbors,
    }
    with open(os.path.join(opensfm_path, 'config.yaml'), 'w') as fout:
        yaml.dump(config, fout, default_flow_style=False)

def link_image_groups(data_path, opensfm_path):
    src = os.path.join(data_path, 'image_groups.txt')
    dst = os.path.join(opensfm_path, 'image_groups.txt')
    if os.path.isfile(src) and not os.path.isfile(dst):
        os.symlink(src, dst)

def parse_command_line():
    parser = argparse.ArgumentParser(description='Setup an ODM metadataset')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')
    # TODO(pau): reduce redundancy with OpenDroneMap/opendm/config.py
    parser.add_argument('--resize-to',  # currently doesn't support 'orig'
                        metavar='<integer>',
                        default=2400,
                        type=int,
                        help='resizes images by the largest side')
    parser.add_argument('--min-num-features',
                        metavar='<integer>',
                        default=4000,
                        type=int,
                        help=('Minimum number of features to extract per image. '
                              'More features lead to better results but slower '
                              'execution. Default: %(default)s'))
    parser.add_argument('--num-cores',
                        metavar='<positive integer>',
                        default=4,
                        type=int,
                        help=('The maximum number of cores to use. '
                              'Default: %(default)s'))
    parser.add_argument('--matcher-neighbors',
                        type=int,
                        metavar='<integer>',
                        default=8,
                        help='Number of nearest images to pre-match based on GPS '
                             'exif data. Set to 0 to skip pre-matching. '
                             'Neighbors works together with Distance parameter, '
                             'set both to 0 to not use pre-matching. OpenSfM '
                             'uses both parameters at the same time, Bundler '
                             'uses only the one which has a value, preferring the '
                             'Neighbors parameter. Default: %(default)s')
    parser.add_argument('--submodel-size',
                        type=int,
                        default=80,
                        help='Average number of images per submodel. When '
                             'splitting a large dataset into smaller '
                             'submodels, images are grouped into clusters. '
                             'This value regulates the number of images that '
                             'each cluster should have on average.')
    parser.add_argument('--submodel-overlap',
                        type=float,
                        metavar='<positive integer>',
                        default=150,
                        help='Radius of the overlap between submodels. '
                             'After grouping images into clusters, images '
                             'that are closer than this radius to a cluster '
                             'are added to the cluster. This is done to ensure '
                             'that neighboring submodels overlap.')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_command_line()
    data_path = args.dataset

    resize_images(data_path, args)

    image_path = os.path.join(data_path, 'images')
    opensfm_path = os.path.join(data_path, 'opensfm')

    mkdir_p(opensfm_path)
    create_image_list(image_path, opensfm_path)
    create_config(opensfm_path, args)
    link_image_groups(data_path, opensfm_path)
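With the defaults above, the `config.yaml` written by `create_config` would come out roughly as follows (keys sorted by `yaml.dump`):

    feature_min_frames: 4000
    feature_process_size: 2400
    matching_gps_neighbors: 8
    processes: 4
    submodel_images_relpath_template: ../submodels/submodel_%04d/images
    submodel_overlap: 150.0
    submodel_relpath_template: ../submodels/submodel_%04d/opensfm
    submodel_size: 80
    submodels_relpath: ../submodels/opensfm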

View file

@@ -1,32 +0,0 @@
#!/usr/bin/env python
import argparse
import logging
import os
import subprocess

from opendm import context

logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    level=logging.INFO)

def run_command(args):
    result = subprocess.Popen(args).wait()
    if result != 0:
        logger.error("The command '{}' exited with return value {}".format(
            ' '.join(args), result))

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Split metadataset into submodels')
    parser.add_argument('dataset',
                        help='path to the dataset to be processed')
    args = parser.parse_args()

    command = os.path.join(context.opensfm_path, 'bin', 'opensfm')
    path = os.path.join(args.dataset, 'opensfm')

    run_command([command, 'create_submodels', path])

View file

@@ -50,21 +50,29 @@ class ODMSplitStage(types.ODM_Stage):
         else:
             log.ODM_WARNING("Submodels directory already exists at: %s" % tree.submodels_path)

-        # TODO: on a network workflow we probably stop here
-        # and let NodeODM take over
-        # exit(0)

         # Find paths of all submodels
         mds = metadataset.MetaDataSet(tree.opensfm)
         submodel_paths = [os.path.abspath(p) for p in mds.get_submodel_paths()]

         # Make sure the image list file has absolute paths
         for sp in submodel_paths:
-            OSFMContext(sp).set_image_list_absolute()
+            sp_octx = OSFMContext(sp)
+            sp_octx.set_image_list_absolute()
+
+            # Copy GCP file if needed
+            # One in OpenSfM's directory, one in the project directory
+            if tree.odm_georeferencing_gcp:
+                log.ODM_DEBUG("Copying GCP file to %s" % os.path.basename(os.path.abspath(sp_octx.path(".."))))
+                io.copy(tree.odm_georeferencing_gcp, os.path.abspath(sp_octx.path("..", "gcp_list.txt")))
+                io.copy(tree.odm_georeferencing_gcp, os.path.abspath(sp_octx.path("gcp_list.txt")))

         # Reconstruct each submodel
         log.ODM_INFO("Dataset has been split into %s submodels. Reconstructing each submodel..." % len(submodel_paths))

+        # TODO: on a network workflow we probably stop here
+        # and let NodeODM take over
+        # exit(0)
+
         for sp in submodel_paths:
             log.ODM_INFO("Reconstructing %s" % sp)
             OSFMContext(sp).reconstruct(self.rerun())

@@ -132,6 +140,10 @@ class ODMMergeStage(types.ODM_Stage):
         reconstruction = outputs['reconstruction']

         if outputs['large']:
+            if not os.path.exists(tree.submodels_path):
+                log.ODM_ERROR("We reached the merge stage, but the %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problems before restarting." % tree.submodels_path)
+                exit(1)
+
             # Merge point clouds
             if args.merge in ['all', 'pointcloud']:
                 if not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun():

@@ -185,7 +197,7 @@ class ODMMergeStage(types.ODM_Stage):
                             '-co "BIGTIFF=YES" '
                             '-co "BLOCKXSIZE=512" '
                             '-co "BLOCKYSIZE=512" '
-                            '--config GDAL_CACHEMAX {max_memory}%'
+                            '--config GDAL_CACHEMAX {max_memory}% '
                             '{input_files} '.format(**kwargs)
                            )
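The one-character hunk above fixes an implicit string concatenation bug: adjacent Python string literals are joined at compile time, so without the trailing space the cache flag fused with the input file list. Illustratively:

    # before: ... --config GDAL_CACHEMAX 80%submodel_0000/odm_orthophoto.tif ...
    # after:  ... --config GDAL_CACHEMAX 80% submodel_0000/odm_orthophoto.tif ...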