pull/1587/head
Piero Toffanin 2023-01-25 11:00:02 -05:00
commit 53ec818f6d
68 zmienionych plików z 2078 dodań i 353 usunięć

Wyświetl plik

@ -26,11 +26,6 @@ jobs:
uses: diddlesnaps/snapcraft-multiarch-action@v1
with:
architecture: ${{ matrix.architecture }}
- name: Review
uses: diddlesnaps/snapcraft-review-tools-action@v1
with:
snap: ${{ steps.build.outputs.snap }}
isClassic: 'false'
- name: Publish unstable builds to Edge
if: github.ref == 'refs/heads/master'
uses: snapcore/action-publish@v1

Wyświetl plik

@ -22,6 +22,10 @@ jobs:
id: cuda-toolkit
with:
cuda: '11.4.0'
- name: Setup cmake
uses: jwlawson/actions-setup-cmake@v1.13
with:
cmake-version: '3.24.x'
- name: Extract code signing cert
id: code_sign
uses: timheuer/base64-to-file@v1
@ -49,7 +53,7 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: dist\*.exe
file: dist/*.exe
file_glob: true
tag: ${{ github.ref }}
overwrite: true

Wyświetl plik

@ -58,6 +58,10 @@ jobs:
with:
python-version: '3.8.1'
architecture: 'x64'
- name: Setup cmake
uses: jwlawson/actions-setup-cmake@v1.13
with:
cmake-version: '3.24.x'
- name: Setup Visual C++
uses: ilammy/msvc-dev-cmd@v1
with:

Wyświetl plik

@ -29,7 +29,8 @@ FROM ubuntu:21.04
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \
PDAL_DRIVER_PATH="/code/SuperBuild/install/bin"
WORKDIR /code

Wyświetl plik

@ -88,7 +88,7 @@ run C:\Users\youruser\datasets\project [--additional --parameters --here]
ODM is now available as a Snap Package from the Snap Store. To install you may use the Snap Store (available itself as a Snap Package) or the command line:
```bash
sudo snap install opendronemap
sudo snap install --edge opendronemap
```
To run, you will need a terminal window into which you can type:
@ -259,6 +259,10 @@ Experimental flags need to be enabled in Docker to use the ```--squash``` flag.
After this, you must restart docker.
## Video Support
Starting from version 3.0.4, ODM can automatically extract images from video files (.mp4 or .mov). Just place one or more video files into the `images` folder and run the program as usual. Subtitle files (.srt) with GPS information are also supported. Place .srt files in the `images` folder, making sure that the filenames match. For example, `my_video.mp4` ==> `my_video.srt` (case-sensitive).
## Developers
Help improve our software! We welcome contributions from everyone, whether to add new features, improve speed, fix existing bugs or add support for more cameras. Check our [code of conduct](https://github.com/OpenDroneMap/documents/blob/master/CONDUCT.md), the [contributing guidelines](https://github.com/OpenDroneMap/documents/blob/master/CONTRIBUTING.md) and [how decisions are made](https://github.com/OpenDroneMap/documents/blob/master/GOVERNANCE.md#how-decisions-are-made).

Wyświetl plik

@ -159,6 +159,7 @@ SETUP_EXTERNAL_PROJECT(Hexer 1.4 ON)
set(custom_libs OpenSfM
LASzip
PDAL
PDALPython
Untwine
Entwine
MvsTexturing
@ -209,7 +210,7 @@ externalproject_add(poissonrecon
externalproject_add(dem2mesh
GIT_REPOSITORY https://github.com/OpenDroneMap/dem2mesh.git
GIT_TAG master
GIT_TAG 300
PREFIX ${SB_BINARY_DIR}/dem2mesh
SOURCE_DIR ${SB_SOURCE_DIR}/dem2mesh
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}

Wyświetl plik

@ -55,7 +55,7 @@ ExternalProject_Add(${_proj_name}
-DBUILD_opencv_photo=ON
-DBUILD_opencv_legacy=ON
-DBUILD_opencv_python3=ON
-DWITH_FFMPEG=OFF
-DWITH_FFMPEG=ON
-DWITH_CUDA=OFF
-DWITH_GTK=OFF
-DWITH_VTK=OFF

Wyświetl plik

@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS
GIT_TAG 292
GIT_TAG 301
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
@ -64,6 +64,7 @@ ExternalProject_Add(${_proj_name}
-DEIGEN3_INCLUDE_DIR=${SB_SOURCE_DIR}/eigen34/
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX=${SB_INSTALL_DIR}
-DOpenMVS_ENABLE_TESTS=OFF
-DOpenMVS_MAX_CUDA_COMPATIBILITY=ON
${GPU_CMAKE_ARGS}
${WIN32_CMAKE_ARGS}

Wyświetl plik

@ -25,7 +25,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
GIT_TAG 291
GIT_TAG 303
#--Update/Patch step----------
UPDATE_COMMAND git submodule update --init --recursive
#--Configure step-------------

Wyświetl plik

@ -16,7 +16,7 @@ ExternalProject_Add(${_proj_name}
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
URL https://github.com/PDAL/PDAL/archive/refs/tags/2.3RC1.zip
URL https://github.com/PDAL/PDAL/archive/refs/tags/2.4.3.zip
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
@ -60,3 +60,4 @@ ExternalProject_Add(${_proj_name}
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

Wyświetl plik

@ -0,0 +1,36 @@
# SuperBuild recipe for pdal-python: builds the PDAL Python bindings
# against the PDAL library installed by the sibling "pdal" external project.
set(_proj_name pdal-python)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

if (WIN32)
  # On Windows the Python interpreter and NumPy headers are not auto-detected;
  # point the bindings build at ODM's bundled Python environment.
  # NOTE(review): assumes PYTHON_EXE_PATH / PYTHON_HOME are set by the
  # enclosing SuperBuild — confirm against the top-level CMakeLists.
  set(PP_EXTRA_ARGS -DPYTHON3_EXECUTABLE=${PYTHON_EXE_PATH}
                    -DPython3_NumPy_INCLUDE_DIRS=${PYTHON_HOME}/lib/site-packages/numpy/core/include)
endif()

ExternalProject_Add(${_proj_name}
  # Must build after PDAL itself so its CMake package config exists.
  DEPENDS pdal
  PREFIX ${_SB_BINARY_DIR}
  TMP_DIR ${_SB_BINARY_DIR}/tmp
  STAMP_DIR ${_SB_BINARY_DIR}/stamp
  #--Download step--------------
  DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
  GIT_REPOSITORY https://github.com/OpenDroneMap/pdal-python
  GIT_TAG main
  #--Update/Patch step----------
  UPDATE_COMMAND ""
  #--Configure step-------------
  SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
  CMAKE_ARGS
    # Locate the PDAL CMake package installed by the "pdal" dependency.
    -DPDAL_DIR=${SB_INSTALL_DIR}/lib/cmake/PDAL
    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    # Install straight into the Python dist-packages tree shipped with ODM.
    -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}/lib/python3.8/dist-packages
    ${WIN32_CMAKE_ARGS}
    ${PP_EXTRA_ARGS}
  #--Build step-----------------
  BINARY_DIR ${_SB_BINARY_DIR}
  #--Install step---------------
  INSTALL_DIR ${SB_INSTALL_DIR}
  #--Output logging-------------
  LOG_DOWNLOAD OFF
  LOG_CONFIGURE OFF
  LOG_BUILD OFF
)

Wyświetl plik

@ -1 +1 @@
2.9.2
3.0.4

Wyświetl plik

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 APPF-ANU
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

Wyświetl plik

@ -0,0 +1,9 @@
# Semi-automated Masking for 360 images
For usage with 360 images and Open Drone Map (ODM) to mask out the tripod/user/camera mount. 360 models in ODM can be made from 360 images, but unless you mask out the camera mount there will be repeated artifacts along the camera path. ODM supports image masking but requires a mask for each image. Since the 360 camera is generally on a fixed mount (bike helmet, moving tripod, etc), you can make one mask and then duplicate this for all images, but this is tedious to do by hand.
This snippet takes the file path of a single image mask and duplicates it for all images in the dataset. After creating the masks, process the original images and the masks together in ODM and you'll get a clean model with the camera mount artifacts eliminated.
Before using this code snippet, open one of your 360 images in an image editor and mask out the helmet or tripod, etc at the bottom of your image. Save this image as a png and then use it as the mask image that will be duplicated for all images in the dataset.
See https://docs.opendronemap.org/masks/ for more details on mask creation.

Wyświetl plik

@ -0,0 +1,62 @@
#!/usr/bin/env python3
# Duplicate one hand-made mask image for every photo in a dataset, so ODM
# can mask out a fixed camera mount (tripod/helmet) in 360 captures.
#
# Usage:
#   python <script> <input folder> <output folder> <mask file> [-f]
import sys
import os
import PIL
from PIL import Image, ExifTags
import shutil
from tqdm import tqdm
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("file_dir", help="input folder of images")
parser.add_argument("output_dir", help="output folder to copy images to")
parser.add_argument("mask_file", help="filename or path to Mask file to be duplicated for all images")
parser.add_argument("-f", "--force", help="don't ask for confirmation", action="store_true")
args = parser.parse_args()

file_dir = args.file_dir
mask_file_path = args.mask_file
output_dir = args.output_dir
file_count = len(os.listdir(file_dir))

# Ask for interactive confirmation unless -f/--force was given.
if args.force is False:
    print("Input dir: " + str(file_dir))
    print("Output folder: " + str(output_dir) + " (" + str(file_count) + " files)")
    answer = input("Confirm processing [Y/N]: ")
    if answer.lower() != "y":
        sys.exit()

os.makedirs(output_dir, exist_ok=True)
no_exif_n = 0  # retained from original; never incremented below

# tqdm only adds a progress bar around the directory listing.
for filename in tqdm(os.listdir(file_dir)):
    src_path = mask_file_path
    base_name, extension = os.path.splitext(filename)
    try:
        # Probe that the mask is actually a readable image before copying.
        img = Image.open(src_path)
    except PIL.UnidentifiedImageError as img_err:
        # The mask could not be read as an image; report and keep going.
        sys.stderr.write(str(img_err) + "\n")
        continue
    dest_path = os.path.join(output_dir, base_name + "_mask" + extension)
    shutil.copy(src_path, dest_path)

print("Done!")

Wyświetl plik

@ -51,7 +51,6 @@ commands.create_dem(args.point_cloud,
outdir=outdir,
resolution=args.resolution,
decimation=1,
verbose=True,
max_workers=multiprocessing.cpu_count(),
keep_unfilled_copy=False
)

Wyświetl plik

@ -27,7 +27,8 @@ FROM nvidia/cuda:11.2.0-runtime-ubuntu20.04
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \
PDAL_DRIVER_PATH="/code/SuperBuild/install/bin"
WORKDIR /code

Wyświetl plik

@ -43,6 +43,7 @@ Source: "licenses\*"; DestDir: "{app}\licenses"; Flags: ignoreversion recursesub
Source: "opendm\*"; DestDir: "{app}\opendm"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "stages\*"; DestDir: "{app}\stages"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "SuperBuild\install\bin\*"; DestDir: "{app}\SuperBuild\install\bin"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "SuperBuild\install\lib\python3.8\*"; DestDir: "{app}\SuperBuild\install\lib\python3.8\*"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "venv\*"; DestDir: "{app}\venv"; Excludes: "__pycache__,pyvenv.cfg"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "python38\*"; DestDir: "{app}\python38"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "console.bat"; DestDir: "{app}"; Flags: ignoreversion

147
opendm/align.py 100644
Wyświetl plik

@ -0,0 +1,147 @@
import os
import shutil
import json
import codem
import dataclasses
import pdal
import numpy as np
import rasterio
from rasterio.crs import CRS
from opendm.utils import double_quote
from opendm import log
from opendm import io
from opendm import system
from opendm.concurrency import get_max_memory
def get_point_cloud_crs(file):
    """Return the horizontal CRS of a point cloud file as a string.

    Probes the file with a minimal single-stage PDAL pipeline; quickinfo
    reads header metadata without executing the pipeline.
    """
    info = pdal.Pipeline(json.dumps([file])).quickinfo
    # Collect metadata from reader stages (keys look like "readers.las").
    reader_entries = [v for k, v in info.items() if "readers" in k]
    # Assumes at least one reader entry with an "srs"/"horizontal" field
    # is present — TODO confirm for all supported input formats.
    return str(CRS.from_string(reader_entries[0]["srs"]["horizontal"]))
def get_raster_crs(file):
    """Return the CRS of a raster file as a string (via rasterio)."""
    # Open read-only just long enough to pull the CRS out of the header.
    with rasterio.open(file, 'r') as raster:
        crs = raster.crs
    return str(crs)
def reproject_point_cloud(file, out_srs):
    """Reproject a point cloud to out_srs; returns the temp output path.

    The caller is responsible for deleting the "_reprojected_tmp" file.
    """
    # Sibling temp filename: same directory/extension, different postfix.
    out_file = io.related_file_path(file, postfix="_reprojected_tmp")
    # reader -> reprojection filter -> writer; PDAL infers stage types
    # from the bare input/output paths.
    stages = [
        file,
        {"type": "filters.reprojection", "out_srs": out_srs},
        out_file,
    ]
    pdal.Pipeline(json.dumps(stages)).execute()
    return out_file
def reproject_raster(file, out_srs):
    """Reproject a raster to out_srs via gdalwarp; returns the temp output path.

    The caller is responsible for deleting the "_reprojected_tmp" file.
    """
    out_file = io.related_file_path(file, postfix="_reprojected_tmp")
    # Build the exact gdalwarp invocation; paths are double-quoted to
    # survive spaces, and GDAL's cache is capped at a percentage of RAM.
    command = 'gdalwarp -t_srs {} {} {} --config GDAL_CACHEMAX {}% '.format(
        out_srs,
        double_quote(file),
        double_quote(out_file),
        get_max_memory())
    system.run(command)
    return out_file
def compute_alignment_matrix(input_laz, align_file, stats_dir):
    """Register input_laz against a ground-truth file using codem.

    :param input_laz: path to the reconstruction point cloud (LAZ)
    :param align_file: path to the alignment reference — a GeoTIFF DEM
        (.tif) or a LAS/LAZ point cloud
    :param stats_dir: output folder for codem artifacts; wiped and
        recreated on every call; also receives registration.json
    :return: 4x4 numpy transformation matrix, or None when the alignment
        file extension is unsupported
    """
    # Start from a clean stats folder every run.
    if os.path.exists(stats_dir):
        shutil.rmtree(stats_dir)
    os.mkdir(stats_dir)

    # Check if we need to reproject align file
    input_crs = get_point_cloud_crs(input_laz)
    log.ODM_INFO("Input CRS: %s" % input_crs)

    # Pick the CRS reader / reprojection helper matching the file type.
    _, ext = os.path.splitext(align_file)
    repr_func = None
    if ext.lower() in [".tif"]:
        align_crs = get_raster_crs(align_file)
        repr_func = reproject_raster
    elif ext.lower() in [".las", ".laz"]:
        align_crs = get_point_cloud_crs(align_file)
        repr_func = reproject_point_cloud
    else:
        log.ODM_WARNING("Unsupported alignment file: %s" % align_file)
        return

    # Track temp files so the finally block can always clean them up.
    to_delete = []
    try:
        log.ODM_INFO("Align CRS: %s" % align_crs)
        if input_crs != align_crs:
            # Reprojection needed
            log.ODM_INFO("Reprojecting %s to %s" % (align_file, input_crs))
            align_file = repr_func(align_file, input_crs)
            to_delete.append(align_file)

        # codem pipeline: preprocess -> prep -> coarse -> fine -> apply.
        # NOTE(review): call order follows the codem API; presumably
        # coarse output feeds fine registration — confirm against codem docs.
        conf = dataclasses.asdict(codem.CodemRunConfig(align_file, input_laz, OUTPUT_DIR=stats_dir))
        fnd_obj, aoi_obj = codem.preprocess(conf)
        fnd_obj.prep()
        aoi_obj.prep()
        log.ODM_INFO("Aligning reconstruction to %s" % align_file)
        log.ODM_INFO("Coarse registration...")
        dsm_reg = codem.coarse_registration(fnd_obj, aoi_obj, conf)
        log.ODM_INFO("Fine registration...")
        icp_reg = codem.fine_registration(fnd_obj, aoi_obj, dsm_reg, conf)

        app_reg = codem.registration.ApplyRegistration(
            fnd_obj,
            aoi_obj,
            icp_reg.registration_parameters,
            icp_reg.residual_vectors,
            icp_reg.residual_origins,
            conf,
            None,
        )

        reg = app_reg.get_registration_transformation()

        # Write JSON to stats folder
        with open(os.path.join(stats_dir, "registration.json"), 'w') as f:
            # Drop the raw matrices from the report (kept separately below).
            del dsm_reg.registration_parameters['matrix']
            del icp_reg.registration_parameters['matrix']
            f.write(json.dumps({
                'coarse': dsm_reg.registration_parameters,
                'fine': icp_reg.registration_parameters,
            }, indent=4))

        # reg['matrix'] is a space-separated string of 16 floats.
        matrix = np.fromstring(reg['matrix'], dtype=float, sep=' ').reshape((4, 4))
        return matrix
    finally:
        # Remove any temporary reprojected files, even on failure.
        for f in to_delete:
            if os.path.isfile(f):
                os.unlink(f)
def transform_point_cloud(input_laz, a_matrix, output_laz):
    """Apply a 4x4 transformation matrix to a point cloud via PDAL."""
    # PDAL's transformation filter expects the matrix as a single
    # space-separated string of 16 values, row-major.
    matrix_str = " ".join(map(str, a_matrix.flatten()))
    stages = [
        input_laz,
        {
            'type': 'filters.transformation',
            'matrix': matrix_str,
        },
        output_laz,
    ]
    pdal.Pipeline(json.dumps(stages)).execute()
def transform_obj(input_obj, a_matrix, geo_offset, output_obj):
    """Apply a 4x4 transform to the vertices of a Wavefront OBJ file.

    Vertex lines ("v x y z") are shifted by the XY geo offset into
    absolute coordinates, transformed by a_matrix, shifted back, and
    rewritten; every other line is copied through unchanged.

    :param input_obj: path to the source .obj
    :param a_matrix: 4x4 numpy transformation matrix
    :param geo_offset: (x, y) georeferencing offset
    :param output_obj: path to write the transformed .obj
    """
    # Homogeneous offset: only X/Y are shifted, Z and w stay untouched.
    offset = np.array([geo_offset[0], geo_offset[1], 0, 0])
    with open(input_obj, 'r') as fin, open(output_obj, 'w') as fout:
        for line in fin:
            if not line.startswith("v "):
                # Normals, faces, comments, etc. pass through verbatim.
                fout.write(line)
                continue
            # Parse "v x y z" into the homogeneous vector [x, y, z, 1].
            vertex = np.fromstring(line.strip()[2:] + " 1", sep=' ', dtype=float)
            # Offset -> transform -> un-offset, then drop the w component.
            moved = (a_matrix.dot(vertex + offset) - offset)[:3]
            fout.write("v " + " ".join(map(str, list(moved))) + '\n')

Wyświetl plik

@ -83,15 +83,6 @@ def config(argv=None, parser=None):
nargs='?',
help='Name of dataset (i.e subfolder name within project folder). Default: %(default)s')
parser.add_argument('--resize-to',
metavar='<integer>',
action=StoreValue,
default=2048,
type=int,
help='Legacy option (use --feature-quality instead). Resizes images by the largest side for feature extraction purposes only. '
'Set to -1 to disable. This does not affect the final orthophoto '
'resolution quality and will not resize the original images. Default: %(default)s')
parser.add_argument('--end-with', '-e',
metavar='<string>',
action=StoreValue,
@ -212,15 +203,6 @@ def config(argv=None, parser=None):
'processes. Peak memory requirement is ~1GB per '
'thread and 2 megapixel image resolution. Default: %(default)s'))
parser.add_argument('--depthmap-resolution',
metavar='<positive float>',
action=StoreValue,
type=float,
default=640,
help=('Controls the density of the point cloud by setting the resolution of the depthmap images. Higher values take longer to compute '
'but produce denser point clouds. Overrides the value calculated by --pc-quality.'
'Default: %(default)s'))
parser.add_argument('--use-hybrid-bundle-adjustment',
action=StoreTrue,
nargs=0,
@ -407,6 +389,13 @@ def config(argv=None, parser=None):
help='Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. '
'Default: %(default)s')
parser.add_argument('--pc-skip-geometric',
action=StoreTrue,
nargs=0,
default=False,
help='Geometric estimates improve the accuracy of the point cloud by computing geometrically consistent depthmaps but may not be usable in larger datasets. This flag disables geometric estimates. '
'Default: %(default)s')
parser.add_argument('--pc-tile',
action=StoreTrue,
nargs=0,
@ -414,13 +403,6 @@ def config(argv=None, parser=None):
help='Reduce the memory usage needed for depthmap fusion by splitting large scenes into tiles. Turn this on if your machine doesn\'t have much RAM and/or you\'ve set --pc-quality to high or ultra. Experimental. '
'Default: %(default)s')
parser.add_argument('--pc-geometric',
action=StoreTrue,
nargs=0,
default=False,
help='Improve the accuracy of the point cloud by computing geometrically consistent depthmaps. This increases processing time, but can improve results in urban scenes. '
'Default: %(default)s')
parser.add_argument('--smrf-scalar',
metavar='<positive float>',
action=StoreValue,
@ -453,20 +435,6 @@ def config(argv=None, parser=None):
help='Simple Morphological Filter window radius parameter (meters). '
'Default: %(default)s')
parser.add_argument('--texturing-data-term',
metavar='<string>',
action=StoreValue,
default='gmi',
choices=['gmi', 'area'],
help=('When texturing the 3D mesh, for each triangle, choose to prioritize images with sharp features (gmi) or those that cover the largest area (area). Default: %(default)s'))
parser.add_argument('--texturing-outlier-removal-type',
metavar='<string>',
action=StoreValue,
default='gauss_clamping',
choices=['none', 'gauss_clamping', 'gauss_damping'],
help=('Type of photometric outlier removal method. Can be one of: %(choices)s. Default: %(default)s'))
parser.add_argument('--texturing-skip-global-seam-leveling',
action=StoreTrue,
nargs=0,
@ -486,14 +454,12 @@ def config(argv=None, parser=None):
help=('Keep faces in the mesh that are not seen in any camera. '
'Default: %(default)s'))
parser.add_argument('--texturing-tone-mapping',
metavar='<string>',
action=StoreValue,
choices=['none', 'gamma'],
default='none',
help='Turn on gamma tone mapping or none for no tone '
'mapping. Can be one of %(choices)s. '
'Default: %(default)s ')
parser.add_argument('--texturing-single-material',
action=StoreTrue,
nargs=0,
default=False,
help=('Generate OBJs that have a single material and a single texture file instead of multiple ones. '
'Default: %(default)s'))
parser.add_argument('--gcp',
metavar='<path string>',
@ -512,12 +478,20 @@ def config(argv=None, parser=None):
action=StoreValue,
default=None,
help=('Path to the image geolocation file containing the camera center coordinates used for georeferencing. '
'Note that omega/phi/kappa are currently not supported (you can set them to 0). '
'If you don''t have values for omega/phi/kappa you can set them to 0. '
'The file needs to '
'use the following format: \n'
'EPSG:<code> or <+proj definition>\n'
'image_name geo_x geo_y geo_z [omega (degrees)] [phi (degrees)] [kappa (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n'
'Default: %(default)s'))
parser.add_argument('--align',
metavar='<path string>',
action=StoreValue,
default=None,
help=('Path to a GeoTIFF DEM or a LAS/LAZ point cloud '
'that the reconstruction outputs should be automatically aligned to. Experimental. '
'Default: %(default)s'))
parser.add_argument('--use-exif',
action=StoreTrue,
@ -592,7 +566,7 @@ def config(argv=None, parser=None):
default=False,
help='Set this parameter if you want a striped GeoTIFF. '
'Default: %(default)s')
parser.add_argument('--orthophoto-png',
action=StoreTrue,
nargs=0,
@ -606,7 +580,6 @@ def config(argv=None, parser=None):
default=False,
help='Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. '
'Default: %(default)s')
parser.add_argument('--orthophoto-compression',
metavar='<string>',
@ -670,37 +643,30 @@ def config(argv=None, parser=None):
default=False,
help='Create Cloud-Optimized GeoTIFFs instead of normal GeoTIFFs. Default: %(default)s')
parser.add_argument('--verbose', '-v',
action=StoreTrue,
nargs=0,
default=False,
help='Print additional messages to the console. '
'Default: %(default)s')
parser.add_argument('--copy-to',
metavar='<path>',
action=StoreValue,
help='Copy output results to this folder after processing.')
parser.add_argument('--time',
action=StoreTrue,
nargs=0,
default=False,
help='Generates a benchmark file with runtime info. '
'Default: %(default)s')
parser.add_argument('--debug',
action=StoreTrue,
nargs=0,
default=False,
help='Print debug messages. Default: %(default)s')
parser.add_argument('--version',
action='version',
version='ODM {0}'.format(__version__),
help='Displays version number and exits. ')
parser.add_argument('--video-limit',
type=int,
action=StoreValue,
default=500,
metavar='<positive integer>',
help='Maximum number of frames to extract from video files for processing. Set to 0 for no limit. Default: %(default)s')
parser.add_argument('--video-resolution',
type=int,
action=StoreValue,
default=4000,
metavar='<positive integer>',
help='The maximum output resolution of extracted video frames in pixels. Default: %(default)s')
parser.add_argument('--split',
type=int,
action=StoreValue,
@ -811,7 +777,15 @@ def config(argv=None, parser=None):
'If the images have been postprocessed and are already aligned, use this option. '
'Default: %(default)s'))
args = parser.parse_args(argv)
args, unknown = parser.parse_known_args(argv)
DEPRECATED = ["--verbose", "--debug", "--time", "--resize-to", "--depthmap-resolution", "--pc-geometric", "--texturing-data-term", "--texturing-outlier-removal-type", "--texturing-tone-mapping"]
unknown_e = [p for p in unknown if p not in DEPRECATED]
if len(unknown_e) > 0:
raise parser.error("unrecognized arguments: %s" % " ".join(unknown_e))
for p in unknown:
if p in DEPRECATED:
log.ODM_WARNING("%s is no longer a valid argument and will be ignored!" % p)
# check that the project path setting has been set properly
if not args.project_path:

Wyświetl plik

@ -14,7 +14,7 @@ python_packages_paths = [os.path.join(superbuild_path, p) for p in [
'install/lib/python3.9/dist-packages',
'install/lib/python3.8/dist-packages',
'install/lib/python3/dist-packages',
'install/bin/opensfm'
'install/bin/opensfm',
]]
for p in python_packages_paths:
sys.path.append(p)
@ -41,6 +41,7 @@ settings_path = os.path.join(root_path, 'settings.yaml')
# Define supported image extensions
supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp'}
supported_video_extensions = {'.mp4', '.mov'}
# Define the number of cores
num_cores = multiprocessing.cpu_count()

Wyświetl plik

@ -35,11 +35,11 @@ except ModuleNotFoundError:
except:
pass
def classify(lasFile, scalar, slope, threshold, window, verbose=False):
def classify(lasFile, scalar, slope, threshold, window):
start = datetime.now()
try:
pdal.run_pdaltranslate_smrf(lasFile, lasFile, scalar, slope, threshold, window, verbose)
pdal.run_pdaltranslate_smrf(lasFile, lasFile, scalar, slope, threshold, window)
except:
log.ODM_WARNING("Error creating classified file %s" % lasFile)
@ -50,47 +50,23 @@ def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_poin
start = datetime.now()
try:
# Currently, no Python 2 lib that supports reading and writing LAZ, so we will do it manually until ODM is migrated to Python 3
# When migration is done, we can move to pylas and avoid using PDAL for conversion
tempLasFile = os.path.join(os.path.dirname(lasFile), 'tmp.las')
# Convert LAZ to LAS
cmd = [
'pdal',
'translate',
'-i %s' % lasFile,
'-o %s' % tempLasFile
]
system.run(' '.join(cmd))
log.ODM_INFO("Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(lasFile, reclassify_threshold, min_area, min_points))
run_rectification(
input=tempLasFile, output=tempLasFile, debug=debug, \
input=lasFile, output=lasFile, debug=debug, \
reclassify_plan='median', reclassify_threshold=reclassify_threshold, \
extend_plan='surrounding', extend_grid_distance=5, \
min_area=min_area, min_points=min_points)
# Convert LAS to LAZ
cmd = [
'pdal',
'translate',
'-i %s' % tempLasFile,
'-o %s' % lasFile
]
system.run(' '.join(cmd))
os.remove(tempLasFile)
log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
except Exception as e:
raise Exception("Error rectifying ground in file %s: %s" % (lasFile, str(e)))
log.ODM_WARNING("Error rectifying ground in file %s: %s" % (lasFile, str(e)))
log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
return lasFile
error = None
def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True,
outdir='', resolution=0.1, max_workers=1, max_tile_size=4096,
verbose=False, decimation=None, keep_unfilled_copy=False,
decimation=None, keep_unfilled_copy=False,
apply_smoothing=True):
""" Create DEM from multiple radii, and optionally gapfill """
@ -187,7 +163,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
d = pdal.json_add_decimation_filter(d, decimation)
pdal.json_add_readers(d, [input_point_cloud])
pdal.run_pipeline(d, verbose=verbose)
pdal.run_pipeline(d)
parallel_map(process_tile, tiles, max_workers)
@ -380,5 +356,4 @@ def window_filter_2d(arr, nodata, window, kernel_size, filter):
win_arr = filter(win_arr)
win_arr[nodata_locs] = nodata
win_arr = win_arr[window[0] - expanded_window[0] : window[2] - expanded_window[0], window[1] - expanded_window[1] : window[3] - expanded_window[1]]
log.ODM_DEBUG("Filtered window: %s" % str(window))
return win_arr

Wyświetl plik

@ -35,7 +35,7 @@ class DistanceDimension(Dimension):
return 'distance_to_ground'
def get_las_type(self):
return 10
return 'float64'
def __calculate_angle(self, model):
"Calculate the angle between the estimated plane and the XY plane"

Wyświetl plik

@ -20,4 +20,4 @@ class ExtendedDimension(Dimension):
return 'extended'
def get_las_type(self):
return 3
return 'uint16'

Wyświetl plik

@ -22,4 +22,4 @@ class PartitionDimension(Dimension):
return self.name
def get_las_type(self):
return 5
return 'uint32'

Wyświetl plik

@ -1,52 +1,39 @@
# TODO: Move to pylas when project migrates to python3
from laspy.file import File
from laspy.header import Header
import laspy
import numpy as np
from ..point_cloud import PointCloud
def read_cloud(point_cloud_path):
# Open point cloud and read its properties
las_file = File(point_cloud_path, mode='r')
header = (las_file.header.copy(), las_file.header.scale, las_file.header.offset,las_file.header.evlrs, las_file.header.vlrs)
[x_scale, y_scale, z_scale] = las_file.header.scale
[x_offset, y_offset, z_offset] = las_file.header.offset
las_file = laspy.read(point_cloud_path)
header = las_file.header
# Calculate the real coordinates
x = las_file.X * x_scale + x_offset
y = las_file.Y * y_scale + y_offset
z = las_file.Z * z_scale + z_offset
x = las_file.x.scaled_array()
y = las_file.y.scaled_array()
z = las_file.z.scaled_array()
cloud = PointCloud.with_dimensions(x, y, z, las_file.Classification, las_file.red, las_file.green, las_file.blue)
# Close the file
las_file.close()
cloud = PointCloud.with_dimensions(x, y, z, las_file.classification.array, las_file.red, las_file.green, las_file.blue)
# Return the result
return header, cloud
def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimensions=False):
(h, scale, offset, evlrs, vlrs) = header
# Open output file
output_las_file = File(output_point_cloud_path, mode='w', header=h, evlrs=evlrs, vlrs=vlrs)
output_las_file = laspy.LasData(header)
if write_extra_dimensions:
# Create new dimensions
for name, dimension in point_cloud.extra_dimensions_metadata.items():
output_las_file.define_new_dimension(name=name, data_type=dimension.get_las_type(), description="Dimension added by Ground Extend")
extra_dims = [laspy.ExtraBytesParams(name=name, type=dimension.get_las_type(), description="Dimension added by Ground Extend") for name, dimension in point_cloud.extra_dimensions_metadata.items()]
output_las_file.add_extra_dims(extra_dims)
# Assign dimension values
for dimension_name, values in point_cloud.extra_dimensions.items():
setattr(output_las_file, dimension_name, values)
# Adapt points to scale and offset
[x_scale, y_scale, z_scale] = scale
[x_offset, y_offset, z_offset] = offset
[x, y] = np.hsplit(point_cloud.xy, 2)
output_las_file.X = (x.ravel() - x_offset) / x_scale
output_las_file.Y = (y.ravel() - y_offset) / y_scale
output_las_file.Z = (point_cloud.z - z_offset) / z_scale
output_las_file.x = x.ravel()
output_las_file.y = y.ravel()
output_las_file.z = point_cloud.z
# Set color
[red, green, blue] = np.hsplit(point_cloud.rgb, 3)
@ -55,11 +42,6 @@ def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimens
output_las_file.blue = blue.ravel()
# Set classification
output_las_file.Classification = point_cloud.classification.astype(np.uint8)
output_las_file.classification = point_cloud.classification.astype(np.uint8)
# Set header
output_las_file.header.scale = scale
output_las_file.header.offset = offset
# Close files
output_las_file.close()
output_las_file.write(output_point_cloud_path)

Wyświetl plik

@ -133,22 +133,13 @@ def json_add_readers(json, filenames):
return json
def json_print(json):
""" Pretty print JSON """
log.ODM_DEBUG(jsonlib.dumps(json, indent=4, separators=(',', ': ')))
""" Run PDAL commands """
def run_pipeline(json, verbose=False):
def run_pipeline(json):
""" Run PDAL Pipeline with provided JSON """
if verbose:
json_print(json)
# write to temp file
f, jsonfile = tempfile.mkstemp(suffix='.json')
if verbose:
log.ODM_INFO('Pipeline file: %s' % jsonfile)
os.write(f, jsonlib.dumps(json).encode('utf8'))
os.close(f)
@ -157,14 +148,11 @@ def run_pipeline(json, verbose=False):
'pipeline',
'-i %s' % double_quote(jsonfile)
]
if verbose or sys.platform == 'win32':
system.run(' '.join(cmd))
else:
system.run(' '.join(cmd) + ' > /dev/null 2>&1')
system.run(' '.join(cmd))
os.remove(jsonfile)
def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window, verbose=False):
def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window):
""" Run PDAL translate """
cmd = [
'pdal',
@ -178,12 +166,9 @@ def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window, verbose=
'--filters.smrf.window=%s' % window,
]
if verbose:
log.ODM_INFO(' '.join(cmd))
system.run(' '.join(cmd))
def merge_point_clouds(input_files, output_file, verbose=False):
def merge_point_clouds(input_files, output_file):
if len(input_files) == 0:
log.ODM_WARNING("Cannot merge point clouds, no point clouds to merge.")
return
@ -194,8 +179,5 @@ def merge_point_clouds(input_files, output_file, verbose=False):
' '.join(map(double_quote, input_files + [output_file])),
]
if verbose:
log.ODM_INFO(' '.join(cmd))
system.run(' '.join(cmd))

Wyświetl plik

@ -29,7 +29,7 @@ class GCPFile:
if line != "" and line[0] != "#":
parts = line.split()
if len(parts) >= 6:
self.entries.append(line)
self.entries.append(line)
else:
log.ODM_WARNING("Malformed GCP line: %s" % line)
@ -37,6 +37,35 @@ class GCPFile:
for entry in self.entries:
yield self.parse_entry(entry)
def check_entries(self):
    """Validate the parsed GCP entries and warn about common quality issues.

    Two checks are performed:
      * each distinct ground control point should be tagged in at least 3 images;
      * the file should contain at least 3 distinct ground control points.
    Problems are reported via log.ODM_WARNING but never abort processing.
    """
    coords = {}   # coords_key --> number of images in which that GCP is tagged
    gcps = {}     # coords_key --> list of entries referring to that GCP
    errors = 0

    for entry in self.iter_entries():
        k = entry.coords_key()
        coords[k] = coords.get(k, 0) + 1
        if k not in gcps:
            gcps[k] = []
        gcps[k].append(entry)

    for k in coords:
        if coords[k] < 3:
            description = "insufficient" if coords[k] < 2 else "not ideal"
            for entry in gcps[k]:
                log.ODM_WARNING(str(entry))
            log.ODM_WARNING("The number of images where the GCP %s has been tagged are %s" % (k, description))
            log.ODM_WARNING("You should tag at least %s more images" % (3 - coords[k]))
            log.ODM_WARNING("=====================================")
            errors += 1

    if len(coords) < 3:
        # Bug fix: report the number of GCPs actually detected. The previous
        # code printed (3 - len(coords)) — the number of *missing* points —
        # which made the message nonsensical (e.g. "detected (1)" for 2 GCPs).
        log.ODM_WARNING("Low number of GCPs detected (%s). For best results use at least 5." % len(coords))
        log.ODM_WARNING("=====================================")
        errors += 1

    if errors > 0:
        log.ODM_WARNING("Some issues detected with GCPs (but we're going to process this anyway)")
def parse_entry(self, entry):
if entry:
parts = entry.split()
@ -204,6 +233,9 @@ class GCPEntry:
self.py = py
self.filename = filename
self.extras = extras
def coords_key(self):
    # Key identifying this ground control point by its 3D coordinates;
    # used to group GCP-file entries that refer to the same physical point.
    return "{} {} {}".format(self.x, self.y, self.z)
def __str__(self):
return "{} {} {} {} {} {} {}".format(self.x, self.y, self.z,

Wyświetl plik

@ -26,7 +26,7 @@ def has_popsift_and_can_handle_texsize(width, height):
from opensfm import pypopsift
fits = pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
if not fits:
log.ODM_WARNING("Image size (%sx%spx) would not fit in GPU memory, falling back to CPU" % (width, height))
log.ODM_WARNING("Image size (%sx%spx) would not fit in GPU memory, try lowering --feature-quality. Falling back to CPU" % (width, height))
return fits
except (ModuleNotFoundError, ImportError):
return False

Wyświetl plik

@ -51,15 +51,19 @@ def image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimat
:param reconstruction_json path to OpenSfM's reconstruction.json
:param gsd_error_estimate percentage of estimated error in the GSD calculation to set an upper bound on resolution.
:return A down-scale (<= 1) value to apply to images to achieve the target resolution by comparing the current GSD of the reconstruction.
If a GSD cannot be computed, it just returns 1. Returned scale values are never higher than 1.
If a GSD cannot be computed, it just returns 1. Returned scale values are never higher than 1 and are always obtained by dividing by 2 (e.g. 0.5, 0.25, etc.)
"""
gsd = opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=has_gcp)
if gsd is not None and target_resolution > 0:
gsd = gsd * (1 + gsd_error_estimate)
return min(1, gsd / target_resolution)
isf = min(1.0, abs(gsd) / target_resolution)
ret = 0.5
while ret >= isf:
ret /= 2.0
return ret * 2.0
else:
return 1
return 1.0
def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, gsd_scaling = 1.0, ignore_gsd=False,

Wyświetl plik

@ -85,3 +85,7 @@ def path_or_json_string_to_dict(string):
raise ValueError("{0} is not a valid JSON file.".format(string))
else:
raise ValueError("{0} is not a valid JSON file or string.".format(string))
def touch(file):
    """Create (or overwrite) `file` containing the marker text "Done!"."""
    with open(file, 'w') as marker:
        marker.write("Done!\n")

Wyświetl plik

@ -119,7 +119,6 @@ def parse_srs_header(header):
:param header (str) line
:return Proj object
"""
log.ODM_INFO('Parsing SRS header: %s' % header)
header = header.strip()
ref = header.split(' ')
@ -155,4 +154,15 @@ def parse_srs_header(header):
'Modify your input and try again.' % header)
raise RuntimeError(e)
return srs
return srs
def utm_srs_from_ll(lon, lat):
    # Build the SRS (via parse_srs_header) for the UTM zone that contains
    # the given WGS84 longitude/latitude pair.
    utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(lon, lat)
    return parse_srs_header("WGS84 UTM %s%s" % (utm_zone, hemisphere))
def utm_transformers_from_ll(lon, lat):
    # Return a (ll_to_utm, utm_to_ll) pair of coordinate transformers
    # between WGS84 lat/lon (EPSG:4326) and the local UTM zone for (lon, lat).
    source_srs = CRS.from_epsg(4326)
    target_srs = utm_srs_from_ll(lon, lat)
    ll_to_utm = transformer(source_srs, target_srs)
    utm_to_ll = transformer(target_srs, source_srs)
    return ll_to_utm, utm_to_ll

Wyświetl plik

@ -43,7 +43,6 @@ def memory():
class ODMLogger:
def __init__(self):
self.show_debug = False
self.json = None
self.json_output_file = None
self.start_time = datetime.datetime.now()
@ -134,10 +133,6 @@ class ODMLogger:
def exception(self, msg):
self.log(FAIL, msg, "EXCEPTION")
def debug(self, msg):
if self.show_debug:
self.log(OKGREEN, msg, "DEBUG")
def close(self):
if self.json is not None and self.json_output_file is not None:
try:
@ -154,4 +149,3 @@ ODM_INFO = logger.info
ODM_WARNING = logger.warning
ODM_ERROR = logger.error
ODM_EXCEPTION = logger.exception
ODM_DEBUG = logger.debug

Wyświetl plik

@ -8,7 +8,7 @@ from opendm import concurrency
from scipy import signal
import numpy as np
def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, verbose=False, available_cores=None, method='gridded', smooth_dsm=True):
def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05, depth=8, samples=1, maxVertexCount=100000, available_cores=None, method='gridded', smooth_dsm=True):
# Create DSM from point cloud
# Create temporary directory
@ -21,7 +21,7 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05,
radius_steps = [dsm_radius]
for _ in range(2):
radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary
radius_steps.append(radius_steps[-1] * math.sqrt(2)) # sqrt(2) is arbitrary
log.ODM_INFO('Creating DSM for 2.5D mesh')
@ -33,20 +33,18 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05,
gapfill=True,
outdir=tmp_directory,
resolution=dsm_resolution,
verbose=verbose,
max_workers=available_cores,
apply_smoothing=smooth_dsm
)
if method == 'gridded':
mesh = dem_to_mesh_gridded(os.path.join(tmp_directory, 'mesh_dsm.tif'), outMesh, maxVertexCount, verbose, maxConcurrency=max(1, available_cores))
mesh = dem_to_mesh_gridded(os.path.join(tmp_directory, 'mesh_dsm.tif'), outMesh, maxVertexCount, maxConcurrency=max(1, available_cores))
elif method == 'poisson':
dsm_points = dem_to_points(os.path.join(tmp_directory, 'mesh_dsm.tif'), os.path.join(tmp_directory, 'dsm_points.ply'), verbose)
dsm_points = dem_to_points(os.path.join(tmp_directory, 'mesh_dsm.tif'), os.path.join(tmp_directory, 'dsm_points.ply'))
mesh = screened_poisson_reconstruction(dsm_points, outMesh, depth=depth,
samples=samples,
maxVertexCount=maxVertexCount,
threads=max(1, available_cores - 1), # poissonrecon can get stuck on some machines if --threads == all cores
verbose=verbose)
threads=max(1, available_cores - 1)), # poissonrecon can get stuck on some machines if --threads == all cores
else:
raise 'Not a valid method: ' + method
@ -57,14 +55,13 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05,
return mesh
def dem_to_points(inGeotiff, outPointCloud, verbose=False):
def dem_to_points(inGeotiff, outPointCloud):
log.ODM_INFO('Sampling points from DSM: %s' % inGeotiff)
kwargs = {
'bin': context.dem2points_path,
'outfile': outPointCloud,
'infile': inGeotiff,
'verbose': '-verbose' if verbose else ''
'infile': inGeotiff
}
system.run('"{bin}" -inputFile "{infile}" '
@ -72,12 +69,12 @@ def dem_to_points(inGeotiff, outPointCloud, verbose=False):
'-skirtHeightThreshold 1.5 '
'-skirtIncrements 0.2 '
'-skirtHeightCap 100 '
' {verbose} '.format(**kwargs))
'-verbose '.format(**kwargs))
return outPointCloud
def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxConcurrency=1):
def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, maxConcurrency=1):
log.ODM_INFO('Creating mesh from DSM: %s' % inGeotiff)
mesh_path, mesh_filename = os.path.split(outMesh)
@ -99,8 +96,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo
'outfile': outMeshDirty,
'infile': inGeotiff,
'maxVertexCount': maxVertexCount,
'maxConcurrency': maxConcurrency,
'verbose': '-verbose' if verbose else ''
'maxConcurrency': maxConcurrency
}
system.run('"{bin}" -inputFile "{infile}" '
'-outputFile "{outfile}" '
@ -108,7 +104,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo
'-maxVertexCount {maxVertexCount} '
'-maxConcurrency {maxConcurrency} '
'-edgeSwapThreshold 0.15 '
' {verbose} '.format(**kwargs))
'-verbose '.format(**kwargs))
break
except Exception as e:
maxConcurrency = math.floor(maxConcurrency / 2)
@ -138,7 +134,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo
return outMesh
def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = 1, maxVertexCount=100000, pointWeight=4, threads=context.num_cores, verbose=False):
def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples = 1, maxVertexCount=100000, pointWeight=4, threads=context.num_cores):
mesh_path, mesh_filename = os.path.split(outMesh)
# mesh_path = path/to
@ -165,8 +161,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
'depth': depth,
'samples': samples,
'pointWeight': pointWeight,
'threads': int(threads),
'verbose': '--verbose' if verbose else ''
'threads': int(threads)
}
# Run PoissonRecon
@ -178,8 +173,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
'--samplesPerNode {samples} '
'--threads {threads} '
'--bType 2 '
'--linearFit '
'{verbose}'.format(**poissonReconArgs))
'--linearFit '.format(**poissonReconArgs))
except Exception as e:
log.ODM_WARNING(str(e))

Wyświetl plik

@ -0,0 +1 @@
from .objpacker import obj_pack

Wyświetl plik

@ -0,0 +1 @@
from .imagepacker import pack

Wyświetl plik

@ -0,0 +1,239 @@
#! /usr/bin/python
# The MIT License (MIT)
# Copyright (c) 2015 Luke Gaynor
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import rasterio
import numpy as np
import math
# Based off of the great writeup, demo and code at:
# http://codeincomplete.com/posts/2011/5/7/bin_packing/
class Block():
    """A rectangular block, to be packed.

    Holds the block's size, the position assigned by the packer
    (None until placed) and an arbitrary payload.
    """
    def __init__(self, w, h, data=None, padding=0):
        # Size of the rectangle
        self.w = w
        self.h = h
        # Placement, filled in by BlockPacker.fit()
        self.x = None
        self.y = None
        self.fit = None
        # Payload carried along with the block (e.g. texture info)
        self.data = data
        self.padding = padding  # not implemented yet

    def __str__(self):
        return "({x},{y}) ({w}x{h}): {data}".format(
            x=self.x, y=self.y, w=self.w, h=self.h, data=self.data)
class _BlockNode():
"""A BlockPacker node"""
def __init__(self, x, y, w, h, used=False, right=None, down=None):
self.x = x
self.y = y
self.w = w
self.h = h
self.used = used
self.right = right
self.down = down
def __repr__(self):
return "({x},{y}) ({w}x{h})".format(x=self.x,y=self.y,w=self.w,h=self.h)
class BlockPacker():
    """Packs blocks of varying sizes into a single, larger block.

    Classic binary-tree bin packing: each placed block marks a node as used
    and carves the leftover space into `right` and `down` children; when
    nothing fits, the root rectangle is grown rightwards or downwards.
    Based on http://codeincomplete.com/posts/2011/5/7/bin_packing/
    """
    def __init__(self):
        self.root = None

    def fit(self, blocks):
        """Assign (x, y) positions to every block, growing the root as needed."""
        nblocks = len(blocks)
        # Seed the root with the first block's dimensions
        w = blocks[0].w
        h = blocks[0].h
        self.root = _BlockNode(0, 0, w, h)

        for block in blocks:
            found = self.find_node(self.root, block.w, block.h)
            if found:
                placed = self.split_node(found, block.w, block.h)
            else:
                placed = self.grow_node(block.w, block.h)
            block.x = placed.x
            block.y = placed.y

    def find_node(self, root, w, h):
        """Depth-first search for an unused node at least w x h large."""
        if root.used:
            # Search the free space carved out to the right, then below
            return self.find_node(root.right, w, h) or self.find_node(root.down, w, h)
        if w <= root.w and h <= root.h:
            return root
        return None

    def split_node(self, node, w, h):
        """Mark `node` used and split its leftover space into down/right children."""
        node.used = True
        node.down = _BlockNode(node.x, node.y + h, node.w, node.h - h)
        node.right = _BlockNode(node.x + w, node.y, node.w - w, h)
        return node

    def grow_node(self, w, h):
        """Expand the root to make room for a w x h block, keeping it roughly square."""
        can_grow_down = w <= self.root.w
        can_grow_right = h <= self.root.h

        # try to keep the packing square
        should_grow_right = can_grow_right and self.root.h >= (self.root.w + w)
        should_grow_down = can_grow_down and self.root.w >= (self.root.h + h)

        if should_grow_right:
            return self.grow_right(w, h)
        if should_grow_down:
            return self.grow_down(w, h)
        if can_grow_right:
            return self.grow_right(w, h)
        if can_grow_down:
            return self.grow_down(w, h)
        raise Exception("no valid expansion avaliable!")

    def grow_right(self, w, h):
        """Widen the root by w; the old root becomes the new root's 'down' child."""
        old_root = self.root
        self.root = _BlockNode(
            0, 0,
            old_root.w + w, old_root.h,
            down=old_root,
            right=_BlockNode(old_root.w, 0, w, old_root.h),
            used=True
        )
        node = self.find_node(self.root, w, h)
        if node:
            return self.split_node(node, w, h)
        return None

    def grow_down(self, w, h):
        """Heighten the root by h; the old root becomes the new root's 'right' child."""
        old_root = self.root
        self.root = _BlockNode(
            0, 0,
            old_root.w, old_root.h + h,
            down=_BlockNode(0, old_root.h, old_root.w, h),
            right=old_root,
            used=True
        )
        node = self.find_node(self.root, w, h)
        if node:
            return self.split_node(node, w, h)
        return None
def crop_by_extents(image, extent):
    """Crop a (bands, height, width) array to the pixel rectangle spanned by a UV extent.

    `extent` provides min_x/min_y/max_x/max_y in [0, 1] UV space (values
    outside that range trigger a warning and are clamped to the image).
    Returns (cropped_image, changes) where changes is
    (x offset, y offset, horizontal scale, vertical scale).
    """
    if min(extent.min_x, extent.min_y) < 0 or max(extent.max_x, extent.max_y) > 1:
        print("\tWARNING! UV Coordinates lying outside of [0:1] space!")

    _, h, w = image.shape

    # Convert UV bounds to pixel indices, clamped to the image
    x0 = max(math.floor(extent.min_x * w), 0)
    y0 = max(math.floor(extent.min_y * h), 0)
    x1 = min(math.ceil(extent.max_x * w), w)
    y1 = min(math.ceil(extent.max_y * h), h)

    cropped = image[:, y0:y1, x0:x1]

    # offset from origin x, y, horizontal scale, vertical scale
    changes = (x0, y0, (x1 - x0) / w, (y1 - y0) / h)
    return (cropped, changes)
def pack(obj, background=(0,0,0,0), format="PNG", extents=None):
    # Pack every material texture referenced by `obj` (as returned by
    # load_obj) into one atlas image using binary-tree bin packing.
    # Returns (output_image, uv_changes, profile) where uv_changes maps each
    # material name to the offset/aspect needed to remap its UV coordinates
    # into the atlas, and profile is the rasterio profile of the last texture
    # read (with the original band count and dtype).
    # NOTE(review): `background` and `format` are unused in this body — TODO confirm.
    blocks = []
    image_name_map = {}   # texture filename --> (possibly cropped) pixel array
    profile = None

    for mat in obj['materials']:
        filename = obj['materials'][mat]

        with rasterio.open(filename, 'r') as f:
            profile = f.profile
            image = f.read()
            # rasterio rows run top-down; flip to match bottom-up UV space
            image = np.flip(image, axis=1)

        changes = None
        if extents and extents[mat]:
            # Crop to the UV region actually used by this material
            image, changes = crop_by_extents(image, extents[mat])

        image_name_map[filename] = image
        _, h, w = image.shape

        # using filename so we can pass back UV info without storing it in image
        blocks.append(Block(w, h, data=(filename, mat, changes)))

    # sort by width, descending (widest first)
    blocks.sort(key=lambda block: -block.w)

    packer = BlockPacker()
    packer.fit(blocks)

    # output_image = Image.new("RGBA", (packer.root.w, packer.root.h))
    output_image = np.zeros((profile['count'], packer.root.h, packer.root.w), dtype=profile['dtype'])
    uv_changes = {}

    for block in blocks:
        fname, mat, changes = block.data
        image = image_name_map[fname]
        _, im_h, im_w = image.shape

        uv_changes[mat] = {
            "offset": (
                # should be in [0, 1] range
                (block.x - (changes[0] if changes else 0))/output_image.shape[2],
                # UV origin is bottom left, PIL assumes top left!
                (block.y - (changes[1] if changes else 0))/output_image.shape[1]
            ),
            "aspect": (
                ((1/changes[2]) if changes else 1) * (im_w/output_image.shape[2]),
                ((1/changes[3]) if changes else 1) * (im_h/output_image.shape[1])
            ),
        }

        output_image[:, block.y:block.y + im_h, block.x:block.x + im_w] = image

    # Flip back to top-down row order for writing
    output_image = np.flip(output_image, axis=1)

    return output_image, uv_changes, profile

Wyświetl plik

@ -0,0 +1,53 @@
#! /usr/bin/python
# The MIT License (MIT)
# Copyright (c) 2015 Luke Gaynor
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class AABB():
    """2D axis-aligned bounding box accumulated from UV points.

    All four bounds start as None (empty box) and are expanded one point
    at a time via add().
    """
    def __init__(self, min_x=None, min_y=None, max_x=None, max_y=None):
        self.min_x = min_x
        self.min_y = min_y
        self.max_x = max_x
        self.max_y = max_y

    def add(self, x, y):
        """Expand the box to include the point (x, y)."""
        self.min_x = min(self.min_x, x) if self.min_x is not None else x
        self.min_y = min(self.min_y, y) if self.min_y is not None else y
        self.max_x = max(self.max_x, x) if self.max_x is not None else x
        self.max_y = max(self.max_y, y) if self.max_y is not None else y

    def uv_wrap(self):
        """Return the (width, height) of the box."""
        return (self.max_x - self.min_x, self.max_y - self.min_y)

    def tiling(self):
        """Return (width, height) if the box spills outside the [0,1] UV
        square (i.e. the texture tiles), otherwise None.

        Bug fix: the previous guard (`if self.min_x and self.max_x and ...`)
        used truthiness, so a legitimate bound of exactly 0.0 (or 0) made the
        whole check fail and tiling was never reported for boxes touching the
        UV origin. Compare against None explicitly instead.
        """
        if (self.min_x is not None and self.max_x is not None and
                self.min_y is not None and self.max_y is not None):
            if self.min_x < 0 or self.min_y < 0 or self.max_x > 1 or self.max_y > 1:
                return (self.max_x - self.min_x, self.max_y - self.min_y)
        return None

    def __repr__(self):
        return "({},{}) ({},{})".format(
            self.min_x,
            self.min_y,
            self.max_x,
            self.max_y
        )

Wyświetl plik

@ -0,0 +1,235 @@
import os
import rasterio
import warnings
import numpy as np
try:
from .imagepacker.utils import AABB
from .imagepacker import pack
except ImportError:
from imagepacker.utils import AABB
from imagepacker import pack
warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)
def load_obj(obj_path, _info=print):
    # Parse a Wavefront .OBJ file.
    #
    # Returns a dict with:
    #   filename       - basename of the OBJ
    #   root_dir       - absolute directory containing it
    #   mtl_filenames  - list of mtllib filenames referenced
    #   materials      - material name --> texture path (via load_mtl)
    #   uvs            - Nx2 float32 array of "vt" coordinates
    #   faces          - material name --> list of 0-based UV index triplets
    # Only the UV component of face vertices is kept; vertex positions and
    # normals are intentionally skipped (see commented branches below).
    # Raises IOError if the file does not exist.
    if not os.path.isfile(obj_path):
        raise IOError("Cannot open %s" % obj_path)

    obj_base_path = os.path.dirname(os.path.abspath(obj_path))
    obj = {
        'filename': os.path.basename(obj_path),
        'root_dir': os.path.dirname(os.path.abspath(obj_path)),
        'mtl_filenames': [],
        'materials': {},
    }

    uvs = []
    faces = {}
    current_material = "_"   # bucket for faces seen before any usemtl

    with open(obj_path) as f:
        _info("Loading %s" % obj_path)

        for line in f:
            if line.startswith("mtllib "):
                # Materials
                mtl_file = "".join(line.split()[1:]).strip()
                obj['materials'].update(load_mtl(mtl_file, obj_base_path, _info=_info))
                obj['mtl_filenames'].append(mtl_file)

            # elif line.startswith("v "):
            #     # Vertices
            #     vertices.append(list(map(float, line.split()[1:4])))
            elif line.startswith("vt "):
                # UVs
                uvs.append(list(map(float, line.split()[1:3])))
            # elif line.startswith("vn "):
            #     normals.append(list(map(float, line.split()[1:4])))
            elif line.startswith("usemtl "):
                mtl_name = "".join(line.split()[1:]).strip()
                # Every usemtl must have been declared by a previously loaded MTL
                if not mtl_name in obj['materials']:
                    raise Exception("%s material is missing" % mtl_name)

                current_material = mtl_name
            elif line.startswith("f "):
                if current_material not in faces:
                    faces[current_material] = []

                # Assumes triangular faces of the form v/vt[/vn];
                # OBJ indices are 1-based, stored 0-based here
                a, b, c = line.split()[1:]
                at = int(a.split("/")[1])
                bt = int(b.split("/")[1])
                ct = int(c.split("/")[1])
                faces[current_material].append((at - 1, bt - 1, ct - 1))

    obj['uvs'] = np.array(uvs, dtype=np.float32)
    obj['faces'] = faces

    return obj
def load_mtl(mtl_file, obj_base_path, _info=print):
    """Parse a Wavefront .MTL file.

    `mtl_file` is resolved relative to `obj_base_path`. Returns a dict
    mapping each material name (newmtl) that has a diffuse texture (map_Kd)
    to that texture's path. Raises IOError when the MTL file or any
    referenced texture does not exist.
    """
    mtl_path = os.path.join(obj_base_path, mtl_file)
    if not os.path.isfile(mtl_path):
        raise IOError("Cannot open %s" % mtl_path)

    materials = {}
    active_material = ""

    with open(mtl_path) as f:
        for line in f:
            if line.startswith("newmtl "):
                active_material = "".join(line.split()[1:]).strip()
            elif line.startswith("map_Kd ") and active_material:
                texture_name = "".join(line.split()[1:]).strip()
                texture_path = os.path.join(obj_base_path, texture_name)
                if not os.path.isfile(texture_path):
                    raise IOError("Cannot open %s" % texture_path)
                materials[active_material] = texture_path

    return materials
def write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=print):
    # Rewrite an OBJ so that it references a single mtllib/usemtl
    # (`mtl_file` / `single_mat`) and remap every "vt" coordinate into the
    # packed atlas using the per-material offset/aspect dicts in `uv_changes`
    # (as produced by pack()). The rewritten file is written to output_dir
    # under the same basename.
    with open(obj_file) as f:
        obj_lines = f.readlines()

    out_lines = []
    uv_lines = []          # indexes (into obj_lines) of each "vt" line, in order
    current_material = None
    printed_mtllib = False
    printed_usemtl = False

    _info("Transforming UV coordinates")

    # NOTE: every branch below appends exactly one line to out_lines, so
    # out_lines stays index-aligned with obj_lines; that alignment is what
    # makes the out_lines[uv_line_idx] in-place rewrite valid.
    for line_idx, line in enumerate(obj_lines):
        if line.startswith("mtllib"):
            # Keep only the first mtllib, pointing at the new single MTL;
            # later ones are blanked out with a comment placeholder
            if not printed_mtllib:
                out_lines.append("mtllib %s\n" % mtl_file)
                printed_mtllib = True
            else:
                out_lines.append("# \n")
        elif line.startswith("usemtl"):
            # Collapse all usemtl statements into the single output material,
            # but keep tracking which original material is active so the
            # right UV transform is applied to the faces that follow
            if not printed_usemtl:
                out_lines.append("usemtl %s\n" % single_mat)
                printed_usemtl = True
            else:
                out_lines.append("# \n")
            current_material = line[7:].strip()
        elif line.startswith("vt"):
            uv_lines.append(line_idx)
            out_lines.append(line)
        elif line.startswith("f"):
            for v in line[2:].split():
                parts = v.split("/")
                if len(parts) >= 2 and parts[1]:
                    uv_idx = int(parts[1]) - 1 # uv indexes start from 1
                    uv_line_idx = uv_lines[uv_idx]
                    uv_line = obj_lines[uv_line_idx][3:]
                    uv = [float(uv.strip()) for uv in uv_line.split()]

                    if current_material and current_material in uv_changes:
                        changes = uv_changes[current_material]
                        uv[0] = uv[0] * changes["aspect"][0] + changes["offset"][0]
                        uv[1] = uv[1] * changes["aspect"][1] + changes["offset"][1]
                    out_lines[uv_line_idx] = "vt %s %s\n" % (uv[0], uv[1])
            out_lines.append(line)
        else:
            out_lines.append(line)

    out_file = os.path.join(output_dir, os.path.basename(obj_file))
    _info("Writing %s" % out_file)

    with open(out_file, 'w') as f:
        f.writelines(out_lines)
def write_output_tex(img, profile, path, _info=print):
    """Write a (bands, height, width) pixel array to `path` using rasterio.

    `profile` is a rasterio profile dict whose width/height are updated to
    match `img`; tiling is disabled since the atlas is written in one piece.
    Any stale GDAL `.aux.xml` sidecar for the output path is removed.

    Bug fix: arrays here are laid out (bands, height, width) — that is how
    rasterio's read() returns them and how pack() allocates the atlas
    (np.zeros((count, root.h, root.w))). The previous unpacking
    `_, w, h = img.shape` swapped the two, corrupting the profile (and the
    log message) for any non-square atlas.
    """
    _, h, w = img.shape
    profile['width'] = w
    profile['height'] = h

    if 'tiled' in profile:
        profile['tiled'] = False

    _info("Writing %s (%sx%s pixels)" % (path, w, h))
    with rasterio.open(path, 'w', **profile) as dst:
        for b in range(1, img.shape[0] + 1):
            dst.write(img[b - 1], b)

    # Remove any leftover sidecar so it can't describe the old texture
    sidecar = path + '.aux.xml'
    if os.path.isfile(sidecar):
        os.unlink(sidecar)
def write_output_mtl(src_mtl, mat_file, dst_mtl):
    """Write a single-material copy of an MTL file.

    Copies `src_mtl` to `dst_mtl` line by line, stopping at the first
    map_Kd directive, which is rewritten to reference `mat_file`.
    Returns the last material name (newmtl) encountered before that point;
    raises if no newmtl line was seen at all.
    """
    with open(src_mtl, 'r') as src:
        lines = src.readlines()

    out = []
    single_mat = None

    for line in lines:
        if line.startswith("newmtl"):
            single_mat = "".join(line.split()[1:]).strip()
            out.append(line)
        elif line.startswith("map_Kd"):
            # Point the diffuse map at the packed atlas and drop the rest
            out.append("map_Kd %s\n" % mat_file)
            break
        else:
            out.append(line)

    with open(dst_mtl, 'w') as dst:
        dst.write("".join(out))

    if single_mat is None:
        raise Exception("Could not find material name in file")

    return single_mat
def obj_pack(obj_file, output_dir=None, _info=print):
    # Pack all textures of a multi-material OBJ into a single texture atlas.
    #
    # Writes the rewritten OBJ, the single-material MTL and the packed
    # texture into output_dir (defaults to "<obj dir>/packed"). Raises when
    # there is nothing to do (no MTL files, already a single material) or
    # when the output would overwrite the input file.
    if not output_dir:
        output_dir = os.path.join(os.path.dirname(os.path.abspath(obj_file)), "packed")

    obj = load_obj(obj_file, _info=_info)
    if not obj['mtl_filenames']:
        raise Exception("No MTL files found, nothing to do")

    if os.path.abspath(obj_file) == os.path.abspath(os.path.join(output_dir, os.path.basename(obj_file))):
        raise Exception("This will overwrite %s. Choose a different output directory" % obj_file)

    if len(obj['mtl_filenames']) <= 1 and len(obj['materials']) <= 1:
        raise Exception("File already has a single material, nothing to do")

    # Compute AABB for UVs
    _info("Computing texture bounds")
    extents = {}
    for material in obj['materials']:
        bounds = AABB()

        faces = obj['faces'][material]
        for f in faces:
            for uv_idx in f:
                uv = obj['uvs'][uv_idx]
                bounds.add(uv[0], uv[1])

        extents[material] = bounds

    _info("Binary packing...")
    output_image, uv_changes, profile = pack(obj, extents=extents)

    # Keep the first MTL's filename; the atlas reuses the first
    # material's texture filename
    mtl_file = obj['mtl_filenames'][0]
    mat_file = os.path.basename(obj['materials'][next(iter(obj['materials']))])

    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    write_output_tex(output_image, profile, os.path.join(output_dir, mat_file), _info=_info)
    single_mat = write_output_mtl(os.path.join(obj['root_dir'], mtl_file), mat_file, os.path.join(output_dir, mtl_file))
    write_obj_changes(obj_file, mtl_file, uv_changes, single_mat, output_dir, _info=_info)
if __name__ == '__main__':
    import argparse

    # Command-line entry point: pack an OBJ's textures into one material
    arg_parser = argparse.ArgumentParser(description="Packs textured .OBJ Wavefront files into a single materials")
    arg_parser.add_argument("obj", help="Path to the .OBJ file")
    arg_parser.add_argument("-o", "--output-dir", help="Output directory")
    cli_args = arg_parser.parse_args()

    obj_pack(cli_args.obj, cli_args.output_dir)

Wyświetl plik

@ -216,8 +216,8 @@ def merge(input_ortho_and_ortho_cuts, output_orthophoto, orthophoto_vars={}):
left, bottom, right, top = src.bounds
xs.extend([left, right])
ys.extend([bottom, top])
if src.profile["count"] < 4:
raise ValueError("Inputs must be at least 4-band rasters")
if src.profile["count"] < 2:
raise ValueError("Inputs must be at least 2-band rasters")
dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)
log.ODM_INFO("Output bounds: %r %r %r %r" % (dst_w, dst_s, dst_e, dst_n))

Wyświetl plik

@ -202,27 +202,22 @@ class OSFMContext:
# Compute feature_process_size
feature_process_size = 2048 # default
if ('resize_to_is_set' in args) and args.resize_to > 0:
# Legacy
log.ODM_WARNING("Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead.")
feature_process_size = int(args.resize_to)
feature_quality_scale = {
'ultra': 1,
'high': 0.5,
'medium': 0.25,
'low': 0.125,
'lowest': 0.0675,
}
max_dim = find_largest_photo_dim(photos)
if max_dim > 0:
log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim))
feature_process_size = int(max_dim * feature_quality_scale[args.feature_quality])
log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size)
else:
feature_quality_scale = {
'ultra': 1,
'high': 0.5,
'medium': 0.25,
'low': 0.125,
'lowest': 0.0675,
}
max_dim = find_largest_photo_dim(photos)
if max_dim > 0:
log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim))
feature_process_size = int(max_dim * feature_quality_scale[args.feature_quality])
log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size)
else:
log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")
log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")
# create config file for OpenSfM
if args.matcher_neighbors > 0:

Wyświetl plik

@ -166,10 +166,6 @@ class ODM_Photo:
# parse values from metadata
self.parse_exif_values(path_file)
# print log message
log.ODM_DEBUG('Loaded {}'.format(self))
def __str__(self):
return '{} | camera: {} {} | dimensions: {} x {} | lat: {} | lon: {} | alt: {} | band: {} ({})'.format(
self.filename, self.camera_make, self.camera_model, self.width, self.height,

Wyświetl plik

@ -71,7 +71,7 @@ def split(input_point_cloud, outdir, filename_template, capacity, dims=None):
return [os.path.join(outdir, f) for f in os.listdir(outdir)]
def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, boundary=None, verbose=False, max_concurrency=1):
def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, boundary=None, max_concurrency=1):
"""
Filters a point cloud
"""
@ -82,8 +82,7 @@ def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=
args = [
'--input "%s"' % input_point_cloud,
'--output "%s"' % output_point_cloud,
'--concurrency %s' % max_concurrency,
'--verbose' if verbose else '',
'--concurrency %s' % max_concurrency
]
if sample_radius > 0:

Wyświetl plik

@ -12,9 +12,11 @@ RS_DATABASE = {
'dji fc6310': 33, # Phantom 4 Professional
'dji fc7203': 20, # Mavic Mini v1
'dji fc2103': 32, # DJI Mavic Air 1
'dji fc3170': 27, # DJI Mavic Air 2
'dji fc3411': 32, # DJI Mavic Air 2S
'dji fc220': 64, # DJI Mavic Pro (Platinum)
'hasselblad l1d-20c': lambda p: 47 if p.get_capture_megapixels() < 17 else 56, # DJI Mavic 2 Pro (at 16:10 => 16.8MP 47ms, at 3:2 => 19.9MP 56ms. 4:3 has 17.7MP with same image height as 3:2 which can be concluded as same sensor readout)
'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
@ -26,8 +28,12 @@ RS_DATABASE = {
'gopro hero4 black': 30, # GoPro Hero 4 Black
'gopro hero8 black': 17, # GoPro Hero 8 Black
'teracube teracube one': 32 # TeraCube TeraCube_One TR1907Q Mobile Phone
'teracube teracube one': 32, # TeraCube TeraCube_One TR1907Q Mobile Phone
'fujifilm x-a5': 186, # FUJIFILM X-A5 Mirrorless Interchangeable Lens Camera
'fujifilm x-t2': 35, # FUJIFILM X-T2 Mirrorless Interchangeable Lens Camera
# Help us add more!
# See: https://github.com/OpenDroneMap/RSCalibration for instructions
@ -72,3 +78,4 @@ def get_rolling_shutter_readout(photo, override_value=0):
log.ODM_WARNING("Rolling shutter readout time for \"%s %s\" is not in our database, using default of %sms which might be incorrect. Use --rolling-shutter-readout to set an actual value (see https://github.com/OpenDroneMap/RSCalibration for instructions on how to calculate this value)" % (make, model, DEFAULT_RS_READOUT))
warn_db_missing[key] = True
return float(DEFAULT_RS_READOUT)

Wyświetl plik

@ -23,7 +23,7 @@ def get_origin(shot):
"""The origin of the pose in world coordinates."""
return -get_rotation_matrix(np.array(shot['rotation'])).T.dot(np.array(shot['translation']))
def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None):
def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None, a_matrix=None):
"""
Extract shots from OpenSfM's reconstruction.json
"""
@ -92,6 +92,11 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset
utm_coords = [origin[0] + utm_offset[0],
origin[1] + utm_offset[1],
origin[2]]
if a_matrix is not None:
rotation = list(np.array(rotation).dot(a_matrix[:3,:3]))
utm_coords = list(a_matrix.dot(np.hstack((np.array(utm_coords), 1)))[:-1])
translation = utm_coords
trans_coords = crstrans.TransformPoint(utm_coords[0], utm_coords[1], utm_coords[2])

Wyświetl plik

@ -7,6 +7,7 @@ import subprocess
import string
import signal
import io
import shutil
from collections import deque
from opendm import context
@ -144,3 +145,30 @@ def which(program):
p=os.path.join(p,program)
if os.path.exists(p) and os.access(p,os.X_OK):
return p
def link_file(src, dst):
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if not os.path.isfile(dst):
if sys.platform == 'win32':
os.link(src, dst)
else:
os.symlink(os.path.relpath(os.path.abspath(src), os.path.dirname(os.path.abspath(dst))), dst)
def move_files(src, dst):
if not os.path.isdir(dst):
raise IOError("Not a directory: %s" % dst)
for f in os.listdir(src):
if os.path.isfile(os.path.join(src, f)):
shutil.move(os.path.join(src, f), dst)
def delete_files(folder, exclude=()):
if not os.path.isdir(folder):
return
for f in os.listdir(folder):
if os.path.isfile(os.path.join(folder, f)):
if not exclude or not f.endswith(exclude):
os.unlink(os.path.join(folder, f))

Wyświetl plik

@ -41,7 +41,7 @@ def extract_temperatures_dji(photo, image, dataset_tree):
except ValueError as e:
log.ODM_ERROR("Error during extracting temperature values for file %s : %s" % photo.filename, e)
else:
log.ODM_DEBUG("Only DJI M2EA currently supported, please wait for new updates")
log.ODM_WARNING("Only DJI M2EA currently supported, please wait for new updates")
return image
# Extract raw sensor values from generated image into numpy array
raw_sensor_np = np.array(img)

Wyświetl plik

@ -107,11 +107,13 @@ class ODM_Reconstruction(object):
if gcp.exists():
if gcp.entries_count() == 0:
raise RuntimeError("This GCP file does not have any entries. Are the entries entered in the proper format?")
gcp.check_entries()
# Convert GCP file to a UTM projection since the rest of the pipeline
# does not handle other SRS well.
rejected_entries = []
utm_gcp = GCPFile(gcp.create_utm_copy(output_gcp_file, filenames=[p.filename for p in self.photos], rejected_entries=rejected_entries, include_extras=False))
utm_gcp = GCPFile(gcp.create_utm_copy(output_gcp_file, filenames=[p.filename for p in self.photos], rejected_entries=rejected_entries, include_extras=True))
if not utm_gcp.exists():
raise RuntimeError("Could not project GCP file to UTM. Please double check your GCP file for mistakes.")
@ -239,7 +241,7 @@ class ODM_GeoRef(object):
return (self.utm_east_offset, self.utm_north_offset)
class ODM_Tree(object):
def __init__(self, root_path, gcp_file = None, geo_file = None):
def __init__(self, root_path, gcp_file = None, geo_file = None, align_file = None):
# root path to the project
self.root_path = io.absolute_path_file(root_path)
self.input_images = os.path.join(self.root_path, 'images')
@ -294,6 +296,7 @@ class ODM_Tree(object):
self.odm_georeferencing_gcp = gcp_file or io.find('gcp_list.txt', self.root_path)
self.odm_georeferencing_gcp_utm = os.path.join(self.odm_georeferencing, 'gcp_list_utm.txt')
self.odm_geo_file = geo_file or io.find('geo.txt', self.root_path)
self.odm_align_file = align_file or io.find('align.laz', self.root_path) or io.find('align.las', self.root_path) or io.find('align.tif', self.root_path)
self.odm_georeferencing_proj = 'proj.txt'
self.odm_georeferencing_model_txt_geo = os.path.join(
@ -304,6 +307,9 @@ class ODM_Tree(object):
self.odm_georeferencing, 'odm_georeferenced_model.laz')
self.odm_georeferencing_model_las = os.path.join(
self.odm_georeferencing, 'odm_georeferenced_model.las')
self.odm_georeferencing_alignment_matrix = os.path.join(
self.odm_georeferencing, 'alignment_matrix.json'
)
# odm_orthophoto
self.odm_orthophoto_render = os.path.join(self.odm_orthophoto, 'odm_orthophoto_render.tif')
@ -362,8 +368,10 @@ class ODM_Stage:
if outputs.get('tree') is None:
raise Exception("Assert violation: tree variable is missing from outputs dictionary.")
if self.args.time:
try:
system.benchmark(start_time, outputs['tree'].benchmarking, self.name)
except Exception as e:
log.ODM_WARNING("Cannot write benchmark file: %s" % str(e))
log.ODM_INFO('Finished %s stage' % self.name)
self.update_progress_end()

Wyświetl plik

@ -1,41 +1,46 @@
import os, shutil
import numpy as np
import json
from opendm import log
from opendm.photo import find_largest_photo_dims
from osgeo import gdal
from opendm.loghelpers import double_quote
class NumpyEncoder(json.JSONEncoder):
    """json.JSONEncoder that serializes numpy arrays as plain Python lists."""

    def default(self, obj):
        # Numpy arrays are not JSON-serializable natively; emit them as lists
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Defer everything else to the base encoder (which raises TypeError)
        return super().default(obj)
def get_depthmap_resolution(args, photos):
    """Compute the depthmap resolution (pixels, longest side) for dense matching.

    An explicitly set --depthmap-resolution always wins; otherwise the value
    is derived from the largest photo's dimensions scaled by --pc-quality.
    (This span previously contained duplicated old+new copies of the body
    from a mangled diff; this is the single, de-duplicated version.)
    """
    if 'depthmap_resolution_is_set' in args:
        # Explicitly set by the user: override pc-quality entirely
        return int(args.depthmap_resolution)
    else:
        max_dims = find_largest_photo_dims(photos)
        min_dim = 320 # Never go lower than this

        if max_dims is not None:
            w, h = max_dims
            max_dim = max(w, h)
            megapixels = (w * h) / 1e6
            multiplier = 1

            # Compensate for unusually small / large sensors
            if megapixels < 6:
                multiplier = 2
            elif megapixels > 42:
                multiplier = 0.5

            pc_quality_scale = {
                'ultra': 0.5,
                'high': 0.25,
                'medium': 0.125,
                'low': 0.0675,
                'lowest': 0.03375
            }

            return max(min_dim, int(max_dim * pc_quality_scale[args.pc_quality] * multiplier))
        else:
            log.ODM_WARNING("Cannot compute max image dimensions, going with default depthmap_resolution of 640")
            return 640 # Sensible default
def get_raster_stats(geotiff):
stats = []
@ -102,4 +107,10 @@ def rm_r(path):
elif os.path.exists(path):
os.remove(path)
except:
log.ODM_WARNING("Cannot remove %s" % path)
log.ODM_WARNING("Cannot remove %s" % path)
def np_to_json(arr):
    # Serialize a structure that may contain numpy arrays to a JSON string
    # (arrays are emitted as plain lists via NumpyEncoder); inverse of np_from_json.
    return json.dumps(arr, cls=NumpyEncoder)
def np_from_json(json_dump):
    """Parse a JSON string into a numpy array (inverse of np_to_json)."""
    parsed = json.loads(json_dump)
    return np.asarray(parsed)

Wyświetl plik

Wyświetl plik

@ -0,0 +1,128 @@
import cv2
import numpy as np
class ThresholdBlurChecker:
    """Flags a frame as blurry when its Laplacian variance is below a fixed threshold."""

    def __init__(self, threshold):
        self.threshold = threshold

    def NeedPreProcess(self):
        # Stateless checker: no pre-pass over the video is needed
        return False

    def PreProcess(self, video_path, start_frame, end_frame):
        return

    def IsBlur(self, image_bw, id):
        # Variance of the Laplacian is a standard sharpness proxy
        score = cv2.Laplacian(image_bw, cv2.CV_64F).var()
        return score, score < self.threshold
class SimilarityChecker:
    """Flags frames whose tracked feature motion (relative to the last kept
    frame) falls below a distance threshold."""

    def __init__(self, threshold, max_features=500):
        self.threshold = threshold
        self.max_features = max_features
        self.last_image = None
        self.last_image_id = None
        self.last_image_features = None

    def _remember(self, image_bw, id):
        # Store this frame as the new reference and re-detect corners on it
        self.last_image = image_bw
        self.last_image_id = id
        self.last_image_features = cv2.goodFeaturesToTrack(image_bw, self.max_features, 0.01, 10)

    def IsSimilar(self, image_bw, id):
        # First frame seen becomes the reference and is never "similar"
        if self.last_image is None:
            self._remember(image_bw, id)
            return 0, False, None

        # Track the reference frame's corners into the current frame
        tracked, status, _ = cv2.calcOpticalFlowPyrLK(self.last_image, image_bw, self.last_image_features, None)

        # Keep only successfully tracked points
        good_now = tracked[status == 1]
        good_ref = self.last_image_features[status == 1]

        # Mean absolute displacement between the two frames
        distance = np.average(np.abs(good_ref - good_now))

        similar = distance < self.threshold
        if not similar:
            # Frame differs enough: promote it to the new reference
            self._remember(image_bw, id)

        return distance, similar, self.last_image_id
class NaiveBlackFrameChecker:
    """Declares a frame black when its mean luminance is below a fixed threshold."""

    def __init__(self, threshold):
        self.threshold = threshold

    def PreProcess(self, video_path, start_frame, end_frame, width=800, height=600):
        return

    def NeedPreProcess(self):
        # No calibration pass needed: the threshold is absolute
        return False

    def IsBlack(self, image_bw, id):
        return np.average(image_bw) < self.threshold
class BlackFrameChecker:
    """Detects black frames using a luminance threshold calibrated on the video itself.

    PreProcess() scans the video to learn the global luminance range;
    IsBlack() then flags frames whose ratio of "black" pixels meets or
    exceeds picture_black_ratio_th.
    """

    def __init__(self, picture_black_ratio_th=0.98, pixel_black_th=0.30):
        # None means "use the default", so callers can pass unset options through
        self.picture_black_ratio_th = picture_black_ratio_th if picture_black_ratio_th is not None else 0.98
        self.pixel_black_th = pixel_black_th if pixel_black_th is not None else 0.30
        self.luminance_minimum_value = None
        self.luminance_range_size = None
        self.absolute_threshold = None

    def NeedPreProcess(self):
        return True

    def PreProcess(self, video_path, start_frame, end_frame):
        """Scan frames [start_frame, end_frame] to compute the absolute black-pixel threshold."""
        # Open video file
        cap = cv2.VideoCapture(video_path)

        # Set frame start index
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

        # BUG FIX: -1 means "scan to the end of the video". The computed bound
        # must actually be used in the loop condition — previously the loop
        # tested end_frame directly, so end_frame == -1 skipped the scan.
        frame_end = end_frame
        if end_frame == -1:
            frame_end = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        # Initialize luminance range size and minimum value
        self.luminance_range_size = 0
        self.luminance_minimum_value = 255

        frame_index = start_frame if start_frame is not None else 0

        # Read and process frames from video file
        while cap.isOpened() and (frame_end is None or frame_index <= frame_end):
            ret, frame = cap.read()
            if not ret:
                break

            # Convert frame to grayscale
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray_frame_min = gray_frame.min()
            gray_frame_max = gray_frame.max()

            # Update luminance range size and minimum value
            self.luminance_range_size = max(self.luminance_range_size, gray_frame_max - gray_frame_min)
            self.luminance_minimum_value = min(self.luminance_minimum_value, gray_frame_min)

            frame_index += 1

        # Absolute luminance below which a pixel counts as "black"
        self.absolute_threshold = self.luminance_minimum_value + self.pixel_black_th * self.luminance_range_size

        # Close video file
        cap.release()

    def IsBlack(self, image_bw, id):
        # Count pixels darker than the calibrated absolute threshold
        nb_black_pixels = np.sum(image_bw < self.absolute_threshold)

        # Ratio of black pixels in the frame
        ratio_black_pixels = nb_black_pixels / (image_bw.shape[0] * image_bw.shape[1])

        # Black frame iff the ratio meets the configured threshold
        return ratio_black_pixels >= self.picture_black_ratio_th

Wyświetl plik

@ -0,0 +1,46 @@
import argparse
import datetime
import os
class Parameters:
    """Validated option bag for Video2Dataset, built from a plain dict.

    Expected keys in `args`:
      input                  path(s) to input video file(s); str or list of str
      output                 path to output directory (created if missing)
      start / end            start/end frame indices
      limit                  maximum number of output frames
      blur_threshold         blur scores below this are considered blurry (good value: 300)
      distance_threshold     distance scores below this are considered similar
      black_ratio_threshold  minimum nb_black_pixels / nb_pixels ratio for a "black" frame (default 0.98)
      pixel_black_threshold  maximum pixel luminance for a "black" pixel (good value: 0.30)
      use_srt                use SRT sidecar files (same name as video, .srt extension) for metadata
      frame_format           output frame format (jpg, png, tiff, ...)
      max_dimension          override output resolution (e.g. 1024)
      stats_file             save per-frame statistics to this csv file
    """

    def __init__(self, args):
        if not os.path.exists(args["output"]):
            os.makedirs(args["output"])

        self.input = args["input"]
        if isinstance(self.input, str):
            self.input = [self.input]

        self.output = args["output"]

        self.start = args.get("start", 0)
        self.end = args.get("end", None)
        self.limit = args.get("limit", None)
        self.blur_threshold = args.get("blur_threshold", None)
        self.distance_threshold = args.get("distance_threshold", None)
        self.black_ratio_threshold = args.get("black_ratio_threshold", None)
        self.pixel_black_threshold = args.get("pixel_black_threshold", None)
        # BUG FIX: honor the supplied value; `"use_srt" in args` evaluated to
        # True even when the caller explicitly passed use_srt=False
        self.use_srt = bool(args.get("use_srt", False))
        self.frame_format = args.get("frame_format", "jpg")
        self.max_dimension = args.get("max_dimension", None)
        self.stats_file = args.get("stats_file", None)

        # Frames are downscaled to this size (longest side) before analysis
        self.internal_resolution = 800

Wyświetl plik

@ -0,0 +1,206 @@
from datetime import datetime
from opendm import location, log
import re
def match_single(regexes, line, dtype=int):
    """Try one or more regexes against a line; return the first transformed
    capture group, or None when nothing matches.

    regexes may be: a single pattern string, a list of pattern strings, or a
    list of (pattern, transform) tuples. Bare strings use `dtype` as the
    transform applied to group(1).
    """
    if isinstance(regexes, str):
        regexes = [(regexes, dtype)]

    # Normalize to (pattern, transform) pairs without mutating the caller's list
    normalized = [(r, dtype) if isinstance(r, str) else r for r in regexes]

    try:
        for r, transform in normalized:
            match = re.search(r, line)
            if match:
                res = match.group(1)
                return transform(res)
    except Exception as e:
        # BUG FIX: the message must be %-formatted; previously the tuple was
        # passed as a stray second positional argument to ODM_WARNING
        log.ODM_WARNING("Cannot parse SRT line \"%s\": %s" % (line, str(e)))

    return None
class SrtFileParser:
    """Parses DJI-style .SRT telemetry sidecars and exposes per-timestamp
    camera settings (get_entry) and interpolated GPS positions (get_gps)."""

    def __init__(self, filename):
        self.filename = filename
        self.data = []          # parsed subtitle entries (dicts)
        self.gps_data = []      # (timestamp, utm_coords) samples, built lazily
        self.ll_to_utm = None   # lon/lat -> UTM transformer (set on first GPS sample)
        self.utm_to_ll = None   # UTM -> lon/lat transformer

    def get_entry(self, timestamp: datetime):
        """Return the entry whose [start, end] interval contains timestamp, or None."""
        if not self.data:
            self.parse()

        # Outside the covered time span
        if timestamp < self.data[0]["start"] or timestamp > self.data[-1]["end"]:
            return None

        for entry in self.data:
            if entry["start"] <= timestamp <= entry["end"]:
                return entry

        return None

    def get_gps(self, timestamp):
        """Return coordinates linearly interpolated at timestamp (via TransformPoint), or None."""
        if not self.data:
            self.parse()

        # Build the (time, UTM coordinates) sample list on first call
        if not self.gps_data:
            for d in self.data:
                lat, lon, alt = d.get('latitude'), d.get('longitude'), d.get('altitude')
                tm = d.get('start')

                if lat is not None and lon is not None:
                    if self.ll_to_utm is None:
                        self.ll_to_utm, self.utm_to_ll = location.utm_transformers_from_ll(lon, lat)

                    coords = self.ll_to_utm.TransformPoint(lon, lat, alt)

                    # Keep only samples where the position actually changed in X/Y
                    add = (not len(self.gps_data)) or (coords[0], coords[1]) != (self.gps_data[-1][1][0], self.gps_data[-1][1][1])
                    if add:
                        self.gps_data.append((tm, coords))

        # No data, or timestamp precedes the first sample
        if not len(self.gps_data) or self.gps_data[0][0] > timestamp:
            return None

        # Find the pair of samples bracketing the timestamp
        start = end = None
        for i in range(len(self.gps_data)):
            tm, coords = self.gps_data[i]

            # Perfect match
            if timestamp == tm:
                return self.utm_to_ll.TransformPoint(*coords)
            elif tm > timestamp:
                # BUG FIX: stop at the first sample past the timestamp; the
                # loop previously continued and ended on the wrong pair
                end = i
                start = i - 1
                break

        # BUG FIX: timestamp beyond the last sample left start unassigned,
        # which crashed on the `start < 0` comparison below
        if start is None or start < 0:
            return None

        gd_s = self.gps_data[start]
        gd_e = self.gps_data[end]

        sx, sy, sz = gd_s[1]
        ex, ey, ez = gd_e[1]

        dt = (gd_e[0] - gd_s[0]).total_seconds()
        if dt >= 10:
            # Gap too large to interpolate meaningfully
            return None

        dx = (ex - sx) / dt
        dy = (ey - sy) / dt
        dz = (ez - sz) / dt

        t = (timestamp - gd_s[0]).total_seconds()

        return self.utm_to_ll.TransformPoint(
            sx + dx * t,
            sy + dy * t,
            sz + dz * t
        )

    def parse(self):
        """Populate self.data from the SRT file.

        SRT metadata is not standardized; the following formats are supported:

        DJI Mavic Air 2:
            1
            00:00:00,000 --> 00:00:00,016
            <font size="36">SrtCnt : 1, DiffTime : 16ms
            2023-01-06 18:56:48,380,821
            [iso : 3200] [shutter : 1/60.0] [fnum : 280] [ev : 0] [ct : 3925] [color_md : default] [focal_len : 240] [latitude: 0.000000] [longitude: 0.000000] [altitude: 0.000000] </font>

        DJI Mavic Mini:
            1
            00:00:00,000 --> 00:00:01,000
            F/2.8, SS 206.14, ISO 150, EV 0, GPS (-82.6669, 27.7716, 10), D 2.80m, H 0.00m, H.S 0.00m/s, V.S 0.00m/s
        """
        with open(self.filename, 'r') as f:
            iso = None
            shutter = None
            fnum = None
            focal_len = None
            latitude = None
            longitude = None
            altitude = None
            start = None
            end = None

            for line in f:
                # A blank line terminates the current subtitle block
                if not line.strip():
                    if start is not None:
                        self.data.append({
                            "start": start,
                            "end": end,
                            "iso": iso,
                            "shutter": shutter,
                            "fnum": fnum,
                            "focal_len": focal_len,
                            "latitude": latitude,
                            "longitude": longitude,
                            "altitude": altitude
                        })

                    iso = None
                    shutter = None
                    fnum = None
                    focal_len = None
                    latitude = None
                    longitude = None
                    altitude = None
                    start = None
                    end = None

                    continue

                # Remove html tags
                line = re.sub('<[^<]+?>', '', line)

                # Timing line: "00:00:00,000 --> 00:00:00,016"
                match = re.search("(\d{2}:\d{2}:\d{2},\d+) --> (\d{2}:\d{2}:\d{2},\d+)", line)
                if match:
                    start = datetime.strptime(match.group(1), "%H:%M:%S,%f")
                    end = datetime.strptime(match.group(2), "%H:%M:%S,%f")

                iso = match_single([
                    "iso : (\d+)",
                    "ISO (\d+)"
                ], line)

                # BUG FIX: the two patterns were accidentally string-concatenated
                # (missing comma), so the shutter value never matched either format
                shutter = match_single([
                    "shutter : \d+/(\d+\.?\d*)",
                    "SS (\d+\.?\d*)"
                ], line)

                fnum = match_single([
                    ("fnum : (\d+)", lambda v: float(v)/100.0),
                    ("F/([\d\.]+)", float),
                ], line)

                focal_len = match_single("focal_len : (\d+)", line)

                latitude = match_single([
                    ("latitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                    ("GPS \([\d\.\-]+,? ([\d\.\-]+),? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None),
                ], line)

                longitude = match_single([
                    ("longitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                    ("GPS \(([\d\.\-]+),? [\d\.\-]+,? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None),
                ], line)

                altitude = match_single([
                    ("altitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                    ("GPS \([\d\.\-]+,? [\d\.\-]+,? ([\d\.\-]+)\)", lambda v: float(v) if v != 0 else None),
                ], line)

            # BUG FIX: flush the final block when the file has no trailing blank line
            if start is not None:
                self.data.append({
                    "start": start,
                    "end": end,
                    "iso": iso,
                    "shutter": shutter,
                    "fnum": fnum,
                    "focal_len": focal_len,
                    "latitude": latitude,
                    "longitude": longitude,
                    "altitude": altitude
                })

Wyświetl plik

@ -0,0 +1,351 @@
import datetime
from fractions import Fraction
import io
from math import ceil, floor
import time
import cv2
import os
import collections
from PIL import Image
import numpy as np
import piexif
from opendm import log
from opendm.video.srtparser import SrtFileParser
from opendm.video.parameters import Parameters
from opendm.video.checkers import BlackFrameChecker, SimilarityChecker, ThresholdBlurChecker
class Video2Dataset:
    """Extracts still frames from one or more videos, skipping blurry, black
    and near-duplicate frames, and writes the survivors as EXIF-tagged images.

    Filters are optional and driven by Parameters: each checker is instantiated
    only when its threshold(s) are set.
    """

    def __init__(self, parameters : Parameters):
        self.parameters = parameters

        # Optional frame filters; each is enabled only when its threshold(s) are set
        self.blur_checker = ThresholdBlurChecker(parameters.blur_threshold) if parameters.blur_threshold is not None else None
        self.similarity_checker = SimilarityChecker(parameters.distance_threshold) if parameters.distance_threshold is not None else None
        self.black_checker = BlackFrameChecker(parameters.black_ratio_threshold, parameters.pixel_black_threshold) if parameters.black_ratio_threshold is not None or parameters.pixel_black_threshold is not None else None

        self.frame_index = parameters.start
        self.f = None  # stats csv file handle, opened lazily in ProcessVideo

    def ProcessVideo(self):
        """Process every input video; returns the list of written frame paths
        (trimmed to parameters.limit when set)."""
        self.date_now = None
        start = time.time()

        if (self.parameters.stats_file is not None):
            self.f = open(self.parameters.stats_file, "w")
            self.f.write("global_idx;file_name;frame_index;blur_score;is_blurry;is_black;last_frame_index;similarity_score;is_similar;written\n")

        self.global_idx = 0

        output_file_paths = []

        # foreach input file
        for input_file in self.parameters.input:
            # get file name
            file_name = os.path.basename(input_file)
            log.ODM_INFO("Processing video: {}".format(input_file))

            # get video info
            video_info = get_video_info(input_file)
            log.ODM_INFO(video_info)

            # Set pseudo start time: first video uses its mtime, subsequent
            # videos continue from the previous video's end
            if self.date_now is None:
                try:
                    self.date_now = datetime.datetime.fromtimestamp(os.path.getmtime(input_file))
                except:
                    self.date_now = datetime.datetime.now()
            else:
                self.date_now += datetime.timedelta(seconds=video_info.total_frames / video_info.frame_rate)

            log.ODM_INFO("Use pseudo start time: %s" % self.date_now)

            # Optionally load the SRT telemetry sidecar (.srt/.SRT)
            if self.parameters.use_srt:
                name = os.path.splitext(input_file)[0]

                srt_files = [name + ".srt", name + ".SRT"]
                srt_parser = None

                for srt_file in srt_files:
                    if os.path.exists(srt_file):
                        log.ODM_INFO("Loading SRT file: {}".format(srt_file))
                        try:
                            srt_parser = SrtFileParser(srt_file)
                            srt_parser.parse()
                            break
                        except Exception as e:
                            log.ODM_INFO("Error parsing SRT file: {}".format(e))
                            srt_parser = None
            else:
                srt_parser = None

            # The black frame checker needs a calibration pass over the video
            if (self.black_checker is not None and self.black_checker.NeedPreProcess()):
                start2 = time.time()
                log.ODM_INFO("Preprocessing for black frame checker... this might take a bit")
                self.black_checker.PreProcess(input_file, self.parameters.start, self.parameters.end)
                end = time.time()
                log.ODM_INFO("Preprocessing time: {:.2f}s".format(end - start2))
                log.ODM_INFO("Calculated luminance_range_size is {}".format(self.black_checker.luminance_range_size))
                log.ODM_INFO("Calculated luminance_minimum_value is {}".format(self.black_checker.luminance_minimum_value))
                log.ODM_INFO("Calculated absolute_threshold is {}".format(self.black_checker.absolute_threshold))

            # open video file
            cap = cv2.VideoCapture(input_file)
            if (not cap.isOpened()):
                # NOTE(review): returns None here instead of the partial list — confirm intended
                log.ODM_INFO("Error opening video stream or file")
                return

            # Seek to the requested start frame
            if (self.parameters.start is not None):
                cap.set(cv2.CAP_PROP_POS_FRAMES, self.parameters.start)
                self.frame_index = self.parameters.start
                start_frame = self.parameters.start
            else:
                start_frame = 0

            frames_to_process = self.parameters.end - start_frame + 1 if (self.parameters.end is not None) else video_info.total_frames - start_frame

            progress = 0
            while (cap.isOpened()):
                ret, frame = cap.read()

                if not ret:
                    break

                if (self.parameters.end is not None and self.frame_index > self.parameters.end):
                    break

                # Calculate progress percentage
                prev_progress = progress
                progress = floor((self.frame_index - start_frame + 1) / frames_to_process * 100)
                if progress != prev_progress:
                    print("[{}][{:3d}%] Processing frame {}/{}: ".format(file_name, progress, self.frame_index - start_frame + 1, frames_to_process), end="\r")

                stats = self.ProcessFrame(frame, video_info, srt_parser)

                if stats is not None and self.parameters.stats_file is not None:
                    self.WriteStats(input_file, stats)

                # Add element to array
                if stats is not None and "written" in stats.keys():
                    output_file_paths.append(stats["path"])

            cap.release()

        if self.f is not None:
            self.f.close()

        # Trim the output down to the requested frame limit, evenly spaced
        if self.parameters.limit is not None and self.parameters.limit > 0 and self.global_idx >= self.parameters.limit:
            log.ODM_INFO("Limit of {} frames reached, trimming dataset".format(self.parameters.limit))
            output_file_paths = limit_files(output_file_paths, self.parameters.limit)

        end = time.time()
        log.ODM_INFO("Total processing time: {:.2f}s".format(end - start))
        return output_file_paths

    def ProcessFrame(self, frame, video_info, srt_parser):
        """Run the enabled checkers on one frame; save it if all pass.

        Returns a stats dict; contains "written"/"path" keys only when the
        frame survived every filter and was written to disk.
        """
        res = {"frame_index": self.frame_index, "global_idx": self.global_idx}

        # Checkers operate on a downscaled grayscale copy for speed
        frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        h, w = frame_bw.shape
        resolution = self.parameters.internal_resolution
        if resolution < w or resolution < h:
            m = max(w, h)
            factor = resolution / m
            frame_bw = cv2.resize(frame_bw, (int(ceil(w * factor)), int(ceil(h * factor))), interpolation=cv2.INTER_NEAREST)

        if (self.blur_checker is not None):
            blur_score, is_blurry = self.blur_checker.IsBlur(frame_bw, self.frame_index)
            res["blur_score"] = blur_score
            res["is_blurry"] = is_blurry

            if is_blurry:
                # print ("blurry, skipping")
                self.frame_index += 1
                return res

        if (self.black_checker is not None):
            is_black = self.black_checker.IsBlack(frame_bw, self.frame_index)
            res["is_black"] = is_black

            if is_black:
                # print ("black, skipping")
                self.frame_index += 1
                return res

        if (self.similarity_checker is not None):
            similarity_score, is_similar, last_frame_index = self.similarity_checker.IsSimilar(frame_bw, self.frame_index)
            res["similarity_score"] = similarity_score
            res["is_similar"] = is_similar
            res["last_frame_index"] = last_frame_index

            if is_similar:
                # print ("similar to {}, skipping".format(self.similarity_checker.last_image_id))
                self.frame_index += 1
                return res

        # Frame survived every filter: write it out
        path = self.SaveFrame(frame, video_info, srt_parser)
        res["written"] = True
        res["path"] = path
        self.frame_index += 1
        self.global_idx += 1

        return res

    def SaveFrame(self, frame, video_info, srt_parser: SrtFileParser):
        """Encode one frame to disk with EXIF (time, camera settings, GPS from SRT);
        returns the written file path."""
        max_dim = self.parameters.max_dimension
        if max_dim is not None:
            h, w, _ = frame.shape
            if max_dim < w or max_dim < h:
                m = max(w, h)
                factor = max_dim / m
                frame = cv2.resize(frame, (int(ceil(w * factor)), int(ceil(h * factor))), interpolation=cv2.INTER_AREA)

        path = os.path.join(self.parameters.output,
            "{}_{}_{}.{}".format(video_info.basename, self.global_idx, self.frame_index, self.parameters.frame_format))

        _, buf = cv2.imencode('.' + self.parameters.frame_format, frame)

        # Frame time relative to the start of the video (epoch 1900-01-01)
        delta = datetime.timedelta(seconds=(self.frame_index / video_info.frame_rate))
        elapsed_time = datetime.datetime(1900, 1, 1) + delta

        img = Image.open(io.BytesIO(buf))

        entry = gps_coords = None
        if srt_parser is not None:
            entry = srt_parser.get_entry(elapsed_time)
            gps_coords = srt_parser.get_gps(elapsed_time)

        # Shift the relative frame time onto the pseudo wall-clock start time
        exif_time = (elapsed_time + (self.date_now - datetime.datetime(1900, 1, 1)))
        elapsed_time_str = exif_time.strftime("%Y:%m:%d %H:%M:%S")
        subsec_time_str = exif_time.strftime("%f")

        # Exif dict contains the following keys: '0th', 'Exif', 'GPS', '1st', 'thumbnail'

        # Set the EXIF metadata
        exif_dict = {
            "0th": {
                piexif.ImageIFD.Software: "ODM",
                piexif.ImageIFD.DateTime: elapsed_time_str,
                piexif.ImageIFD.XResolution: (frame.shape[1], 1),
                piexif.ImageIFD.YResolution: (frame.shape[0], 1),
                piexif.ImageIFD.Make: "DJI" if video_info.basename.lower().startswith("dji") else "Unknown",
                piexif.ImageIFD.Model: "Unknown"
            },
            "Exif": {
                piexif.ExifIFD.DateTimeOriginal: elapsed_time_str,
                piexif.ExifIFD.DateTimeDigitized: elapsed_time_str,
                piexif.ExifIFD.SubSecTime: subsec_time_str,
                piexif.ExifIFD.PixelXDimension: frame.shape[1],
                piexif.ExifIFD.PixelYDimension: frame.shape[0],
            }}

        # Camera settings from the SRT entry, when available
        if entry is not None:
            if entry["shutter"] is not None:
                exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (1, int(entry["shutter"]))
            if entry["focal_len"] is not None:
                exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (entry["focal_len"], 100)
            if entry["fnum"] is not None:
                exif_dict["Exif"][piexif.ExifIFD.FNumber] = float_to_rational(entry["fnum"])
            if entry["iso"] is not None:
                exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = entry["iso"]

        if gps_coords is not None:
            # gps_coords comes back (x, y, z) from the transformer; lat is [1], lng is [0]
            exif_dict["GPS"] = get_gps_location(elapsed_time, gps_coords[1], gps_coords[0], gps_coords[2])

        exif_bytes = piexif.dump(exif_dict)
        img.save(path, exif=exif_bytes, quality=95)

        return path

    def WriteStats(self, input_file, stats):
        """Append one semicolon-separated stats row to the csv file.

        NOTE(review): the trailing .replace(".", ",") converts decimal points
        for csv locales but also rewrites dots in the file path / extension —
        confirm this is intended.
        """
        self.f.write("{};{};{};{};{};{};{};{};{};{}\n".format(
            stats["global_idx"],
            input_file,
            stats["frame_index"],
            stats["blur_score"] if "blur_score" in stats else "",
            stats["is_blurry"] if "is_blurry" in stats else "",
            stats["is_black"] if "is_black" in stats else "",
            stats["last_frame_index"] if "last_frame_index" in stats else "",
            stats["similarity_score"] if "similarity_score" in stats else "",
            stats["is_similar"] if "is_similar" in stats else "",
            stats["written"] if "written" in stats else "").replace(".", ","))
def get_video_info(input_file):
    """Return a VideoInfo namedtuple (total_frames, frame_rate, basename) for a video file."""
    capture = cv2.VideoCapture(input_file)

    name = os.path.splitext(os.path.basename(input_file))[0]
    n_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = capture.get(cv2.CAP_PROP_FPS)

    capture.release()

    VideoInfo = collections.namedtuple("VideoInfo", ["total_frames", "frame_rate", "basename"])
    return VideoInfo(n_frames, fps, name)
def float_to_rational(f):
    """Convert a float to an EXIF-style (numerator, denominator) rational pair."""
    frac = Fraction(f).limit_denominator()
    return (frac.numerator, frac.denominator)
def limit_files(paths, limit):
    """Keep at most `limit` evenly-spaced paths, deleting the rest from disk.

    Returns the kept paths in their original order; when len(paths) <= limit
    the list is returned unchanged and nothing is deleted.
    """
    if len(paths) <= limit:
        return paths

    # Evenly-spaced indices to keep; everything else is removed from disk
    keep_idxes = np.linspace(0, len(paths) - 1, limit, dtype=int)
    drop_idxes = set(np.arange(0, len(paths))) - set(keep_idxes)

    kept = [paths[i] for i in keep_idxes]
    for idx in drop_idxes:
        os.remove(paths[idx])

    return kept
def to_deg(value, loc):
    """Convert a decimal coordinate into a (degrees, minutes, seconds, hemisphere) tuple.

    Keyword arguments: value is a float GPS coordinate; loc is the direction
    pair, e.g. ["S", "N"] or ["W", "E"].
    Returns a tuple like (25, 13, 48.343, 'N'); hemisphere is "" for zero.
    """
    if value < 0:
        hemisphere = loc[0]
    elif value > 0:
        hemisphere = loc[1]
    else:
        hemisphere = ""

    magnitude = abs(value)
    degrees = int(magnitude)
    minutes_float = (magnitude - degrees) * 60
    minutes = int(minutes_float)
    seconds = round((minutes_float - minutes) * 60, 5)
    return (degrees, minutes, seconds, hemisphere)
def get_gps_location(elapsed_time, lat, lng, altitude):
    """Build a piexif GPS IFD dict from decimal lat/lng, altitude and a timestamp."""
    lat_dms = to_deg(lat, ["S", "N"])
    lng_dms = to_deg(lng, ["W", "E"])

    # Degrees/minutes/seconds encoded as EXIF rationals
    exiv_lat = (float_to_rational(lat_dms[0]), float_to_rational(lat_dms[1]), float_to_rational(lat_dms[2]))
    exiv_lng = (float_to_rational(lng_dms[0]), float_to_rational(lng_dms[1]), float_to_rational(lng_dms[2]))

    gps_ifd = {
        piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0),
        piexif.GPSIFD.GPSDateStamp: elapsed_time.strftime('%Y:%m:%d')
    }

    if lat is not None and lng is not None:
        gps_ifd[piexif.GPSIFD.GPSLatitudeRef] = lat_dms[3]
        gps_ifd[piexif.GPSIFD.GPSLatitude] = exiv_lat
        gps_ifd[piexif.GPSIFD.GPSLongitudeRef] = lng_dms[3]
        gps_ifd[piexif.GPSIFD.GPSLongitude] = exiv_lng

    if altitude is not None:
        gps_ifd[piexif.GPSIFD.GPSAltitudeRef] = 0
        gps_ifd[piexif.GPSIFD.GPSAltitude] = float_to_rational(round(altitude))

    return gps_ifd

Wyświetl plik

@ -29,7 +29,8 @@ FROM ubuntu:21.04
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \
PDAL_DRIVER_PATH="/code/SuperBuild/install/bin"
WORKDIR /code

Wyświetl plik

@ -7,7 +7,7 @@ ODMExifRead==3.0.4
Fiona==1.8.17 ; sys_platform == 'linux' or sys_platform == 'darwin'
https://github.com/OpenDroneMap/windows-deps/raw/main/Fiona-1.8.19-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
joblib==1.1.0
laspy==1.7.0
laspy[lazrs]==2.3.0
lxml==4.6.1
matplotlib==3.3.3
networkx==2.5
@ -31,4 +31,8 @@ xmltodict==0.12.0
fpdf2==2.4.6
Shapely==1.7.1
onnxruntime==1.12.1
pygltflib==1.15.3
pygltflib==1.15.3
codem==0.24.0
trimesh==3.17.1
pandas==1.5.2
piexif==1.1.3

Wyświetl plik

@ -15,6 +15,7 @@ from opendm import ai
from opendm.skyremoval.skyfilter import SkyFilter
from opendm.bgfilter import BgFilter
from opendm.concurrency import parallel_map
from opendm.video.video2dataset import Parameters, Video2Dataset
def save_images_database(photos, database_file):
with open(database_file, 'w') as f:
@ -46,32 +47,37 @@ def load_images_database(database_file):
class ODMLoadDatasetStage(types.ODM_Stage):
def process(self, args, outputs):
outputs['start_time'] = system.now_raw()
tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
tree = types.ODM_Tree(args.project_path, args.gcp, args.geo, args.align)
outputs['tree'] = tree
if args.time and io.file_exists(tree.benchmarking):
if io.file_exists(tree.benchmarking):
# Delete the previously made file
os.remove(tree.benchmarking)
with open(tree.benchmarking, 'a') as b:
b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))
# check if the image filename is supported
def valid_image_filename(filename):
try:
os.remove(tree.benchmarking)
with open(tree.benchmarking, 'a') as b:
b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))
except Exception as e:
log.ODM_WARNING("Cannot write benchmark file: %s" % str(e))
def valid_filename(filename, supported_extensions):
(pathfn, ext) = os.path.splitext(filename)
return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"
return ext.lower() in supported_extensions and pathfn[-5:] != "_mask"
# Get supported images from dir
def get_images(in_dir):
log.ODM_DEBUG(in_dir)
entries = os.listdir(in_dir)
valid, rejects = [], []
for f in entries:
if valid_image_filename(f):
if valid_filename(f, context.supported_extensions):
valid.append(f)
else:
rejects.append(f)
return valid, rejects
def search_video_files(in_dir):
entries = os.listdir(in_dir)
return [os.path.join(in_dir, f) for f in entries if valid_filename(f, context.supported_video_extensions)]
def find_mask(photo_path, masks):
(pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
k = "{}_mask".format(pathfn)
@ -83,6 +89,8 @@ class ODMLoadDatasetStage(types.ODM_Stage):
return mask
else:
log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))
# get images directory
images_dir = tree.dataset_raw
@ -98,6 +106,51 @@ class ODMLoadDatasetStage(types.ODM_Stage):
if not os.path.exists(images_dir):
raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))
# Check if we need to extract video frames
frames_db_file = os.path.join(images_dir, 'frames.json')
if not os.path.exists(frames_db_file) or self.rerun():
video_files = search_video_files(images_dir)
# If we're re-running the pipeline, and frames have been extracted during a previous run
# we need to remove those before re-extracting them
if len(video_files) > 0 and os.path.exists(frames_db_file) and self.rerun():
log.ODM_INFO("Re-run, removing previously extracted video frames")
frames = []
try:
with open(frames_db_file, 'r') as f:
frames = json.loads(f.read())
except Exception as e:
log.ODM_WARNING("Cannot check previous video extraction: %s" % str(e))
for f in frames:
fp = os.path.join(images_dir, f)
if os.path.isfile(fp):
os.remove(fp)
if len(video_files) > 0:
log.ODM_INFO("Found video files (%s), extracting frames" % len(video_files))
try:
params = Parameters({
"input": video_files,
"output": images_dir,
"blur_threshold": 300,
"distance_threshold": 10,
"black_ratio_threshold": 0.98,
"pixel_black_threshold": 0.30,
"use_srt": True,
"max_dimension": args.video_resolution,
"limit": args.video_limit,
})
v2d = Video2Dataset(params)
frames = v2d.ProcessVideo()
with open(frames_db_file, 'w') as f:
f.write(json.dumps([os.path.basename(f) for f in frames]))
except Exception as e:
log.ODM_WARNING("Could not extract video frames: %s" % str(e))
files, rejects = get_images(images_dir)
if files:
# create ODMPhoto list
@ -153,14 +206,14 @@ class ODMLoadDatasetStage(types.ODM_Stage):
if args.sky_removal:
# For each image that :
# - Doesn't already have a mask, AND
# - Is not nadir (or if orientation info is missing), AND
# - Is not nadir (or if orientation info is missing, or if camera lens is fisheye), AND
# - There are no spaces in the image filename (OpenSfM requirement)
# Automatically generate a sky mask
# Generate list of sky images
sky_images = []
for p in photos:
if p.mask is None and (p.pitch is None or (abs(p.pitch) > 20)) and (not " " in p.filename):
if p.mask is None and (args.camera_lens in ['fisheye', 'spherical'] or p.pitch is None or (abs(p.pitch) > 20)) and (not " " in p.filename):
sky_images.append({'file': os.path.join(images_dir, p.filename), 'p': p})
if len(sky_images) > 0:

Wyświetl plik

@ -7,6 +7,7 @@ from opendm import context
from opendm import types
from opendm.multispectral import get_primary_band_name
from opendm.photo import find_largest_photo_dim
from opendm.objpacker import obj_pack
class ODMMvsTexStage(types.ODM_Stage):
def process(self, args, outputs):
@ -68,11 +69,15 @@ class ODMMvsTexStage(types.ODM_Stage):
system.mkdir_p(r['out_dir'])
odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj)
unaligned_obj = io.related_file_path(odm_textured_model_obj, postfix="_unaligned")
if not io.file_exists(odm_textured_model_obj) or self.rerun():
log.ODM_INFO('Writing MVS Textured file in: %s'
% odm_textured_model_obj)
if os.path.isfile(unaligned_obj):
os.unlink(unaligned_obj)
# Format arguments to fit Mvs-Texturing app
skipGlobalSeamLeveling = ""
skipLocalSeamLeveling = ""
@ -93,12 +98,12 @@ class ODMMvsTexStage(types.ODM_Stage):
'bin': context.mvstex_path,
'out_dir': os.path.join(r['out_dir'], "odm_textured_model_geo"),
'model': r['model'],
'dataTerm': args.texturing_data_term,
'outlierRemovalType': args.texturing_outlier_removal_type,
'dataTerm': 'gmi',
'outlierRemovalType': 'gauss_clamping',
'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
'skipLocalSeamLeveling': skipLocalSeamLeveling,
'keepUnseenFaces': keepUnseenFaces,
'toneMapping': args.texturing_tone_mapping,
'toneMapping': 'none',
'nadirMode': nadir,
'maxTextureSize': '--max_texture_size=%s' % max_texture_size,
'nvm_file': r['nvm_file'],
@ -125,6 +130,26 @@ class ODMMvsTexStage(types.ODM_Stage):
'{labelingFile} '
'{maxTextureSize} '.format(**kwargs))
# Single material?
if args.texturing_single_material and r['primary'] and (not r['nadir'] or args.skip_3dmodel):
log.ODM_INFO("Packing to single material")
packed_dir = os.path.join(r['out_dir'], 'packed')
if io.dir_exists(packed_dir):
log.ODM_INFO("Removing old packed directory {}".format(packed_dir))
shutil.rmtree(packed_dir)
try:
obj_pack(os.path.join(r['out_dir'], tree.odm_textured_model_obj), packed_dir, _info=log.ODM_INFO)
# Move packed/* into texturing folder
system.delete_files(r['out_dir'], (".vec", ))
system.move_files(packed_dir, r['out_dir'])
if os.path.isdir(packed_dir):
os.rmdir(packed_dir)
except Exception as e:
log.ODM_WARNING(str(e))
# Backward compatibility: copy odm_textured_model_geo.mtl to odm_textured_model.mtl
# for certain older WebODM clients which expect a odm_textured_model.mtl
# to be present for visualization
@ -133,7 +158,7 @@ class ODMMvsTexStage(types.ODM_Stage):
if io.file_exists(geo_mtl):
nongeo_mtl = os.path.join(r['out_dir'], 'odm_textured_model.mtl')
shutil.copy(geo_mtl, nongeo_mtl)
progress += progress_per_run
self.update_progress(progress)
else:

Wyświetl plik

@ -26,17 +26,13 @@ class ODMApp:
"""
Initializes the application and defines the ODM application pipeline stages
"""
if args.debug:
log.logger.show_debug = True
json_log_paths = [os.path.join(args.project_path, "log.json")]
if args.copy_to:
json_log_paths.append(args.copy_to)
log.logger.init_json_output(json_log_paths, args)
dataset = ODMLoadDatasetStage('dataset', args, progress=5.0,
verbose=args.verbose)
dataset = ODMLoadDatasetStage('dataset', args, progress=5.0)
split = ODMSplitStage('split', args, progress=75.0)
merge = ODMMergeStage('merge', args, progress=100.0)
opensfm = ODMOpenSfMStage('opensfm', args, progress=25.0)
@ -47,15 +43,12 @@ class ODMApp:
oct_tree=max(1, min(14, args.mesh_octree_depth)),
samples=1.0,
point_weight=4.0,
max_concurrency=args.max_concurrency,
verbose=args.verbose)
max_concurrency=args.max_concurrency)
texturing = ODMMvsTexStage('mvs_texturing', args, progress=70.0)
georeferencing = ODMGeoreferencingStage('odm_georeferencing', args, progress=80.0,
gcp_file=args.gcp,
verbose=args.verbose)
gcp_file=args.gcp)
dem = ODMDEMStage('odm_dem', args, progress=90.0,
max_concurrency=args.max_concurrency,
verbose=args.verbose)
max_concurrency=args.max_concurrency)
orthophoto = ODMOrthoPhotoStage('odm_orthophoto', args, progress=98.0)
report = ODMReport('odm_report', args, progress=99.0)
postprocess = ODMPostProcess('odm_postprocess', args, progress=100.0)

Wyświetl plik

@ -1,4 +1,4 @@
import os, json
import os, json, math
from shutil import copyfile
from opendm import io
@ -58,8 +58,7 @@ class ODMDEMStage(types.ODM_Stage):
args.smrf_scalar,
args.smrf_slope,
args.smrf_threshold,
args.smrf_window,
verbose=args.verbose
args.smrf_window
)
with open(pc_classify_marker, 'w') as f:
@ -73,7 +72,7 @@ class ODMDEMStage(types.ODM_Stage):
self.update_progress(progress)
if args.pc_rectify:
commands.rectify(dem_input, args.debug)
commands.rectify(dem_input, False)
# Do we need to process anything here?
if (args.dsm or args.dtm) and pc_model_found:
@ -91,7 +90,7 @@ class ODMDEMStage(types.ODM_Stage):
radius_steps = [(resolution / 100.0) / 2.0]
for _ in range(args.dem_gapfill_steps - 1):
radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary, maybe there's a better value?
radius_steps.append(radius_steps[-1] * math.sqrt(2)) # sqrt(2) is arbitrary, maybe there's a better value?
for product in products:
commands.create_dem(
@ -103,7 +102,6 @@ class ODMDEMStage(types.ODM_Stage):
outdir=odm_dem_root,
resolution=resolution / 100.0,
decimation=args.dem_decimation,
verbose=args.verbose,
max_workers=args.max_concurrency,
keep_unfilled_copy=args.dem_euclidean_map
)

Wyświetl plik

@ -53,7 +53,6 @@ class ODMFilterPoints(types.ODM_Stage):
standard_deviation=args.pc_filter,
sample_radius=args.pc_sample,
boundary=boundary_offset(outputs.get('boundary'), reconstruction.get_proj_offset()),
verbose=args.verbose,
max_concurrency=args.max_concurrency)
# Quick check

Wyświetl plik

@ -1,4 +1,5 @@
import os
import shutil
import struct
import pipes
import fiona
@ -18,6 +19,8 @@ from opendm import point_cloud
from opendm.multispectral import get_primary_band_name
from opendm.osfm import OSFMContext
from opendm.boundary import as_polygon, export_to_bounds_files
from opendm.align import compute_alignment_matrix, transform_point_cloud, transform_obj
from opendm.utils import np_to_json
class ODMGeoreferencingStage(types.ODM_Stage):
def process(self, args, outputs):
@ -29,6 +32,9 @@ class ODMGeoreferencingStage(types.ODM_Stage):
gcp_export_file = tree.path("odm_georeferencing", "ground_control_points.gpkg")
gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
gcp_geojson_export_file = tree.path("odm_georeferencing", "ground_control_points.geojson")
unaligned_model = io.related_file_path(tree.odm_georeferencing_model_laz, postfix="_unaligned")
if os.path.isfile(unaligned_model) and self.rerun():
os.unlink(unaligned_model)
if reconstruction.has_gcp() and (not io.file_exists(gcp_export_file) or self.rerun()):
octx = OSFMContext(tree.opensfm)
@ -166,7 +172,64 @@ class ODMGeoreferencingStage(types.ODM_Stage):
else:
log.ODM_INFO("Converting point cloud (non-georeferenced)")
system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params))
stats_dir = tree.path("opensfm", "stats", "codem")
if os.path.exists(stats_dir) and self.rerun():
shutil.rmtree(stats_dir)
if tree.odm_align_file is not None:
alignment_file_exists = io.file_exists(tree.odm_georeferencing_alignment_matrix)
if not alignment_file_exists or self.rerun():
if alignment_file_exists:
os.unlink(tree.odm_georeferencing_alignment_matrix)
a_matrix = None
try:
a_matrix = compute_alignment_matrix(tree.odm_georeferencing_model_laz, tree.odm_align_file, stats_dir)
except Exception as e:
log.ODM_WARNING("Cannot compute alignment matrix: %s" % str(e))
if a_matrix is not None:
log.ODM_INFO("Alignment matrix: %s" % a_matrix)
# Align point cloud
if os.path.isfile(unaligned_model):
os.rename(unaligned_model, tree.odm_georeferencing_model_laz)
os.rename(tree.odm_georeferencing_model_laz, unaligned_model)
try:
transform_point_cloud(unaligned_model, a_matrix, tree.odm_georeferencing_model_laz)
log.ODM_INFO("Transformed %s" % tree.odm_georeferencing_model_laz)
except Exception as e:
log.ODM_WARNING("Cannot transform point cloud: %s" % str(e))
os.rename(unaligned_model, tree.odm_georeferencing_model_laz)
# Align textured models
for texturing in [tree.odm_texturing, tree.odm_25dtexturing]:
obj = os.path.join(texturing, "odm_textured_model_geo.obj")
if os.path.isfile(obj):
unaligned_obj = io.related_file_path(obj, postfix="_unaligned")
if os.path.isfile(unaligned_obj):
os.rename(unaligned_obj, obj)
os.rename(obj, unaligned_obj)
try:
transform_obj(unaligned_obj, a_matrix, [reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset], obj)
log.ODM_INFO("Transformed %s" % obj)
except Exception as e:
log.ODM_WARNING("Cannot transform textured model: %s" % str(e))
os.rename(unaligned_obj, obj)
with open(tree.odm_georeferencing_alignment_matrix, "w") as f:
f.write(np_to_json(a_matrix))
else:
log.ODM_WARNING("Alignment to %s will be skipped." % tree.odm_align_file)
else:
log.ODM_WARNING("Already computed alignment")
elif io.file_exists(tree.odm_georeferencing_alignment_matrix):
os.unlink(tree.odm_georeferencing_alignment_matrix)
point_cloud.post_point_cloud_steps(args, tree, self.rerun())
else:
log.ODM_WARNING('Found a valid georeferenced model in: %s'

Wyświetl plik

@ -27,9 +27,7 @@ class ODMeshingStage(types.ODM_Stage):
samples=self.params.get('samples'),
maxVertexCount=self.params.get('max_vertex'),
pointWeight=self.params.get('point_weight'),
threads=max(1, self.params.get('max_concurrency') - 1), # poissonrecon can get stuck on some machines if --threads == all cores
verbose=self.params.get('verbose'))
threads=max(1, self.params.get('max_concurrency') - 1)), # poissonrecon can get stuck on some machines if --threads == all cores
else:
log.ODM_WARNING('Found a valid ODM Mesh file in: %s' %
tree.odm_mesh)
@ -68,7 +66,6 @@ class ODMeshingStage(types.ODM_Stage):
depth=self.params.get('oct_tree'),
maxVertexCount=self.params.get('max_vertex'),
samples=self.params.get('samples'),
verbose=self.params.get('verbose'),
available_cores=args.max_concurrency,
method='poisson' if args.fast_orthophoto else 'gridded',
smooth_dsm=True)

Wyświetl plik

@ -18,7 +18,6 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
reconstruction = outputs['reconstruction']
verbose = '-verbose' if args.verbose else ''
# define paths and create working directories
system.mkdir_p(tree.odm_orthophoto)
@ -42,8 +41,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
'corners': tree.odm_orthophoto_corners,
'res': resolution,
'bands': '',
'depth_idx': '',
'verbose': verbose
'depth_idx': ''
}
models = []
@ -85,7 +83,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
# run odm_orthophoto
system.run('"{odm_ortho_bin}" -inputFiles {models} '
'-logFile "{log}" -outputFile "{ortho}" -resolution {res} {verbose} '
'-logFile "{log}" -outputFile "{ortho}" -resolution {res} -verbose '
'-outputCornerFile "{corners}" {bands} {depth_idx}'.format(**kwargs))
# Create georeferenced GeoTiff
@ -167,5 +165,5 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
else:
log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_tif)
if args.optimize_disk_space and io.file_exists(tree.odm_orthophoto_render):
if io.file_exists(tree.odm_orthophoto_render):
os.remove(tree.odm_orthophoto_render)

Wyświetl plik

@ -14,7 +14,7 @@ from opendm.point_cloud import export_info_json
from opendm.cropper import Cropper
from opendm.orthophoto import get_orthophoto_vars, get_max_memory, generate_png
from opendm.tiles.tiler import generate_colored_hillshade
from opendm.utils import get_raster_stats
from opendm.utils import get_raster_stats, np_from_json
def hms(seconds):
h = seconds // 3600
@ -49,7 +49,14 @@ class ODMReport(types.ODM_Stage):
if not io.file_exists(shots_geojson) or self.rerun():
# Extract geographical camera shots
if reconstruction.is_georeferenced():
shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, utm_srs=reconstruction.get_proj_srs(), utm_offset=reconstruction.georef.utm_offset())
# Check if alignment has been performed (we need to transform our shots if so)
a_matrix = None
if io.file_exists(tree.odm_georeferencing_alignment_matrix):
with open(tree.odm_georeferencing_alignment_matrix, 'r') as f:
a_matrix = np_from_json(f.read())
log.ODM_INFO("Aligning shots to %s" % a_matrix)
shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, utm_srs=reconstruction.get_proj_srs(), utm_offset=reconstruction.georef.utm_offset(), a_matrix=a_matrix)
else:
# Pseudo geo
shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, pseudo_geotiff=tree.odm_orthophoto_tif)
@ -73,6 +80,7 @@ class ODMReport(types.ODM_Stage):
odm_stats_json = os.path.join(tree.odm_report, "stats.json")
octx = OSFMContext(tree.opensfm)
osfm_stats_json = octx.path("stats", "stats.json")
codem_stats_json = octx.path("stats", "codem", "registration.json")
odm_stats = None
point_cloud_file = None
views_dimension = None
@ -111,6 +119,11 @@ class ODMReport(types.ODM_Stage):
'average_gsd': gsd.opensfm_reconstruction_average_gsd(octx.recon_file(), use_all_shots=reconstruction.has_gcp()),
}
# Add CODEM stats
if os.path.exists(codem_stats_json):
with open(codem_stats_json, 'r') as f:
odm_stats['align'] = json.loads(f.read())
with open(odm_stats_json, 'w') as f:
f.write(json.dumps(odm_stats))
else:

Wyświetl plik

@ -61,11 +61,11 @@ class ODMOpenMVSStage(types.ODM_Stage):
number_views_fuse = 2
densify_ini_file = os.path.join(tree.openmvs, 'Densify.ini')
subres_levels = 2 # The number of lower resolutions to process before estimating output resolution depthmap.
filter_point_th = -20
config = [
" --resolution-level %s" % int(resolution_level),
'--dense-config-file "%s"' % densify_ini_file,
"--min-resolution %s" % depthmap_resolution,
"--max-resolution %s" % int(outputs['undist_image_max_size']),
"--max-threads %s" % args.max_concurrency,
"--number-views-fuse %s" % number_views_fuse,
@ -77,7 +77,8 @@ class ODMOpenMVSStage(types.ODM_Stage):
gpu_config = []
use_gpu = has_gpu(args)
if use_gpu:
gpu_config.append("--cuda-device -3")
#gpu_config.append("--cuda-device -3")
gpu_config.append("--cuda-device -1")
else:
gpu_config.append("--cuda-device -2")
@ -85,24 +86,22 @@ class ODMOpenMVSStage(types.ODM_Stage):
config.append("--fusion-mode 1")
extra_config = []
if not args.pc_geometric:
if args.pc_skip_geometric:
extra_config.append("--geometric-iters 0")
masks_dir = os.path.join(tree.opensfm, "undistorted", "masks")
masks = os.path.exists(masks_dir) and len(os.listdir(masks_dir)) > 0
if masks:
extra_config.append("--ignore-mask-label 0")
sharp = args.pc_geometric
with open(densify_ini_file, 'w+') as f:
f.write("Optimize = %s\n" % (7 if sharp else 3))
f.write("Optimize = 7\n")
def run_densify():
system.run('"%s" "%s" %s' % (context.omvs_densify_path,
openmvs_scene_file,
' '.join(config + gpu_config + extra_config)))
try:
run_densify()
except system.SubprocessException as e:
@ -112,6 +111,11 @@ class ODMOpenMVSStage(types.ODM_Stage):
log.ODM_WARNING("OpenMVS failed with GPU, is your graphics card driver up to date? Falling back to CPU.")
gpu_config = ["--cuda-device -2"]
run_densify()
elif (e.errorCode == 137 or e.errorCode == 3221226505) and not args.pc_tile:
log.ODM_WARNING("OpenMVS ran out of memory, we're going to turn on tiling to see if we can process this.")
args.pc_tile = True
config.append("--fusion-mode 1")
run_densify()
else:
raise e
@ -157,7 +161,6 @@ class ODMOpenMVSStage(types.ODM_Stage):
# Fuse
config = [
'--resolution-level %s' % int(resolution_level),
'--min-resolution %s' % depthmap_resolution,
'--max-resolution %s' % int(outputs['undist_image_max_size']),
'--dense-config-file "%s"' % subscene_densify_ini_file,
'--number-views-fuse %s' % number_views_fuse,
@ -168,27 +171,27 @@ class ODMOpenMVSStage(types.ODM_Stage):
try:
system.run('"%s" "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config + gpu_config + extra_config)))
except:
log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf)
if not io.file_exists(scene_ply_unfiltered):
scene_ply_files.pop()
log.ODM_WARNING("Could not compute PLY for subscene %s" % sf)
else:
# Filter
if args.pc_filter > 0:
system.run('"%s" "%s" --filter-point-cloud -1 -v 0 %s' % (context.omvs_densify_path, scene_dense_mvs, ' '.join(gpu_config)))
system.run('"%s" "%s" --filter-point-cloud %s -v 0 %s' % (context.omvs_densify_path, scene_dense_mvs, filter_point_th, ' '.join(gpu_config)))
else:
# Just rename
log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply))
os.rename(scene_ply_unfiltered, scene_ply)
except:
log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf)
if not io.file_exists(scene_ply):
scene_ply_files.pop()
log.ODM_WARNING("Could not compute PLY for subscene %s" % sf)
else:
log.ODM_WARNING("Found existing dense scene file %s" % scene_ply)
# Merge
log.ODM_INFO("Merging %s scene files" % len(scene_ply_files))
if len(scene_ply_files) == 0:
log.ODM_ERROR("Could not compute dense point cloud (no PLY files available).")
raise system.ExitException("Could not compute dense point cloud (no PLY files available).")
if len(scene_ply_files) == 1:
# Simply rename
os.replace(scene_ply_files[0], tree.openmvs_model)
@ -197,22 +200,35 @@ class ODMOpenMVSStage(types.ODM_Stage):
# Merge
fast_merge_ply(scene_ply_files, tree.openmvs_model)
else:
def skip_filtering():
# Just rename
scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply')
if not os.path.exists(scene_dense_ply):
raise system.ExitException("Dense reconstruction failed. This could be due to poor georeferencing or insufficient image overlap.")
log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model))
os.rename(scene_dense_ply, tree.openmvs_model)
# Filter all at once
if args.pc_filter > 0:
if os.path.exists(scene_dense):
config = [
"--filter-point-cloud -1",
"--filter-point-cloud %s" % filter_point_th,
'-i "%s"' % scene_dense,
"-v 0"
]
system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config)))
try:
system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config)))
except system.SubprocessException as e:
if e.errorCode == 137 or e.errorCode == 3221226505:
log.ODM_WARNING("OpenMVS filtering ran out of memory, visibility checks will be skipped.")
skip_filtering()
else:
raise e
else:
raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
else:
# Just rename
scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply')
log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model))
os.rename(scene_dense_ply, tree.openmvs_model)
skip_filtering()
self.update_progress(95)

Wyświetl plik

@ -20,6 +20,7 @@ from opendm import point_cloud
from opendm.utils import double_quote
from opendm.tiles.tiler import generate_dem_tiles
from opendm.cogeo import convert_to_cogeo
from opendm import multispectral
class ODMSplitStage(types.ODM_Stage):
def process(self, args, outputs):
@ -54,11 +55,13 @@ class ODMSplitStage(types.ODM_Stage):
log.ODM_INFO("Setting max-concurrency to %s to better handle remote splits" % args.max_concurrency)
log.ODM_INFO("Large dataset detected (%s photos) and split set at %s. Preparing split merge." % (len(photos), args.split))
multiplier = (1.0 / len(reconstruction.multi_camera)) if reconstruction.multi_camera else 1.0
config = [
"submodels_relpath: " + os.path.join("..", "submodels", "opensfm"),
"submodel_relpath_template: " + os.path.join("..", "submodels", "submodel_%04d", "opensfm"),
"submodel_images_relpath_template: " + os.path.join("..", "submodels", "submodel_%04d", "images"),
"submodel_size: %s" % args.split,
"submodel_size: %s" % max(2, int(float(args.split) * multiplier)),
"submodel_overlap: %s" % args.split_overlap,
]
@ -88,12 +91,12 @@ class ODMSplitStage(types.ODM_Stage):
for sp in submodel_paths:
sp_octx = OSFMContext(sp)
submodel_images_dir = os.path.abspath(sp_octx.path("..", "images"))
# Copy filtered GCP file if needed
# One in OpenSfM's directory, one in the submodel project directory
if reconstruction.gcp and reconstruction.gcp.exists():
submodel_gcp_file = os.path.abspath(sp_octx.path("..", "gcp_list.txt"))
submodel_images_dir = os.path.abspath(sp_octx.path("..", "images"))
if reconstruction.gcp.make_filtered_copy(submodel_gcp_file, submodel_images_dir):
log.ODM_INFO("Copied filtered GCP file to %s" % submodel_gcp_file)
@ -107,6 +110,19 @@ class ODMSplitStage(types.ODM_Stage):
io.copy(tree.odm_geo_file, geo_dst_path)
log.ODM_INFO("Copied GEO file to %s" % geo_dst_path)
# If this is a multispectral dataset,
# we need to link the multispectral images
if reconstruction.multi_camera:
submodel_images = os.listdir(submodel_images_dir)
primary_band_name = multispectral.get_primary_band_name(reconstruction.multi_camera, args.primary_band)
_, p2s = multispectral.compute_band_maps(reconstruction.multi_camera, primary_band_name)
for filename in p2s:
if filename in submodel_images:
secondary_band_photos = p2s[filename]
for p in secondary_band_photos:
system.link_file(os.path.join(tree.dataset_raw, p.filename), submodel_images_dir)
# Reconstruct each submodel
log.ODM_INFO("Dataset has been split into %s submodels. Reconstructing each submodel..." % len(submodel_paths))
self.update_progress(25)

Wyświetl plik

@ -14,6 +14,7 @@ set GDAL_DATA=%GDALBASE%\data\gdal
set GDAL_DRIVER_PATH=%GDALBASE%\gdalplugins
set OSFMBASE=%ODMBASE%SuperBuild\install\bin\opensfm\bin
set SBBIN=%ODMBASE%SuperBuild\install\bin
set PDAL_DRIVER_PATH=%ODMBASE%SuperBuild\install\bin
set PATH=%GDALBASE%;%SBBIN%;%OSFMBASE%
set PROJ_LIB=%GDALBASE%\data\proj