Merge pull request #1082 from pierotofy/reflectance

Radiometric Calibration, Sentera6X sensor support
pull/1091/head
Piero Toffanin 2020-03-19 10:20:10 -04:00 zatwierdzone przez GitHub
commit a94f060351
Nie znaleziono w bazie danych klucza dla tego podpisu
ID klucza GPG: 4AEE18F83AFDEB23
19 zmienionych plików z 984 dodań i 249 usunięć

Wyświetl plik

@ -129,7 +129,7 @@ endforeach()
externalproject_add(mve
GIT_REPOSITORY https://github.com/OpenDroneMap/mve.git
GIT_TAG 098
GIT_TAG 099
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/elibs/mve
CONFIGURE_COMMAND ""

Wyświetl plik

@ -9,7 +9,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}/${_proj_name}
GIT_REPOSITORY https://github.com/OpenDroneMap/mvs-texturing
GIT_TAG master
GIT_TAG 099
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------

Wyświetl plik

@ -9,7 +9,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
GIT_TAG 098
GIT_TAG 099
#--Update/Patch step----------
UPDATE_COMMAND git submodule update --init --recursive
#--Configure step-------------

Wyświetl plik

@ -246,7 +246,9 @@ void OdmOrthoPhoto::saveTIFF(const std::string &filename, GDALDataType dataType)
}
// Alpha
if (dataType == GDT_UInt16){
if (dataType == GDT_Float32){
finalizeAlphaBand<float>();
}else if (dataType == GDT_UInt16){
finalizeAlphaBand<uint16_t>();
}else if (dataType == GDT_Byte){
finalizeAlphaBand<uint8_t>();
@ -504,6 +506,10 @@ void OdmOrthoPhoto::createOrthoPhoto()
log_ << "Texture depth: 16bit\n";
initBands<uint16_t>(texture.channels());
if (primary) initAlphaBand<uint16_t>();
}else if (textureDepth == CV_32F){
log_ << "Texture depth: 32bit (float)\n";
initBands<float>(texture.channels());
if (primary) initAlphaBand<float>();
}else{
std::cerr << "Unsupported bit depth value: " << textureDepth;
exit(1);
@ -537,6 +543,8 @@ void OdmOrthoPhoto::createOrthoPhoto()
drawTexturedTriangle<uint8_t>(texture, polygon, meshCloud, uvs, faceIndex+faceOff);
}else if (textureDepth == CV_16U){
drawTexturedTriangle<uint16_t>(texture, polygon, meshCloud, uvs, faceIndex+faceOff);
}else if (textureDepth == CV_32F){
drawTexturedTriangle<float>(texture, polygon, meshCloud, uvs, faceIndex+faceOff);
}
}
faceOff += faces.size();
@ -555,6 +563,8 @@ void OdmOrthoPhoto::createOrthoPhoto()
saveTIFF(outputFile_, GDT_Byte);
}else if (textureDepth == CV_16U){
saveTIFF(outputFile_, GDT_UInt16);
}else if (textureDepth == CV_32F){
saveTIFF(outputFile_, GDT_Float32);
}else{
std::cerr << "Unsupported bit depth value: " << textureDepth;
exit(1);

Wyświetl plik

@ -48,19 +48,37 @@ def url_string(string):
class RerunFrom(argparse.Action):
    """argparse action: given a pipeline stage name, store the tail of
    ``processopts`` from that stage to the end, and mark the option as
    explicitly provided via a companion ``<dest>_is_set`` attribute."""
    def __call__(self, parser, namespace, values, option_string=None):
        # Everything from the requested stage onward.
        start = processopts.index(values)
        setattr(namespace, self.dest, processopts[start:])
        setattr(namespace, self.dest + '_is_set', True)
class StoreTrue(argparse.Action):
    """argparse action equivalent to ``store_true`` that additionally marks
    the option as explicitly provided via ``<dest>_is_set``.

    Must be registered with ``nargs=0`` so no value is consumed."""
    def __call__(self, parser, namespace, values, option_string=None):
        for attr in (self.dest, self.dest + '_is_set'):
            setattr(namespace, attr, True)
class StoreValue(argparse.Action):
    """argparse action equivalent to the default ``store`` that additionally
    marks the option as explicitly provided via ``<dest>_is_set``."""
    def __call__(self, parser, namespace, values, option_string=None):
        # The two attributes are independent; order does not matter.
        setattr(namespace, self.dest + '_is_set', True)
        setattr(namespace, self.dest, values)
parser = SettingsParser(description='OpenDroneMap',
usage='%(prog)s [options] <project name>',
yaml_file=open(context.settings_path))
args = None
def config():
def config(argv=None):
global args
if args is not None and argv is None:
return args
parser.add_argument('--project-path',
metavar='<path>',
action=StoreValue,
help='Path to the project folder')
parser.add_argument('name',
metavar='<project name>',
action=StoreValue,
type=alphanumeric_string,
default='code',
nargs='?',
@ -68,6 +86,7 @@ def config():
parser.add_argument('--resize-to',
metavar='<integer>',
action=StoreValue,
default=2048,
type=int,
help='Resizes images by the largest side for feature extraction purposes only. '
@ -76,6 +95,7 @@ def config():
parser.add_argument('--end-with', '-e',
metavar='<string>',
action=StoreValue,
default='odm_orthophoto',
choices=processopts,
help=('Can be one of:' + ' | '.join(processopts)))
@ -84,11 +104,13 @@ def config():
rerun.add_argument('--rerun', '-r',
metavar='<string>',
action=StoreValue,
choices=processopts,
help=('Can be one of:' + ' | '.join(processopts)))
rerun.add_argument('--rerun-all',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='force rerun of all tasks')
@ -108,6 +130,7 @@ def config():
parser.add_argument('--min-num-features',
metavar='<integer>',
action=StoreValue,
default=8000,
type=int,
help=('Minimum number of features to extract per image. '
@ -115,17 +138,19 @@ def config():
'execution. Default: %(default)s'))
parser.add_argument('--feature-type',
metavar='<string>',
default='sift',
choices=['sift', 'hahog'],
help=('Choose the algorithm for extracting keypoints and computing descriptors. '
'Can be one of: [sift, hahog]. Default: '
'%(default)s'))
metavar='<string>',
action=StoreValue,
default='sift',
choices=['sift', 'hahog'],
help=('Choose the algorithm for extracting keypoints and computing descriptors. '
'Can be one of: [sift, hahog]. Default: '
'%(default)s'))
parser.add_argument('--matcher-neighbors',
type=int,
metavar='<integer>',
action=StoreValue,
default=8,
type=int,
help='Number of nearest images to pre-match based on GPS '
'exif data. Set to 0 to skip pre-matching. '
'Neighbors works together with Distance parameter, '
@ -136,6 +161,7 @@ def config():
parser.add_argument('--matcher-distance',
metavar='<integer>',
action=StoreValue,
default=0,
type=int,
help='Distance threshold in meters to find pre-matching '
@ -144,13 +170,15 @@ def config():
'pre-matching. Default: %(default)s')
parser.add_argument('--use-fixed-camera-params',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Turn off camera parameter optimization during bundler')
parser.add_argument('--cameras',
default='',
metavar='<json>',
action=StoreValue,
type=path_or_json_string,
help='Use the camera parameters computed from '
'another dataset instead of calculating them. '
@ -160,6 +188,7 @@ def config():
parser.add_argument('--camera-lens',
metavar='<string>',
action=StoreValue,
default='auto',
choices=['auto', 'perspective', 'brown', 'fisheye', 'spherical'],
help=('Set a camera projection type. Manually setting a value '
@ -168,8 +197,22 @@ def config():
'set to one of: [auto, perspective, brown, fisheye, spherical]. Default: '
'%(default)s'))
parser.add_argument('--radiometric-calibration',
metavar='<string>',
action=StoreValue,
default='none',
choices=['none', 'camera', 'camera+sun'],
help=('Set the radiometric calibration to perform on images. '
'When processing multispectral images you should set this option '
'to obtain reflectance values (otherwise you will get digital number values). '
'[camera] applies black level, vignetting, row gradient gain/exposure compensation (if appropriate EXIF tags are found). '
'[camera+sun] is experimental, applies all the corrections of [camera], plus compensates for spectral radiance registered via a downwelling light sensor (DLS) taking in consideration the angle of the sun. '
'Can be set to one of: [none, camera, camera+sun]. Default: '
'%(default)s'))
parser.add_argument('--max-concurrency',
metavar='<positive integer>',
action=StoreValue,
default=context.num_cores,
type=int,
help=('The maximum number of processes to use in various '
@ -178,6 +221,7 @@ def config():
parser.add_argument('--depthmap-resolution',
metavar='<positive float>',
action=StoreValue,
type=float,
default=640,
help=('Controls the density of the point cloud by setting the resolution of the depthmap images. Higher values take longer to compute '
@ -186,6 +230,7 @@ def config():
parser.add_argument('--opensfm-depthmap-min-consistent-views',
metavar='<integer: 2 <= x <= 9>',
action=StoreValue,
type=int,
default=3,
help=('Minimum number of views that should reconstruct a point for it to be valid. Use lower values '
@ -195,6 +240,7 @@ def config():
parser.add_argument('--opensfm-depthmap-method',
metavar='<string>',
action=StoreValue,
default='PATCH_MATCH',
choices=['PATCH_MATCH', 'BRUTE_FORCE', 'PATCH_MATCH_SAMPLE'],
help=('Raw depthmap computation algorithm. '
@ -204,6 +250,7 @@ def config():
parser.add_argument('--opensfm-depthmap-min-patch-sd',
metavar='<positive float>',
action=StoreValue,
type=float,
default=1,
help=('When using PATCH_MATCH or PATCH_MATCH_SAMPLE, controls the standard deviation threshold to include patches. '
@ -211,13 +258,15 @@ def config():
'Default: %(default)s'))
parser.add_argument('--use-hybrid-bundle-adjustment',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Run local bundle adjustment for every image added to the reconstruction and a global '
'adjustment every 100 images. Speeds up reconstruction for very large datasets.')
parser.add_argument('--mve-confidence',
metavar='<float: 0 <= x <= 1>',
action=StoreValue,
type=float,
default=0.60,
help=('Discard points that have less than a certain confidence threshold. '
@ -226,22 +275,26 @@ def config():
'Default: %(default)s'))
parser.add_argument('--use-3dmesh',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Use a full 3D mesh to compute the orthophoto instead of a 2.5D mesh. This option is a bit faster and provides similar results in planar areas.')
parser.add_argument('--skip-3dmodel',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Skip generation of a full 3D model. This can save time if you only need 2D results such as orthophotos and DEMs.')
parser.add_argument('--use-opensfm-dense',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Use opensfm to compute dense point cloud alternatively')
parser.add_argument('--ignore-gsd',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Ignore Ground Sampling Distance (GSD). GSD '
'caps the maximum resolution of image outputs and '
@ -250,6 +303,7 @@ def config():
parser.add_argument('--mesh-size',
metavar='<positive integer>',
action=StoreValue,
default=200000,
type=int,
help=('The maximum vertex count of the output mesh. '
@ -257,6 +311,7 @@ def config():
parser.add_argument('--mesh-octree-depth',
metavar='<positive integer>',
action=StoreValue,
default=10,
type=int,
help=('Oct-tree depth used in the mesh reconstruction, '
@ -265,6 +320,7 @@ def config():
parser.add_argument('--mesh-samples',
metavar='<float >= 1.0>',
action=StoreValue,
default=1.0,
type=float,
help=('Number of points per octree node, recommended '
@ -272,6 +328,7 @@ def config():
parser.add_argument('--mesh-point-weight',
metavar='<positive float>',
action=StoreValue,
default=4,
type=float,
help=('This floating point value specifies the importance'
@ -282,15 +339,16 @@ def config():
'Default= %(default)s'))
parser.add_argument('--fast-orthophoto',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Skips dense reconstruction and 3D model generation. '
'It generates an orthophoto directly from the sparse reconstruction. '
'If you just need an orthophoto and do not need a full 3D model, turn on this option. '
'Experimental.')
'If you just need an orthophoto and do not need a full 3D model, turn on this option.')
parser.add_argument('--crop',
metavar='<positive float>',
action=StoreValue,
default=3,
type=float,
help=('Automatically crop image outputs by creating a smooth buffer '
@ -299,7 +357,8 @@ def config():
'Default: %(default)s'))
parser.add_argument('--pc-classify',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Classify the point cloud outputs using a Simple Morphological Filter. '
'You can control the behavior of this option by tweaking the --dem-* parameters. '
@ -307,22 +366,26 @@ def config():
'%(default)s')
parser.add_argument('--pc-csv',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Export the georeferenced point cloud in CSV format. Default: %(default)s')
parser.add_argument('--pc-las',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Export the georeferenced point cloud in LAS format. Default: %(default)s')
parser.add_argument('--pc-ept',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Export the georeferenced point cloud in Entwine Point Tile (EPT) format. Default: %(default)s')
parser.add_argument('--pc-filter',
metavar='<positive float>',
action=StoreValue,
type=float,
default=2.5,
help='Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering.'
@ -331,6 +394,7 @@ def config():
parser.add_argument('--pc-sample',
metavar='<positive float>',
action=StoreValue,
type=float,
default=0,
help='Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud. Set to 0 to disable sampling.'
@ -339,6 +403,7 @@ def config():
parser.add_argument('--smrf-scalar',
metavar='<positive float>',
action=StoreValue,
type=float,
default=1.25,
help='Simple Morphological Filter elevation scalar parameter. '
@ -347,6 +412,7 @@ def config():
parser.add_argument('--smrf-slope',
metavar='<positive float>',
action=StoreValue,
type=float,
default=0.15,
help='Simple Morphological Filter slope parameter (rise over run). '
@ -355,6 +421,7 @@ def config():
parser.add_argument('--smrf-threshold',
metavar='<positive float>',
action=StoreValue,
type=float,
default=0.5,
help='Simple Morphological Filter elevation threshold parameter (meters). '
@ -363,6 +430,7 @@ def config():
parser.add_argument('--smrf-window',
metavar='<positive float>',
action=StoreValue,
type=float,
default=18.0,
help='Simple Morphological Filter window radius parameter (meters). '
@ -371,6 +439,7 @@ def config():
parser.add_argument('--texturing-data-term',
metavar='<string>',
action=StoreValue,
default='gmi',
choices=['gmi', 'area'],
help=('Data term: [area, gmi]. Default: '
@ -378,6 +447,7 @@ def config():
parser.add_argument('--texturing-nadir-weight',
metavar='<integer: 0 <= x <= 32>',
action=StoreValue,
default=16,
type=int,
help=('Affects orthophotos only. '
@ -388,6 +458,7 @@ def config():
parser.add_argument('--texturing-outlier-removal-type',
metavar='<string>',
action=StoreValue,
default='gauss_clamping',
choices=['none', 'gauss_clamping', 'gauss_damping'],
help=('Type of photometric outlier removal method: '
@ -395,36 +466,42 @@ def config():
'%(default)s'))
parser.add_argument('--texturing-skip-visibility-test',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help=('Skip geometric visibility test. Default: '
' %(default)s'))
parser.add_argument('--texturing-skip-global-seam-leveling',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help=('Skip global seam leveling. Useful for IR data.'
'Default: %(default)s'))
parser.add_argument('--texturing-skip-local-seam-leveling',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Skip local seam blending. Default: %(default)s')
parser.add_argument('--texturing-skip-hole-filling',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help=('Skip filling of holes in the mesh. Default: '
' %(default)s'))
parser.add_argument('--texturing-keep-unseen-faces',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help=('Keep faces in the mesh that are not seen in any camera. '
'Default: %(default)s'))
parser.add_argument('--texturing-tone-mapping',
metavar='<string>',
action=StoreValue,
choices=['none', 'gamma'],
default='none',
help='Turn on gamma tone mapping or none for no tone '
@ -433,6 +510,7 @@ def config():
parser.add_argument('--gcp',
metavar='<path string>',
action=StoreValue,
default=None,
help=('path to the file containing the ground control '
'points used for georeferencing. Default: '
@ -441,25 +519,29 @@ def config():
'northing height pixelrow pixelcol imagename'))
parser.add_argument('--use-exif',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help=('Use this tag if you have a gcp_list.txt but '
'want to use the exif geotags instead'))
parser.add_argument('--dtm',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Use this tag to build a DTM (Digital Terrain Model, ground only) using a simple '
'morphological filter. Check the --dem* and --smrf* parameters for finer tuning.')
parser.add_argument('--dsm',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Use this tag to build a DSM (Digital Surface Model, ground + objects) using a progressive '
'morphological filter. Check the --dem* parameters for finer tuning.')
parser.add_argument('--dem-gapfill-steps',
metavar='<positive integer>',
action=StoreValue,
default=3,
type=int,
help='Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. '
@ -470,6 +552,7 @@ def config():
parser.add_argument('--dem-resolution',
metavar='<float>',
action=StoreValue,
type=float,
default=5,
help='DSM/DTM resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also.'
@ -477,6 +560,7 @@ def config():
parser.add_argument('--dem-decimation',
metavar='<positive integer>',
action=StoreValue,
default=1,
type=int,
help='Decimate the points before generating the DEM. 1 is no decimation (full quality). '
@ -484,7 +568,8 @@ def config():
'generation.\nDefault=%(default)s')
parser.add_argument('--dem-euclidean-map',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Computes an euclidean raster map for each DEM. '
'The map reports the distance from each cell to the nearest '
@ -495,25 +580,29 @@ def config():
parser.add_argument('--orthophoto-resolution',
metavar='<float > 0.0>',
action=StoreValue,
default=5,
type=float,
help=('Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also.\n'
'Default: %(default)s'))
parser.add_argument('--orthophoto-no-tiled',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Set this parameter if you want a stripped geoTIFF.\n'
'Default: %(default)s')
parser.add_argument('--orthophoto-png',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Set this parameter if you want to generate a PNG rendering of the orthophoto.\n'
'Default: %(default)s')
parser.add_argument('--orthophoto-compression',
metavar='<string>',
action=StoreValue,
type=str,
choices=['JPEG', 'LZW', 'PACKBITS', 'DEFLATE', 'LZMA', 'NONE'],
default='DEFLATE',
@ -522,7 +611,8 @@ def config():
'are doing. Options: %(choices)s.\nDefault: %(default)s')
parser.add_argument('--orthophoto-cutline',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Generates a polygon around the cropping area '
'that cuts the orthophoto around the edges of features. This polygon '
@ -531,24 +621,28 @@ def config():
'%(default)s')
parser.add_argument('--build-overviews',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Build orthophoto overviews using gdaladdo.')
parser.add_argument('--verbose', '-v',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Print additional messages to the console\n'
'Default: %(default)s')
parser.add_argument('--time',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Generates a benchmark file with runtime info\n'
'Default: %(default)s')
parser.add_argument('--debug',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help='Print debug messages\n'
'Default: %(default)s')
@ -560,6 +654,7 @@ def config():
parser.add_argument('--split',
type=int,
action=StoreValue,
default=999999,
metavar='<positive integer>',
help='Average number of images per submodel. When '
@ -570,6 +665,7 @@ def config():
parser.add_argument('--split-overlap',
type=float,
action=StoreValue,
metavar='<positive integer>',
default=150,
help='Radius of the overlap between submodels. '
@ -580,6 +676,7 @@ def config():
parser.add_argument('--sm-cluster',
metavar='<string>',
action=StoreValue,
type=url_string,
default=None,
help='URL to a ClusterODM instance '
@ -589,6 +686,7 @@ def config():
parser.add_argument('--merge',
metavar='<string>',
action=StoreValue,
default='all',
choices=['all', 'pointcloud', 'orthophoto', 'dem'],
help=('Choose what to merge in the merge step in a split dataset. '
@ -597,20 +695,22 @@ def config():
'%(default)s'))
parser.add_argument('--force-gps',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help=('Use images\' GPS exif data for reconstruction, even if there are GCPs present.'
'This flag is useful if you have high precision GPS measurements. '
'If there are no GCPs, this flag does nothing. Default: %(default)s'))
parser.add_argument('--pc-rectify',
action='store_true',
action=StoreTrue,
nargs=0,
default=False,
help=('Perform ground rectification on the point cloud. This means that wrongly classified ground '
'points will be re-classified and gaps will be filled. Useful for generating DTMs. '
'Default: %(default)s'))
args = parser.parse_args()
args = parser.parse_args(argv)
# check that the project path setting has been set properly
if not args.project_path:
@ -646,5 +746,9 @@ def config():
except exceptions.NodeConnectionError as e:
log.ODM_ERROR("Cluster node seems to be offline: %s" % str(e))
sys.exit(1)
# if args.radiometric_calibration != "none" and not args.texturing_skip_global_seam_leveling:
# log.ODM_WARNING("radiometric-calibration is turned on, automatically setting --texturing-skip-global-seam-leveling")
# args.texturing_skip_global_seam_leveling = True
return args

145
opendm/dls.py 100644
Wyświetl plik

@ -0,0 +1,145 @@
"""
MicaSense Downwelling Light Sensor Utilities
Copyright 2017 MicaSense, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
# for DLS correction, we need the sun position at the time the image was taken
# this can be computed using the pysolar package (ver 0.6)
# https://pypi.python.org/pypi/Pysolar/0.6
# we import multiple times with checking here because the case of Pysolar is
# different depending on the python version :(
import imp
havePysolar = False
try:
import pysolar.solar as pysolar
havePysolar = True
except ImportError:
try:
import Pysolar.solar as pysolar
havePysolar = True
except ImportError:
import pysolar.solar as pysolar
havePysolar = True
finally:
if not havePysolar:
print("Unable to import pysolar")
def fresnel(phi):
    # Fresnel transmission of the DLS diffuser stack for light arriving at
    # incidence angle phi (radians): air (n=1.000277) -> polycarbonate (1.6)
    # -> teflon (1.38), assuming unpolarized light.
    return __multilayer_transmission(phi, n=[1.000277,1.6,1.38])
# define functions to compute the DLS-Sun angle:
def __fresnel_transmission(phi, n1=1.000277, n2=1.38, polarization=[.5, .5]):
    """compute fresnel transmission between media with refractive indices n1 and n2"""
    # computes the reflection and transmittance
    # for incidence angles phi for transition from medium
    # with refractive index n1 to n2
    # teflon e.g. n2=1.38
    # polycarbonate n2=1.6
    # polarization=[.5,.5] - unpolarized light
    # polarization=[1.,0] - s-polarized light - perpendicular to plane of incidence
    # polarization=[0,1.] - p-polarized light - parallel to plane of incidence
    f1 = np.cos(phi)
    f2 = np.sqrt(1-(n1/n2*np.sin(phi))**2)
    # Fresnel reflectances for s- and p-polarized components
    Rs = ((n1*f1-n2*f2)/(n1*f1+n2*f2))**2
    Rp = ((n1*f2-n2*f1)/(n1*f2+n2*f1))**2
    # transmitted fraction is 1 minus the polarization-weighted reflectance
    T = 1.-polarization[0]*Rs-polarization[1]*Rp
    # clamp non-physical values (numerical noise, NaN from total internal
    # reflection when the sqrt argument goes negative) to zero
    if T > 1: T= 0.
    if T < 0: T = 0.
    if np.isnan(T): T = 0.
    return T
def __multilayer_transmission(phi, n, polarization=[.5, .5]):
    # Total transmission through a stack of media with refractive indices
    # n[0..k]: product of the Fresnel transmission at each interface, with
    # the effective incidence angle refracted (Snell's law) layer by layer.
    T = 1.0
    phi_eff = np.copy(phi)
    for i in range(0,len(n)-1):
        n1 = n[i]
        n2 = n[i+1]
        phi_eff = np.arcsin(np.sin(phi_eff)/n1)
        T *= __fresnel_transmission(phi_eff, n1, n2, polarization=polarization)
    return T
# get the position of the sun in North-East-Down (NED) coordinate system
def ned_from_pysolar(sunAzimuth, sunAltitude):
    """Convert pysolar azimuth/altitude angles (radians) to a unit vector in
    North-East-Down (NED) coordinates pointing toward the sun."""
    cos_alt = np.cos(sunAltitude)
    north = np.cos(sunAzimuth) * cos_alt
    east = np.sin(sunAzimuth) * cos_alt
    down = -np.sin(sunAltitude)
    return np.array((north, east, down)).transpose()
# get the sensor orientation in North-East-Down coordinates
# pose is a yaw/pitch/roll tuple of angles measured for the DLS
# ori is the 3D orientation vector of the DLS in body coordinates (typically [0,0,-1])
def get_orientation(pose, ori):
    """Rotate the DLS orientation vector ``ori`` (body frame, typically
    [0, 0, -1]) into NED coordinates using the yaw/pitch/roll angles in
    radians given by ``pose``."""
    yaw, pitch, roll = pose
    cy, sy = np.cos(-yaw), np.sin(-yaw)
    cp, sp = np.cos(-pitch), np.sin(-pitch)
    cr, sr = np.cos(-roll), np.sin(-roll)
    rot_yaw = np.array([[cy, sy, 0], [-sy, cy, 0], [0, 0, 1]])
    rot_pitch = np.array([[cp, 0, -sp], [0, 1, 0], [sp, 0, cp]])
    rot_roll = np.array([[1, 0, 0], [0, cr, sr], [0, -sr, cr]])
    # Compose yaw * pitch * roll, then apply to the body-frame vector.
    rotation = np.dot(rot_yaw, np.dot(rot_pitch, rot_roll))
    return np.dot(rotation, ori)
# from the current position (lat,lon,alt) tuple
# and time (UTC), as well as the sensor orientation (yaw,pitch,roll) tuple
# compute a sensor sun angle - this is needed as the actual sun irradiance
# (for clear skies) is related to the measured irradiance by:
# I_measured = I_direct * cos (sun_sensor_angle) + I_diffuse
# For clear sky, I_direct/I_diffuse ~ 6 and we can simplify this to
# I_measured = I_direct * (cos (sun_sensor_angle) + 1/6)
def compute_sun_angle(
    position,
    pose,
    utc_datetime,
    sensor_orientation,
):
    """ compute the sun angle using pysolar functions"""
    # position: (lat, lon, ...) — only indices 0 and 1 are read
    # pose: (yaw, pitch, roll) of the DLS in radians
    # returns: (nSun, nSensor, angle, sunAltitude, sunAzimuth)
    altitude = 0
    azimuth = 0
    import warnings
    with warnings.catch_warnings():  # Ignore pysolar leap seconds offset warning
        warnings.simplefilter("ignore")
        try:
            altitude = pysolar.get_altitude(position[0], position[1], utc_datetime)
            azimuth = pysolar.get_azimuth(position[0], position[1], utc_datetime)
        except AttributeError:  # catch 0.6 version of pysolar required for python 2.7 support
            # Pysolar 0.6 exposes CamelCase functions and measures azimuth
            # from South, hence the 180-degree conversion.
            altitude = pysolar.GetAltitude(position[0], position[1], utc_datetime)
            azimuth = 180-pysolar.GetAzimuth(position[0], position[1], utc_datetime)
    sunAltitude = np.radians(np.array(altitude))
    sunAzimuth = np.radians(np.array(azimuth))
    sunAzimuth = sunAzimuth % (2 * np.pi )  # wrap range 0 to 2*pi
    nSun = ned_from_pysolar(sunAzimuth, sunAltitude)
    nSensor = np.array(get_orientation(pose, sensor_orientation))
    # angle between the sun direction and the sensor normal
    angle = np.arccos(np.dot(nSun, nSensor))
    return nSun, nSensor, angle, sunAltitude, sunAzimuth

Wyświetl plik

@ -0,0 +1,149 @@
from opendm import dls
import math
import numpy as np
from opendm import log
# Loosely based on https://github.com/micasense/imageprocessing/blob/master/micasense/utils.py
def dn_to_radiance(photo, image):
    """
    Convert Digital Number values to Radiance values
    :param photo ODM_Photo
    :param image numpy array containing image data
    :return numpy array with radiance image values
    """
    image = image.astype("float32")

    # Handle thermal bands (experimental)
    if photo.band_name == 'LWIR':
        image -= (273.15 * 100.0)  # Convert Kelvin to Celsius
        image *= 0.01
        return image

    # All others
    a1, a2, a3 = photo.get_radiometric_calibration()
    dark_level = photo.get_dark_level()

    exposure_time = photo.exposure_time
    gain = photo.get_gain()
    photometric_exp = photo.get_photometric_exposure()

    if a1 is None and photometric_exp is None:
        # Nothing to calibrate with: return raw digital numbers unchanged
        log.ODM_WARNING("Cannot perform radiometric calibration, no FNumber/Exposure Time or Radiometric Calibration EXIF tags found in %s. Using Digital Number." % photo.filename)
        return image

    if a1 is None and photometric_exp is not None:
        # No radiometric calibration coefficient; fall back to the
        # photometric exposure value as the scale factor
        a1 = photometric_exp

    V, x, y = vignette_map(photo)
    if x is None:
        # No vignetting info: still need the coordinate grid for the row
        # gradient correction below
        x, y = np.meshgrid(np.arange(photo.width), np.arange(photo.height))

    if dark_level is not None:
        image -= dark_level

    # Normalize DN to 0 - 1.0
    bit_depth_max = photo.get_bit_depth_max()
    if bit_depth_max:
        image /= bit_depth_max

    if V is not None:
        # vignette correction
        image *= V

    if exposure_time and a2 is not None and a3 is not None:
        # row gradient correction
        R = 1.0 / (1.0 + a2 * y / exposure_time - a3 * y)
        image *= R

    # Floor any negative radiances to zero (can happen due to noise around blackLevel)
    if dark_level is not None:
        image[image < 0] = 0

    # apply the radiometric calibration - i.e. scale by the gain-exposure product and
    # multiply with the radiometric calibration coefficient
    if gain is not None and exposure_time is not None:
        image /= (gain * exposure_time)

    image *= a1

    return image
def vignette_map(photo):
    """Compute a per-pixel vignetting correction map for ``photo``.

    :param photo: ODM_Photo-like object exposing ``get_vignetting_center()``,
        ``get_vignetting_polynomial()``, ``width`` and ``height``.
    :return: ``(vignette, x, y)`` where ``vignette`` is a (height, width)
        array of multiplicative correction factors and ``x``/``y`` are the
        pixel coordinate grids, or ``(None, None, None)`` when the photo
        carries no vignetting calibration.
    """
    x_vc, y_vc = photo.get_vignetting_center()
    polynomial = photo.get_vignetting_polynomial()

    if x_vc and polynomial:
        # Append 1.0 as np.polyval's constant term WITHOUT mutating the list
        # returned by the photo object: the original `polynomial.append(1.0)`
        # corrupted the cached polynomial on repeated calls.
        vignette_poly = np.array(list(polynomial) + [1.0])

        # perform vignette correction
        # get coordinate grid across image (meshgrid returns transposed arrays)
        x, y = np.meshgrid(np.arange(photo.width), np.arange(photo.height))

        # compute matrix of distances from the vignetting center
        r = np.hypot((x - x_vc), (y - y_vc))

        # compute the vignette polynomial for each distance - we divide by the
        # polynomial so that image_corrected = image_original * vignette
        vignette = 1.0 / np.polyval(vignette_poly, r)
        return vignette, x, y

    return None, None, None
def dn_to_reflectance(photo, image, use_sun_sensor=True):
    """Convert a Digital Number image to reflectance.

    Radiance is converted under a Lambertian assumption:
    reflectance = radiance * pi / irradiance.
    """
    radiance_img = dn_to_radiance(photo, image)
    irr = compute_irradiance(photo, use_sun_sensor=use_sun_sensor)
    return radiance_img * math.pi / irr
def compute_irradiance(photo, use_sun_sensor=True):
    """Estimate the spectral irradiance for ``photo``.

    Preference order: thermal bands get a neutral value of 1.0; then any
    horizontal irradiance value stored by the camera (e.g. Micasense); then,
    when ``use_sun_sensor`` is on, an estimate derived from the downwelling
    light sensor (DLS) reading and the sun geometry. Falls back to 1.0.
    """
    # Thermal bands need no irradiance normalization
    if photo.band_name == "LWIR":
        return 1.0

    # Some cameras (Micasense) store the value directly — just return it
    stored = photo.get_horizontal_irradiance()
    if stored is not None:
        return stored

    # TODO: support for calibration panels
    spectral_irradiance = photo.get_sun_sensor()
    if use_sun_sensor and spectral_irradiance:
        # Estimate irradiance from the DLS reading and the sun position
        dls_orientation_vector = np.array([0, 0, -1])
        sun_vector_ned, sensor_vector_ned, sun_sensor_angle, \
        solar_elevation, solar_azimuth = dls.compute_sun_angle([photo.latitude, photo.longitude],
                                                               photo.get_dls_pose(),
                                                               photo.get_utc_time(),
                                                               dls_orientation_vector)

        # Correct the DLS reading for the diffuser's angular response
        angular_correction = dls.fresnel(sun_sensor_angle)

        # TODO: support for direct and scattered irradiance
        direct_to_diffuse_ratio = 6.0  # Assumption, clear skies
        percent_diffuse = 1.0 / direct_to_diffuse_ratio
        sensor_irradiance = spectral_irradiance / angular_correction

        # Direct irradiance in the plane normal to the sun
        untilted_direct_irr = sensor_irradiance / (percent_diffuse + np.cos(sun_sensor_angle))
        scattered_irradiance = untilted_direct_irr * percent_diffuse

        # Irradiance on the ground via the solar altitude angle
        return untilted_direct_irr * np.sin(solar_elevation) + scattered_irradiance
    elif use_sun_sensor:
        log.ODM_WARNING("No sun sensor values found for %s" % photo.filename)

    return 1.0

Wyświetl plik

@ -2,7 +2,7 @@
OpenSfM related utils
"""
import os, shutil, sys, json
import os, shutil, sys, json, argparse
import yaml
from opendm import io
from opendm import log
@ -11,6 +11,7 @@ from opendm import context
from opendm import camera
from opensfm.large import metadataset
from opensfm.large import tools
from opensfm.commands import undistort
class OSFMContext:
def __init__(self, opensfm_project_path):
@ -242,6 +243,19 @@ class OSFMContext:
log.ODM_WARNING("Cannot export cameras to %s. %s." % (output, str(e)))
else:
log.ODM_INFO("Already extracted cameras")
def convert_and_undistort(self, rerun=False, imageFilter=None):
    """Run OpenSfM's undistort command on this project.

    :param rerun: when True, undistort even if output already exists
    :param imageFilter: optional per-image callback passed to the
        undistort command (e.g. radiometric calibration)
    """
    log.ODM_INFO("Undistorting %s ..." % self.opensfm_project_path)
    undistorted_images_path = self.path("undistorted", "images")

    # Skip work when a previous run already produced the output
    if io.dir_exists(undistorted_images_path) and not rerun:
        log.ODM_WARNING("Found an undistorted directory in %s" % undistorted_images_path)
        return

    command = undistort.Command(imageFilter)
    arg_parser = argparse.ArgumentParser()
    command.add_arguments(arg_parser)
    command.run(arg_parser.parse_args([self.opensfm_project_path]))
def update_config(self, cfg_dict):
cfg_file = self.get_config_file_path()
@ -263,9 +277,9 @@ class OSFMContext:
def name(self):
return os.path.basename(os.path.abspath(self.path("..")))
def get_submodel_argv(project_name = None, submodels_path = None, submodel_name = None):
def get_submodel_argv(args, submodels_path = None, submodel_name = None):
"""
Gets argv for a submodel starting from the argv passed to the application startup.
Gets argv for a submodel starting from the args passed to the application startup.
Additionally, if project_name, submodels_path and submodel_name are passed, the function
handles the <project name> value and --project-path detection / override.
When all arguments are set to None, --project-path and project name are always removed.
@ -281,82 +295,73 @@ def get_submodel_argv(project_name = None, submodels_path = None, submodel_name
removing --gcp (the GCP path if specified is always "gcp_list.txt")
reading the contents of --cameras
"""
assure_always = ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel']
remove_always_2 = ['--split', '--split-overlap', '--rerun-from', '--rerun', '--gcp', '--end-with', '--sm-cluster']
remove_always_1 = ['--rerun-all', '--pc-csv', '--pc-las', '--pc-ept']
read_json_always = ['--cameras']
assure_always = ['orthophoto_cutline', 'dem_euclidean_map', 'skip_3dmodel']
remove_always = ['split', 'split_overlap', 'rerun_from', 'rerun', 'gcp', 'end_with', 'sm_cluster', 'rerun_all', 'pc_csv', 'pc_las', 'pc_ept']
read_json_always = ['cameras']
argv = sys.argv
result = [argv[0]] # Startup script (/path/to/run.py)
result = [argv[0]]
i = 1
found_args = {}
args_dict = vars(args).copy()
set_keys = [k[:-len("_is_set")] for k in args_dict.keys() if k.endswith("_is_set")]
while i < len(argv):
arg = argv[i]
if i == 1 and project_name and submodel_name and arg == project_name:
i += 1
continue
elif i == len(argv) - 1:
# Project name?
if project_name and submodel_name and arg == project_name:
result.append(submodel_name)
found_args['project_name'] = True
i += 1
continue
if arg == '--project-path':
if submodels_path:
result.append(arg)
result.append(submodels_path)
found_args[arg] = True
i += 2
elif arg in assure_always:
result.append(arg)
found_args[arg] = True
i += 1
elif arg == '--crop':
result.append(arg)
crop_value = float(argv[i + 1])
if crop_value == 0:
crop_value = 0.015625
result.append(str(crop_value))
found_args[arg] = True
i += 2
elif arg in read_json_always:
# Handle project name and project path (special case)
if "name" in set_keys:
del args_dict["name"]
set_keys.remove("name")
if "project_path" in set_keys:
del args_dict["project_path"]
set_keys.remove("project_path")
# Remove parameters
set_keys = [k for k in set_keys if k not in remove_always]
# Assure parameters
for k in assure_always:
if not k in set_keys:
set_keys.append(k)
args_dict[k] = True
# Read JSON always
for k in read_json_always:
if k in set_keys:
try:
jsond = io.path_or_json_string_to_dict(argv[i + 1])
result.append(arg)
result.append(json.dumps(jsond))
found_args[arg] = True
if isinstance(args_dict[k], str):
args_dict[k] = io.path_or_json_string_to_dict(args_dict[k])
if isinstance(args_dict[k], dict):
args_dict[k] = json.dumps(args_dict[k])
except ValueError as e:
log.ODM_WARNING("Cannot parse/read JSON: {}".format(str(e)))
finally:
i += 2
elif arg in remove_always_2:
i += 2
elif arg in remove_always_1:
i += 1
else:
result.append(arg)
i += 1
# Handle crop (cannot be zero for split/merge)
if "crop" in set_keys:
crop_value = float(args_dict["crop"])
if crop_value == 0:
crop_value = 0.015625
args_dict["crop"] = crop_value
# Populate result
for k in set_keys:
result.append("--%s" % k.replace("_", "-"))
# No second value for booleans
if isinstance(args_dict[k], bool) and args_dict[k] == True:
continue
result.append(str(args_dict[k]))
if not found_args.get('--project-path') and submodels_path:
result.append('--project-path')
if submodels_path:
result.append("--project-path")
result.append(submodels_path)
for arg in assure_always:
if not found_args.get(arg):
result.append(arg)
if not found_args.get('project_name') and submodel_name:
if submodel_name:
result.append(submodel_name)
return result
def get_submodel_args_dict():
submodel_argv = get_submodel_argv()
def get_submodel_args_dict(args):
submodel_argv = get_submodel_argv(args)
result = {}
i = 0

364
opendm/photo.py 100644
Wyświetl plik

@ -0,0 +1,364 @@
import io
import logging
import re
import exifread
import numpy as np
from six import string_types
from datetime import datetime, timedelta
import pytz
import log
import system
import xmltodict as x2d
from opendm import get_image_size
class ODM_Photo:
    """ ODMPhoto - a class for ODMPhotos
    """

    def __init__(self, path_file):
        """Load and parse EXIF/XMP metadata from the image at path_file."""
        # Standard tags (virtually all photos have these)
        self.filename = io.extract_file_from_path_file(path_file)
        self.width = None
        self.height = None
        self.camera_make = ''
        self.camera_model = ''

        # Geo tags
        self.latitude = None
        self.longitude = None
        self.altitude = None

        # Multi-band fields
        self.band_name = 'RGB'
        self.band_index = 0

        # Multi-spectral fields
        self.fnumber = None
        self.radiometric_calibration = None
        self.black_level = None

        # Capture info
        self.exposure_time = None
        self.iso_speed = None
        self.bits_per_sample = None
        self.vignetting_center = None
        self.vignetting_polynomial = None
        self.spectral_irradiance = None
        self.horizontal_irradiance = None
        self.irradiance_scale_to_si = None
        self.utc_time = None

        # DLS (downwelling light sensor)
        self.sun_sensor = None
        self.dls_yaw = None
        self.dls_pitch = None
        self.dls_roll = None

        # self.center_wavelength = None
        # self.bandwidth = None

        # parse values from metadata
        self.parse_exif_values(path_file)

        # print log message
        log.ODM_DEBUG('Loaded {}'.format(self))

    def __str__(self):
        """Human-readable one-line summary of this photo's metadata."""
        return '{} | camera: {} {} | dimensions: {} x {} | lat: {} | lon: {} | alt: {} | band: {} ({})'.format(
            self.filename, self.camera_make, self.camera_model, self.width, self.height,
            self.latitude, self.longitude, self.altitude, self.band_name, self.band_index)

    def parse_exif_values(self, _path_file):
        """Populate fields from EXIF and XMP metadata.

        Reads basic tags (camera make/model, GPS), extended multispectral
        tags (black level, exposure, ISO, capture time) and vendor XMP tags
        (band name/index, radiometric calibration, vignetting, irradiance,
        DLS pose). Read failures are logged as warnings and leave the
        affected fields at their defaults.
        """
        # Disable exifread log
        logging.getLogger('exifread').setLevel(logging.CRITICAL)

        with open(_path_file, 'rb') as f:
            tags = exifread.process_file(f, details=False)
            try:
                if 'Image Make' in tags:
                    self.camera_make = tags['Image Make'].values.encode('utf8')
                if 'Image Model' in tags:
                    self.camera_model = tags['Image Model'].values.encode('utf8')
                if 'GPS GPSAltitude' in tags:
                    self.altitude = self.float_value(tags['GPS GPSAltitude'])
                    # A positive altitude reference indicates below sea level
                    if 'GPS GPSAltitudeRef' in tags and self.int_value(tags['GPS GPSAltitudeRef']) > 0:
                        self.altitude *= -1
                if 'GPS GPSLatitude' in tags and 'GPS GPSLatitudeRef' in tags:
                    self.latitude = self.dms_to_decimal(tags['GPS GPSLatitude'], tags['GPS GPSLatitudeRef'])
                if 'GPS GPSLongitude' in tags and 'GPS GPSLongitudeRef' in tags:
                    self.longitude = self.dms_to_decimal(tags['GPS GPSLongitude'], tags['GPS GPSLongitudeRef'])
            except IndexError as e:
                log.ODM_WARNING("Cannot read basic EXIF tags for %s: %s" % (_path_file, e.message))

            try:
                # 0xC61A is the TIFF/DNG BlackLevel tag number
                if 'Image Tag 0xC61A' in tags:
                    self.black_level = self.list_values(tags['Image Tag 0xC61A'])
                elif 'BlackLevel' in tags:
                    self.black_level = self.list_values(tags['BlackLevel'])

                if 'EXIF ExposureTime' in tags:
                    self.exposure_time = self.float_value(tags['EXIF ExposureTime'])

                if 'EXIF FNumber' in tags:
                    self.fnumber = self.float_value(tags['EXIF FNumber'])

                # ISO may appear under several different EXIF tag names
                if 'EXIF ISOSpeed' in tags:
                    self.iso_speed = self.int_value(tags['EXIF ISOSpeed'])
                elif 'EXIF PhotographicSensitivity' in tags:
                    self.iso_speed = self.int_value(tags['EXIF PhotographicSensitivity'])
                elif 'EXIF ISOSpeedRatings' in tags:
                    self.iso_speed = self.int_value(tags['EXIF ISOSpeedRatings'])

                if 'Image BitsPerSample' in tags:
                    self.bits_per_sample = self.int_value(tags['Image BitsPerSample'])
                if 'EXIF DateTimeOriginal' in tags:
                    str_time = tags['EXIF DateTimeOriginal'].values.encode('utf8')
                    utc_time = datetime.strptime(str_time, "%Y:%m:%d %H:%M:%S")
                    subsec = 0
                    if 'EXIF SubSecTime' in tags:
                        subsec = self.int_value(tags['EXIF SubSecTime'])
                    # SubSecTime holds a fractional-second suffix; preserve its sign
                    negative = 1.0
                    if subsec < 0:
                        negative = -1.0
                        subsec *= -1.0
                    subsec = float('0.{}'.format(int(subsec)))
                    subsec *= negative
                    ms = subsec * 1e3
                    utc_time += timedelta(milliseconds = ms)
                    timezone = pytz.timezone('UTC')
                    epoch = timezone.localize(datetime.utcfromtimestamp(0))
                    # Stored as milliseconds since the UNIX epoch (UTC)
                    self.utc_time = (timezone.localize(utc_time) - epoch).total_seconds() * 1000.0
            except Exception as e:
                log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (_path_file, e.message))

            # Extract XMP tags
            f.seek(0)
            xmp = self.get_xmp(f)

            for tags in xmp:
                try:
                    band_name = self.get_xmp_tag(tags, 'Camera:BandName')
                    if band_name is not None:
                        self.band_name = band_name.replace(" ", "")

                    self.set_attr_from_xmp_tag('band_index', tags, [
                        'DLS:SensorId', # Micasense RedEdge
                        '@Camera:RigCameraIndex', # Parrot Sequoia, Sentera 21244-00_3.2MP-GS-0001
                        'Camera:RigCameraIndex', # MicaSense Altum
                    ])

                    self.set_attr_from_xmp_tag('radiometric_calibration', tags, [
                        'MicaSense:RadiometricCalibration',
                    ])

                    self.set_attr_from_xmp_tag('vignetting_center', tags, [
                        'Camera:VignettingCenter',
                        'Sentera:VignettingCenter',
                    ])

                    self.set_attr_from_xmp_tag('vignetting_polynomial', tags, [
                        'Camera:VignettingPolynomial',
                        'Sentera:VignettingPolynomial',
                    ])

                    self.set_attr_from_xmp_tag('horizontal_irradiance', tags, [
                        'Camera:HorizontalIrradiance'
                    ], float)

                    self.set_attr_from_xmp_tag('irradiance_scale_to_si', tags, [
                        'Camera:IrradianceScaleToSIUnits'
                    ], float)

                    self.set_attr_from_xmp_tag('sun_sensor', tags, [
                        'Camera:SunSensor',
                    ], float)

                    self.set_attr_from_xmp_tag('spectral_irradiance', tags, [
                        'Camera:SpectralIrradiance',
                        'Camera:Irradiance',
                    ], float)

                    if 'DLS:Yaw' in tags:
                        self.set_attr_from_xmp_tag('dls_yaw', tags, ['DLS:Yaw'], float)
                        self.set_attr_from_xmp_tag('dls_pitch', tags, ['DLS:Pitch'], float)
                        self.set_attr_from_xmp_tag('dls_roll', tags, ['DLS:Roll'], float)
                except Exception as e:
                    log.ODM_WARNING("Cannot read XMP tags for %s: %s" % (_path_file, e.message))

                # self.set_attr_from_xmp_tag('center_wavelength', tags, [
                #     'Camera:CentralWavelength'
                # ], float)

                # self.set_attr_from_xmp_tag('bandwidth', tags, [
                #     'Camera:WavelengthFWHM'
                # ], float)

        self.width, self.height = get_image_size.get_image_size(_path_file)

        # Sanitize band name since we use it in folder paths
        self.band_name = re.sub('[^A-Za-z0-9]+', '', self.band_name)

    def set_attr_from_xmp_tag(self, attr, xmp_tags, tags, cast=None):
        """Set self.<attr> from the first matching XMP tag, optionally cast."""
        v = self.get_xmp_tag(xmp_tags, tags)
        if v is not None:
            if cast is None:
                setattr(self, attr, v)
            else:
                setattr(self, attr, cast(v))

    def get_xmp_tag(self, xmp_tags, tags):
        """Return the value of the first tag in `tags` present in xmp_tags.

        Handles plain strings, numbers and rdf:Seq lists (lists are joined
        with spaces). Returns None when no tag matches.
        """
        if isinstance(tags, str):
            tags = [tags]

        for tag in tags:
            if tag in xmp_tags:
                t = xmp_tags[tag]

                if isinstance(t, string_types):
                    return str(t)
                elif isinstance(t, dict):
                    items = t.get('rdf:Seq', {}).get('rdf:li', {})
                    if items:
                        if isinstance(items, string_types):
                            return items
                        return " ".join(items)
                elif isinstance(t, int) or isinstance(t, float):
                    return t

    # From https://github.com/mapillary/OpenSfM/blob/master/opensfm/exif.py
    def get_xmp(self, file):
        """Return the rdf:Description dicts from the file's XMP block ([] if none found)."""
        img_str = str(file.read())
        xmp_start = img_str.find('<x:xmpmeta')
        xmp_end = img_str.find('</x:xmpmeta')

        if xmp_start < xmp_end:
            # +12 keeps the closing </x:xmpmeta> tag in the parsed string
            xmp_str = img_str[xmp_start:xmp_end + 12]
            xdict = x2d.parse(xmp_str)
            xdict = xdict.get('x:xmpmeta', {})
            xdict = xdict.get('rdf:RDF', {})
            xdict = xdict.get('rdf:Description', {})
            if isinstance(xdict, list):
                return xdict
            else:
                return [xdict]
        else:
            return []

    def dms_to_decimal(self, dms, sign):
        """Converts dms coords to decimal degrees"""
        degrees, minutes, seconds = self.float_values(dms)

        return (-1 if sign.values[0] in 'SWsw' else 1) * (
            degrees +
            minutes / 60 +
            seconds / 3600
        )

    def float_values(self, tag):
        """Return the tag's rational values as floats (num/den).

        NOTE(review): under Python 2 map() returns a list; callers
        (float_value) rely on len() and indexing working on the result.
        """
        if isinstance(tag.values, list):
            return map(lambda v: float(v.num) / float(v.den), tag.values)
        else:
            return [float(tag.values.num) / float(tag.values.den)]

    def float_value(self, tag):
        """Return the first float value of the tag, or None if empty."""
        v = self.float_values(tag)
        if len(v) > 0:
            return v[0]

    def int_values(self, tag):
        """Return the tag's values as a list of ints."""
        if isinstance(tag.values, list):
            return map(int, tag.values)
        else:
            return [int(tag.values)]

    def int_value(self, tag):
        """Return the first int value of the tag, or None if empty."""
        v = self.int_values(tag)
        if len(v) > 0:
            return v[0]

    def list_values(self, tag):
        """Join all tag values into a single space-separated string."""
        return " ".join(map(str, tag.values))

    def get_radiometric_calibration(self):
        """Return three calibration coefficients, or [None, None, None] if unavailable."""
        if isinstance(self.radiometric_calibration, str):
            parts = self.radiometric_calibration.split(" ")
            if len(parts) == 3:
                return list(map(float, parts))

        return [None, None, None]

    def get_dark_level(self):
        """Return the mean black level across all samples, or None if unknown."""
        if self.black_level:
            levels = np.array([float(v) for v in self.black_level.split(" ")])
            return levels.mean()

    def get_gain(self):
        #(gain = ISO/100)
        if self.iso_speed:
            return self.iso_speed / 100.0

    def get_vignetting_center(self):
        """Return the [x, y] vignetting center, or [None, None] if unavailable."""
        if self.vignetting_center:
            parts = self.vignetting_center.split(" ")
            if len(parts) == 2:
                return list(map(float, parts))
        return [None, None]

    def get_vignetting_polynomial(self):
        """Return vignetting polynomial coefficients, or None if unavailable."""
        if self.vignetting_polynomial:
            parts = self.vignetting_polynomial.split(" ")
            if len(parts) > 0:
                coeffs = list(map(float, parts))

                # Different camera vendors seem to use different ordering for the coefficients
                if self.camera_make != "Sentera":
                    coeffs.reverse()
                return coeffs

    def get_utc_time(self):
        """Return the capture time as a naive UTC datetime, or None if unknown."""
        if self.utc_time:
            return datetime.utcfromtimestamp(self.utc_time / 1000)

    def get_photometric_exposure(self):
        # H ~= (exposure_time) / (f_number^2)
        # NOTE(review): assumes exposure_time is set whenever fnumber is — confirm
        if self.fnumber is not None and self.exposure_time > 0:
            return self.exposure_time / (self.fnumber * self.fnumber)

    def get_horizontal_irradiance(self):
        """Return the recorded horizontal irradiance (scaled to SI when a
        scale factor is present), or None if not recorded."""
        if self.horizontal_irradiance is not None:
            scale = 1.0 # Assumed
            if self.irradiance_scale_to_si is not None:
                scale = self.irradiance_scale_to_si

            return self.horizontal_irradiance * scale

    def get_sun_sensor(self):
        """Return a sun sensor irradiance estimate, or None if unavailable."""
        if self.sun_sensor is not None:
            # TODO: Presence of XMP:SunSensorExposureTime
            # and XMP:SunSensorSensitivity might
            # require additional logic. If these two tags are present,
            # then sun_sensor is not in physical units?
            return self.sun_sensor / 65535.0 # normalize uint16 (is this correct?)
        elif self.spectral_irradiance is not None:
            scale = 1.0 # Assumed
            if self.irradiance_scale_to_si is not None:
                scale = self.irradiance_scale_to_si

            return self.spectral_irradiance * scale

    def get_dls_pose(self):
        """Return [yaw, pitch, roll] of the DLS, or zeros when no DLS data exists."""
        if self.dls_yaw is not None:
            return [self.dls_yaw, self.dls_pitch, self.dls_roll]
        return [0.0, 0.0, 0.0]

    def get_bit_depth_max(self):
        """Return the maximum representable sample value (2^bits), or None if unknown."""
        if self.bits_per_sample:
            return float(2 ** self.bits_per_sample)
        return None

Wyświetl plik

@ -8,6 +8,7 @@ import zipfile
import glob
from opendm import log
from opendm import system
from opendm import config
from pyodm import Node, exceptions
from pyodm.utils import AtomicCounter
from pyodm.types import TaskStatus
@ -354,7 +355,7 @@ class Task:
# Upload task
task = self.node.create_task(images,
get_submodel_args_dict(),
get_submodel_args_dict(config.config()),
progress_callback=print_progress,
skip_post_processing=True,
outputs=outputs)
@ -470,8 +471,7 @@ class ToolchainTask(Task):
log.ODM_INFO("=============================")
submodels_path = os.path.abspath(self.path(".."))
project_name = os.path.basename(os.path.abspath(os.path.join(submodels_path, "..")))
argv = get_submodel_argv(project_name, submodels_path, submodel_name)
argv = get_submodel_argv(config.config(), submodels_path, submodel_name)
# Re-run the ODM toolchain on the submodel
system.run(" ".join(map(quote, argv)), env_vars=os.environ.copy())

Wyświetl plik

@ -1,8 +1,6 @@
import cv2
import exifread
import re
import os
from fractions import Fraction
from opendm import get_image_size
from opendm import location
from opendm.gcp import GCPFile
@ -16,131 +14,7 @@ import system
import context
import logging
from opendm.progress import progressbc
class ODM_Photo:
""" ODMPhoto - a class for ODMPhotos
"""
def __init__(self, path_file):
# general purpose
self.filename = io.extract_file_from_path_file(path_file)
self.width = None
self.height = None
# other attributes
self.camera_make = ''
self.camera_model = ''
self.latitude = None
self.longitude = None
self.altitude = None
self.band_name = 'RGB'
self.band_index = 0
# parse values from metadata
self.parse_exif_values(path_file)
# print log message
log.ODM_DEBUG('Loaded {}'.format(self))
def __str__(self):
return '{} | camera: {} {} | dimensions: {} x {} | lat: {} | lon: {} | alt: {} | band: {} ({})'.format(
self.filename, self.camera_make, self.camera_model, self.width, self.height,
self.latitude, self.longitude, self.altitude, self.band_name, self.band_index)
def parse_exif_values(self, _path_file):
# Disable exifread log
logging.getLogger('exifread').setLevel(logging.CRITICAL)
with open(_path_file, 'rb') as f:
tags = exifread.process_file(f, details=False)
try:
if 'Image Make' in tags:
self.camera_make = tags['Image Make'].values.encode('utf8')
if 'Image Model' in tags:
self.camera_model = tags['Image Model'].values.encode('utf8')
if 'GPS GPSAltitude' in tags:
self.altitude = self.float_values(tags['GPS GPSAltitude'])[0]
if 'GPS GPSAltitudeRef' in tags and self.int_values(tags['GPS GPSAltitudeRef'])[0] > 0:
self.altitude *= -1
if 'GPS GPSLatitude' in tags and 'GPS GPSLatitudeRef' in tags:
self.latitude = self.dms_to_decimal(tags['GPS GPSLatitude'], tags['GPS GPSLatitudeRef'])
if 'GPS GPSLongitude' in tags and 'GPS GPSLongitudeRef' in tags:
self.longitude = self.dms_to_decimal(tags['GPS GPSLongitude'], tags['GPS GPSLongitudeRef'])
except IndexError as e:
log.ODM_WARNING("Cannot read EXIF tags for %s: %s" % (_path_file, e.message))
# Extract XMP tags
f.seek(0)
xmp = self.get_xmp(f)
# Find band name and camera index (if available)
camera_index_tags = [
'DLS:SensorId', # Micasense RedEdge
'@Camera:RigCameraIndex', # Parrot Sequoia
'Camera:RigCameraIndex', # MicaSense Altum
]
for tags in xmp:
if 'Camera:BandName' in tags:
cbt = tags['Camera:BandName']
band_name = None
if isinstance(cbt, string_types):
band_name = str(tags['Camera:BandName'])
elif isinstance(cbt, dict):
items = cbt.get('rdf:Seq', {}).get('rdf:li', {})
if items:
band_name = " ".join(items)
if band_name is not None:
self.band_name = band_name.replace(" ", "")
else:
log.ODM_WARNING("Camera:BandName tag found in XMP, but we couldn't parse it. Multispectral bands might be improperly classified.")
for cit in camera_index_tags:
if cit in tags:
self.band_index = int(tags[cit])
self.width, self.height = get_image_size.get_image_size(_path_file)
# Sanitize band name since we use it in folder paths
self.band_name = re.sub('[^A-Za-z0-9]+', '', self.band_name)
# From https://github.com/mapillary/OpenSfM/blob/master/opensfm/exif.py
def get_xmp(self, file):
img_str = str(file.read())
xmp_start = img_str.find('<x:xmpmeta')
xmp_end = img_str.find('</x:xmpmeta')
if xmp_start < xmp_end:
xmp_str = img_str[xmp_start:xmp_end + 12]
xdict = x2d.parse(xmp_str)
xdict = xdict.get('x:xmpmeta', {})
xdict = xdict.get('rdf:RDF', {})
xdict = xdict.get('rdf:Description', {})
if isinstance(xdict, list):
return xdict
else:
return [xdict]
else:
return []
def dms_to_decimal(self, dms, sign):
"""Converts dms coords to decimal degrees"""
degrees, minutes, seconds = self.float_values(dms)
return (-1 if sign.values[0] in 'SWsw' else 1) * (
degrees +
minutes / 60 +
seconds / 3600
)
def float_values(self, tag):
return map(lambda v: float(v.num) / float(v.den), tag.values)
def int_values(self, tag):
return map(int, tag.values)
from opendm.photo import ODM_Photo
class ODM_Reconstruction(object):
@ -253,6 +127,12 @@ class ODM_Reconstruction(object):
with open(file, 'w') as f:
f.write(self.georef.proj4())
def get_photo(self, filename):
for p in self.photos:
if p.filename == filename:
return p
class ODM_GeoRef(object):
@staticmethod
def FromProj(projstring):

Wyświetl plik

@ -13,6 +13,7 @@ networkx==2.2
scipy==1.2.1
numpy==1.15.4
pyproj==2.2.2
Pysolar==0.6
psutil==5.6.3
joblib==0.13.2
Fiona==1.8.9.post2

6
run.py
Wyświetl plik

@ -14,12 +14,16 @@ from stages.odm_app import ODMApp
if __name__ == '__main__':
args = config.config()
log.ODM_INFO('Initializing OpenDroneMap app - %s' % system.now())
log.ODM_INFO('Initializing ODM - %s' % system.now())
# Print args
args_dict = vars(args)
log.ODM_INFO('==============')
for k in sorted(args_dict.keys()):
# Skip _is_set keys
if k.endswith("_is_set"):
continue
# Don't leak token
if k == 'sm_cluster' and args_dict[k] is not None:
log.ODM_INFO('%s: True' % k)

Wyświetl plik

@ -9,6 +9,7 @@ from opendm import system
from shutil import copyfile
from opendm import progress
def save_images_database(photos, database_file):
    """Serialize each photo's attribute dict to database_file as a JSON array.

    :param photos: iterable of photo objects (their __dict__ is serialized)
    :param database_file: destination path (overwritten if it exists)
    """
    with open(database_file, 'w') as f:
        # A list comprehension (not map()) is required for Python 3
        # compatibility: json.dumps cannot serialize a map object.
        f.write(json.dumps([p.__dict__ for p in photos]))
@ -38,7 +39,6 @@ def load_images_database(database_file):
class ODMLoadDatasetStage(types.ODM_Stage):
def process(self, args, outputs):
# Load tree
tree = types.ODM_Tree(args.project_path, args.gcp)
outputs['tree'] = tree

Wyświetl plik

@ -9,6 +9,7 @@ from opendm import gsd
from opendm import point_cloud
from opendm import types
from opendm.osfm import OSFMContext
from opendm import multispectral
class ODMOpenSfMStage(types.ODM_Stage):
def process(self, args, outputs):
@ -57,12 +58,14 @@ class ODMOpenSfMStage(types.ODM_Stage):
octx.touch(updated_config_flag_file)
# These will be used for texturing / MVS
undistorted_images_path = octx.path("undistorted", "images")
if not io.dir_exists(undistorted_images_path) or self.rerun():
octx.run('undistort')
if args.radiometric_calibration == "none":
octx.convert_and_undistort(self.rerun())
else:
log.ODM_WARNING("Found an undistorted directory in %s" % undistorted_images_path)
def radiometric_calibrate(shot_id, image):
photo = reconstruction.get_photo(shot_id)
return multispectral.dn_to_reflectance(photo, image, use_sun_sensor=args.radiometric_calibration=="camera+sun")
octx.convert_and_undistort(self.rerun(), radiometric_calibrate)
self.update_progress(80)

Wyświetl plik

@ -144,7 +144,7 @@ class ODMSplitStage(types.ODM_Stage):
log.ODM_INFO("Processing %s" % sp_octx.name())
log.ODM_INFO("========================")
argv = get_submodel_argv(args.name, tree.submodels_path, sp_octx.name())
argv = get_submodel_argv(args, tree.submodels_path, sp_octx.name())
# Re-run the ODM toolchain on the submodel
system.run(" ".join(map(quote, argv)), env_vars=os.environ.copy())

Wyświetl plik

@ -0,0 +1,3 @@
{
"test": "1"
}

Wyświetl plik

@ -0,0 +1,2 @@
---
project_path: '/test'

65
tests/test_osfm.py 100644
Wyświetl plik

@ -0,0 +1,65 @@
import unittest
import os
from opendm.osfm import get_submodel_argv, get_submodel_args_dict
from opendm import config
class TestOSFM(unittest.TestCase):
    """Tests for get_submodel_argv / get_submodel_args_dict (opendm.osfm)."""

    def setUp(self):
        pass

    def test_get_submodel_argv(self):
        """Submodel argv is rebuilt from parsed args: noise flags dropped,
        required flags assured, project path/name handled specially."""
        # Base
        args = config.config(["--project-path", "/datasets"])
        self.assertEqual(get_submodel_argv(args)[1:],
            ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel'])
        self.assertEqual(get_submodel_argv(args, "/submodels", "submodel_0000")[1:],
            ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel', '--project-path', '/submodels', 'submodel_0000'])

        # Base + project name
        args = config.config(["--project-path", "/datasets", "brighton"])
        self.assertEqual(get_submodel_argv(args)[1:],
            ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel'])
        self.assertEqual(get_submodel_argv(args, "/submodels", "submodel_0000")[1:],
            ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel', '--project-path', '/submodels', 'submodel_0000'])

        # Project name + base (positional argument order must not matter)
        args = config.config(["brighton", "--project-path", "/datasets"])
        self.assertEqual(get_submodel_argv(args)[1:],
            ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel'])
        self.assertEqual(get_submodel_argv(args, "/submodels", "submodel_0000")[1:],
            ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel', '--project-path', '/submodels', 'submodel_0000'])

        # Crop (a zero crop is rewritten to the minimum 0.015625 for split/merge)
        args = config.config(["brighton", "--project-path", "/datasets", "--crop", "0"])
        self.assertEqual(get_submodel_argv(args)[1:],
            ['--crop', '0.015625', '--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel'])
        self.assertEqual(get_submodel_argv(args, "/submodels", "submodel_0000")[1:],
            ['--crop', '0.015625', '--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel', '--project-path', '/submodels', 'submodel_0000'])

        # With sm-cluster, pc-csv and others (split-related flags are removed)
        args = config.config(["--project-path", "/datasets", "--split", "200", "--pc-csv"])
        self.assertEqual(get_submodel_argv(args)[1:],
            ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel'])
        self.assertEqual(get_submodel_argv(args, "/submodels", "submodel_0000")[1:],
            ['--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel', '--project-path', '/submodels', 'submodel_0000'])

        # Cameras JSON (a file path is read and inlined as a JSON string)
        args = config.config(["--project-path", "/datasets", "--cameras", os.path.join(os.path.dirname(os.path.realpath(__file__)), "assets", "sample.json")])
        self.assertEqual(get_submodel_argv(args)[1:],
            ['--cameras', '{"test": "1"}', '--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel'])

        # Camera JSON string (already-inline JSON is passed through)
        args = config.config(["--project-path", "/datasets", "--cameras", '{"test": "1"}'])
        self.assertEqual(get_submodel_argv(args)[1:],
            ['--cameras', '{"test": "1"}', '--orthophoto-cutline', '--dem-euclidean-map', '--skip-3dmodel'])

    def test_get_submodel_argv_dict(self):
        """The dict form exposes the same assured flags as key/value pairs."""
        # Base
        args = config.config(["--project-path", "/datasets"])
        self.assertEqual(get_submodel_args_dict(args),
            {'orthophoto-cutline': True, 'skip-3dmodel': True, 'dem-euclidean-map': True})
# Allow running this test module directly: python tests/test_osfm.py
if __name__ == '__main__':
    unittest.main()