pull/1530/head
Piero Toffanin 2022-08-09 15:16:00 -04:00
commit 30a0ed2a44
46 changed files with 801 additions and 171 deletions

View file

@ -46,6 +46,7 @@ jobs:
file: ./portable.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
no-cache: true
tags: |
${{ steps.docker_meta.outputs.tags }}
opendronemap/odm:latest

View file

@ -33,6 +33,7 @@ jobs:
file: ./gpu.Dockerfile
platforms: linux/amd64
push: true
no-cache: true
tags: opendronemap/odm:gpu
# Trigger NodeODM build
- name: Dispatch NodeODM Build Event

1
.gitignore vendored
View file

@ -27,3 +27,4 @@ settings.yaml
.setupdevenv
__pycache__
*.snap
storage/

View file

@ -11,6 +11,9 @@ WORKDIR /code
# Copy everything
COPY . ./
# Use old-releases for 21.04
RUN printf "deb http://old-releases.ubuntu.com/ubuntu/ hirsute main restricted\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates main restricted\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute universe\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates universe\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute multiverse\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates multiverse\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-backports main restricted universe multiverse" > /etc/apt/sources.list
# Run the build
RUN bash configure.sh install
@ -36,6 +39,9 @@ COPY --from=builder /code /code
# Copy the Python libraries installed via pip from the builder
COPY --from=builder /usr/local /usr/local
# Use old-releases for 21.04
RUN printf "deb http://old-releases.ubuntu.com/ubuntu/ hirsute main restricted\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates main restricted\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute universe\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates universe\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute multiverse\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates multiverse\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-backports main restricted universe multiverse" > /etc/apt/sources.list
# Install shared libraries that we depend on via APT, but *not*
# the -dev packages to save space!
# Also run a smoke test on ODM and OpenSfM

View file

@ -98,15 +98,6 @@ option(ODM_BUILD_OpenCV "Force to build OpenCV library" OFF)
SETUP_EXTERNAL_PROJECT(OpenCV ${ODM_OpenCV_Version} ${ODM_BUILD_OpenCV})
# ---------------------------------------------------------------------------------------------
# Point Cloud Library (PCL)
#
set(ODM_PCL_Version 1.8.0)
option(ODM_BUILD_PCL "Force to build PCL library" OFF)
SETUP_EXTERNAL_PROJECT(PCL ${ODM_PCL_Version} ${ODM_BUILD_PCL})
# ---------------------------------------------------------------------------------------------
# Google Flags library (GFlags)
#
@ -201,9 +192,9 @@ externalproject_add(dem2points
)
externalproject_add(odm_orthophoto
DEPENDS pcl opencv
DEPENDS opencv
GIT_REPOSITORY https://github.com/OpenDroneMap/odm_orthophoto.git
GIT_TAG main
GIT_TAG 288b
PREFIX ${SB_BINARY_DIR}/odm_orthophoto
SOURCE_DIR ${SB_SOURCE_DIR}/odm_orthophoto
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}

View file

@ -10,6 +10,10 @@ if (WIN32)
-DOPENCV_BIN_INSTALL_PATH=${SB_INSTALL_DIR}/bin)
endif()
# TODO: - add PYTHON HOME
# - add NUMPY paths, packages paths?
# - re-run cmake . from opencv dir, check that bindings are being built.
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp

View file

@ -52,7 +52,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS
GIT_TAG 287
GIT_TAG 288
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------

View file

@ -19,7 +19,11 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
<<<<<<< HEAD
GIT_TAG m1native
=======
GIT_TAG 288
>>>>>>> 0bbd16d24fa4a1367ff540b22bce24016872a910
#--Update/Patch step----------
UPDATE_COMMAND git submodule update --init --recursive
#--Configure step-------------

View file

@ -12,7 +12,7 @@ if(CUDA_FOUND)
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/pypopsift
GIT_TAG 281
GIT_TAG 288
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------

View file

@ -1 +1 @@
2.8.7
2.8.8

View file

@ -17,7 +17,7 @@ except ImportError:
run `sudo apt-get install libgdal-dev` \
# Check Gdal version with \
gdal-config --version \
#install correspondig gdal version with pip : \
#install corresponding gdal version with pip : \
pip3 install GDAL==2.4.0")
@ -50,7 +50,7 @@ a Geotif with NDVI, NDRE and GNDVI agricultural indices')
if __name__ == "__main__":
# Supress/hide warning when dividing by zero
# Suppress/hide warning when dividing by zero
numpy.seterr(divide='ignore', invalid='ignore')
rootdir = os.path.dirname(os.path.abspath(__file__))

View file

@ -11,7 +11,7 @@ except ImportError:
run `sudo apt-get install libgdal-dev` \
# Check Gdal version with \
gdal-config --version \
#install correspondig gdal version with pip : \
#install corresponding gdal version with pip : \
pip3 install GDAL==2.4.0")
def parse_args():

View file

@ -381,7 +381,7 @@ with rasterio.open(dem_path) as dem_raster:
if not outfile.endswith(".tif"):
outfile = outfile + ".tif"
with rasterio.open(outfile, 'w', **profile) as wout:
with rasterio.open(outfile, 'w', BIGTIFF="IF_SAFER", **profile) as wout:
for b in range(num_bands):
wout.write(imgout[b], b + 1)
if with_alpha:

View file

@ -88,7 +88,7 @@ try:
elif typ == 'tgi':
indeks = calcTgi(red, green, blue)
with rasterio.open(outFileName, 'w', **profile) as dst:
with rasterio.open(outFileName, 'w', BIGTIFF="IF_SAFER", **profile) as dst:
dst.write(indeks.astype(rasterio.float32), 1)
except rasterio.errors.RasterioIOError:
print bcolors.FAIL + 'Orthophoto file not found or access denied' + bcolors.ENDC

View file

@ -5,14 +5,10 @@ First of all, thank you for taking the time to report an issue.
Before you continue, make sure you are in the right place. Please open an issue only to report faults and bugs. For questions and discussion please open a topic on http://community.opendronemap.org/c/opendronemap.
Please use the format below to report bugs and faults. It will help improve the resolution process.
Please use the format below to report bugs and faults.
****************************************
### How did you install OpenDroneMap? (Docker, natively, ...)?
[Type answer here]
### What's your browser and operating system? (Copy/paste the output of https://www.whatismybrowser.com/)
### How did you install ODM? (Docker, installer, natively, ...)?
[Type answer here]
@ -24,7 +20,7 @@ Please use the format below to report bugs and faults. It will help improve the
[Type answer here]
### How can we reproduce this? (What steps did you do to trigger the problem? What parameters are you using for processing? If possible please include a copy of your dataset uploaded on Google Drive or Dropbox. Be detailed)
### How can we reproduce this? What steps did you do to trigger the problem? If this is an issue with processing a dataset, YOU MUST include a copy of your dataset uploaded on Google Drive or Dropbox (otherwise we cannot reproduce this).
[Type answer here]

51
opendm/ai.py 100644
View file

@ -0,0 +1,51 @@
import os
from opendm.net import download
from opendm import log
import zipfile
import time
def get_model(namespace, url, version, name = "model.onnx"):
version = version.replace(".", "_")
base_dir = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")), "storage", "models")
namespace_dir = os.path.join(base_dir, namespace)
versioned_dir = os.path.join(namespace_dir, version)
if not os.path.isdir(versioned_dir):
os.makedirs(versioned_dir, exist_ok=True)
# Check if we need to download it
model_file = os.path.join(versioned_dir, name)
if not os.path.isfile(model_file):
log.ODM_INFO("Downloading AI model from %s ..." % url)
last_update = 0
def callback(progress):
nonlocal last_update
time_has_elapsed = time.time() - last_update >= 2
if time_has_elapsed or int(progress) == 100:
log.ODM_INFO("Downloading: %s%%" % int(progress))
last_update = time.time()
try:
downloaded_file = download(url, versioned_dir, progress_callback=callback)
except Exception as e:
log.ODM_WARNING("Cannot download %s: %s" % (url, str(e)))
return None
if os.path.basename(downloaded_file).lower().endswith(".zip"):
log.ODM_INFO("Extracting %s ..." % downloaded_file)
with zipfile.ZipFile(downloaded_file, 'r') as z:
z.extractall(versioned_dir)
os.remove(downloaded_file)
if not os.path.isfile(model_file):
log.ODM_WARNING("Cannot find %s (is the URL to the AI model correct?)" % model_file)
return None
else:
return model_file
else:
return model_file
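A minimal usage sketch of this helper, mirroring how the sky-removal stage later in this commit calls it:

from opendm.ai import get_model

# Downloads and caches the model under storage/models/<namespace>/<version>/;
# returns the path to model.onnx, or None on failure.
model = get_model("skyremoval",
                  "https://github.com/OpenDroneMap/SkyRemoval/releases/download/v1.0.5/model.zip",
                  "v1.0.5")
if model is not None:
    print("Model ready at %s" % model)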

View file

@ -22,7 +22,7 @@ def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_of
for shot_image in reconstruction['shots']:
shot = reconstruction['shots'][shot_image]
if shot['gps_dop'] < 999999:
if shot.get('gps_dop', 999999) < 999999:
camera = reconstruction['cameras'][shot['camera']]
p = ogr.Geometry(ogr.wkbPoint)

View file

@ -66,7 +66,7 @@ def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
i = 1
for t in items:
pq.put((i, t.copy()))
pq.put((i, t))
i += 1
def stop_workers():

View file

@ -237,6 +237,12 @@ def config(argv=None, parser=None):
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--sky-removal',
action=StoreTrue,
nargs=0,
default=False,
help='Automatically compute image masks using AI to remove the sky. Experimental. Default: %(default)s')
parser.add_argument('--use-3dmesh',
action=StoreTrue,
nargs=0,
@ -307,7 +313,7 @@ def config(argv=None, parser=None):
default=3,
type=float,
help=('Automatically crop image outputs by creating a smooth buffer '
'around the dataset boundaries, shrinked by N meters. '
'around the dataset boundaries, shrunk by N meters. '
'Use 0 to disable cropping. '
'Default: %(default)s'))
@ -329,6 +335,14 @@ def config(argv=None, parser=None):
'This can help remove far away background artifacts (sky, background landscapes, etc.). See also --boundary. '
'Default: %(default)s')
parser.add_argument('--auto-boundary-distance',
metavar='<positive float>',
action=StoreValue,
type=float,
default=0,
help='Specify the distance between camera shot locations and the outer edge of the boundary when computing the boundary with --auto-boundary. Set to 0 to automatically choose a value. '
'Default: %(default)s')
parser.add_argument('--pc-quality',
metavar='<string>',
action=StoreValue,

View file

@ -96,7 +96,7 @@ class Cropper:
convexhull = geomcol.ConvexHull()
# If buffer distance is specified
# Create two buffers, one shrinked by
# Create two buffers, one shrunk by
# N + 3 and then that buffer expanded by 3
# so that we get smooth corners. \m/
BUFFER_SMOOTH_DISTANCE = 3
@ -185,7 +185,7 @@ class Cropper:
convexhull = geomcol.ConvexHull()
# If buffer distance is specified
# Create two buffers, one shrinked by
# Create two buffers, one shrunk by
# N + 3 and then that buffer expanded by 3
# so that we get smooth corners. \m/
BUFFER_SMOOTH_DISTANCE = 3

View file

@ -35,7 +35,7 @@ def write_raster(data, file):
'crs': None
}
with rasterio.open(file, 'w', **profile) as wout:
with rasterio.open(file, 'w', BIGTIFF="IF_SAFER", **profile) as wout:
wout.write(data, 1)
def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrency=1, scale=1):

View file

@ -5,6 +5,8 @@ import numpy
import math
import time
import shutil
import functools
from joblib import delayed, Parallel
from opendm.system import run
from opendm import point_cloud
from opendm import io
@ -270,7 +272,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
'"{tiles_vrt}" "{geotiff}"'.format(**kwargs))
if apply_smoothing:
median_smoothing(geotiff_path, output_path)
median_smoothing(geotiff_path, output_path, num_workers=max_workers)
os.remove(geotiff_path)
else:
os.replace(geotiff_path, output_path)
@ -319,7 +321,7 @@ def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
return output_path
def median_smoothing(geotiff_path, output_path, smoothing_iterations=1):
def median_smoothing(geotiff_path, output_path, smoothing_iterations=1, window_size=512, num_workers=1):
""" Apply median smoothing """
start = datetime.now()
@ -331,24 +333,52 @@ def median_smoothing(geotiff_path, output_path, smoothing_iterations=1):
with rasterio.open(geotiff_path) as img:
nodata = img.nodatavals[0]
dtype = img.dtypes[0]
shape = img.shape
arr = img.read()[0]
nodata_locs = numpy.where(arr == nodata)
# Median filter (careful, changing the value 5 might require tweaking)
# the lines below. There's another numpy function that takes care of
# these edge cases, but it's slower.
for i in range(smoothing_iterations):
log.ODM_INFO("Smoothing iteration %s" % str(i + 1))
arr = ndimage.median_filter(arr, size=9, output=dtype, mode='nearest')
rows, cols = numpy.meshgrid(numpy.arange(0, shape[0], window_size), numpy.arange(0, shape[1], window_size))
rows = rows.flatten()
cols = cols.flatten()
rows_end = numpy.minimum(rows + window_size, shape[0])
cols_end = numpy.minimum(cols + window_size, shape[1])
windows = numpy.dstack((rows, cols, rows_end, cols_end)).reshape(-1, 4)
# Median filter leaves a bunch of zeros in nodata areas
arr[nodata_locs] = nodata
filter = functools.partial(ndimage.median_filter, size=9, output=dtype, mode='nearest')
# threading backend and GIL released filter are important for memory efficiency and multi-core performance
window_arrays = Parallel(n_jobs=num_workers, backend='threading')(delayed(window_filter_2d)(arr, nodata, window, 9, filter) for window in windows)
for window, win_arr in zip(windows, window_arrays):
arr[window[0]:window[2], window[1]:window[3]] = win_arr
log.ODM_INFO("Smoothing completed in %s" % str(datetime.now() - start))
# write output
with rasterio.open(output_path, 'w', **img.profile) as imgout:
with rasterio.open(output_path, 'w', BIGTIFF="IF_SAFER", **img.profile) as imgout:
imgout.write(arr, 1)
log.ODM_INFO('Completed smoothing to create %s in %s' % (output_path, datetime.now() - start))
return output_path
log.ODM_INFO('Completed smoothing to create %s in %s' % (output_path, datetime.now() - start))
return output_path
def window_filter_2d(arr, nodata, window, kernel_size, filter):
"""
Apply a filter to a DEM within a window; expects to work with kernel-based filters.
:param arr: the 2D array to filter
:param nodata: the nodata value; nodata cells are restored after filtering
:param window: the window to apply the filter to, a list containing row_start, col_start, row_end, col_end
:param kernel_size: the size of the filter kernel; works with odd sizes (even sizes untested)
:param filter: the filter function, which takes a 2D array as input and returns the filtered result
"""
shape = arr.shape[:2]
if window[0] < 0 or window[1] < 0 or window[2] > shape[0] or window[3] > shape[1]:
raise Exception('Window is out of bounds')
expanded_window = [ max(0, window[0] - kernel_size // 2), max(0, window[1] - kernel_size // 2), min(shape[0], window[2] + kernel_size // 2), min(shape[1], window[3] + kernel_size // 2) ]
win_arr = arr[expanded_window[0]:expanded_window[2], expanded_window[1]:expanded_window[3]]
# A better approach to nodata would mirror how the filter algorithms handle borders (reflection, nearest, interpolation, etc.).
# For now we follow the old approach to guarantee identical outputs.
nodata_locs = win_arr == nodata
win_arr = filter(win_arr)
win_arr[nodata_locs] = nodata
win_arr = win_arr[window[0] - expanded_window[0] : window[2] - expanded_window[0], window[1] - expanded_window[1] : window[3] - expanded_window[1]]
log.ODM_DEBUG("Filtered window: %s" % str(window))
return win_arr
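To make the windowing concrete, here is a small self-contained sketch of the same expand-filter-crop pattern on an 8x8 array (kernel size 3 instead of 9, and a plain loop instead of joblib, purely for illustration):

import functools
import numpy as np
from scipy import ndimage

nodata = -9999.0
arr = np.arange(64, dtype="float32").reshape(8, 8)
arr[2, 2] = nodata
kernel = 3
filt = functools.partial(ndimage.median_filter, size=kernel, mode='nearest')
windows = [(0, 0, 4, 4), (0, 4, 4, 8), (4, 0, 8, 4), (4, 4, 8, 8)]

filtered = []
for r0, c0, r1, c1 in windows:
    # Expand each window by kernel // 2 so edge pixels see their true neighbors,
    # then crop the filtered result back to the original window.
    e0, e1 = max(0, r0 - kernel // 2), max(0, c0 - kernel // 2)
    e2, e3 = min(8, r1 + kernel // 2), min(8, c1 + kernel // 2)
    win = arr[e0:e2, e1:e3]
    out = filt(win)
    out[win == nodata] = nodata  # reinstate nodata after filtering
    filtered.append(out[r0 - e0:r1 - e0, c0 - e1:c1 - e1])

# Write results back only after all windows are computed, as the parallel version does
for (r0, c0, r1, c1), out in zip(windows, filtered):
    arr[r0:r1, c0:c1] = out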

View file

@ -25,19 +25,18 @@ def dn_to_radiance(photo, image):
image = image.astype("float32")
if len(image.shape) != 3:
raise ValueError("Image should have shape length of 3 (got: %s)" % len(image.shape))
# Handle thermal bands (experimental)
if photo.band_name == 'LWIR':
image -= (273.15 * 100.0) # Convert Kelvin to Celsius
image *= 0.01
return image
# Thermal (this should never happen, but just in case..)
if photo.is_thermal():
return image
# All others
a1, a2, a3 = photo.get_radiometric_calibration()
dark_level = photo.get_dark_level()
exposure_time = photo.exposure_time
gain = photo.get_gain()
gain_adjustment = photo.gain_adjustment
photometric_exp = photo.get_photometric_exposure()
if a1 is None and photometric_exp is None:
@ -58,7 +57,9 @@ def dn_to_radiance(photo, image):
bit_depth_max = photo.get_bit_depth_max()
if bit_depth_max:
image /= bit_depth_max
else:
log.ODM_WARNING("Cannot normalize DN for %s, bit depth is missing" % photo.filename)
if V is not None:
# vignette correction
V = np.repeat(V[:, :, np.newaxis], image.shape[2], axis=2)
@ -82,6 +83,9 @@ def dn_to_radiance(photo, image):
image *= a1
if gain_adjustment is not None:
image *= gain_adjustment
return image
def vignette_map(photo):
@ -106,8 +110,12 @@ def vignette_map(photo):
# compute the vignette polynomial for each distance - we divide by the polynomial so that the
# corrected image is image_corrected = image_original * vignetteCorrection
vignette = np.polyval(vignette_poly, r)
# DJI is special apparently
if photo.camera_make != "DJI":
vignette = 1.0 / vignette
vignette = 1.0 / np.polyval(vignette_poly, r)
return vignette, x, y
return None, None, None
@ -122,7 +130,7 @@ def compute_irradiance(photo, use_sun_sensor=True):
if photo.is_thermal():
return 1.0
# Some cameras (Micasense) store the value (nice! just return)
# Some cameras (Micasense, DJI) store the value (nice! just return)
hirradiance = photo.get_horizontal_irradiance()
if hirradiance is not None:
return hirradiance
@ -381,12 +389,24 @@ def compute_homography(image_filename, align_image_filename):
return h, (align_image_gray.shape[1], align_image_gray.shape[0])
algo = 'feat'
result = compute_using(find_features_homography)
warp_matrix = None
dimension = None
algo = None
if result[0] is None:
if max_dim > 320:
algo = 'feat'
result = compute_using(find_features_homography)
if result[0] is None:
algo = 'ecc'
log.ODM_INFO("Can't use features matching, will use ECC (this might take a bit)")
result = compute_using(find_ecc_homography)
if result[0] is None:
algo = None
else: # ECC only for low resolution images
algo = 'ecc'
log.ODM_INFO("Can't use features matching, will use ECC (this might take a bit)")
log.ODM_INFO("Using ECC (this might take a bit)")
result = compute_using(find_ecc_homography)
if result[0] is None:
algo = None
@ -396,7 +416,7 @@ def compute_homography(image_filename, align_image_filename):
except Exception as e:
log.ODM_WARNING("Compute homography: %s" % str(e))
return None, None, (None, None)
return None, (None, None), None
def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000, termination_eps=1e-8, start_eps=1e-4):
pyramid_levels = 0
@ -413,10 +433,14 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
if align_image_gray.shape[0] != image_gray.shape[0]:
align_image_gray = to_8bit(align_image_gray)
image_gray = to_8bit(image_gray)
fx = align_image_gray.shape[1]/image_gray.shape[1]
fy = align_image_gray.shape[0]/image_gray.shape[0]
image_gray = cv2.resize(image_gray, None,
fx=align_image_gray.shape[1]/image_gray.shape[1],
fy=align_image_gray.shape[0]/image_gray.shape[0],
interpolation=cv2.INTER_AREA)
fx=fx,
fy=fy,
interpolation=(cv2.INTER_AREA if (fx < 1.0 and fy < 1.0) else cv2.INTER_LANCZOS4))
# Build pyramids
image_gray_pyr = [image_gray]
@ -430,8 +454,9 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
align_image_pyr.insert(0, cv2.resize(align_image_pyr[0], None, fx=1/2, fy=1/2,
interpolation=cv2.INTER_AREA))
# Define the motion model
# Define the motion model, scale the initial warp matrix to smallest level
warp_matrix = np.eye(3, 3, dtype=np.float32)
warp_matrix = warp_matrix * np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32)**(1-(pyramid_levels+1))
for level in range(pyramid_levels+1):
ig = gradient(gaussian(image_gray_pyr[level]))
@ -453,6 +478,7 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
if level != pyramid_levels:
log.ODM_INFO("Could not compute ECC warp_matrix at pyramid level %s, resetting matrix" % level)
warp_matrix = np.eye(3, 3, dtype=np.float32)
warp_matrix = warp_matrix * np.array([[1,1,2],[1,1,2],[0.5,0.5,1]], dtype=np.float32)**(1-(pyramid_levels+1))
else:
raise e
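To see what the elementwise scaling does, here is a quick sketch with an invented warp: translation terms are divided by 2 per pyramid level, so a warp expressed at full resolution becomes valid at the smallest pyramid level.

import numpy as np

pyramid_levels = 3
W = np.array([[1, 0, 40],
              [0, 1, -16],
              [0, 0, 1]], dtype=np.float32)  # hypothetical warp with a (40, -16) px translation
S = np.array([[1, 1, 2],
              [1, 1, 2],
              [0.5, 0.5, 1]], dtype=np.float32) ** (1 - (pyramid_levels + 1))
W_small = W * S  # elementwise: translations divided by 2**3 = 8
print(W_small[0, 2], W_small[1, 2])  # 5.0 -2.0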
@ -462,29 +488,33 @@ def find_ecc_homography(image_gray, align_image_gray, number_of_iterations=1000,
return warp_matrix
def find_features_homography(image_gray, align_image_gray, feature_retention=0.25):
def find_features_homography(image_gray, align_image_gray, feature_retention=0.7, min_match_count=10):
# Detect SIFT features and compute descriptors.
detector = cv2.SIFT_create(edgeThreshold=10, contrastThreshold=0.1)
kp_image, desc_image = detector.detectAndCompute(image_gray, None)
kp_align_image, desc_align_image = detector.detectAndCompute(align_image_gray, None)
# Match
bf = cv2.BFMatcher(cv2.NORM_L1,crossCheck=True)
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
try:
matches = bf.match(desc_image, desc_align_image)
matches = flann.knnMatch(desc_image, desc_align_image, k=2)
except Exception as e:
log.ODM_INFO("Cannot match features")
return None
# Sort by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Filter good matches following Lowe's ratio test
good_matches = []
for m, n in matches:
if m.distance < feature_retention * n.distance:
good_matches.append(m)
# Remove bad matches
num_good_matches = int(len(matches) * feature_retention)
matches = matches[:num_good_matches]
matches = good_matches
if len(matches) < 4:
log.ODM_INFO("Insufficient features: %s" % len(matches))
if len(matches) < min_match_count:
return None
# Debug

164
opendm/net.py 100644
View file

@ -0,0 +1,164 @@
import requests
import math
import os
import time
try:
import queue
except ImportError:
import Queue as queue
import threading
from pyodm.utils import AtomicCounter
from pyodm.exceptions import RangeNotAvailableError, OdmError
from urllib3.exceptions import ReadTimeoutError
def download(url, destination, progress_callback=None, parallel_downloads=16, parallel_chunks_size=10, timeout=30):
"""Download files in parallel (download accelerator)
Args:
url (str): URL to download
destination (str): directory where to download file. If the directory does not exist, it will be created.
progress_callback (function): an optional callback with one parameter, the download progress percentage.
parallel_downloads (int): maximum number of parallel downloads if the node supports http range.
parallel_chunks_size (int): size in MB of chunks for parallel downloads
timeout (int): seconds before timing out
Returns:
str: path to file
"""
if not os.path.exists(destination):
os.makedirs(destination, exist_ok=True)
try:
download_stream = requests.get(url, timeout=timeout, stream=True)
headers = download_stream.headers
output_path = os.path.join(destination, os.path.basename(url))
# Keep track of download progress (if possible)
content_length = download_stream.headers.get('content-length')
total_length = int(content_length) if content_length is not None else None
downloaded = 0
chunk_size = int(parallel_chunks_size * 1024 * 1024)
use_fallback = False
accept_ranges = headers.get('accept-ranges')
# Can we do parallel downloads?
if accept_ranges is not None and accept_ranges.lower() == 'bytes' and total_length is not None and total_length > chunk_size and parallel_downloads > 1:
num_chunks = int(math.ceil(total_length / float(chunk_size)))
num_workers = parallel_downloads
class nonloc:
completed_chunks = AtomicCounter(0)
merge_chunks = [False] * num_chunks
error = None
def merge():
current_chunk = 0
with open(output_path, "wb") as out_file:
while current_chunk < num_chunks and nonloc.error is None:
if nonloc.merge_chunks[current_chunk]:
chunk_file = "%s.part%s" % (output_path, current_chunk)
with open(chunk_file, "rb") as fd:
out_file.write(fd.read())
os.unlink(chunk_file)
current_chunk += 1
else:
time.sleep(0.1)
def worker():
while True:
task = q.get()
part_num, bytes_range = task
if bytes_range is None or nonloc.error is not None:
q.task_done()
break
try:
# Download chunk
res = requests.get(url, stream=True, timeout=timeout, headers={'Range': 'bytes=%s-%s' % bytes_range})
if res.status_code == 206:
with open("%s.part%s" % (output_path, part_num), 'wb') as fd:
bytes_written = 0
try:
for chunk in res.iter_content(4096):
bytes_written += fd.write(chunk)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
raise OdmError(str(e))
if bytes_written != (bytes_range[1] - bytes_range[0] + 1):
# Process again
q.put((part_num, bytes_range))
return
with nonloc.completed_chunks.lock:
nonloc.completed_chunks.value += 1
if progress_callback is not None:
progress_callback(100.0 * nonloc.completed_chunks.value / num_chunks)
nonloc.merge_chunks[part_num] = True
else:
nonloc.error = RangeNotAvailableError()
except OdmError as e:
time.sleep(5)
q.put((part_num, bytes_range))
except Exception as e:
nonloc.error = e
finally:
q.task_done()
q = queue.PriorityQueue()
threads = []
for i in range(num_workers):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
merge_thread = threading.Thread(target=merge)
merge_thread.start()
range_start = 0
for i in range(num_chunks):
range_end = min(range_start + chunk_size - 1, total_length - 1)
q.put((i, (range_start, range_end)))
range_start = range_end + 1
# block until all tasks are done
while not all(nonloc.merge_chunks) and nonloc.error is None:
time.sleep(0.1)
# stop workers
for i in range(len(threads)):
q.put((-1, None))
for t in threads:
t.join()
merge_thread.join()
if nonloc.error is not None:
if isinstance(nonloc.error, RangeNotAvailableError):
use_fallback = True
else:
raise nonloc.error
else:
use_fallback = True
if use_fallback:
# Single connection, boring download
with open(output_path, 'wb') as fd:
for chunk in download_stream.iter_content(4096):
downloaded += len(chunk)
if progress_callback is not None and total_length is not None:
progress_callback((100.0 * float(downloaded) / total_length))
fd.write(chunk)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, ReadTimeoutError) as e:
raise OdmError(e)
return output_path
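A minimal usage sketch for this download accelerator (URL and destination are placeholders):

from opendm.net import download

def on_progress(pct):
    print("Downloaded %.0f%%" % pct)

# Uses parallel chunked downloads when the server supports HTTP Range requests,
# otherwise falls back to a single streamed connection.
path = download("https://example.com/model.zip", "/tmp/models",
                progress_callback=on_progress)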

View file

@ -44,9 +44,25 @@ def generate_png(orthophoto_file, output_file=None, outsize=None):
# See if we need to select top three bands
bandparam = ""
gtif = gdal.Open(orthophoto_file)
if gtif.RasterCount > 4:
bandparam = "-b 1 -b 2 -b 3 -a_nodata 0"
bands = []
for idx in range(1, gtif.RasterCount+1):
bands.append(gtif.GetRasterBand(idx).GetColorInterpretation())
bands = dict(zip(bands, range(1, len(bands)+1)))
try:
red = bands.get(gdal.GCI_RedBand)
green = bands.get(gdal.GCI_GreenBand)
blue = bands.get(gdal.GCI_BlueBand)
if red is None or green is None or blue is None:
raise Exception("Cannot find bands")
bandparam = "-b %s -b %s -b %s -a_nodata 0" % (red, green, blue)
except:
bandparam = "-b 1 -b 2 -b 3 -a_nodata 0"
gtif = None
osparam = ""
if outsize is not None:
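The band-selection logic above can be tried in isolation; a sketch assuming a hypothetical orthophoto whose bands are stored as Blue, Green, Red, NIR in file order:

from osgeo import gdal

ds = gdal.Open("orthophoto.tif")  # hypothetical input
interp = {ds.GetRasterBand(i).GetColorInterpretation(): i
          for i in range(1, ds.RasterCount + 1)}
red = interp.get(gdal.GCI_RedBand)
green = interp.get(gdal.GCI_GreenBand)
blue = interp.get(gdal.GCI_BlueBand)
if None in (red, green, blue):
    bandparam = "-b 1 -b 2 -b 3 -a_nodata 0"  # same fallback as above
else:
    bandparam = "-b %s -b %s -b %s -a_nodata 0" % (red, green, blue)  # here: -b 3 -b 2 -b 1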
@ -154,7 +170,7 @@ def feather_raster(input_raster, output_raster, blend_distance=20):
else:
log.ODM_WARNING("%s does not have an alpha band, cannot feather raster!" % input_raster)
with rasterio.open(output_raster, 'w', **rast.profile) as dst:
with rasterio.open(output_raster, 'w', BIGTIFF="IF_SAFER", **rast.profile) as dst:
dst.colorinterp = rast.colorinterp
dst.write(out_image)

View file

@ -405,6 +405,8 @@ class OSFMContext:
if hasattr(self, 'gpu_sift_feature_extraction'):
log.ODM_WARNING("GPU SIFT extraction failed, maybe the graphics card is not supported? Attempting fallback to CPU")
self.update_config({'feature_type': "SIFT"})
if os.path.exists(features_dir):
shutil.rmtree(features_dir)
self.run('detect_features')
else:
raise e

View file

@ -109,12 +109,14 @@ class ODM_Photo:
# Multi-band fields
self.band_name = 'RGB'
self.band_index = 0
self.capture_uuid = None # DJI only
self.capture_uuid = None
# Multi-spectral fields
self.fnumber = None
self.radiometric_calibration = None
self.black_level = None
self.gain = None
self.gain_adjustment = None
# Capture info
self.exposure_time = None
@ -142,9 +144,13 @@ class ODM_Photo:
self.dls_roll = None
# Aircraft speed
self.speedX = None
self.speedY = None
self.speedZ = None
self.speed_x = None
self.speed_y = None
self.speed_z = None
# Original image width/height at capture time (before possible resizes)
self.exif_width = None
self.exif_height = None
# self.center_wavelength = None
# self.bandwidth = None
@ -217,7 +223,7 @@ class ODM_Photo:
self.camera_model = "unknown"
if 'GPS GPSAltitude' in tags:
self.altitude = self.float_value(tags['GPS GPSAltitude'])
if 'GPS GPSAltitudeRef' in tags and self.int_value(tags['GPS GPSAltitudeRef']) > 0:
if 'GPS GPSAltitudeRef' in tags and self.int_value(tags['GPS GPSAltitudeRef']) is not None and self.int_value(tags['GPS GPSAltitudeRef']) > 0:
self.altitude *= -1
if 'GPS GPSLatitude' in tags and 'GPS GPSLatitudeRef' in tags:
self.latitude = self.dms_to_decimal(tags['GPS GPSLatitude'], tags['GPS GPSLatitudeRef'])
@ -239,7 +245,9 @@ class ODM_Photo:
self.black_level = self.list_values(tags['Image Tag 0xC61A'])
elif 'BlackLevel' in tags:
self.black_level = self.list_values(tags['BlackLevel'])
elif 'Image BlackLevel' in tags:
self.black_level = self.list_values(tags['Image BlackLevel'])
if 'EXIF ExposureTime' in tags:
self.exposure_time = self.float_value(tags['EXIF ExposureTime'])
@ -252,10 +260,10 @@ class ODM_Photo:
self.iso_speed = self.int_value(tags['EXIF PhotographicSensitivity'])
elif 'EXIF ISOSpeedRatings' in tags:
self.iso_speed = self.int_value(tags['EXIF ISOSpeedRatings'])
if 'Image BitsPerSample' in tags:
self.bits_per_sample = self.int_value(tags['Image BitsPerSample'])
if 'EXIF DateTimeOriginal' in tags:
str_time = tags['EXIF DateTimeOriginal'].values
utc_time = datetime.strptime(str_time, "%Y:%m:%d %H:%M:%S")
@ -277,10 +285,15 @@ class ODM_Photo:
if 'MakerNote SpeedX' in tags and \
'MakerNote SpeedY' in tags and \
'MakerNote SpeedZ' in tags:
self.speedX = self.float_value(tags['MakerNote SpeedX'])
self.speedY = self.float_value(tags['MakerNote SpeedY'])
self.speedZ = self.float_value(tags['MakerNote SpeedZ'])
self.speed_x = self.float_value(tags['MakerNote SpeedX'])
self.speed_y = self.float_value(tags['MakerNote SpeedY'])
self.speed_z = self.float_value(tags['MakerNote SpeedZ'])
if 'EXIF ExifImageWidth' in tags and \
'EXIF ExifImageLength' in tags:
self.exif_width = self.int_value(tags['EXIF ExifImageWidth'])
self.exif_height = self.int_value(tags['EXIF ExifImageLength'])
except Exception as e:
log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (self.filename, str(e)))
@ -339,9 +352,18 @@ class ODM_Photo:
self.set_attr_from_xmp_tag('capture_uuid', xtags, [
'@drone-dji:CaptureUUID', # DJI
'MicaSense:CaptureId', # MicaSense Altum
'@Camera:ImageUniqueID', # sentera 6x
])
self.set_attr_from_xmp_tag('gain', xtags, [
'@drone-dji:SensorGain'
], float)
self.set_attr_from_xmp_tag('gain_adjustment', xtags, [
'@drone-dji:SensorGainAdjustment'
], float)
# Camera make / model for some cameras is stored in the XMP
if self.camera_make == '':
self.set_attr_from_xmp_tag('camera_make', xtags, [
@ -385,13 +407,13 @@ class ODM_Photo:
if '@drone-dji:FlightXSpeed' in xtags and \
'@drone-dji:FlightYSpeed' in xtags and \
'@drone-dji:FlightZSpeed' in xtags:
self.set_attr_from_xmp_tag('speedX', xtags, [
self.set_attr_from_xmp_tag('speed_x', xtags, [
'@drone-dji:FlightXSpeed'
], float)
self.set_attr_from_xmp_tag('speedY', xtags, [
self.set_attr_from_xmp_tag('speed_y', xtags, [
'@drone-dji:FlightYSpeed',
], float)
self.set_attr_from_xmp_tag('speedZ', xtags, [
self.set_attr_from_xmp_tag('speed_z', xtags, [
'@drone-dji:FlightZSpeed',
], float)
@ -616,7 +638,7 @@ class ODM_Photo:
if len(parts) == 3:
return list(map(float, parts))
return [None, None, None]
return [None, None, None]
def get_dark_level(self):
if self.black_level:
@ -624,8 +646,10 @@ class ODM_Photo:
return levels.mean()
def get_gain(self):
#(gain = ISO/100)
if self.iso_speed:
if self.gain is not None:
return self.gain
elif self.iso_speed:
#(gain = ISO/100)
return self.iso_speed / 100.0
def get_vignetting_center(self):
@ -644,6 +668,7 @@ class ODM_Photo:
# Different camera vendors seem to use different ordering for the coefficients
if self.camera_make != "Sentera":
coeffs.reverse()
return coeffs
def get_utc_time(self):
@ -662,6 +687,9 @@ class ODM_Photo:
scale = self.irradiance_scale_to_si
return self.horizontal_irradiance * scale
elif self.camera_make == "DJI" and self.spectral_irradiance is not None:
# Phantom 4 Multispectral saves this value in @drone-dji:Irradiance
return self.spectral_irradiance
def get_sun_sensor(self):
if self.sun_sensor is not None:
@ -685,6 +713,11 @@ class ODM_Photo:
def get_bit_depth_max(self):
if self.bits_per_sample:
return float(2 ** self.bits_per_sample)
else:
# If it's a JPEG, this must be 256
_, ext = os.path.splitext(self.filename)
if ext.lower() in [".jpeg", ".jpg"]:
return 256.0
return None
@ -715,9 +748,15 @@ class ODM_Photo:
def is_thermal(self):
#Added for support M2EA camera sensor
if(self.camera_make == "DJI"):
return self.camera_model == "MAVIC2-ENTERPRISE-ADVANCED" and self.width == 640 and self.height == 512
if(self.camera_make == "DJI" and self.camera_model == "MAVIC2-ENTERPRISE-ADVANCED" and self.width == 640 and self.height == 512):
return True
#Added for support DJI H20T camera sensor
if(self.camera_make == "DJI" and self.camera_model == "ZH20T" and self.width == 640 and self.height == 512):
return True
return self.band_name.upper() in ["LWIR"] # TODO: more?
def is_rgb(self):
return self.band_name.upper() in ["RGB", "REDGREENBLUE"]
def camera_id(self):
return " ".join(
@ -775,10 +814,10 @@ class ODM_Photo:
# Speed is not useful without GPS
if self.has_speed() and has_gps:
d['speed'] = [self.speedY, self.speedX, self.speedZ]
d['speed'] = [self.speed_y, self.speed_x, self.speed_z]
if rolling_shutter:
d['rolling_shutter'] = get_rolling_shutter_readout(self.camera_make, self.camera_model, rolling_shutter_readout)
d['rolling_shutter'] = get_rolling_shutter_readout(self, rolling_shutter_readout)
return d
@ -793,9 +832,9 @@ class ODM_Photo:
self.kappa is not None
def has_speed(self):
return self.speedX is not None and \
self.speedY is not None and \
self.speedZ is not None
return self.speed_x is not None and \
self.speed_y is not None and \
self.speed_z is not None
def has_geo(self):
return self.latitude is not None and \
@ -853,3 +892,15 @@ class ODM_Photo:
self.omega = math.degrees(math.atan2(-ceb[1][2], ceb[2][2]))
self.phi = math.degrees(math.asin(ceb[0][2]))
self.kappa = math.degrees(math.atan2(-ceb[0][1], ceb[0][0]))
def get_capture_megapixels(self):
if self.exif_width is not None and self.exif_height is not None:
# Accurate so long as resizing / postprocess software
# did not fiddle with the tags
return self.exif_width * self.exif_height / 1e6
elif self.width is not None and self.height is not None:
# Fallback, might not be accurate since the image
# could have been resized
return self.width * self.height / 1e6
else:
return 0.0
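A quick sketch of the new gain resolution order (the photo object is built without EXIF parsing, purely for illustration; the module path is assumed):

from opendm.photo import ODM_Photo

p = ODM_Photo.__new__(ODM_Photo)  # bypass EXIF parsing for this sketch
p.gain, p.iso_speed = None, 200
print(p.get_gain())  # 2.0 -> falls back to ISO/100
p.gain = 1.5         # value read from @drone-dji:SensorGain, when present
print(p.get_gain())  # 1.5 -> explicit sensor gain wins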

View file

@ -504,8 +504,7 @@ class ToolchainTask(Task):
seed_touch_files=["opensfm/features/empty",
"opensfm/matches/empty",
"opensfm/exif/empty"],
outputs=["odm_orthophoto/odm_orthophoto.tif",
"odm_orthophoto/cutline.gpkg",
outputs=["odm_orthophoto/cutline.gpkg",
"odm_orthophoto/odm_orthophoto_cut.tif",
"odm_orthophoto/odm_orthophoto_feathered.tif",
"odm_dem",
@ -513,4 +512,4 @@ class ToolchainTask(Task):
"odm_georeferencing"])
else:
log.ODM_INFO("Already processed toolchain for %s" % submodel_name)
handle_result()
handle_result()

View file

@ -13,14 +13,20 @@ RS_DATABASE = {
'dji fc7203': 20, # Mavic Mini v1
'dji fc3170': 27, # DJI Mavic Air 2
'dji fc3411': 32, # DJI Mavic Air 2S
'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
'dji fc350': 30, # Inspire 1
'yuneec e90': 44, # Yuneec E90
'gopro hero4 black': 30, # GoPro Hero 4 Black
'gopro hero8 black': 17, # GoPro Hero 8 Black
'teracube teracube one': 32 # TeraCube TeraCube_One TR1907Q Mobile Phone
# Help us add more!
# See: https://github.com/OpenDroneMap/RSCalibration for instructions
}
@ -30,16 +36,34 @@ def make_model_key(make, model):
return ("%s %s" % (make.strip(), model.strip())).lower().strip()
warn_db_missing = {}
info_db_found = {}
def get_rolling_shutter_readout(make, model, override_value=0):
def get_rolling_shutter_readout(photo, override_value=0):
global warn_db_missing
global info_db_found
make, model = photo.camera_make, photo.camera_model
if override_value > 0:
return override_value
key = make_model_key(make, model)
if key in RS_DATABASE:
return float(RS_DATABASE[key])
rsd = RS_DATABASE[key]
val = DEFAULT_RS_READOUT
if isinstance(rsd, int) or isinstance(rsd, float):
val = float(rsd)
elif callable(rsd):
val = float(rsd(photo))
else:
log.ODM_WARNING("Invalid rolling shutter calibration entry, returning default of %sms" % DEFAULT_RS_READOUT)
if not key in info_db_found:
log.ODM_INFO("Rolling shutter profile for \"%s %s\" selected, using %sms as --rolling-shutter-readout." % (make, model, val))
info_db_found[key] = True
return val
else:
# Warn once
if not key in warn_db_missing:

View file

@ -0,0 +1,37 @@
import numpy as np
# Based on Fast Guided Filter
# Kaiming He, Jian Sun
# https://arxiv.org/abs/1505.00996
def box(img, radius):
dst = np.zeros_like(img)
(r, c) = img.shape
s = [radius, 1]
c_sum = np.cumsum(img, 0)
dst[0:radius+1, :, ...] = c_sum[radius:2*radius+1, :, ...]
dst[radius+1:r-radius, :, ...] = c_sum[2*radius+1:r, :, ...] - c_sum[0:r-2*radius-1, :, ...]
dst[r-radius:r, :, ...] = np.tile(c_sum[r-1:r, :, ...], s) - c_sum[r-2*radius-1:r-radius-1, :, ...]
s = [1, radius]
c_sum = np.cumsum(dst, 1)
dst[:, 0:radius+1, ...] = c_sum[:, radius:2*radius+1, ...]
dst[:, radius+1:c-radius, ...] = c_sum[:, 2*radius+1 : c, ...] - c_sum[:, 0 : c-2*radius-1, ...]
dst[:, c-radius: c, ...] = np.tile(c_sum[:, c-1:c, ...], s) - c_sum[:, c-2*radius-1 : c-radius-1, ...]
return dst
def guided_filter(img, guide, radius, eps):
(r, c) = img.shape
CNT = box(np.ones([r, c]), radius)
mean_img = box(img, radius) / CNT
mean_guide = box(guide, radius) / CNT
a = ((box(img * guide, radius) / CNT) - mean_img * mean_guide) / (((box(img * img, radius) / CNT) - mean_img * mean_img) + eps)
b = mean_guide - a * mean_img
return (box(a, radius) / CNT) * img + (box(b, radius) / CNT)
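A usage sketch: the first argument acts as the guidance image and the second as the signal being smoothed, matching the call in skyfilter.py below (random data, for illustration):

import numpy as np

rng = np.random.default_rng(0)
image_channel = rng.random((64, 64)).astype("float32")  # guidance (e.g. a photo channel)
coarse_mask = (image_channel > 0.5).astype("float32")   # signal to refine

# Edges of the refined mask snap to edges present in image_channel.
refined = guided_filter(image_channel, coarse_mask, radius=20, eps=0.01)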

View file

@ -0,0 +1,103 @@
import time
import numpy as np
import cv2
import os
import onnx
import onnxruntime as ort
from .guidedfilter import guided_filter
from opendm import log
from threading import Lock
mutex = Lock()
# Use GPU if it is available, otherwise CPU
provider = "CUDAExecutionProvider" if "CUDAExecutionProvider" in ort.get_available_providers() else "CPUExecutionProvider"
class SkyFilter():
def __init__(self, model, width = 384, height = 384):
self.model = model
self.width, self.height = width, height
log.ODM_INFO(' ?> Using provider %s' % provider)
self.load_model()
def load_model(self):
log.ODM_INFO(' -> Loading the model')
onnx_model = onnx.load(self.model)
# Check the model
try:
onnx.checker.check_model(onnx_model)
except onnx.checker.ValidationError as e:
log.ODM_INFO(' !> The model is invalid: %s' % e)
raise
else:
log.ODM_INFO(' ?> The model is valid!')
self.session = ort.InferenceSession(self.model, providers=[provider])
def get_mask(self, img):
height, width, c = img.shape
# Resize image to fit the model input
new_img = cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_AREA)
new_img = np.array(new_img, dtype=np.float32)
# Input vector for onnx model
input_v = np.expand_dims(new_img.transpose((2, 0, 1)), axis=0)
ort_inputs = {self.session.get_inputs()[0].name: input_v}
# Run the model
with mutex:
ort_outs = self.session.run(None, ort_inputs)
# Get the output
output = np.array(ort_outs)
output = output[0][0].transpose((1, 2, 0))
output = cv2.resize(output, (width, height), interpolation=cv2.INTER_LANCZOS4)
output = np.array([output, output, output]).transpose((1, 2, 0))
output = np.clip(output, a_max=1.0, a_min=0.0)
return self.refine(output, img)
def refine(self, pred, img):
guided_filter_radius, guided_filter_eps = 20, 0.01
refined = guided_filter(img[:,:,2], pred[:,:,0], guided_filter_radius, guided_filter_eps)
res = np.clip(refined, a_min=0, a_max=1)
# Convert res to CV_8UC1
res = np.array(res * 255., dtype=np.uint8)
# Thresholding
res = cv2.threshold(res, 127, 255, cv2.THRESH_BINARY_INV)[1]
return res
def run_img(self, img_path, dest):
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if img is None:
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.array(img / 255., dtype=np.float32)
mask = self.get_mask(img)
img_name = os.path.basename(img_path)
fpath = os.path.join(dest, img_name)
fname, _ = os.path.splitext(fpath)
mask_name = fname + '_mask.png'
cv2.imwrite(mask_name, mask)
return mask_name
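A minimal sketch of how the dataset stage later in this commit drives this class (paths are placeholders):

from opendm.ai import get_model
from opendm.skyremoval.skyfilter import SkyFilter

model = get_model("skyremoval",
                  "https://github.com/OpenDroneMap/SkyRemoval/releases/download/v1.0.5/model.zip",
                  "v1.0.5")
if model is not None:
    sf = SkyFilter(model=model)
    mask = sf.run_img("/data/images/IMG_0001.JPG", "/data/images")
    # mask is e.g. /data/images/IMG_0001_mask.png, or None if the image can't be read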

View file

@ -1,6 +1,7 @@
from opendm import log
from opendm.thermal_tools import dji_unpack
import cv2
import os
def resize_to_match(image, match_photo = None):
"""
@ -39,6 +40,16 @@ def dn_to_temperature(photo, image, dataset_tree):
image -= (273.15 * 100.0) # Convert Kelvin to Celsius
image *= 0.01
return image
elif photo.camera_make == "DJI" and photo.camera_model == "ZH20T":
filename, file_extension = os.path.splitext(photo.filename)
# DJI H20T high gain mode supports measurement of -40 to 150 degrees Celsius
if file_extension.lower() in [".tif", ".tiff"] and image.min() >= 23315: # Calibrated grayscale tif
image = image.astype("float32")
image -= (273.15 * 100.0) # Convert Kelvin to Celsius
image *= 0.01
return image
else:
return image
elif photo.camera_make == "DJI" and photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
image = dji_unpack.extract_temperatures_dji(photo, image, dataset_tree)
image = image.astype("float32")

View file

@ -26,6 +26,7 @@ class ODM_Reconstruction(object):
self.georef = None
self.gcp = None
self.multi_camera = self.detect_multi_camera()
self.filter_photos()
def detect_multi_camera(self):
"""
@ -64,11 +65,41 @@ class ODM_Reconstruction(object):
return None
def filter_photos(self):
if not self.multi_camera:
return # Nothing to do, use all images
else:
# Sometimes people might try to process both RGB + Blue/Red/Green bands
# because these are the contents of the SD card from a drone (e.g. DJI P4 Multispectral),
# but we don't want to process both, so we discard the RGB files in favor of the individual bands
bands = {}
for b in self.multi_camera:
bands[b['name'].lower()] = b['name']
if ('rgb' in bands or 'redgreenblue' in bands) and \
('red' in bands and 'green' in bands and 'blue' in bands):
band_to_remove = bands['rgb'] if 'rgb' in bands else bands['redgreenblue']
self.multi_camera = [b for b in self.multi_camera if b['name'] != band_to_remove]
photos_before = len(self.photos)
self.photos = [p for p in self.photos if p.band_name != band_to_remove]
photos_after = len(self.photos)
log.ODM_WARNING("RGB images detected alongside individual Red/Green/Blue images, we will use individual bands (skipping %s images)" % (photos_before - photos_after))
def is_georeferenced(self):
return self.georef is not None
def has_gcp(self):
return self.is_georeferenced() and self.gcp is not None and self.gcp.exists()
def has_geotagged_photos(self):
for photo in self.photos:
if photo.latitude is None and photo.longitude is None:
return False
return True
def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, output_model_txt_geo, rerun=False):
if not io.file_exists(output_coords_file) or not io.file_exists(output_gcp_file) or rerun:

View file

@ -11,6 +11,9 @@ WORKDIR /code
# Copy everything
COPY . ./
# Use old-releases for 21.04
RUN printf "deb http://old-releases.ubuntu.com/ubuntu/ hirsute main restricted\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates main restricted\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute universe\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates universe\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute multiverse\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates multiverse\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-backports main restricted universe multiverse" > /etc/apt/sources.list
# Run the build
RUN PORTABLE_INSTALL=YES bash configure.sh install
@ -36,6 +39,9 @@ COPY --from=builder /code /code
# Copy the Python libraries installed via pip from the builder
COPY --from=builder /usr/local /usr/local
# Use old-releases for 21.04
RUN printf "deb http://old-releases.ubuntu.com/ubuntu/ hirsute main restricted\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates main restricted\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute universe\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates universe\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute multiverse\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-updates multiverse\ndeb http://old-releases.ubuntu.com/ubuntu/ hirsute-backports main restricted universe multiverse" > /etc/apt/sources.list
# Install shared libraries that we depend on via APT, but *not*
# the -dev packages to save space!
RUN bash configure.sh installruntimedepsonly \

View file

@ -3,7 +3,7 @@ attrs==20.3.0
beautifulsoup4==4.9.3
cloudpickle==1.6.0
edt==2.0.2
ODMExifRead==3.0.3
ODMExifRead==3.0.4
Fiona==1.8.17 ; sys_platform == 'linux' or sys_platform == 'darwin'
https://github.com/OpenDroneMap/windows-deps/raw/main/Fiona-1.8.19-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
joblib==1.1.0
@ -11,7 +11,7 @@ laspy==1.7.0
lxml==4.6.1
matplotlib==3.3.3
networkx==2.5
numpy==1.21.1
numpy==1.23.1
Pillow==8.3.2
vmem==1.0.1
pyodm==1.5.8
@ -29,3 +29,5 @@ scipy==1.8.1
xmltodict==0.12.0
fpdf2==2.4.6
Shapely==1.7.1
onnx==1.12.0
onnxruntime==1.11.1

View file

@ -37,7 +37,6 @@ parts:
- gdal-bin
- gfortran # to build scipy
- git
- libboost-log-dev
- libgdal-dev
- libgeotiff-dev
- libjsoncpp-dev
@ -54,7 +53,6 @@ parts:
- swig3.0
stage-packages:
- gdal-bin
- libboost-log1.71.0
- libgdal26
- libgeotiff5
- libjsoncpp1
@ -114,33 +112,23 @@ parts:
build-packages:
- libcgal-dev
- libboost-program-options-dev
- libboost-iostreams-dev
- libboost-serialization-dev
- libboost-system-dev
stage-packages:
- libboost-program-options1.71.0
- libboost-iostreams1.71.0
- libboost-serialization1.71.0
- libboost-system1.71.0
opensfm:
source: .
plugin: nil
build-packages:
- libboost-date-time-dev
- libboost-filesystem-dev
- libboost-iostreams-dev
- libboost-python-dev
- libboost-regex-dev
- libboost-serialization-dev
- libboost-system-dev
- libboost-thread-dev
- libgoogle-glog-dev
- libsuitesparse-dev
stage-packages:
- libamd2
- libboost-date-time1.71.0
- libboost-filesystem1.71.0
- libboost-iostreams1.71.0
- libboost-python1.71.0
- libboost-regex1.71.0
- libboost-serialization1.71.0
- libboost-system1.71.0
- libboost-thread1.71.0
- libcamd2
- libccolamd2
- libcholmod3

View file

@ -37,7 +37,6 @@ parts:
- gdal-bin
- gfortran # to build scipy
- git
- libboost-log-dev
- libgdal-dev
- libgeotiff-dev
- libjsoncpp-dev
@ -54,7 +53,6 @@ parts:
- swig3.0
stage-packages:
- gdal-bin
- libboost-log1.74.0
- libgdal28
- libgeotiff5
- libjsoncpp24
@ -114,33 +112,23 @@ parts:
build-packages:
- libcgal-dev
- libboost-program-options-dev
- libboost-iostreams-dev
- libboost-serialization-dev
- libboost-system-dev
stage-packages:
- libboost-program-options1.74.0
- libboost-iostreams1.74.0
- libboost-serialization1.74.0
- libboost-system1.74.0
opensfm:
source: .
plugin: nil
build-packages:
- libboost-date-time-dev
- libboost-filesystem-dev
- libboost-iostreams-dev
- libboost-python-dev
- libboost-regex-dev
- libboost-serialization-dev
- libboost-system-dev
- libboost-thread-dev
- libgoogle-glog-dev
- libsuitesparse-dev
stage-packages:
- libamd2
- libboost-date-time1.74.0
- libboost-filesystem1.74.0
- libboost-iostreams1.74.0
- libboost-python1.74.0
- libboost-regex1.74.0
- libboost-serialization1.74.0
- libboost-system1.74.0
- libboost-thread1.74.0
- libcamd2
- libccolamd2
- libcholmod3

View file

@ -11,6 +11,9 @@ from opendm.geo import GeoFile
from shutil import copyfile
from opendm import progress
from opendm import boundary
from opendm import ai
from opendm.skyremoval.skyfilter import SkyFilter
from opendm.concurrency import parallel_map
def save_images_database(photos, database_file):
with open(database_file, 'w') as f:
@ -113,7 +116,7 @@ class ODMLoadDatasetStage(types.ODM_Stage):
try:
p = types.ODM_Photo(f)
p.set_mask(find_mask(f, masks))
photos += [p]
photos.append(p)
dataset_list.write(photos[-1].filename + '\n')
except PhotoCorruptedException:
log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f))
@ -145,6 +148,49 @@ class ODMLoadDatasetStage(types.ODM_Stage):
for p in photos:
p.override_camera_projection(args.camera_lens)
# Automatic sky removal
if args.sky_removal:
# For each image that:
# - Doesn't already have a mask, AND
# - Is not nadir (or whose orientation info is missing), AND
# - Has no spaces in its filename (an OpenSfM requirement),
# automatically generate a sky mask
# Generate list of sky images
sky_images = []
for p in photos:
if p.mask is None and (p.pitch is None or (abs(p.pitch) > 20)) and (not " " in p.filename):
sky_images.append({'file': os.path.join(images_dir, p.filename), 'p': p})
if len(sky_images) > 0:
log.ODM_INFO("Automatically generating sky masks for %s images" % len(sky_images))
model = ai.get_model("skyremoval", "https://github.com/OpenDroneMap/SkyRemoval/releases/download/v1.0.5/model.zip", "v1.0.5")
if model is not None:
sf = SkyFilter(model=model)
def parallel_sky_filter(item):
try:
mask_file = sf.run_img(item['file'], images_dir)
# Check and set
if mask_file is not None and os.path.isfile(mask_file):
item['p'].set_mask(os.path.basename(mask_file))
log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
else:
log.ODM_WARNING("Cannot generate mask for %s" % img)
except Exception as e:
log.ODM_WARNING("Cannot generate mask for %s: %s" % (img, str(e)))
parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency)
log.ODM_INFO("Sky masks generation completed!")
else:
log.ODM_WARNING("Cannot load AI model (you might need to be connected to the internet?)")
else:
log.ODM_INFO("No sky masks will be generated (masks already provided, or images are nadir)")
# End sky removal
# Save image database for faster restart
save_images_database(photos, images_database_file)
else:

View file

@ -27,7 +27,7 @@ class ODMMvsTexStage(types.ODM_Stage):
subdir = ""
if not primary and band is not None:
subdir = band
if not args.skip_3dmodel and (primary or args.use_3dmesh):
nonloc.runs += [{
'out_dir': os.path.join(tree.odm_texturing, subdir),
@ -79,11 +79,11 @@ class ODMMvsTexStage(types.ODM_Stage):
keepUnseenFaces = ""
nadir = ""
if (self.params.get('skip_glob_seam_leveling')):
if args.texturing_skip_global_seam_leveling:
skipGlobalSeamLeveling = "--skip_global_seam_leveling"
if (self.params.get('skip_loc_seam_leveling')):
if args.texturing_skip_local_seam_leveling:
skipLocalSeamLeveling = "--skip_local_seam_leveling"
if (self.params.get('keep_unseen_faces')):
if args.texturing_keep_unseen_faces:
keepUnseenFaces = "--keep_unseen_faces"
if (r['nadir']):
nadir = '--nadir_mode'
@ -93,12 +93,12 @@ class ODMMvsTexStage(types.ODM_Stage):
'bin': context.mvstex_path,
'out_dir': os.path.join(r['out_dir'], "odm_textured_model_geo"),
'model': r['model'],
'dataTerm': self.params.get('data_term'),
'outlierRemovalType': self.params.get('outlier_rem_type'),
'dataTerm': args.texturing_data_term,
'outlierRemovalType': args.texturing_outlier_removal_type,
'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
'skipLocalSeamLeveling': skipLocalSeamLeveling,
'keepUnseenFaces': keepUnseenFaces,
'toneMapping': self.params.get('tone_mapping'),
'toneMapping': args.texturing_tone_mapping,
'nadirMode': nadir,
'maxTextureSize': '--max_texture_size=%s' % max_texture_size,
'nvm_file': r['nvm_file'],

View file

@ -49,13 +49,7 @@ class ODMApp:
point_weight=4.0,
max_concurrency=args.max_concurrency,
verbose=args.verbose)
texturing = ODMMvsTexStage('mvs_texturing', args, progress=70.0,
data_term=args.texturing_data_term,
outlier_rem_type=args.texturing_outlier_removal_type,
skip_glob_seam_leveling=args.texturing_skip_global_seam_leveling,
skip_loc_seam_leveling=args.texturing_skip_local_seam_leveling,
keep_unseen_faces=args.texturing_keep_unseen_faces,
tone_mapping=args.texturing_tone_mapping)
texturing = ODMMvsTexStage('mvs_texturing', args, progress=70.0)
georeferencing = ODMGeoreferencingStage('odm_georeferencing', args, progress=80.0,
gcp_file=args.gcp,
verbose=args.verbose)

View file

@ -16,6 +16,8 @@ class ODMFilterPoints(types.ODM_Stage):
if not os.path.exists(tree.odm_filterpoints): system.mkdir_p(tree.odm_filterpoints)
inputPointCloud = ""
# check if reconstruction was done before
if not io.file_exists(tree.filtered_point_cloud) or self.rerun():
if args.fast_orthophoto:
@ -27,10 +29,21 @@ class ODMFilterPoints(types.ODM_Stage):
if args.auto_boundary:
if reconstruction.is_georeferenced():
if not 'boundary' in outputs:
avg_gsd = gsd.opensfm_reconstruction_average_gsd(tree.opensfm_reconstruction)
outputs['boundary'] = compute_boundary_from_shots(tree.opensfm_reconstruction, avg_gsd * 20, reconstruction.get_proj_offset()) # 20 is arbitrary
if outputs['boundary'] is None:
log.ODM_WARNING("Cannot compute boundary from camera shots")
boundary_distance = None
if args.auto_boundary_distance > 0:
boundary_distance = args.auto_boundary_distance
else:
avg_gsd = gsd.opensfm_reconstruction_average_gsd(tree.opensfm_reconstruction)
if avg_gsd is not None:
boundary_distance = avg_gsd * 20 # 20 is arbitrary
if boundary_distance is not None:
outputs['boundary'] = compute_boundary_from_shots(tree.opensfm_reconstruction, boundary_distance, reconstruction.get_proj_offset())
if outputs['boundary'] is None:
log.ODM_WARNING("Cannot compute boundary from camera shots")
else:
log.ODM_WARNING("Cannot compute boundary (GSD cannot be estimated)")
else:
log.ODM_WARNING("--auto-boundary set but so is --boundary, will use --boundary")
else:

View file

@ -42,6 +42,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
'corners': tree.odm_orthophoto_corners,
'res': resolution,
'bands': '',
'depth_idx': '',
'verbose': verbose
}
@ -62,6 +63,21 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
subdir = band['name'].lower()
models.append(os.path.join(base_dir, subdir, model_file))
kwargs['bands'] = '-bands %s' % (','.join([double_quote(b['name']) for b in reconstruction.multi_camera]))
# If an RGB band is present,
# use the bit depth of the first non-RGB band
depth_idx = None
all_bands = [b['name'].lower() for b in reconstruction.multi_camera]
for b in ['rgb', 'redgreenblue']:
if b in all_bands:
for idx in range(len(all_bands)):
if all_bands[idx] != b:
depth_idx = idx
break
break
if depth_idx is not None:
kwargs['depth_idx'] = '-outputDepthIdx %s' % depth_idx
else:
models.append(os.path.join(base_dir, model_file))
@ -70,7 +86,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
# run odm_orthophoto
system.run('"{odm_ortho_bin}" -inputFiles {models} '
'-logFile "{log}" -outputFile "{ortho}" -resolution {res} {verbose} '
'-outputCornerFile "{corners}" {bands}'.format(**kwargs))
'-outputCornerFile "{corners}" {bands} {depth_idx}'.format(**kwargs))
# Create georeferenced GeoTiff
geotiffcreated = False

View file

@ -26,8 +26,19 @@ class ODMSplitStage(types.ODM_Stage):
tree = outputs['tree']
reconstruction = outputs['reconstruction']
photos = reconstruction.photos
outputs['large'] = False
outputs['large'] = len(photos) > args.split
should_split = len(photos) > args.split
if should_split:
# check for availability of either image_groups.txt (split-merge) or geotagged photos
image_groups_file = os.path.join(args.project_path, "image_groups.txt")
if 'split_image_groups_is_set' in args:
image_groups_file = os.path.abspath(args.split_image_groups)
if io.file_exists(image_groups_file) or reconstruction.has_geotagged_photos():
outputs['large'] = True
else:
log.ODM_WARNING('Could not perform split-merge as GPS information in photos or image_groups.txt is missing.')
if outputs['large']:
# If we have a cluster address, we'll use a distributed workflow
@ -113,9 +124,6 @@ class ODMSplitStage(types.ODM_Stage):
self.update_progress(50)
mds = metadataset.MetaDataSet(tree.opensfm)
submodel_paths = [os.path.abspath(p) for p in mds.get_submodel_paths()]
# Align
octx.align_reconstructions(self.rerun())
@ -317,4 +325,4 @@ class ODMMergeStage(types.ODM_Stage):
log.ODM_INFO("Normal dataset, nothing to merge.")
self.progress = 0.0

View file

@ -18,7 +18,7 @@ class TestCamera(unittest.TestCase):
camera_id = list(c.keys())[0]
self.assertTrue('v2 ' not in camera_id)
self.assertRaises(RuntimeError, camera.get_cameras_from_opensfm, 'tests/assets/nonexistant.json')
self.assertRaises(RuntimeError, camera.get_cameras_from_opensfm, 'tests/assets/nonexistent.json')
self.assertRaises(ValueError, camera.get_cameras_from_opensfm, 'tests/assets/gcp_extras.txt')
self.assertFalse('k1_prior' in c[camera_id])

View file

@ -10,6 +10,8 @@ if defined _OLD_CODEPAGE (
set ODMBASE=%~dp0
set GDALBASE=%ODMBASE%venv\Lib\site-packages\osgeo
set GDAL_DATA=%GDALBASE%\data\gdal
set GDAL_DRIVER_PATH=%GDALBASE%\gdalplugins
set OSFMBASE=%ODMBASE%SuperBuild\install\bin\opensfm\bin
set SBBIN=%ODMBASE%SuperBuild\install\bin