Merge branch 'OpenDroneMap:master' into master

pull/1435/head
Stephen Mather 2022-03-11 16:53:17 -05:00 committed by GitHub
commit 92299bce23
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
92 changed files with 3089 additions and 581 deletions


@ -9,7 +9,8 @@ on:
jobs:
build:
runs-on: ubuntu-latest
runs-on: self-hosted
timeout-minutes: 2880
steps:
- name: Checkout
uses: actions/checkout@v2
@ -21,6 +22,10 @@ jobs:
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
config-inline: |
[worker.oci]
max-parallelism = 1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
@ -39,7 +44,7 @@ jobs:
uses: docker/build-push-action@v2
with:
file: ./portable.Dockerfile
platforms: linux/amd64
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ steps.docker_meta.outputs.tags }}
@ -85,4 +90,8 @@ jobs:
run: |
echo "Docker image digest: ${{ steps.docker_build.outputs.digest }}"
echo "WSL AMD64 rootfs URL: ${{ steps.upload-amd64-wsl-rootfs.browser_download_url }}"
# Trigger NodeODM build
- name: Dispatch NodeODM Build Event
id: nodeodm_dispatch
run: |
curl -X POST -u "${{secrets.PAT_USERNAME}}:${{secrets.PAT_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/OpenDroneMap/NodeODM/actions/workflows/publish-docker.yaml/dispatches --data '{"ref": "master"}'
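# Note: the curl above calls GitHub's workflow dispatch API for OpenDroneMap/NodeODM
# (authenticated with the PAT_USERNAME/PAT_TOKEN repository secrets), so a fresh
# NodeODM image gets rebuilt on top of the ODM image just published.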


@ -34,3 +34,8 @@ jobs:
platforms: linux/amd64
push: true
tags: opendronemap/odm:gpu
# Trigger NodeODM build
- name: Dispatch NodeODM Build Event
id: nodeodm_dispatch
run: |
curl -X POST -u "${{secrets.PAT_USERNAME}}:${{secrets.PAT_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/OpenDroneMap/NodeODM/actions/workflows/publish-docker-gpu.yaml/dispatches --data '{"ref": "master"}'


@ -9,7 +9,7 @@ on:
jobs:
build:
runs-on: windows-latest
runs-on: windows-2019
steps:
- name: Checkout
uses: actions/checkout@v2
@ -18,10 +18,10 @@ jobs:
with:
python-version: '3.8.1'
architecture: 'x64'
- name: Setup Visual C++
uses: ilammy/msvc-dev-cmd@v1
- uses: Jimver/cuda-toolkit@v0.2.4
id: cuda-toolkit
with:
arch: x64
cuda: '11.4.0'
- name: Extract code signing cert
id: code_sign
uses: timheuer/base64-to-file@v1
@ -38,7 +38,7 @@ jobs:
env:
CODE_SIGN_CERT_PATH: ${{ steps.code_sign.outputs.filePath }}
run: |
python configure.py dist --signtool-path $((Get-Command signtool).Source) --code-sign-cert-path $env:CODE_SIGN_CERT_PATH
python configure.py dist --code-sign-cert-path $env:CODE_SIGN_CERT_PATH
- name: Upload Setup File
uses: actions/upload-artifact@v2
with:


@ -49,7 +49,7 @@ jobs:
isClassic: 'false'
windows:
runs-on: windows-latest
runs-on: windows-2019
steps:
- name: Checkout
uses: actions/checkout@v2


@ -1,18 +0,0 @@
cmake_minimum_required(VERSION 2.8)
project(OpenDroneMap C CXX)
# TODO(edgar): add option in order to point to CMAKE_PREFIX_PATH
# if we want to build SuperBuild in an external directory.
# It is assumed that SuperBuild have been compiled.
# Set third party libs location
set(CMAKE_PREFIX_PATH "${CMAKE_CURRENT_SOURCE_DIR}/SuperBuild/install")
# move binaries to the same bin directory
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
option(ODM_BUILD_SLAM "Build SLAM module" OFF)
# Add ODM sub-modules
add_subdirectory(modules)


@ -1,8 +1,8 @@
FROM ubuntu:20.04 AS builder
FROM ubuntu:21.04 AS builder
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/src/opensfm" \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
# Prepare directories
@ -21,11 +21,11 @@ RUN bash configure.sh clean
### Use a second image for the final asset to reduce the number and
# size of the layers.
FROM ubuntu:20.04
FROM ubuntu:21.04
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/src/opensfm" \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
WORKDIR /code
@ -38,9 +38,11 @@ COPY --from=builder /usr/local /usr/local
# Install shared libraries that we depend on via APT, but *not*
# the -dev packages to save space!
# Also run a smoke test on ODM and OpenSfM
RUN bash configure.sh installruntimedepsonly \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
&& bash run.sh --help \
&& bash -c "eval $(python3 /code/opendm/context.py) && python3 -c 'from opensfm import io, pymap'"
# Entry point
ENTRYPOINT ["python3", "/code/run.py"]


@ -25,7 +25,7 @@ docker run -ti --rm -v c:/Users/youruser/datasets:/datasets opendronemap/odm --p
docker run -ti --rm -v /home/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
```
You can pass [additional parameters](https://docs.opendronemap.org/arguments.html) by appending them to the command:
You can pass [additional parameters](https://docs.opendronemap.org/arguments/) by appending them to the command:
```bash
docker run -ti --rm -v /datasets:/datasets opendronemap/odm --project-path /datasets project [--additional --parameters --here]
@ -264,7 +264,13 @@ If you have questions, join the developer's chat at https://community.opendronem
2. Submit a pull request with detailed changes and test results
3. Have fun!
### Credits
### Troubleshooting
The dev environment makes use of `opendronemap/nodeodm` by default. You may want to run
`docker pull opendronemap/nodeodm` before running `./start-dev-env.sh` to avoid using an old cached version.
In order to make a clean build, remove `~/.odm-dev-home` and `ODM/.setupdevenv`.
## Credits
ODM makes use of [several libraries](https://github.com/OpenDroneMap/ODM/blob/master/snap/snapcraft.yaml#L36) and other awesome open source projects to perform its tasks. Among them we'd like to highlight:
@ -278,6 +284,6 @@ ODM makes use of [several libraries](https://github.com/OpenDroneMap/ODM/blob/ma
- [PoissonRecon](https://github.com/mkazhdan/PoissonRecon)
### Citation
## Citation
> *OpenDroneMap Authors* ODM - A command line toolkit to generate maps, point clouds, 3D models and DEMs from drone, balloon or kite images. **OpenDroneMap/ODM GitHub Page** 2020; [https://github.com/OpenDroneMap/ODM](https://github.com/OpenDroneMap/ODM)


@ -70,6 +70,10 @@ if (WIN32)
file(GLOB COPY_DLLS "${VCPKG_ROOT}/installed/x64-windows/bin/*.dll")
file(COPY ${COPY_DLLS} DESTINATION "${SB_INSTALL_DIR}/bin")
message("Copying CUDA DLLs...")
file(GLOB CUDA_DLLS "$ENV{CUDA_PATH}/bin/cudart64*.dll")
file(COPY ${CUDA_DLLS} DESTINATION "${SB_INSTALL_DIR}/bin")
set(WIN32_GDAL_ARGS -DGDAL_FOUND=TRUE -DGDAL_LIBRARY=${GDAL_LIBRARY} -DGDAL_INCLUDE_DIR=${GDAL_INCLUDE_DIR})
else()
set(PYTHON_EXE_PATH "/usr/bin/python3")
@ -136,6 +140,8 @@ set(custom_libs OpenSfM
Untwine
MvsTexturing
OpenMVS
FPCFilter
PyPopsift
)
# Build entwine only on Linux
@ -145,7 +151,7 @@ endif()
externalproject_add(mve
GIT_REPOSITORY https://github.com/OpenDroneMap/mve.git
GIT_TAG 250
GIT_TAG 262
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/mve
CMAKE_ARGS ${WIN32_CMAKE_ARGS}
@ -161,7 +167,7 @@ include(ProcessorCount)
ProcessorCount(nproc)
if (WIN32)
set (POISSON_BUILD_CMD ${CMAKE_MAKE_PROGRAM} ${SB_SOURCE_DIR}/PoissonRecon/PoissonRecon.vcxproj /p:configuration=${CMAKE_BUILD_TYPE} /p:PlatformToolset=${CMAKE_VS_PLATFORM_TOOLSET} /p:WindowsTargetPlatformVersion=${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION})
set (POISSON_BUILD_CMD ${CMAKE_MAKE_PROGRAM} ${SB_SOURCE_DIR}/PoissonRecon/PoissonRecon.vcxproj /p:configuration=${CMAKE_BUILD_TYPE} /p:Platform=x64 /p:PlatformToolset=${CMAKE_VS_PLATFORM_TOOLSET} /p:WindowsTargetPlatformVersion=${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION})
set (POISSON_BIN_PATH "x64/${CMAKE_BUILD_TYPE}/PoissonRecon.exe")
else()
set (POISSON_BUILD_CMD make -j${nproc} poissonrecon)
@ -169,7 +175,7 @@ else()
endif()
externalproject_add(poissonrecon
GIT_REPOSITORY https://github.com/OpenDroneMap/PoissonRecon.git
GIT_TAG 257
GIT_TAG 272
PREFIX ${SB_BINARY_DIR}/PoissonRecon
SOURCE_DIR ${SB_SOURCE_DIR}/PoissonRecon
UPDATE_COMMAND ""


@ -23,7 +23,7 @@ ExternalProject_Add(${_proj_name}
-DADDITIONAL_LINK_DIRECTORIES_PATHS=${SB_INSTALL_DIR}/lib
-DWITH_TESTS=OFF
-DWITH_ZSTD=OFF
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}


@ -0,0 +1,27 @@
set(_proj_name fpcfilter)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/FPCFilter
GIT_TAG main
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)


@ -15,7 +15,7 @@ ExternalProject_Add(${_proj_name}
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}


@ -9,14 +9,14 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}/${_proj_name}
GIT_REPOSITORY https://github.com/OpenDroneMap/mvs-texturing
GIT_TAG 250
GIT_TAG 280
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DRESEARCH=OFF
-DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_CMAKE_ARGS}
#--Build step-----------------


@ -57,7 +57,7 @@ ExternalProject_Add(${_proj_name}
-DBUILD_opencv_ts=OFF
-DBUILD_opencv_xfeatures2d=ON
-DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t
-DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_CMAKE_ARGS}
${WIN32_CMAKE_EXTRA_ARGS}


@ -2,8 +2,8 @@ set(_proj_name openmvs)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
externalproject_add(vcg
GIT_REPOSITORY https://github.com/cdcseacave/VCG.git
GIT_TAG master
GIT_REPOSITORY https://github.com/OpenDroneMap/VCG.git
GIT_TAG 280
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/vcg
CONFIGURE_COMMAND ""
@ -12,15 +12,47 @@ externalproject_add(vcg
INSTALL_COMMAND ""
)
externalproject_add(eigen34
GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git
GIT_TAG 3.4
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/eigen34
CONFIGURE_COMMAND ""
BUILD_IN_SOURCE 1
BUILD_COMMAND ""
INSTALL_COMMAND ""
)
SET(ARM64_CMAKE_ARGS "")
if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64" )
SET(ARM64_CMAKE_ARGS -DOpenMVS_USE_SSE=OFF)
endif()
SET(GPU_CMAKE_ARGS "")
if(UNIX)
if (EXISTS "/usr/local/cuda/lib64/stubs")
SET(GPU_CMAKE_ARGS -DCMAKE_LIBRARY_PATH=/usr/local/cuda/lib64/stubs)
endif()
endif()
if(WIN32)
# On Windows systems without NVIDIA GPUs, OpenMVS will not launch
# unless a CUDA DLL is available; we download a dummy DLL
# generated with https://github.com/ykhwong/dummy-dll-generator that is
# loaded UNLESS the real CUDA DLL is available, since it will
# be loaded before our dummy DLL.
file(DOWNLOAD "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/nvcuda_dummy.dll" "${SB_INSTALL_DIR}/bin/nvcuda.dll")
endif()
ExternalProject_Add(${_proj_name}
DEPENDS ceres opencv vcg
DEPENDS ceres opencv vcg eigen34
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS
GIT_TAG 256
GIT_TAG 270
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
@ -28,9 +60,13 @@ ExternalProject_Add(${_proj_name}
CMAKE_ARGS
-DOpenCV_DIR=${SB_INSTALL_DIR}/lib/cmake/opencv4
-DVCG_ROOT=${SB_SOURCE_DIR}/vcg
-DCMAKE_BUILD_TYPE=Release
-DEIGEN3_INCLUDE_DIR=${SB_SOURCE_DIR}/eigen34/
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX=${SB_INSTALL_DIR}
-DOpenMVS_MAX_CUDA_COMPATIBILITY=ON
${GPU_CMAKE_ARGS}
${WIN32_CMAKE_ARGS}
${ARM64_CMAKE_ARGS}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------


@ -19,7 +19,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
GIT_TAG 257
GIT_TAG 280
#--Update/Patch step----------
UPDATE_COMMAND git submodule update --init --recursive
#--Configure step-------------


@ -39,7 +39,7 @@ ExternalProject_Add(${_proj_name}
-DWITH_LIBUSB=OFF
-DWITH_PCAP=OFF
-DWITH_PXCAPI=OFF
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DPCL_VERBOSITY_LEVEL=Error
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
-DPCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32=ON


@ -45,7 +45,7 @@ ExternalProject_Add(${_proj_name}
-DLASZIP_INCLUDE_DIR=${SB_INSTALL_DIR}/include
-DLASZIP_LIBRARY=${LASZIP_LIB}
-DWITH_TESTS=OFF
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_CMAKE_ARGS}
${WIN32_GDAL_ARGS}


@ -0,0 +1,36 @@
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/pypopsift")
# Pypopsift
find_package(CUDA 7.0)
if(CUDA_FOUND)
ExternalProject_Add(pypopsift
DEPENDS opensfm
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/pypopsift
GIT_TAG 271
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/pypopsift
CMAKE_ARGS
-DOUTPUT_DIR=${SB_INSTALL_DIR}/bin/opensfm/opensfm
-DCMAKE_INSTALL_PREFIX=${SB_INSTALL_DIR}
${WIN32_CMAKE_ARGS}
${ARM64_CMAKE_ARGS}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)
else()
message(WARNING "Could not find CUDA >= 7.0")
endif()


@ -16,7 +16,7 @@ ExternalProject_Add(${_proj_name}
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DPDAL_DIR=${SB_INSTALL_DIR}/lib/cmake/PDAL
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}


@ -1 +1 @@
2.5.7
2.8.1


@ -3,7 +3,7 @@ if sys.platform != 'win32':
print("This script is for Windows only! Use configure.sh instead.")
exit(1)
if sys.version_info.major != 3 or sys.version_info.minor != 8:
print("You neeed to use Python 3.8.x (due to the requirements.txt). You are using %s instead." % platform.python_version())
print("You need to use Python 3.8.x (due to the requirements.txt). You are using %s instead." % platform.python_version())
exit(1)
import argparse
@ -34,11 +34,6 @@ parser.add_argument('--code-sign-cert-path',
default='',
required=False,
help='Path to pfx code signing certificate')
parser.add_argument('--signtool-path',
type=str,
default='',
required=False,
help='Path to signtool.exe')
args = parser.parse_args()
@ -171,6 +166,14 @@ def dist():
with zipfile.ZipFile(pythonzip_path) as z:
z.extractall("python38")
# Download signtool
signtool_path = os.path.join("SuperBuild", "download", "signtool.exe")
signtool_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/signtool.exe"
if not os.path.exists(signtool_path):
print("Downloading %s" % signtool_url)
with urllib.request.urlopen(signtool_url) as response, open(signtool_path, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
# Download innosetup
if not os.path.isdir("innosetup"):
innosetupzip_path = os.path.join("SuperBuild", "download", "innosetup.zip")
@ -188,8 +191,8 @@ def dist():
# Run
cs_flags = ""
if args.code_sign_cert_path and args.signtool_path:
cs_flags = '"/Ssigntool=%s sign /f %s /t http://timestamp.sectigo.com $f"' % (args.signtool_path, args.code_sign_cert_path)
if args.code_sign_cert_path:
cs_flags = '"/Ssigntool=%s sign /f %s /fd SHA1 /t http://timestamp.sectigo.com $f"' % (signtool_path, args.code_sign_cert_path)
run("innosetup\\iscc /Qp " + cs_flags + " \"innosetup.iss\"")
print("Done! Setup created in dist/")


@ -3,21 +3,21 @@
# Ensure the DEBIAN_FRONTEND environment variable is set for apt-get calls
APT_GET="env DEBIAN_FRONTEND=noninteractive $(command -v apt-get)"
check_version(){
check_version(){
UBUNTU_VERSION=$(lsb_release -r)
case "$UBUNTU_VERSION" in
*"20.04"*)
*"20.04"*|*"21.04"*)
echo "Ubuntu: $UBUNTU_VERSION, good!"
;;
*"18.04"*|*"16.04"*)
echo "ODM 2.1 has upgraded to Ubuntu 20.04, but you're on $UBUNTU_VERSION"
echo "ODM 2.1 has upgraded to Ubuntu 21.04, but you're on $UBUNTU_VERSION"
echo "* The last version of ODM that supports Ubuntu 16.04 is v1.0.2."
echo "* The last version of ODM that supports Ubuntu 18.04 is v2.0.0."
echo "We recommend you to upgrade, or better yet, use docker."
exit 1
;;
*)
echo "You are not on Ubuntu 20.04 (detected: $UBUNTU_VERSION)"
echo "You are not on Ubuntu 21.04 (detected: $UBUNTU_VERSION)"
echo "It might be possible to run ODM on a newer version of Ubuntu, however, you cannot rely on this script."
exit 1
;;
@ -54,10 +54,13 @@ ensure_prereqs() {
echo "Installing tzdata"
sudo $APT_GET install -y -qq tzdata
echo "Enabling PPA for Ubuntu GIS"
sudo $APT_GET install -y -qq --no-install-recommends software-properties-common
sudo add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
sudo $APT_GET update
UBUNTU_VERSION=$(lsb_release -r)
if [[ "$UBUNTU_VERSION" == *"20.04"* ]]; then
echo "Enabling PPA for Ubuntu GIS"
sudo $APT_GET install -y -qq --no-install-recommends software-properties-common
sudo add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
sudo $APT_GET update
fi
echo "Installing Python PIP"
sudo $APT_GET install -y -qq --no-install-recommends \
@ -77,7 +80,13 @@ installdepsfromsnapcraft() {
*) key=build-packages; ;; # shouldn't be needed, but it's here just in case
esac
cat snap/snapcraft.yaml | \
UBUNTU_VERSION=$(lsb_release -r)
SNAPCRAFT_FILE="snapcraft.yaml"
if [[ "$UBUNTU_VERSION" == *"21.04"* ]]; then
SNAPCRAFT_FILE="snapcraft21.yaml"
fi
cat snap/$SNAPCRAFT_FILE | \
shyaml get-values-0 parts.$section.$key | \
xargs -0 sudo $APT_GET install -y -qq --no-install-recommends
}
@ -95,7 +104,6 @@ installruntimedepsonly() {
installdepsfromsnapcraft runtime opensfm
echo "Installing OpenMVS Dependencies"
installdepsfromsnapcraft runtime openmvs
}
installreqs() {
@ -118,11 +126,11 @@ installreqs() {
echo "Installing OpenMVS Dependencies"
installdepsfromsnapcraft build openmvs
set -e
pip install --ignore-installed -r requirements.txt
if [ ! -z "$GPU_INSTALL" ]; then
pip install --ignore-installed -r requirements.gpu.txt
fi
#if [ ! -z "$GPU_INSTALL" ]; then
#fi
set +e
}
install() {


@ -103,7 +103,7 @@ def writeMetadata(filename, format3d):
'--stereo='+format3d,
filename,
filename+'.injected'])
# check metadata injector was succesful
# check metadata injector was successful
if os.path.exists(filename+'.injected'):
os.remove(filename)
os.rename(filename+'.injected', filename)


@ -0,0 +1,29 @@
# Merge Preview
Quickly projects drone images on a map by using georeferencing, camera angles and a global DTM. The images are then merged using ODM's split-merge algorithms.
Quality is obviously limited; this works only for nadir images and requires gimbal/camera angle information (which not all drones provide).
Usage:
```
# Install DDB (required for geoprojection)
curl -fsSL https://get.dronedb.app -o get-ddb.sh
sh get-ddb.sh
# Run
python3 mergepreview.py -i images/*.JPG --size 25%
```
## Example
![screen](https://user-images.githubusercontent.com/1951843/134249725-e178489a-e271-4244-abed-e624cd510b88.png)
[Sheffield Park](https://community.opendronemap.org/t/sheffield-park-1/58) images processed with this script.
## Disclaimer
This script is highly experimental. We welcome contributions to improve it.


@ -0,0 +1,126 @@
import argparse
import sys
sys.path.append("../../")
import os
from opendm import orthophoto
from opendm.cutline import compute_cutline
import glob
from opendm.system import run
from opendm import log
import shutil
parser = argparse.ArgumentParser(description='Quick Merge Preview')
parser.add_argument('input',
metavar='<paths>',
nargs='+',
help='Path to input images or image folder')
parser.add_argument('--size', '-s',
metavar='<percentage>',
type=str,
help='Size in percentage terms',
default='25%')
parser.add_argument('--force', '-f',
action='store_true',
default=False,
help="Force remove existing directories")
args = parser.parse_args()
try:
log.ODM_INFO("Checking for DDB...")
run("ddb --version")
except:
log.ODM_ERROR("ddb is not installed. Install it first: https://docs.dronedb.app")
if len(args.input) == 1 and os.path.isdir(args.input[0]):
input_images = []
for ext in ["JPG", "JPEG", "TIF", "tiff", "tif", "TIFF"]:
input_images += glob.glob(os.path.join(args.input[0], "*.%s" % ext))
else:
input_images = args.input
log.ODM_INFO("Processing %s images" % len(input_images))
if len(input_images) == 0:
log.ODM_ERROR("No images")
exit(1)
cwd_path = os.path.dirname(input_images[0])
tmp_path = os.path.join(cwd_path, "tmp")
if os.path.isdir(tmp_path):
if args.force:
log.ODM_INFO("Removing previous directory %s" % tmp_path)
shutil.rmtree(tmp_path)
else:
log.ODM_ERROR("%s exists. Pass --force to override." % tmp_path)
exit(1)
os.makedirs(tmp_path)
for f in input_images:
name, _ = os.path.splitext(os.path.basename(f))
geojson = os.path.join(tmp_path, "%s.geojson" % name)
gpkg = os.path.join(tmp_path, "%s.gpkg" % name)
run("ddb geoproj \"%s\" \"%s\" -s \"%s\"" % (tmp_path, f, args.size))
# Bounds (GPKG)
run("ddb info --format geojson --geometry polygon \"%s\" > \"%s\"" % (f, geojson))
run("ogr2ogr \"%s\" \"%s\"" % (gpkg, geojson))
log.ODM_INFO("Computing cutlines")
projected_images = glob.glob(os.path.join(tmp_path, "*.tif"))
all_orthos_and_ortho_cuts = []
for f in projected_images:
name, _ = os.path.splitext(os.path.basename(f))
cutline_file = os.path.join(tmp_path, "%s_cutline.gpkg" % name)
bounds_file_path = os.path.join(tmp_path, "%s.gpkg" % name)
compute_cutline(f,
bounds_file_path,
cutline_file,
4,
scale=1)
cut_raster = os.path.join(tmp_path, "%s_cut.tif" % name)
orthophoto.compute_mask_raster(f, cutline_file,
cut_raster,
blend_distance=20, only_max_coords_feature=True)
feathered_raster = os.path.join(tmp_path, "%s_feathered.tif" % name)
orthophoto.feather_raster(f, feathered_raster,
blend_distance=20
)
all_orthos_and_ortho_cuts.append([feathered_raster, cut_raster])
log.ODM_INFO("Merging...")
if len(all_orthos_and_ortho_cuts) > 1:
# TODO: histogram matching via rasterio
# currently parts have different color tones
output_file = os.path.join(cwd_path, 'mergepreview.tif')
if os.path.isfile(output_file):
os.remove(output_file)
orthophoto.merge(all_orthos_and_ortho_cuts, output_file, {
'TILED': 'YES',
'COMPRESS': 'LZW',
'PREDICTOR': '2',
'BIGTIFF': 'IF_SAFER',
'BLOCKXSIZE': 512,
'BLOCKYSIZE': 512
})
log.ODM_INFO("Wrote %s" % output_file)
shutil.rmtree(tmp_path)
else:
log.ODM_ERROR("Error: no orthos found to merge")
exit(1)


@ -0,0 +1,112 @@
#!/usr/bin/env python3
# A script to calculate agricultural indices
# NDVI - Normalized Difference Vegetation Index - (NIR - RED)/(NIR + RED)
# NDRE - Normalized Difference Red Edge - (NIR - RE)/(NIR + RE)
# GNDVI - Green NDVI - (NIR - GREEN)/(NIR + GREEN)
# https://support.micasense.com/hc/en-us/articles/226531127-Creating-agricultural-indices-NDVI-NDRE-in-QGIS-
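# Worked example (hypothetical reflectance values): NIR = 0.8, RED = 0.2
# gives NDVI = (0.8 - 0.2) / (0.8 + 0.2) = 0.6; bare soil with NIR = 0.3,
# RED = 0.25 gives NDVI = 0.05 / 0.55 ≈ 0.09.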
# requires python-gdal
import numpy
import argparse
import os.path
try:
from osgeo import gdal
from osgeo import osr
except ImportError:
raise ImportError("You need to install python-gdal: \
run `sudo apt-get install libgdal-dev`, \
check the GDAL version with `gdal-config --version`, \
then install the corresponding GDAL version with pip: \
`pip3 install GDAL==2.4.0`")
def parse_args():
argument_parser = argparse.ArgumentParser('Create, from a multispectral orthophoto, \
a GeoTIFF with NDVI, NDRE and GNDVI agricultural indices')
argument_parser.add_argument("orthophoto", metavar="<orthophoto.tif>",
type=argparse.FileType('r'),
help="The CIR orthophoto. Must be a GeoTiff.")
argument_parser.add_argument("-red", type=int,
help="Red band number")
argument_parser.add_argument("-green", type=int,
help="Green band number")
argument_parser.add_argument("-blue", type=int,
help="Blue band number")
argument_parser.add_argument("-re", type=int,
help="RedEdge band number")
argument_parser.add_argument("-nir", type=int,
help="NIR band number")
argument_parser.add_argument("out", metavar="<outfile.tif>",
type=argparse.FileType('w'),
help="The output file.")
argument_parser.add_argument("--overwrite", "-o",
action='store_true',
default=False,
help="Will overwrite output file if it exists. ")
return argument_parser.parse_args()
if __name__ == "__main__":
# Suppress/hide warning when dividing by zero
numpy.seterr(divide='ignore', invalid='ignore')
rootdir = os.path.dirname(os.path.abspath(__file__))
# Parse args
args = parse_args()
if not args.overwrite and os.path.isfile(os.path.join(rootdir, args.out.name)):
print("File exists, rename or use -o to overwrite.")
exit()
# import raster
print("Reading file")
raster = gdal.Open(args.orthophoto.name)
orthophoto = raster.ReadAsArray()
# parse out bands
print("Reading rasters")
red_matrix=orthophoto[args.red-1].astype(float)
green_matrix=orthophoto[args.green-1].astype(float)
blue_matrix=orthophoto[args.blue-1].astype(float)
re_matrix=orthophoto[args.re-1].astype(float)
nir_matrix=orthophoto[args.nir-1].astype(float)
outfile = args.out
# NDVI
print("Computing NDVI")
#ndvi = calc_ndvi(nir_matrix, red_matrix)
ndvi = (nir_matrix.astype(float) - red_matrix.astype(float)) / (nir_matrix + red_matrix)
# NDRE
print("Computing NDRE")
#ndre = calc_ndre(nir_matrix, re_matrix)
ndre = (nir_matrix.astype(float) - re_matrix.astype(float)) / (nir_matrix + re_matrix)
# GNDVI
print("Computing GNDVI")
#gndvi = calc_gndvi(nir_matrix, green_matrix)
gndvi = (nir_matrix.astype(float) - green_matrix.astype(float)) / (nir_matrix + green_matrix)
print("Saving Files")
# export raster
for name, matrix in zip(['ndvi', 'ndre', 'gndvi' ] ,[ndvi,ndre,gndvi] ):
print(name)
out_driver = gdal.GetDriverByName('GTiff')\
.Create(name+'_'+outfile.name, int(ndvi.shape[1]), int(ndvi.shape[0]), 1, gdal.GDT_Float32)
outband = out_driver.GetRasterBand(1)
outband.SetDescription(name.capitalize())
outband.WriteArray(matrix)
outcrs = osr.SpatialReference()
outcrs.ImportFromWkt(raster.GetProjectionRef())
out_driver.SetProjection(outcrs.ExportToWkt())
out_driver.SetGeoTransform(raster.GetGeoTransform())
outband.FlushCache()


@ -0,0 +1,73 @@
#!/usr/bin/env python3
# A script to rename the bands of a Sentera AGX710 GeoTIFF in place.
# requires python-gdal
import argparse
import sys
try:
from osgeo import gdal
except ImportError:
raise ImportError("You need to install python-gdal: \
run `sudo apt-get install libgdal-dev`, \
check the GDAL version with `gdal-config --version`, \
then install the corresponding GDAL version with pip: \
`pip3 install GDAL==2.4.0`")
def parse_args():
""" Parse arguments """
argument_parser = argparse.ArgumentParser(
"A script that rename inplace Sentera AGX710 Geotiff orthophoto. ")
argument_parser.add_argument("orthophoto", metavar="<orthophoto.tif>",
type=argparse.FileType('r'),
help="The input orthophoto. Must be a GeoTiff.")
return argument_parser.parse_args()
def rename_sentera_agx710_layers(name):
""" Only rename Geotif built from Sentera AGX710 images with ODM """
if raster.RasterCount != 7:
raise ImportError(F'File {name} does not have 7 layers as a regular\
Geotif built from Sentera AGX710 images with ODM')
if 'RedGreenBlue' in raster.GetRasterBand(1).GetDescription() and \
'RedEdgeGarbageNIR' in raster.GetRasterBand(2).GetDescription():
print("Sentera AGX710 Geotiff file has been detected.\
Layers are name are :")
print("RedGreenBlue for Band 1\nRedEdgeGarbageNIR for Band 2\
\nNone for Band 3\nNone for Band 4\nNone for Band 5\nNone for Band 6")
print("\nAfter renaming bands will be :")
print("Red for Band 1\nGreen for Band 2\nBlue for Band 3\n\
RedEdge for Band 4\nGarbage for Band 5\nNIR for Band 6")
answer = input(
"Are you sure you want to rename the layers of the input file? [yes/no] ")
if answer =='yes':
raster.GetRasterBand(1).SetDescription('Red')
raster.GetRasterBand(2).SetDescription('Green')
raster.GetRasterBand(3).SetDescription('Blue')
raster.GetRasterBand(4).SetDescription('RedEdge')
raster.GetRasterBand(5).SetDescription('Garbage')
raster.GetRasterBand(6).SetDescription('NIR')
# raster.GetRasterBand(7).SetDescription('Alpha')
else:
print("No renaming")
else:
print(F'No need for band renaming in {name}')
sys.exit()
if __name__ == "__main__":
# Parse args
args = parse_args()
# import raster
raster = gdal.Open(args.orthophoto.name, gdal.GA_Update)
# Rename layers
rename_sentera_agx710_layers(args.orthophoto.name)
# de-reference the datasets, which triggers gdal to save
raster = None


@ -2,7 +2,7 @@
![image](https://user-images.githubusercontent.com/1951843/111536715-fc91c380-8740-11eb-844c-5b7960186391.png)
This tool is capable of orthorectifying individual images (or all images) from an ODM reconstruction. It does not account for visibility occlusion, so you will get artifacts near buildings (help us improve this?).
This tool is capable of orthorectifying individual images (or all images) from an existing ODM reconstruction.
![image](https://user-images.githubusercontent.com/1951843/111529183-3ad6b500-8738-11eb-9960-b1aa676f863b.png)
@ -17,7 +17,7 @@ docker run -ti --rm -v /home/youruser/datasets:/datasets opendronemap/odm --proj
You can run the orthorectification module by running:
```
docker run -ti --rm -v /home/youruser/datasets:/datasets --entrypoint /code/contrib/orthorectify/orthorectify.py opendronemap/odm /datasets/project
docker run -ti --rm -v /home/youruser/datasets:/datasets --entrypoint /code/contrib/orthorectify/run.sh opendronemap/odm /datasets/project
```
This will start the orthorectification process for all images in the dataset. See additional flags you can pass at the end of the command above:
@ -26,7 +26,8 @@ This will start the orthorectification process for all images in the dataset. Se
usage: orthorectify.py [-h] [--dem DEM] [--no-alpha NO_ALPHA]
[--interpolation {nearest,bilinear}]
[--outdir OUTDIR] [--image-list IMAGE_LIST]
[--images IMAGES]
[--images IMAGES] [--threads THREADS]
[--skip-visibility-test SKIP_VISIBILITY_TEST]
dataset
Orthorectification Tool
@ -50,9 +51,12 @@ optional arguments:
image filenames to orthorectify. By
default all images in a dataset are
processed. Default: img_list.txt
--images IMAGES Comma-separeted list of filenames to
--images IMAGES Comma-separated list of filenames to
rectify. Use as an alternative to --image-
list. Default: process all images.
--skip-visibility-test SKIP_VISIBILITY_TEST
Skip visibility testing (faster but leaves
artifacts due to relief displacement)
```
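For context on what `--skip-visibility-test` trades away: the test walks the DEM cells on the line between each ground cell and the camera, and rejects cells whose camera ray dips below the terrain. Below is a minimal standalone sketch of that idea, with a toy DEM and made-up cell coordinates (an illustration, not the script's exact code):

```
import numpy as np
from math import sqrt

def cell_visible(dem, cam, cam_z, px, px_z):
    # True if DEM cell px=(i, j) with terrain height px_z is visible
    # from a camera above cell cam=(i, j) at height cam_z.
    (ci, cj), (pi, pj) = cam, px
    steps = max(abs(ci - pi), abs(cj - pj))
    total = sqrt((ci - pi) ** 2 + (cj - pj) ** 2)
    for t in np.linspace(0, 1, steps + 1)[1:-1]:  # interior cells only
        i = int(round(pi + (ci - pi) * t))
        j = int(round(pj + (cj - pj) * t))
        d = sqrt((i - pi) ** 2 + (j - pj) ** 2)
        # Height of the pixel-to-camera ray above this cell, linear in
        # horizontal distance from the pixel
        if dem[j][i] > px_z + (d / total) * (cam_z - px_z):
            return False  # terrain blocks the ray
    return True

dem = np.zeros((5, 5))
dem[2, 2] = 100.0  # a wall between camera and pixel
print(cell_visible(dem, (4, 4), 50.0, (0, 0), 0.0))   # False: wall blocks the ray
print(cell_visible(dem, (4, 4), 200.0, (0, 0), 0.0))  # True: camera clears the wall
```

Skipping this per-cell walk is what makes the flag faster, at the cost of relief-displacement artifacts where terrain actually hides a surface.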
## Roadmap
@ -60,5 +64,6 @@ optional arguments:
Help us improve this module! We could add:
- [ ] GPU support for faster processing
- [ ] Visibility checks
- [ ] Merging of multiple orthorectified images (blending, filtering, seam leveling)
- [ ] Faster visibility test
- [ ] Different methods for orthorectification (direct)


@ -6,12 +6,14 @@ import os
import sys
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
from math import sqrt
import rasterio
import numpy as np
import numpy.ma as ma
import multiprocessing
import argparse
import functools
from skimage.draw import line
from opensfm import dataset
default_dem_path = "odm_dem/dsm.tif"
@ -45,12 +47,14 @@ parser.add_argument('--image-list',
parser.add_argument('--images',
type=str,
default="",
help="Comma-separeted list of filenames to rectify. Use as an alternative to --image-list. Default: process all images.")
help="Comma-separated list of filenames to rectify. Use as an alternative to --image-list. Default: process all images.")
parser.add_argument('--threads',
type=int,
default=multiprocessing.cpu_count(),
help="Number of CPU processes to use. Default: %(default)s")
parser.add_argument('--skip-visibility-test',
type=bool,
help="Skip visibility testing (faster but leaves artifacts due to relief displacement)")
args = parser.parse_args()
dataset_path = args.dataset
@ -112,11 +116,16 @@ with rasterio.open(dem_path) as dem_raster:
dem_has_nodata = dem_raster.profile.get('nodata') is not None
if dem_has_nodata:
dem_min_value = ma.array(dem, mask=dem==dem_raster.nodata).min()
m = ma.array(dem, mask=dem==dem_raster.nodata)
dem_min_value = m.min()
dem_max_value = m.max()
else:
dem_min_value = dem.min()
dem_max_value = dem.max()
print("DEM Minimum: %s" % dem_min_value)
print("DEM Maximum: %s" % dem_max_value)
h, w = dem.shape
crs = dem_raster.profile.get('crs')
@ -132,18 +141,18 @@ with rasterio.open(dem_path) as dem_raster:
exit(1)
with open(coords_file) as f:
line = f.readline() # discard
l = f.readline() # discard
# second line is a northing/easting offset
line = f.readline().rstrip()
dem_offset_x, dem_offset_y = map(float, line.split(" "))
l = f.readline().rstrip()
dem_offset_x, dem_offset_y = map(float, l.split(" "))
print("DEM offset: (%s, %s)" % (dem_offset_x, dem_offset_y))
print("DEM dimensions: %sx%s pixels" % (w, h))
# Read reconstruction
udata = dataset.UndistortedDataSet(dataset.DataSet(os.path.join(dataset_path, "opensfm")))
udata = dataset.UndistortedDataSet(dataset.DataSet(os.path.join(dataset_path, "opensfm")), undistorted_data_path=os.path.join(dataset_path, "opensfm", "undistorted"))
reconstructions = udata.load_undistorted_reconstruction()
if len(reconstructions) == 0:
raise Exception("No reconstructions available")
@ -160,6 +169,8 @@ with rasterio.open(dem_path) as dem_raster:
r = shot.pose.get_rotation_matrix()
Xs, Ys, Zs = shot.pose.get_origin()
cam_grid_y, cam_grid_x = dem_raster.index(Xs + dem_offset_x, Ys + dem_offset_y)
a1 = r[0][0]
b1 = r[0][1]
c1 = r[0][2]
@ -170,15 +181,26 @@ with rasterio.open(dem_path) as dem_raster:
b3 = r[2][1]
c3 = r[2][2]
if not args.skip_visibility_test:
distance_map = np.full((h, w), np.nan)
for j in range(0, h):
for i in range(0, w):
distance_map[j][i] = sqrt((cam_grid_x - i) ** 2 + (cam_grid_y - j) ** 2)
distance_map[distance_map==0] = 1e-7
print("Camera pose: (%f, %f, %f)" % (Xs, Ys, Zs))
img_h, img_w, num_bands = shot_image.shape
half_img_w = (img_w - 1) / 2.0
half_img_h = (img_h - 1) / 2.0
print("Image dimensions: %sx%s pixels" % (img_w, img_h))
f = shot.camera.focal * max(img_h, img_w)
has_nodata = dem_raster.profile.get('nodata') is not None
def process_pixels(step):
imgout = np.full((num_bands, dem_bbox_h, dem_bbox_w), np.nan)
minx = dem_bbox_w
miny = dem_bbox_h
maxx = 0
@ -192,13 +214,14 @@ with rasterio.open(dem_path) as dem_raster:
im_i = i - dem_bbox_minx
# World coordinates
Xa, Ya = dem_raster.xy(j, i)
Za = dem[j][i]
# Skip nodata
if has_nodata and Za == dem_raster.nodata:
continue
Xa, Ya = dem_raster.xy(j, i)
# Remove offset (our cameras don't have the geographic offset)
Xa -= dem_offset_x
Ya -= dem_offset_y
@ -209,11 +232,27 @@ with rasterio.open(dem_path) as dem_raster:
dz = (Za - Zs)
den = a3 * dx + b3 * dy + c3 * dz
x = (img_w - 1) / 2.0 - (f * (a1 * dx + b1 * dy + c1 * dz) / den)
y = (img_h - 1) / 2.0 - (f * (a2 * dx + b2 * dy + c2 * dz) / den)
x = half_img_w - (f * (a1 * dx + b1 * dy + c1 * dz) / den)
y = half_img_h - (f * (a2 * dx + b2 * dy + c2 * dz) / den)
if x >= 0 and y >= 0 and x <= img_w - 1 and y <= img_h - 1:
# Visibility test
if not args.skip_visibility_test:
check_dem_points = np.column_stack(line(i, j, cam_grid_x, cam_grid_y))
check_dem_points = check_dem_points[np.all(np.logical_and(np.array([0, 0]) <= check_dem_points, check_dem_points < [w, h]), axis=1)]
visible = True
for p in check_dem_points:
ray_z = Zs + (distance_map[p[1]][p[0]] / distance_map[j][i]) * dz
if ray_z > dem_max_value:
break
if dem[p[1]][p[0]] > ray_z:
visible = False
break
if not visible:
continue
if interpolation == 'bilinear':
xi = img_w - 1 - x
yi = img_h - 1 - y
@ -291,6 +330,7 @@ with rasterio.open(dem_path) as dem_raster:
# Merge image
imgout, _ = results[0]
for j in range(dem_bbox_miny, dem_bbox_maxy + 1):
im_j = j - dem_bbox_miny
resimg, _ = results[j % max_workers]
@ -308,10 +348,10 @@ with rasterio.open(dem_path) as dem_raster:
miny = min(bounds[1], miny)
maxx = max(bounds[2], maxx)
maxy = max(bounds[3], maxy)
print("Output bounds: (%s, %s), (%s, %s) pixels" % (minx, miny, maxx, maxy))
if minx <= maxx and miny <= maxy:
imgout = imgout[:,miny:maxy,minx:maxx]
imgout = imgout[:,miny:maxy+1,minx:maxx+1]
if with_alpha:
alpha = np.zeros((imgout.shape[1], imgout.shape[2]), dtype=np.uint8)


@ -0,0 +1,3 @@
#!/bin/bash
PYTHONPATH=$PYTHONPATH:/code/SuperBuild/install/bin/opensfm python3 orthorectify.py "$@"


@ -0,0 +1,16 @@
# Resize
Resize a dataset (and optional GCP file).
Resizes images while keeping EXIF data; the EXIF width and height attributes are updated accordingly. ODM GCP files are scaled as well.
Usage:
```
pip install -r requirements.txt
python3 resize.py -i images/ -o resized/ 25%
python3 resize.py -i images/1.JPG -o resized.JPG 25%
python3 resize.py -i gcp_list.txt -o resized_gcp_list.txt
```
Originally forked from https://github.com/pierotofy/exifimageresize


@ -0,0 +1,2 @@
Pillow==8.0.1
piexif==1.1.2


@ -0,0 +1,169 @@
import argparse
import os
import glob
import shutil
from PIL import Image
import piexif
import multiprocessing
from multiprocessing.pool import ThreadPool
import sys
sys.path.append("../../")
from opendm.gcp import GCPFile
parser = argparse.ArgumentParser(description='Exif Image Resize')
parser.add_argument('--input', '-i',
metavar='<path>',
required=True,
help='Path to input image/GCP or image folder')
parser.add_argument('--output', '-o',
metavar='<path>',
required=True,
help='Path to output image/GCP or image folder')
parser.add_argument('--force', '-f',
action='store_true',
default=False,
help='Overwrite results')
parser.add_argument('amount',
metavar='<pixel|percentage%>',
type=str,
help='Pixel of largest side or percentage to resize images by')
args = parser.parse_args()
def die(msg):
print(msg)
exit(1)
class nonloc:
errors = 0
def resize_image(image_path, out_path, resize_to, out_path_is_file=False):
"""
:param image_path: path to the image
:param out_path: path to the output directory or file
:param resize_to: percentage ("perc%") or pixels
"""
try:
im = Image.open(image_path)
path, ext = os.path.splitext(image_path)
if out_path_is_file:
resized_image_path = out_path
else:
resized_image_path = os.path.join(out_path, os.path.basename(image_path))
width, height = im.size
max_side = max(width, height)
if isinstance(resize_to, str) and resize_to.endswith("%"):
ratio = float(resize_to[:-1]) / 100.0
else:
ratio = float(resize_to) / float(max_side)
resized_width = int(width * ratio)
resized_height = int(height * ratio)
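# e.g. a 4000x3000 input with resize_to='25%' gives ratio 0.25 -> 1000x750;
# with resize_to=2000 (target largest side) the ratio is 0.5 -> 2000x1500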
im.thumbnail((resized_width, resized_height), Image.LANCZOS)
driver = ext[1:].upper()
if driver == 'JPG':
driver = 'JPEG'
if 'exif' in im.info:
exif_dict = piexif.load(im.info['exif'])
exif_dict['Exif'][piexif.ExifIFD.PixelXDimension] = resized_width
exif_dict['Exif'][piexif.ExifIFD.PixelYDimension] = resized_height
im.save(resized_image_path, driver, exif=piexif.dump(exif_dict), quality=100)
else:
im.save(resized_image_path, driver, quality=100)
im.close()
print("{} ({}x{}) --> {} ({}x{})".format(image_path, width, height, resized_image_path, resized_width, resized_height))
except (IOError, ValueError) as e:
print("Error: Cannot resize {}: {}.".format(image_path, str(e)))
nonloc.errors += 1
def resize_gcp(gcp_path, out_path, resize_to, out_path_is_file=False):
"""
:param gcp_path: path to the GCP
:param out_path: path to the output directory or file
:param resize_to: percentage ("perc%") or pixels
"""
try:
if out_path_is_file:
resized_gcp_path = out_path
else:
resized_gcp_path = os.path.join(out_path, os.path.basename(gcp_path))
if resize_to.endswith("%"):
ratio = float(resize_to[:-1]) / 100.0
else:
ratio = resize_to
gcp = GCPFile(gcp_path)
if gcp.entries_count() > 0:
gcp.make_resized_copy(resized_gcp_path, ratio)
else:
raise ValueError("No GCP entries")
print("{} --> {}".format(gcp_path, resized_gcp_path))
except (IOError, ValueError) as e:
print("Error: Cannot resize {}: {}.".format(gcp_path, str(e)))
nonloc.errors += 1
if not args.amount.endswith("%"):
args.amount = float(args.amount)
if args.amount <= 0:
die("Invalid amount")
else:
try:
if float(args.amount[:-1]) <= 0:
die("Invalid amount")
except:
die("Invalid amount")
files = []
gcps = []
if os.path.isdir(args.input):
for ext in ["JPG", "JPEG", "PNG", "TIFF", "TIF"]:
files += glob.glob("{}/*.{}".format(args.input, ext))
files += glob.glob("{}/*.{}".format(args.input, ext.lower()))
gcps = glob.glob("{}/*.txt".format(args.input))
elif os.path.exists(args.input):
_, ext = os.path.splitext(args.input)
if ext.lower() == ".txt":
gcps = [args.input]
else:
files = [args.input]
else:
die("{} does not exist".format(args.input))
create_dir = len(files) > 1 or args.output.endswith("/") or len(gcps) > 1
if create_dir and os.path.isdir(args.output):
if not args.force:
die("{} exists, pass --force to overwrite results".format(args.output))
else:
shutil.rmtree(args.output)
elif not create_dir and os.path.isfile(args.output):
if not args.force:
die("{} exists, pass --force to overwrite results".format(args.output))
else:
os.remove(args.output)
if create_dir:
os.makedirs(args.output)
pool = ThreadPool(processes=multiprocessing.cpu_count())
def resize(file):
_, ext = os.path.splitext(file)
if ext.lower() == ".txt":
return resize_gcp(file, args.output, args.amount, not create_dir)
else:
return resize_image(file, args.output, args.amount, not create_dir)
pool.map(resize, files + gcps)
print("Process completed, {} errors.".format(nonloc.errors))


@ -9,4 +9,9 @@ do
fi
done
/usr/bin/g++_real -march=nehalem "${args[@]}"
ARCH=nehalem
if [[ $(uname -m) == "aarch64" ]]; then
ARCH=armv8-a
fi
/usr/bin/g++_real -march=$ARCH "${args[@]}"


@ -9,4 +9,9 @@ do
fi
done
/usr/bin/gcc_real -march=nehalem "${args[@]}"
ARCH=nehalem
if [[ $(uname -m) == "aarch64" ]]; then
ARCH=armv8-a
fi
/usr/bin/gcc_real -march=$ARCH "${args[@]}"


@ -1,8 +1,8 @@
FROM nvidia/cuda:11.2.0-runtime-ubuntu20.04 AS builder
FROM nvidia/cuda:11.2.0-devel-ubuntu20.04 AS builder
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/src/opensfm" \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
# Prepare directories
@ -22,10 +22,11 @@ RUN bash configure.sh clean
### Use a second image for the final asset to reduce the number and
# size of the layers.
FROM nvidia/cuda:11.2.0-runtime-ubuntu20.04
#FROM nvidia/cuda:11.2.0-devel-ubuntu20.04
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/src/opensfm" \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
WORKDIR /code
@ -36,14 +37,13 @@ COPY --from=builder /code /code
# Copy the Python libraries installed via pip from the builder
COPY --from=builder /usr/local /usr/local
# Install OpenCL Drivers
RUN apt update && apt install -y nvidia-opencl-icd-340 intel-opencl-icd
# Install shared libraries that we depend on via APT, but *not*
# the -dev packages to save space!
# Also run a smoke test on ODM and OpenSfM
RUN bash configure.sh installruntimedepsonly \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
&& bash run.sh --help \
&& bash -c "eval $(python3 /code/opendm/context.py) && python3 -c 'from opensfm import io, pymap'"
# Entry point
ENTRYPOINT ["python3", "/code/run.py"]


@ -154,7 +154,7 @@ documentation for any purpose and without fee, provided that:
all copies of this software and any modification thereof and in
supporting documentation;
2. Any color-handling application which displays TekHVC color
cooordinates identifies these as TekHVC color coordinates in any
coordinates identifies these as TekHVC color coordinates in any
interface that displays these coordinates and in any associated
documentation;
3. The term "TekHVC" is always used, and is only used, in association

opendm/boundary.py 100644 (121 additions)

@ -0,0 +1,121 @@
import fiona
import fiona.crs
import os
import io
import json
from opendm import system
from pyproj import CRS
from opendm.location import transformer
from opendm.utils import double_quote
from osgeo import ogr
from opendm.shots import get_origin
def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_offset=(0, 0)):
if not os.path.isfile(reconstruction_json):
raise IOError(reconstruction_json + " does not exist.")
with open(reconstruction_json) as f:
data = json.load(f)
reconstruction = data[0]
mp = ogr.Geometry(ogr.wkbMultiPoint)
for shot_image in reconstruction['shots']:
shot = reconstruction['shots'][shot_image]
if shot['gps_dop'] < 999999:
camera = reconstruction['cameras'][shot['camera']]
p = ogr.Geometry(ogr.wkbPoint)
origin = get_origin(shot)
p.AddPoint_2D(origin[0] + reconstruction_offset[0], origin[1] + reconstruction_offset[1])
mp.AddGeometry(p)
if mp.GetGeometryCount() < 3:
return None
convexhull = mp.ConvexHull()
boundary = convexhull.Buffer(buffer)
return load_boundary(boundary.ExportToJson())
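# In short: keep the origins of shots whose GPS DOP looks valid (< 999999),
# take their 2D convex hull, and buffer it outward by `buffer` (in the
# reconstruction's units).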
def load_boundary(boundary_json, reproject_to_proj4=None):
if not isinstance(boundary_json, str):
boundary_json = json.dumps(boundary_json)
with fiona.open(io.BytesIO(boundary_json.encode('utf-8')), 'r') as src:
if len(src) != 1:
raise IOError("Boundary must have a single polygon (found: %s)" % len(src))
geom = src[0]['geometry']
if geom['type'] != 'Polygon':
raise IOError("Boundary must have a polygon feature (found: %s)" % geom['type'])
rings = geom['coordinates']
if len(rings) == 0:
raise IOError("Boundary geometry has no rings")
coords = rings[0]
if len(coords) == 0:
raise IOError("Boundary geometry has no coordinates")
dimensions = len(coords[0])
if reproject_to_proj4 is not None:
t = transformer(CRS.from_proj4(fiona.crs.to_string(src.crs)),
CRS.from_proj4(reproject_to_proj4))
coords = [t.TransformPoint(*c)[:dimensions] for c in coords]
return coords
def boundary_offset(boundary, reconstruction_offset):
if boundary is None or reconstruction_offset is None:
return boundary
res = []
dims = len(boundary[0])
for c in boundary:
if dims == 2:
res.append((c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1]))
else:
res.append((c[0] - reconstruction_offset[0], c[1] - reconstruction_offset[1], c[2]))
return res
def as_polygon(boundary):
if boundary is None:
return None
return "POLYGON((" + ", ".join([" ".join(map(str, c)) for c in boundary]) + "))"
def as_geojson(boundary):
return '{"type":"FeatureCollection","features":[{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[%s]}}]}' % str(list(map(list, boundary)))
def export_to_bounds_files(boundary, proj4, bounds_json_file, bounds_gpkg_file):
with open(bounds_json_file, "w") as f:
f.write(json.dumps({
"type": "FeatureCollection",
"name": "bounds",
"features": [{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [boundary]
}
}]
}))
if os.path.isfile(bounds_gpkg_file):
os.remove(bounds_gpkg_file)
kwargs = {
'proj4': proj4,
'input': double_quote(bounds_json_file),
'output': double_quote(bounds_gpkg_file)
}
system.run('ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs))


@ -1,5 +1,6 @@
from vmem import virtual_memory
import os
import sys
try:
import Queue as queue
except:


@ -11,7 +11,7 @@ import sys
# parse arguments
processopts = ['dataset', 'split', 'merge', 'opensfm', 'openmvs', 'odm_filterpoints',
'odm_meshing', 'mvs_texturing', 'odm_georeferencing',
'odm_dem', 'odm_orthophoto', 'odm_report']
'odm_dem', 'odm_orthophoto', 'odm_report', 'odm_postprocess']
with open(os.path.join(context.root_path, 'VERSION')) as version_file:
__version__ = version_file.read().strip()
@ -95,7 +95,7 @@ def config(argv=None, parser=None):
parser.add_argument('--end-with', '-e',
metavar='<string>',
action=StoreValue,
default='odm_report',
default='odm_postprocess',
choices=processopts,
help='End processing at this stage. Can be one of: %(choices)s. Default: %(default)s')
@ -122,7 +122,7 @@ def config(argv=None, parser=None):
parser.add_argument('--min-num-features',
metavar='<integer>',
action=StoreValue,
default=8000,
default=10000,
type=int,
help=('Minimum number of features to extract per image. '
'More features can be useful for finding more matches between images, '
@ -133,7 +133,7 @@ def config(argv=None, parser=None):
metavar='<string>',
action=StoreValue,
default='sift',
choices=['sift', 'hahog'],
choices=['akaze', 'hahog', 'orb', 'sift'],
help=('Choose the algorithm for extracting keypoints and computing descriptors. '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
@ -151,8 +151,8 @@ def config(argv=None, parser=None):
metavar='<string>',
action=StoreValue,
default='flann',
choices=['flann', 'bow'],
help=('Matcher algorithm, Fast Library for Approximate Nearest Neighbors or Bag of Words. FLANN is slower, but more stable. BOW is faster, but can sometimes miss valid matches. '
choices=['bow', 'bruteforce', 'flann'],
help=('Matcher algorithm, Fast Library for Approximate Nearest Neighbors or Bag of Words. FLANN is slower, but more stable. BOW is faster, but can sometimes miss valid matches. BRUTEFORCE is very slow but robust. '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
@ -162,19 +162,7 @@ def config(argv=None, parser=None):
default=8,
type=int,
help='Number of nearest images to pre-match based on GPS '
'exif data. Set to 0 to skip pre-matching. '
'Neighbors works together with Distance parameter, '
'set both to 0 to not use pre-matching. Default: %(default)s')
parser.add_argument('--matcher-distance',
metavar='<integer>',
action=StoreValue,
default=0,
type=int,
help='Distance threshold in meters to find pre-matching '
'images based on GPS exif data. Set both '
'matcher-neighbors and this to 0 to skip '
'pre-matching. Default: %(default)s')
'exif data. Set to 0 to skip pre-matching. Default: %(default)s')
parser.add_argument('--use-fixed-camera-params',
action=StoreTrue,
@ -192,12 +180,12 @@ def config(argv=None, parser=None):
'Can be specified either as path to a cameras.json file or as a '
'JSON string representing the contents of a '
'cameras.json file. Default: %(default)s')
parser.add_argument('--camera-lens',
metavar='<string>',
action=StoreValue,
default='auto',
choices=['auto', 'perspective', 'brown', 'fisheye', 'spherical'],
choices=['auto', 'perspective', 'brown', 'fisheye', 'spherical', 'equirectangular', 'dual'],
help=('Set a camera projection type. Manually setting a value '
'can help improve geometric undistortion. By default the application '
'tries to determine a lens type from the images metadata. Can be one of: %(choices)s. Default: '
@ -230,8 +218,8 @@ def config(argv=None, parser=None):
action=StoreValue,
type=float,
default=640,
help=('Legacy option (use --pc-quality instead). Controls the density of the point cloud by setting the resolution of the depthmap images. Higher values take longer to compute '
'but produce denser point clouds. '
help=('Controls the density of the point cloud by setting the resolution of the depthmap images. Higher values take longer to compute '
'but produce denser point clouds. Overrides the value calculated by --pc-quality. '
'Default: %(default)s'))
parser.add_argument('--use-hybrid-bundle-adjustment',
@ -241,6 +229,15 @@ def config(argv=None, parser=None):
help='Run local bundle adjustment for every image added to the reconstruction and a global '
'adjustment every 100 images. Speeds up reconstruction for very large datasets. Default: %(default)s')
parser.add_argument('--sfm-algorithm',
metavar='<string>',
action=StoreValue,
default='incremental',
choices=['incremental', 'triangulation'],
help=('Choose the structure from motion algorithm. For aerial datasets, if camera GPS positions and angles are available, triangulation can generate better results. '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--use-3dmesh',
action=StoreTrue,
nargs=0,
@ -258,7 +255,13 @@ def config(argv=None, parser=None):
nargs=0,
default=False,
help='Skip generation of PDF report. This can save time if you don\'t need a report. Default: %(default)s')
parser.add_argument('--skip-orthophoto',
action=StoreTrue,
nargs=0,
default=False,
help='Skip generation of the orthophoto. This can save time if you only need 3D results or DEMs. Default: %(default)s')
parser.add_argument('--ignore-gsd',
action=StoreTrue,
nargs=0,
@ -303,6 +306,24 @@ def config(argv=None, parser=None):
'Use 0 to disable cropping. '
'Default: %(default)s'))
parser.add_argument('--boundary',
default='',
metavar='<json>',
action=StoreValue,
type=path_or_json_string,
help='GeoJSON polygon limiting the area of the reconstruction. '
'Can be specified either as path to a GeoJSON file or as a '
'JSON string representing the contents of a '
'GeoJSON file. Default: %(default)s')
parser.add_argument('--auto-boundary',
action=StoreTrue,
nargs=0,
default=False,
help='Automatically set a boundary using camera shot locations to limit the area of the reconstruction. '
'This can help remove far away background artifacts (sky, background landscapes, etc.). See also --boundary. '
'Default: %(default)s')
parser.add_argument('--pc-quality',
metavar='<string>',
action=StoreValue,
@ -747,6 +768,9 @@ def config(argv=None, parser=None):
if args.fast_orthophoto:
log.ODM_INFO('Fast orthophoto is turned on, automatically setting --skip-3dmodel')
args.skip_3dmodel = True
# if not 'sfm_algorithm_is_set' in args:
# log.ODM_INFO('Fast orthophoto is turned on, automatically setting --sfm-algorithm to triangulation')
# args.sfm_algorithm = 'triangulation'
if args.pc_rectify and not args.pc_classify:
log.ODM_INFO("Ground rectify is turned on, automatically turning on point cloud classification")
@ -771,8 +795,4 @@ def config(argv=None, parser=None):
log.ODM_ERROR("Cluster node seems to be offline: %s" % str(e))
sys.exit(1)
# if args.radiometric_calibration != "none" and not args.texturing_skip_global_seam_leveling:
# log.ODM_WARNING("radiometric-calibration is turned on, automatically setting --texturing-skip-global-seam-leveling")
# args.texturing_skip_global_seam_leveling = True
return args


@ -11,6 +11,7 @@ superbuild_bin_path = os.path.join(superbuild_path, 'install', 'bin')
# add opencv,opensfm to python path
python_packages_paths = [os.path.join(superbuild_path, p) for p in [
'install/lib/python3.9/dist-packages',
'install/lib/python3.8/dist-packages',
'install/lib/python3/dist-packages',
'install/bin/opensfm'
@ -33,6 +34,8 @@ mvstex_path = os.path.join(superbuild_bin_path, "texrecon")
omvs_densify_path = os.path.join(superbuild_bin_path, "OpenMVS", "DensifyPointCloud")
omvs_reconstructmesh_path = os.path.join(superbuild_bin_path, "OpenMVS", "ReconstructMesh")
fpcfilter_path = os.path.join(superbuild_bin_path, "FPCFilter")
odm_orthophoto_path = os.path.join(superbuild_bin_path, "odm_orthophoto")
settings_path = os.path.join(root_path, 'settings.yaml')

View file

@ -5,6 +5,7 @@ from opendm.point_cloud import export_summary_json
from osgeo import ogr
import json, os
from opendm.concurrency import get_max_memory
from opendm.utils import double_quote
class Cropper:
def __init__(self, storage_dir, files_prefix = "crop"):
@ -41,9 +42,9 @@ class Cropper:
try:
kwargs = {
'gpkg_path': gpkg_path,
'geotiffInput': original_geotiff,
'geotiffOutput': geotiff_path,
'gpkg_path': double_quote(gpkg_path),
'geotiffInput': double_quote(original_geotiff),
'geotiffOutput': double_quote(geotiff_path),
'options': ' '.join(map(lambda k: '-co {}={}'.format(k, gdal_options[k]), gdal_options)),
'warpOptions': ' '.join(warp_options),
'max_memory': get_max_memory()
@ -252,10 +253,13 @@ class Cropper:
bounds_gpkg_path = os.path.join(self.storage_dir, '{}.bounds.gpkg'.format(self.files_prefix))
if os.path.isfile(bounds_gpkg_path):
os.remove(bounds_gpkg_path)
# Convert bounds to GPKG
kwargs = {
'input': bounds_geojson_path,
'output': bounds_gpkg_path,
'input': double_quote(bounds_geojson_path),
'output': double_quote(bounds_gpkg_path),
'proj4': pc_proj4
}

View file

@ -145,9 +145,12 @@ def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrenc
if len(polygons) == 0:
log.ODM_WARNING("No polygons, cannot compute cutline")
return
log.ODM_INFO("Merging polygons")
cutline_polygons = unary_union(polygons)
if not hasattr(cutline_polygons, '__getitem__'):
cutline_polygons = [cutline_polygons]
largest_cutline = cutline_polygons[0]
max_area = largest_cutline.area
for p in cutline_polygons:

View file

@ -12,7 +12,7 @@ from opendm import system
from opendm.concurrency import get_max_memory, parallel_map
from scipy import ndimage
from datetime import datetime
from osgeo.utils.gdal_fillnodata import main as gdal_fillnodata
from opendm.vendor.gdal_fillnodata import main as gdal_fillnodata
from opendm import log
try:
import Queue as queue
@ -39,7 +39,7 @@ def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_poin
try:
# Currently, no Python 2 lib that supports reading and writing LAZ, so we will do it manually until ODM is migrated to Python 3
# When migration is done, we can move to pylas and avoid using PDAL for convertion
# When migration is done, we can move to pylas and avoid using PDAL for conversion
tempLasFile = os.path.join(os.path.dirname(lasFile), 'tmp.las')
# Convert LAZ to LAS
@ -186,7 +186,7 @@ def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56']
for t in tiles:
if not os.path.exists(t['filename']):
raise Exception("Error creating %s, %s failed to be created" % (output_file, t['filename']))
# Create virtual raster
tiles_vrt_path = os.path.abspath(os.path.join(outdir, "tiles.vrt"))
tiles_file_list = os.path.abspath(os.path.join(outdir, "tiles_list.txt"))
@ -315,23 +315,17 @@ def median_smoothing(geotiff_path, output_path, smoothing_iterations=1):
dtype = img.dtypes[0]
arr = img.read()[0]
nodata_locs = numpy.where(arr == nodata)
# Median filter (careful, changing the value 5 might require tweaking)
# the lines below. There's another numpy function that takes care of
# these edge cases, but it's slower.
for i in range(smoothing_iterations):
log.ODM_INFO("Smoothing iteration %s" % str(i + 1))
arr = ndimage.median_filter(arr, size=5, output=dtype)
# Fill corner points with nearest value
if arr.shape >= (4, 4):
arr[0][:2] = arr[1][0] = arr[1][1]
arr[0][-2:] = arr[1][-1] = arr[2][-1]
arr[-1][:2] = arr[-2][0] = arr[-2][1]
arr[-1][-2:] = arr[-2][-1] = arr[-2][-2]
arr = ndimage.median_filter(arr, size=9, output=dtype, mode='nearest')
# Median filter leaves a bunch of zeros in nodata areas
locs = numpy.where(arr == 0.0)
arr[locs] = nodata
arr[nodata_locs] = nodata
# write output
with rasterio.open(output_path, 'w', **img.profile) as imgout:
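A small self-contained sketch (not part of this diff) of why mode='nearest' makes the manual corner fix-ups above unnecessary: border pixels are padded with their nearest neighbors before the median window is applied.

import numpy as np
from scipy import ndimage

arr = np.arange(25, dtype="float32").reshape(5, 5)
smoothed = ndimage.median_filter(arr, size=3, mode='nearest')
print(smoothed[0, 0])  # corners are filtered against replicated edge values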

View file

@ -5,7 +5,7 @@ from .partition_plan import PartitionPlan, Partition
class QuadPartitions(PartitionPlan):
"""This partition plan starts with one big partition that includes the whole point cloud. It then divides it into four partitions, based on some criteria.
Each of these partitions are then divided into four other partitions and so on. The algorithm has two possible stopping criterias:
Each of these partitions are then divided into four other partitions and so on. The algorithm has two possible stopping criteria:
if subdividing a partition would imply that one of the new partitions contains fewer than a given number of points, or that one of the new partitions has an area smaller than the given size,
then the partition is not divided."""

View file

@ -1,6 +1,6 @@
#!/usr/bin/env python
################################################################################
# lidar2dems - utilties for creating DEMs from LiDAR data
# lidar2dems - utilities for creating DEMs from LiDAR data
#
# AUTHOR: Matthew Hanson, matt.a.hanson@gmail.com
#
@ -155,7 +155,7 @@ def run_pipeline(json, verbose=False):
cmd = [
'pdal',
'pipeline',
'-i %s' % jsonfile
'-i %s' % double_quote(jsonfile)
]
if verbose or sys.platform == 'win32':
system.run(' '.join(cmd))

View file

@ -16,7 +16,10 @@ class GCPFile:
if self.exists():
with open(self.gcp_path, 'r') as f:
contents = f.read().strip()
# Strip any BOM characters
contents = contents.replace('\ufeff', '')
lines = list(map(str.strip, contents.split('\n')))
if lines:
self.raw_srs = lines[0] # SRS
@ -51,6 +54,25 @@ class GCPFile:
def exists(self):
return bool(self.gcp_path and os.path.exists(self.gcp_path))
def make_resized_copy(self, gcp_file_output, ratio):
"""
Creates a new resized GCP file from an existing GCP file. If the output file already exists, it will be removed.
:param gcp_file_output output path of new GCP file
:param ratio scale GCP coordinates by this value
:return path to new GCP file
"""
output = [self.raw_srs]
for entry in self.iter_entries():
entry.px *= ratio
entry.py *= ratio
output.append(str(entry))
with open(gcp_file_output, 'w') as f:
f.write('\n'.join(output) + '\n')
return gcp_file_output
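A hypothetical usage sketch (file names are illustrative): halving GCP pixel coordinates to match images that were resized to 50%.

gcp = GCPFile("gcp_list.txt")
if gcp.exists():
    gcp.make_resized_copy("gcp_list_resized.txt", 0.5)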
def wgs84_utm_zone(self):
"""
Finds the UTM zone where the first point of the GCP falls into

View file

@ -1,23 +1,91 @@
import os
import sys
import shutil
import ctypes
from opendm import log
from repoze.lru import lru_cache
def gpu_disabled_by_user():
return bool(os.environ.get('ODM_NO_GPU'))
@lru_cache(maxsize=None)
def has_gpus():
if os.environ.get('ODM_NO_GPU'):
def has_popsift_and_can_handle_texsize(width, height):
# First check that we have the required compute capability,
# as we do not support compute capabilities below 3.5
try:
compute_major, compute_minor = get_cuda_compute_version(0)
if compute_major < 3 or (compute_major == 3 and compute_minor < 5):
# Not supported
log.ODM_WARNING("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor))
return False
except Exception as e:
log.ODM_WARNING("Cannot use GPU for feature extraction: %s" % str(e))
return False
try:
from opensfm import pypopsift
fits = pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
if not fits:
log.ODM_WARNING("Image size (%sx%spx) would not fit in GPU memory, falling back to CPU" % (width, height))
return fits
except (ModuleNotFoundError, ImportError):
return False
except Exception as e:
log.ODM_WARNING(str(e))
return False
@lru_cache(maxsize=None)
def get_cuda_compute_version(device_id = 0):
cuda_lib = "libcuda.so"
if sys.platform == 'win32':
cuda_lib = "nvcuda.dll"
nvcuda = ctypes.cdll.LoadLibrary(cuda_lib)
nvcuda.cuInit.argtypes = (ctypes.c_uint32, )
nvcuda.cuInit.restype = ctypes.c_int32
if nvcuda.cuInit(0) != 0:
raise Exception("Cannot initialize CUDA")
nvcuda.cuDeviceGetCount.argtypes = (ctypes.POINTER(ctypes.c_int32), )
nvcuda.cuDeviceGetCount.restype = ctypes.c_int32
device_count = ctypes.c_int32()
if nvcuda.cuDeviceGetCount(ctypes.byref(device_count)) != 0:
raise Exception("Cannot get device count")
if device_count.value == 0:
raise Exception("No devices")
nvcuda.cuDeviceComputeCapability.argtypes = (ctypes.POINTER(ctypes.c_int32), ctypes.POINTER(ctypes.c_int32), ctypes.c_int32)
nvcuda.cuDeviceComputeCapability.restype = ctypes.c_int32
compute_major = ctypes.c_int32()
compute_minor = ctypes.c_int32()
if nvcuda.cuDeviceComputeCapability(ctypes.byref(compute_major), ctypes.byref(compute_minor), device_id) != 0:
raise Exception("Cannot get CUDA compute version")
return (compute_major.value, compute_minor.value)
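A minimal usage sketch for the helper above (whether a CUDA driver is actually present is only known at runtime), mirroring how has_popsift_and_can_handle_texsize() consumes it:

try:
    major, minor = get_cuda_compute_version(0)
    if (major, minor) < (3, 5):
        # Same threshold as the popsift check above
        print("Compute capability %s.%s is below the 3.5 minimum" % (major, minor))
except Exception as e:
    print("No usable CUDA device: %s" % str(e))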
@lru_cache(maxsize=None)
def has_gpu():
if gpu_disabled_by_user():
log.ODM_INFO("Disabling GPU features (ODM_NO_GPU is set)")
return False
try:
import pyopencl
except:
return False
try:
platforms = pyopencl.get_platforms()
for p in platforms:
log.ODM_INFO("Found GPU device: %s" % p.name)
return len(platforms) > 0
except Exception as e:
return False
if sys.platform == 'win32':
nvcuda_path = os.path.join(os.environ.get('SYSTEMROOT'), 'system32', 'nvcuda.dll')
if os.path.isfile(nvcuda_path):
log.ODM_INFO("CUDA drivers detected")
return True
else:
log.ODM_INFO("No CUDA drivers detected, using CPU")
return False
else:
if shutil.which('nvidia-smi') is not None:
log.ODM_INFO("nvidia-smi detected")
return True
else:
log.ODM_INFO("nvidia-smi not found in PATH, using CPU")
return False
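A minimal sketch of opting out of all GPU code paths via the environment variable checked by gpu_disabled_by_user() above; since has_gpu() is wrapped in lru_cache, the variable must be set before the first call.

import os
os.environ['ODM_NO_GPU'] = '1'  # any non-empty value disables GPU features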

View file

@ -4,6 +4,7 @@ import numpy as np
import math
from repoze.lru import lru_cache
from opendm import log
from opendm.shots import get_origin
def rounded_gsd(reconstruction_json, default_value=None, ndigits=0, ignore_gsd=False):
"""
@ -61,12 +62,15 @@ def image_scale_factor(target_resolution, reconstruction_json, gsd_error_estimat
return 1
def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, ignore_gsd=False, ignore_resolution=False, has_gcp=False):
def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, gsd_scaling = 1.0, ignore_gsd=False,
ignore_resolution=False, has_gcp=False):
"""
:param resolution resolution in cm / pixel
:param reconstruction_json path to OpenSfM's reconstruction.json
:param gsd_error_estimate percentage of estimated error in the GSD calculation to set an upper bound on resolution.
:param gsd_scaling scaling of estimated GSD.
:param ignore_gsd when set to True, forces the function to just return resolution.
:param ignore_resolution when set to True, forces the function to return a value based on GSD.
:return The max value between resolution and the GSD computed from the reconstruction.
If a GSD cannot be computed, or ignore_gsd is set to True, it just returns resolution. Units are in cm / pixel.
"""
@ -76,14 +80,16 @@ def cap_resolution(resolution, reconstruction_json, gsd_error_estimate = 0.1, ig
gsd = opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=has_gcp or ignore_resolution)
if gsd is not None:
gsd = gsd * (1 - gsd_error_estimate)
gsd = gsd * (1 - gsd_error_estimate) * gsd_scaling
if gsd > resolution or ignore_resolution:
log.ODM_WARNING('Maximum resolution set to GSD - {}% ({} cm / pixel, requested resolution was {} cm / pixel)'.format(gsd_error_estimate * 100, round(gsd, 2), round(resolution, 2)))
log.ODM_WARNING('Maximum resolution set to {} * (GSD - {}%) '
'({:.2f} cm / pixel, requested resolution was {:.2f} cm / pixel)'
.format(gsd_scaling, gsd_error_estimate * 100, gsd, resolution))
return gsd
else:
return resolution
else:
log.ODM_WARNING('Cannot calculate GSD, using requested resolution of {}'.format(round(resolution, 2)))
log.ODM_WARNING('Cannot calculate GSD, using requested resolution of {:.2f}'.format(resolution))
return resolution
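A worked sketch of the capping arithmetic above (values are illustrative): with an estimated GSD of 2.5 cm/px, gsd_error_estimate=0.1 and gsd_scaling=1.0, the cap is 2.5 * (1 - 0.1) * 1.0 = 2.25 cm/px, so a requested resolution of 1.0 cm/px would be raised to 2.25 cm/px.

gsd = 2.5 * (1 - 0.1) * 1.0   # 2.25
requested = 1.0
print(max(requested, gsd))    # 2.25, matching the warning branch above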
@ -116,7 +122,8 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
shot = reconstruction['shots'][shotImage]
if use_all_shots or shot['gps_dop'] < 999999:
camera = reconstruction['cameras'][shot['camera']]
shot_height = shot['translation'][2]
shot_origin = get_origin(shot)
shot_height = shot_origin[2]
focal_ratio = camera.get('focal', camera.get('focal_x'))
if not focal_ratio:
log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json)
@ -134,6 +141,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
return None
def calculate_gsd(sensor_width, flight_height, focal_length, image_width):
"""
:param sensor_width in millimeters
@ -154,6 +162,7 @@ def calculate_gsd(sensor_width, flight_height, focal_length, image_width):
else:
return None
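For reference, a worked sketch of the ground sampling distance relation these helpers implement (the exact formula is an assumption based on the parameter docs; numbers are illustrative, roughly a 1-inch 20 MP sensor):

sensor_width, focal_length = 13.2, 8.8    # mm
flight_height, image_width = 100.0, 5472  # m, px
focal_ratio = focal_length / sensor_width
gsd = (flight_height * 100 / image_width) / focal_ratio  # cm / px
print(round(gsd, 2))  # ~2.74 cm / px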
def calculate_gsd_from_focal_ratio(focal_ratio, flight_height, image_width):
"""
:param focal_ratio focal length (mm) / sensor_width (mm)

View file

@ -122,6 +122,7 @@ def parse_srs_header(header):
log.ODM_INFO('Parsing SRS header: %s' % header)
header = header.strip()
ref = header.split(' ')
try:
if ref[0] == 'WGS84' and ref[1] == 'UTM':
datum = ref[0]

View file

@ -20,6 +20,8 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05,
log.ODM_INFO('Created temporary directory: %s' % tmp_directory)
radius_steps = [dsm_radius]
for _ in range(2):
radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary
log.ODM_INFO('Creating DSM for 2.5D mesh')
@ -105,6 +107,7 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo
'-maxTileLength 2000 '
'-maxVertexCount {maxVertexCount} '
'-maxConcurrency {maxConcurrency} '
'-edgeSwapThreshold 0.15 '
' {verbose} '.format(**kwargs))
break
except Exception as e:
@ -126,8 +129,8 @@ def dem_to_mesh_gridded(inGeotiff, outMesh, maxVertexCount, verbose=False, maxCo
system.run('"{reconstructmesh}" -i "{infile}" '
'-o "{outfile}" '
'--remove-spikes 0 --remove-spurious 20 --smooth 0 '
'--target-face-num {max_faces} '.format(**cleanupArgs))
'--remove-spikes 0 --remove-spurious 0 --smooth 0 '
'--target-face-num {max_faces} -v 0'.format(**cleanupArgs))
# Delete intermediate results
os.remove(outMeshDirty)
@ -163,7 +166,6 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
'samples': samples,
'pointWeight': pointWeight,
'threads': int(threads),
'memory': int(concurrency.get_max_memory_mb(4, 0.8) // 1024),
'verbose': '--verbose' if verbose else ''
}
@ -175,7 +177,6 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
'--pointWeight {pointWeight} '
'--samplesPerNode {samples} '
'--threads {threads} '
'--maxMemory {memory} '
'--bType 2 '
'--linearFit '
'{verbose}'.format(**poissonReconArgs))
@ -207,7 +208,7 @@ def screened_poisson_reconstruction(inPointCloud, outMesh, depth = 8, samples =
system.run('"{reconstructmesh}" -i "{infile}" '
'-o "{outfile}" '
'--remove-spikes 0 --remove-spurious 20 --smooth 0 '
'--target-face-num {max_faces} '.format(**cleanupArgs))
'--target-face-num {max_faces} -v 0'.format(**cleanupArgs))
# Delete intermediate results
os.remove(outMeshDirty)

View file

@ -70,7 +70,7 @@ def dn_to_radiance(photo, image):
R = np.repeat(R[:, :, np.newaxis], image.shape[2], axis=2)
image *= R
# Floor any negative radiances to zero (can happend due to noise around blackLevel)
# Floor any negative radiances to zero (can happen due to noise around blackLevel)
if dark_level is not None:
image[image < 0] = 0

View file

@ -70,7 +70,7 @@ def generate_kmz(orthophoto_file, output_file=None, outsize=None):
'--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, bandparam, get_max_memory()))
def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir):
if args.crop > 0:
if args.crop > 0 or args.boundary:
Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha'])
if args.build_overviews and not args.cog:

View file

@ -2,22 +2,28 @@
OpenSfM related utils
"""
import os, shutil, sys, json, argparse
import os, shutil, sys, json, argparse, copy
import yaml
import numpy as np
import pyproj
from pyproj import CRS
from opendm import io
from opendm import log
from opendm import system
from opendm import context
from opendm import camera
from opendm.utils import get_depthmap_resolution
from opendm.photo import find_largest_photo_dim
from opendm import location
from opendm.photo import find_largest_photo_dim, find_largest_photo
from opensfm.large import metadataset
from opensfm.large import tools
from opensfm.actions import undistort
from opensfm.dataset import DataSet
from opensfm.types import Reconstruction
from opensfm import report
from opendm.multispectral import get_photos_by_band
from opendm.gpu import has_gpus
from opendm.gpu import has_popsift_and_can_handle_texsize, has_gpu
from opensfm import multiview, exif
from opensfm.actions.export_geocoords import _transform
class OSFMContext:
def __init__(self, opensfm_project_path):
@ -25,7 +31,7 @@ class OSFMContext:
def run(self, command):
osfm_bin = os.path.join(context.opensfm_path, 'bin', 'opensfm')
system.run('%s %s "%s"' %
system.run('"%s" %s "%s"' %
(osfm_bin, command, self.opensfm_project_path))
def is_reconstruction_done(self):
@ -45,6 +51,7 @@ class OSFMContext:
if not io.file_exists(reconstruction_file) or rerun:
self.run('reconstruct')
self.check_merge_partial_reconstructions()
else:
log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' % reconstruction_file)
@ -57,6 +64,49 @@ class OSFMContext:
"You could also try to increase the --min-num-features parameter."
"The program will now exit.")
def check_merge_partial_reconstructions(self):
if self.reconstructed():
data = DataSet(self.opensfm_project_path)
reconstructions = data.load_reconstruction()
tracks_manager = data.load_tracks_manager()
if len(reconstructions) > 1:
log.ODM_WARNING("Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap" % len(reconstructions))
log.ODM_INFO("Attempting merge")
merged = Reconstruction()
merged.set_reference(reconstructions[0].reference)
for ix_r, rec in enumerate(reconstructions):
if merged.reference != rec.reference:
# Should never happen
continue
log.ODM_INFO("Merging reconstruction %s" % ix_r)
for camera in rec.cameras.values():
merged.add_camera(camera)
for point in rec.points.values():
try:
new_point = merged.create_point(point.id, point.coordinates)
new_point.color = point.color
except RuntimeError as e:
log.ODM_WARNING("Cannot merge shot id %s (%s)" % (shot.id, str(e)))
continue
for shot in rec.shots.values():
merged.add_shot(shot)
try:
obsdict = tracks_manager.get_shot_observations(shot.id)
except RuntimeError:
log.ODM_WARNING("Shot id %s missing from tracks_manager!" % shot.id)
continue
for track_id, obs in obsdict.items():
if track_id in merged.points:
merged.add_observation(shot.id, track_id, obs)
data.save_reconstruction([merged])
def setup(self, args, images_path, reconstruction, append_config = [], rerun=False):
"""
@ -111,37 +161,6 @@ class OSFMContext:
except Exception as e:
log.ODM_WARNING("Cannot set camera_models_overrides.json: %s" % str(e))
use_bow = args.matcher_type == "bow"
feature_type = "SIFT"
# GPSDOP override if we have GPS accuracy information (such as RTK)
if 'gps_accuracy_is_set' in args:
log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
log.ODM_INFO("Writing exif overrides")
exif_overrides = {}
for p in photos:
if 'gps_accuracy_is_set' in args:
dop = args.gps_accuracy
elif p.get_gps_dop() is not None:
dop = p.get_gps_dop()
else:
dop = args.gps_accuracy # default value
if p.latitude is not None and p.longitude is not None:
exif_overrides[p.filename] = {
'gps': {
'latitude': p.latitude,
'longitude': p.longitude,
'altitude': p.altitude if p.altitude is not None else 0,
'dop': dop,
}
}
with open(os.path.join(self.opensfm_project_path, "exif_overrides.json"), 'w') as f:
f.write(json.dumps(exif_overrides))
# Check image masks
masks = []
for p in photos:
@ -179,8 +198,6 @@ class OSFMContext:
else:
log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")
depthmap_resolution = get_depthmap_resolution(args, photos)
# create config file for OpenSfM
config = [
"use_exif_size: no",
@ -189,38 +206,58 @@ class OSFMContext:
"feature_min_frames: %s" % args.min_num_features,
"processes: %s" % args.max_concurrency,
"matching_gps_neighbors: %s" % args.matcher_neighbors,
"matching_gps_distance: %s" % args.matcher_distance,
"matching_gps_distance: 0",
"matching_graph_rounds: 50",
"optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params or args.cameras else 'yes'),
"reconstruction_algorithm: %s" % (args.sfm_algorithm),
"undistorted_image_format: tif",
"bundle_outlier_filtering_type: AUTO",
"sift_peak_threshold: 0.066",
"align_orientation_prior: vertical",
"triangulation_type: ROBUST",
"retriangulation_ratio: 2",
"bundle_compensate_gps_bias: yes",
]
if args.camera_lens != 'auto':
config.append("camera_projection_type: %s" % args.camera_lens.upper())
if not has_gps:
log.ODM_INFO("No GPS information, using BOW matching")
use_bow = True
matcher_type = args.matcher_type
feature_type = args.feature_type.upper()
if use_bow:
config.append("matcher_type: WORDS")
osfm_matchers = {
"bow": "WORDS",
"flann": "FLANN",
"bruteforce": "BRUTEFORCE"
}
# Cannot use SIFT with BOW
if feature_type == "SIFT":
if not has_gps and not 'matcher_type_is_set' in args:
log.ODM_INFO("No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)")
matcher_type = "bow"
if matcher_type == "bow":
# Cannot use anything other than HAHOG with BOW
if feature_type != "HAHOG":
log.ODM_WARNING("Using BOW matching, will use HAHOG feature type, not SIFT")
feature_type = "HAHOG"
config.append("matcher_type: %s" % osfm_matchers[matcher_type])
# GPU acceleration?
if has_gpus() and feature_type == "SIFT":
log.ODM_INFO("Using GPU for extracting SIFT features")
log.ODM_INFO("--min-num-features will be ignored")
feature_type = "SIFT_GPU"
if has_gpu():
max_photo = find_largest_photo(photos)
w, h = max_photo.width, max_photo.height
if w > h:
h = int((h / w) * feature_process_size)
w = int(feature_process_size)
else:
w = int((w / h) * feature_process_size)
h = int(feature_process_size)
if has_popsift_and_can_handle_texsize(w, h) and feature_type == "SIFT":
log.ODM_INFO("Using GPU for extracting SIFT features")
feature_type = "SIFT_GPU"
self.gpu_sift_feature_extraction = True
config.append("feature_type: %s" % feature_type)
@ -255,6 +292,10 @@ class OSFMContext:
config_filename = self.get_config_file_path()
with open(config_filename, 'w') as fout:
fout.write("\n".join(config))
# We impose our own reference_lla
if reconstruction.is_georeferenced():
self.write_reference_lla(reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset, reconstruction.georef.proj4())
else:
log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
@ -272,6 +313,43 @@ class OSFMContext:
metadata_dir = self.path("exif")
if not io.dir_exists(metadata_dir) or rerun:
self.run('extract_metadata')
def photos_to_metadata(self, photos, rerun=False):
metadata_dir = self.path("exif")
if io.dir_exists(metadata_dir) and not rerun:
log.ODM_WARNING("%s already exists, not rerunning photo to metadata" % metadata_dir)
return
if io.dir_exists(metadata_dir):
shutil.rmtree(metadata_dir)
os.makedirs(metadata_dir, exist_ok=True)
camera_models = {}
data = DataSet(self.opensfm_project_path)
for p in photos:
d = p.to_opensfm_exif()
with open(os.path.join(metadata_dir, "%s.exif" % p.filename), 'w') as f:
f.write(json.dumps(d, indent=4))
camera_id = p.camera_id()
if camera_id not in camera_models:
camera = exif.camera_from_exif_metadata(d, data)
camera_models[camera_id] = camera
# Override any camera specified in the camera models overrides file.
if data.camera_models_overrides_exists():
overrides = data.load_camera_models_overrides()
if "all" in overrides:
for key in camera_models:
camera_models[key] = copy.copy(overrides["all"])
camera_models[key].id = key
else:
for key, value in overrides.items():
camera_models[key] = value
data.save_camera_models(camera_models)
def is_feature_matching_done(self):
features_dir = self.path("features")
@ -284,7 +362,18 @@ class OSFMContext:
matches_dir = self.path("matches")
if not io.dir_exists(features_dir) or rerun:
self.run('detect_features')
try:
self.run('detect_features')
except system.SubprocessException as e:
# Sometimes feature extraction by GPU can fail
# for various reasons, so before giving up
# we try to fallback to CPU
if hasattr(self, 'gpu_sift_feature_extraction'):
log.ODM_WARNING("GPU SIFT extraction failed, maybe the graphics card is not supported? Attempting fallback to CPU")
self.update_config({'feature_type': "SIFT"})
self.run('detect_features')
else:
raise e
else:
log.ODM_WARNING('Detect features already done: %s exists' % features_dir)
@ -428,6 +517,57 @@ class OSFMContext:
log.ODM_WARNING("Report could not be generated")
else:
log.ODM_WARNING("Report %s already exported" % report_path)
def write_reference_lla(self, offset_x, offset_y, proj4):
reference_lla = self.path("reference_lla.json")
longlat = CRS.from_epsg("4326")
lon, lat = location.transform2(CRS.from_proj4(proj4), longlat, offset_x, offset_y)
with open(reference_lla, 'w') as f:
f.write(json.dumps({
'latitude': lat,
'longitude': lon,
'altitude': 0.0
}, indent=4))
log.ODM_INFO("Wrote reference_lla.json")
def ground_control_points(self, proj4):
"""
Load ground control point information.
"""
gcp_stats_file = self.path("stats", "ground_control_points.json")
if not io.file_exists(gcp_stats_file):
return []
gcps_stats = {}
try:
with open(gcp_stats_file) as f:
gcps_stats = json.loads(f.read())
except:
log.ODM_INFO("Cannot parse %s" % gcp_stats_file)
if not gcps_stats:
return []
ds = DataSet(self.opensfm_project_path)
reference = ds.load_reference()
projection = pyproj.Proj(proj4)
result = []
for gcp in gcps_stats:
geocoords = _transform(gcp['coordinates'], reference, projection)
result.append({
'id': gcp['id'],
'observations': gcp['observations'],
'coordinates': geocoords,
'error': gcp['error']
})
return result
def name(self):
return os.path.basename(os.path.abspath(self.path("..")))
@ -449,10 +589,11 @@ def get_submodel_argv(args, submodels_path = None, submodel_name = None):
tweaking --crop if necessary (DEM merging makes assumptions about the area of DEMs and their euclidean maps that require cropping; if cropping is skipped, this leads to errors.)
removing --gcp (the GCP path if specified is always "gcp_list.txt")
reading the contents of --cameras
reading the contents of --boundary
"""
assure_always = ['orthophoto_cutline', 'dem_euclidean_map', 'skip_3dmodel', 'skip_report']
remove_always = ['split', 'split_overlap', 'rerun_from', 'rerun', 'gcp', 'end_with', 'sm_cluster', 'rerun_all', 'pc_csv', 'pc_las', 'pc_ept', 'tiles', 'copy-to', 'cog']
read_json_always = ['cameras']
read_json_always = ['cameras', 'boundary']
argv = sys.argv

View file

@ -1,6 +1,7 @@
import logging
import re
import os
import math
import exifread
import numpy as np
@ -14,18 +15,24 @@ from opendm import system
import xmltodict as x2d
from opendm import get_image_size
from xml.parsers.expat import ExpatError
from opensfm.sensors import sensor_data
from opensfm.geo import ecef_from_lla
projections = ['perspective', 'fisheye', 'brown', 'dual', 'equirectangular', 'spherical']
def find_largest_photo_dims(photos):
max_mp = 0
max_dims = None
def find_largest_photo(photos):
max_photo = None
for p in photos:
if p.width is None:
if p.width is None or p.height is None:
continue
if max_photo is None:
max_photo = p
else:
if max_photo.width * max_photo.height < p.width * p.height:
max_photo = p
return p
mp = p.width * p.height
if mp > max_mp:
max_mp = mp
max_dims = (p.width, p.height)
return max_dims
def find_largest_photo_dim(photos):
max_dim = 0
@ -36,6 +43,44 @@ def find_largest_photo_dim(photos):
return max_dim
def find_largest_photo(photos):
max_p = None
max_area = 0
for p in photos:
if p.width is None:
continue
area = p.width * p.height
if area > max_area:
max_area = area
max_p = p
return max_p
def get_mm_per_unit(resolution_unit):
"""Length of a resolution unit in millimeters.
Uses the values from the EXIF specs in
https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/EXIF.html
Args:
resolution_unit: the resolution unit value given in the EXIF
"""
if resolution_unit == 2: # inch
return 25.4
elif resolution_unit == 3: # cm
return 10
elif resolution_unit == 4: # mm
return 1
elif resolution_unit == 5: # um
return 0.001
else:
log.ODM_WARNING("Unknown EXIF resolution unit value: {}".format(resolution_unit))
return None
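A short usage sketch for the helper above: EXIF FocalPlaneResolutionUnit value 2 means inches, so a FocalPlaneXResolution of 800 px/inch yields 25.4 / 800 ≈ 0.03175 mm per pixel.

mm_per_unit = get_mm_per_unit(2)  # 25.4
print(mm_per_unit / 800.0)        # ≈ 0.03175 mm / px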
class PhotoCorruptedException(Exception):
pass
class ODM_Photo:
"""ODMPhoto - a class for ODMPhotos"""
@ -48,6 +93,7 @@ class ODM_Photo:
self.height = None
self.camera_make = ''
self.camera_model = ''
self.orientation = 1
# Geo tags
self.latitude = None
@ -75,6 +121,14 @@ class ODM_Photo:
self.irradiance_scale_to_si = None
self.utc_time = None
# OPK angles
self.yaw = None
self.pitch = None
self.roll = None
self.omega = None
self.phi = None
self.kappa = None
# DLS
self.sun_sensor = None
self.dls_yaw = None
@ -88,6 +142,10 @@ class ODM_Photo:
self.gps_xy_stddev = None # Dilution of Precision X/Y
self.gps_z_stddev = None # Dilution of Precision Z
# Misc SFM
self.camera_projection = 'brown'
self.focal_ratio = 0.85
# parse values from metadata
self.parse_exif_values(path_file)
@ -107,6 +165,9 @@ class ODM_Photo:
self.latitude = geo_entry.y
self.longitude = geo_entry.x
self.altitude = geo_entry.z
self.omega = geo_entry.omega
self.phi = geo_entry.phi
self.kappa = geo_entry.kappa
self.dls_yaw = geo_entry.omega
self.dls_pitch = geo_entry.phi
self.dls_roll = geo_entry.kappa
@ -117,18 +178,28 @@ class ODM_Photo:
# Disable exifread log
logging.getLogger('exifread').setLevel(logging.CRITICAL)
try:
self.width, self.height = get_image_size.get_image_size(_path_file)
except Exception as e:
raise PhotoCorruptedException(str(e))
tags = {}
xtags = {}
with open(_path_file, 'rb') as f:
tags = exifread.process_file(f, details=False)
try:
if 'Image Make' in tags:
try:
self.camera_make = tags['Image Make'].values
self.camera_make = self.camera_make.strip()
except UnicodeDecodeError:
log.ODM_WARNING("EXIF Image Make might be corrupted")
self.camera_make = "unknown"
if 'Image Model' in tags:
try:
self.camera_model = tags['Image Model'].values
self.camera_model = self.camera_model.strip()
except UnicodeDecodeError:
log.ODM_WARNING("EXIF Image Model might be corrupted")
self.camera_model = "unknown"
@ -140,8 +211,10 @@ class ODM_Photo:
self.latitude = self.dms_to_decimal(tags['GPS GPSLatitude'], tags['GPS GPSLatitudeRef'])
if 'GPS GPSLongitude' in tags and 'GPS GPSLongitudeRef' in tags:
self.longitude = self.dms_to_decimal(tags['GPS GPSLongitude'], tags['GPS GPSLongitudeRef'])
if 'Image Orientation' in tags:
self.orientation = self.int_value(tags['Image Orientation'])
except (IndexError, ValueError) as e:
log.ODM_WARNING("Cannot read basic EXIF tags for %s: %s" % (_path_file, str(e)))
log.ODM_WARNING("Cannot read basic EXIF tags for %s: %s" % (self.filename, str(e)))
try:
if 'Image Tag 0xC61A' in tags:
@ -183,96 +256,202 @@ class ODM_Photo:
epoch = timezone.localize(datetime.utcfromtimestamp(0))
self.utc_time = (timezone.localize(utc_time) - epoch).total_seconds() * 1000.0
except Exception as e:
log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (_path_file, str(e)))
log.ODM_WARNING("Cannot read extended EXIF tags for %s: %s" % (self.filename, str(e)))
# Warn if GPS coordinates are suspiciously wrong
if self.latitude is not None and self.latitude == 0 and \
self.longitude is not None and self.longitude == 0:
log.ODM_WARNING("%s has GPS position (0,0), possibly corrupted" % self.filename)
# Extract XMP tags
f.seek(0)
xmp = self.get_xmp(f)
for tags in xmp:
for xtags in xmp:
try:
band_name = self.get_xmp_tag(tags, ['Camera:BandName', '@Camera:BandName'])
band_name = self.get_xmp_tag(xtags, ['Camera:BandName', '@Camera:BandName'])
if band_name is not None:
self.band_name = band_name.replace(" ", "")
self.set_attr_from_xmp_tag('band_index', tags, [
self.set_attr_from_xmp_tag('band_index', xtags, [
'DLS:SensorId', # Micasense RedEdge
'@Camera:RigCameraIndex', # Parrot Sequoia, Sentera 21244-00_3.2MP-GS-0001
'Camera:RigCameraIndex', # MicaSense Altum
])
self.set_attr_from_xmp_tag('radiometric_calibration', tags, [
self.set_attr_from_xmp_tag('radiometric_calibration', xtags, [
'MicaSense:RadiometricCalibration',
])
self.set_attr_from_xmp_tag('vignetting_center', tags, [
self.set_attr_from_xmp_tag('vignetting_center', xtags, [
'Camera:VignettingCenter',
'Sentera:VignettingCenter',
])
self.set_attr_from_xmp_tag('vignetting_polynomial', tags, [
self.set_attr_from_xmp_tag('vignetting_polynomial', xtags, [
'Camera:VignettingPolynomial',
'Sentera:VignettingPolynomial',
])
self.set_attr_from_xmp_tag('horizontal_irradiance', tags, [
self.set_attr_from_xmp_tag('horizontal_irradiance', xtags, [
'Camera:HorizontalIrradiance'
], float)
self.set_attr_from_xmp_tag('irradiance_scale_to_si', tags, [
self.set_attr_from_xmp_tag('irradiance_scale_to_si', xtags, [
'Camera:IrradianceScaleToSIUnits'
], float)
self.set_attr_from_xmp_tag('sun_sensor', tags, [
self.set_attr_from_xmp_tag('sun_sensor', xtags, [
'Camera:SunSensor',
], float)
self.set_attr_from_xmp_tag('spectral_irradiance', tags, [
self.set_attr_from_xmp_tag('spectral_irradiance', xtags, [
'Camera:SpectralIrradiance',
'Camera:Irradiance',
], float)
self.set_attr_from_xmp_tag('capture_uuid', tags, [
'@drone-dji:CaptureUUID'
self.set_attr_from_xmp_tag('capture_uuid', xtags, [
'@drone-dji:CaptureUUID', # DJI
'@Camera:ImageUniqueID', # sentera 6x
])
# Camera make / model for some cameras is stored in the XMP
if self.camera_make == '':
self.set_attr_from_xmp_tag('camera_make', xtags, [
'@tiff:Make'
])
if self.camera_model == '':
self.set_attr_from_xmp_tag('camera_model', xtags, [
'@tiff:Model'
])
# DJI GPS tags
self.set_attr_from_xmp_tag('longitude', xtags, [
'@drone-dji:Longitude'
], float)
self.set_attr_from_xmp_tag('latitude', xtags, [
'@drone-dji:Latitude'
], float)
self.set_attr_from_xmp_tag('altitude', xtags, [
'@drone-dji:AbsoluteAltitude'
], float)
# Phantom 4 RTK
if '@drone-dji:RtkStdLon' in tags:
y = float(self.get_xmp_tag(tags, '@drone-dji:RtkStdLon'))
x = float(self.get_xmp_tag(tags, '@drone-dji:RtkStdLat'))
if '@drone-dji:RtkStdLon' in xtags:
y = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdLon'))
x = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdLat'))
self.gps_xy_stddev = max(x, y)
if '@drone-dji:RtkStdHgt' in tags:
self.gps_z_stddev = float(self.get_xmp_tag(tags, '@drone-dji:RtkStdHgt'))
if '@drone-dji:RtkStdHgt' in xtags:
self.gps_z_stddev = float(self.get_xmp_tag(xtags, '@drone-dji:RtkStdHgt'))
else:
self.set_attr_from_xmp_tag('gps_xy_stddev', tags, [
self.set_attr_from_xmp_tag('gps_xy_stddev', xtags, [
'@Camera:GPSXYAccuracy',
'GPSXYAccuracy'
], float)
self.set_attr_from_xmp_tag('gps_z_stddev', tags, [
self.set_attr_from_xmp_tag('gps_z_stddev', xtags, [
'@Camera:GPSZAccuracy',
'GPSZAccuracy'
], float)
if 'DLS:Yaw' in tags:
self.set_attr_from_xmp_tag('dls_yaw', tags, ['DLS:Yaw'], float)
self.set_attr_from_xmp_tag('dls_pitch', tags, ['DLS:Pitch'], float)
self.set_attr_from_xmp_tag('dls_roll', tags, ['DLS:Roll'], float)
except Exception as e:
log.ODM_WARNING("Cannot read XMP tags for %s: %s" % (_path_file, str(e)))
if 'DLS:Yaw' in xtags:
self.set_attr_from_xmp_tag('dls_yaw', xtags, ['DLS:Yaw'], float)
self.set_attr_from_xmp_tag('dls_pitch', xtags, ['DLS:Pitch'], float)
self.set_attr_from_xmp_tag('dls_roll', xtags, ['DLS:Roll'], float)
camera_projection = self.get_xmp_tag(xtags, ['@Camera:ModelType', 'Camera:ModelType'])
if camera_projection is not None:
camera_projection = camera_projection.lower()
if camera_projection in projections:
self.camera_projection = camera_projection
# self.set_attr_from_xmp_tag('center_wavelength', tags, [
# OPK
self.set_attr_from_xmp_tag('yaw', xtags, ['@drone-dji:FlightYawDegree', '@Camera:Yaw', 'Camera:Yaw'], float)
self.set_attr_from_xmp_tag('pitch', xtags, ['@drone-dji:GimbalPitchDegree', '@Camera:Pitch', 'Camera:Pitch'], float)
self.set_attr_from_xmp_tag('roll', xtags, ['@drone-dji:GimbalRollDegree', '@Camera:Roll', 'Camera:Roll'], float)
# Normalize YPR conventions (assuming nadir camera)
# Yaw: 0 --> top of image points north
# Yaw: 90 --> top of image points east
# Yaw: 270 --> top of image points west
# Pitch: 0 --> nadir camera
# Pitch: 90 --> camera is looking forward
# Roll: 0 (assuming gimbal)
if self.has_ypr():
if self.camera_make.lower() in ['dji', 'hasselblad']:
self.pitch = 90 + self.pitch
if self.camera_make.lower() == 'sensefly':
self.roll *= -1
except Exception as e:
log.ODM_WARNING("Cannot read XMP tags for %s: %s" % (self.filename, str(e)))
# self.set_attr_from_xmp_tag('center_wavelength', xtags, [
# 'Camera:CentralWavelength'
# ], float)
# self.set_attr_from_xmp_tag('bandwidth', tags, [
# self.set_attr_from_xmp_tag('bandwidth', xtags, [
# 'Camera:WavelengthFWHM'
# ], float)
self.width, self.height = get_image_size.get_image_size(_path_file)
# Sanitize band name since we use it in folder paths
self.band_name = re.sub('[^A-Za-z0-9]+', '', self.band_name)
self.compute_focal(tags, xtags)
self.compute_opk()
def compute_focal(self, tags, xtags):
try:
self.focal_ratio = self.extract_focal(self.camera_make, self.camera_model, tags, xtags)
except (IndexError, ValueError) as e:
log.ODM_WARNING("Cannot extract focal ratio for %s: %s" % (self.filename, str(e)))
def extract_focal(self, make, model, tags, xtags):
if make != "unknown":
# remove duplicate 'make' information in 'model'
model = model.replace(make, "")
sensor_string = (make.strip() + " " + model.strip()).strip().lower()
sensor_width = None
if ("EXIF FocalPlaneResolutionUnit" in tags and "EXIF FocalPlaneXResolution" in tags):
resolution_unit = self.float_value(tags["EXIF FocalPlaneResolutionUnit"])
mm_per_unit = get_mm_per_unit(resolution_unit)
if mm_per_unit:
pixels_per_unit = self.float_value(tags["EXIF FocalPlaneXResolution"])
if pixels_per_unit <= 0 and "EXIF FocalPlaneYResolution" in tags:
pixels_per_unit = self.float_value(tags["EXIF FocalPlaneYResolution"])
if pixels_per_unit > 0 and self.width is not None:
units_per_pixel = 1 / pixels_per_unit
sensor_width = self.width * units_per_pixel * mm_per_unit
focal_35 = None
focal = None
if "EXIF FocalLengthIn35mmFilm" in tags:
focal_35 = self.float_value(tags["EXIF FocalLengthIn35mmFilm"])
if "EXIF FocalLength" in tags:
focal = self.float_value(tags["EXIF FocalLength"])
if focal is None and "@aux:Lens" in xtags:
lens = self.get_xmp_tag(xtags, ["@aux:Lens"])
matches = re.search('([\d\.]+)mm', str(lens))
if matches:
focal = float(matches.group(1))
if focal_35 is not None and focal_35 > 0:
focal_ratio = focal_35 / 36.0 # 35mm film produces 36x24mm pictures.
else:
if not sensor_width:
sensor_width = sensor_data().get(sensor_string, None)
if sensor_width and focal:
focal_ratio = focal / sensor_width
else:
focal_ratio = 0.85
return focal_ratio
def set_attr_from_xmp_tag(self, attr, xmp_tags, tags, cast=None):
v = self.get_xmp_tag(xmp_tags, tags)
if v is not None:
@ -479,5 +658,137 @@ class ODM_Photo:
return None
def override_gps_dop(self, dop):
self.gps_xy_stddev = self.gps_z_stddev = dop
def override_camera_projection(self, camera_projection):
if camera_projection in projections:
self.camera_projection = camera_projection
def is_thermal(self):
return self.band_name.upper() in ["LWIR"] # TODO: more?
# Added to support the M2EA camera sensor
if self.camera_make == "DJI":
return self.camera_model == "MAVIC2-ENTERPRISE-ADVANCED" and self.width == 640 and self.height == 512
return self.band_name.upper() in ["LWIR"] # TODO: more?
def camera_id(self):
return " ".join(
[
"v2",
self.camera_make.strip(),
self.camera_model.strip(),
str(int(self.width)),
str(int(self.height)),
self.camera_projection,
str(float(self.focal_ratio))[:6],
]
).lower()
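An illustrative reconstruction (hypothetical DJI values) of the identifier built above; every field is lowercased and the focal ratio is truncated to six characters:

parts = ["v2", "DJI".strip(), "FC330".strip(), str(int(4000)), str(int(3000)), "brown", str(float(0.6666667))[:6]]
print(" ".join(parts).lower())  # v2 dji fc330 4000 3000 brown 0.6666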
def to_opensfm_exif(self):
capture_time = 0.0
if self.utc_time is not None:
capture_time = self.utc_time / 1000.0
gps = {}
if self.latitude is not None and self.longitude is not None:
gps['latitude'] = self.latitude
gps['longitude'] = self.longitude
if self.altitude is not None:
gps['altitude'] = self.altitude
else:
gps['altitude'] = 0.0
dop = self.get_gps_dop()
if dop is None:
dop = 10.0 # Default
gps['dop'] = dop
d = {
"make": self.camera_make,
"model": self.camera_model,
"width": self.width,
"height": self.height,
"projection_type": self.camera_projection,
"focal_ratio": self.focal_ratio,
"orientation": self.orientation,
"capture_time": capture_time,
"gps": gps,
"camera": self.camera_id()
}
if self.has_opk():
d['opk'] = {
'omega': self.omega,
'phi': self.phi,
'kappa': self.kappa
}
return d
def has_ypr(self):
return self.yaw is not None and \
self.pitch is not None and \
self.roll is not None
def has_opk(self):
return self.omega is not None and \
self.phi is not None and \
self.kappa is not None
def has_geo(self):
return self.latitude is not None and \
self.longitude is not None
def compute_opk(self):
if self.has_ypr() and self.has_geo():
y, p, r = math.radians(self.yaw), math.radians(self.pitch), math.radians(self.roll)
# Ref: New Calibration and Computing Method for Direct
# Georeferencing of Image and Scanner Data Using the
# Position and Angular Data of an Hybrid Inertial Navigation System
# by Manfred Bäumker
# YPR rotation matrix
cnb = np.array([[ math.cos(y) * math.cos(p), math.cos(y) * math.sin(p) * math.sin(r) - math.sin(y) * math.cos(r), math.cos(y) * math.sin(p) * math.cos(r) + math.sin(y) * math.sin(r)],
[ math.sin(y) * math.cos(p), math.sin(y) * math.sin(p) * math.sin(r) + math.cos(y) * math.cos(r), math.sin(y) * math.sin(p) * math.cos(r) - math.cos(y) * math.sin(r)],
[ -math.sin(p), math.cos(p) * math.sin(r), math.cos(p) * math.cos(r)],
])
# Convert between image and body coordinates
# Top of image pixels point to flying direction
# and camera is looking down.
# We might need to change this if we want different
# camera mount orientations (e.g. backward or sideways)
# (Swap X/Y, flip Z)
cbb = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, -1]])
delta = 1e-7
alt = self.altitude if self.altitude is not None else 0.0
p1 = np.array(ecef_from_lla(self.latitude + delta, self.longitude, alt))
p2 = np.array(ecef_from_lla(self.latitude - delta, self.longitude, alt))
xnp = p1 - p2
m = np.linalg.norm(xnp)
if m == 0:
log.ODM_WARNING("Cannot compute OPK angles, divider = 0")
return
# Unit vector pointing north
xnp /= m
znp = np.array([0, 0, -1]).T
ynp = np.cross(znp, xnp)
cen = np.array([xnp, ynp, znp]).T
# OPK rotation matrix
ceb = cen.dot(cnb).dot(cbb)
self.omega = math.degrees(math.atan2(-ceb[1][2], ceb[2][2]))
self.phi = math.degrees(math.asin(ceb[0][2]))
self.kappa = math.degrees(math.atan2(-ceb[0][1], ceb[0][0]))
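Restating the angle extraction above in math form (zero-based indices matching the code, with the combined rotation $C^E_B = C^E_N \, C^N_B \, C^B_b$):

$$\omega = \operatorname{atan2}\!\left(-C^E_B[1][2],\; C^E_B[2][2]\right), \qquad \phi = \arcsin\!\left(C^E_B[0][2]\right), \qquad \kappa = \operatorname{atan2}\!\left(-C^E_B[0][1],\; C^E_B[0][0]\right)$$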

View file

@ -1,4 +1,4 @@
import os, sys, shutil, tempfile, json, math
import os, sys, shutil, tempfile, math, json
from opendm import system
from opendm import log
from opendm import context
@ -7,6 +7,8 @@ from opendm import entwine
from opendm import io
from opendm.concurrency import parallel_map
from opendm.utils import double_quote
from opendm.boundary import as_polygon, as_geojson
from opendm.dem.pdal import run_pipeline
def ply_info(input_ply):
if not os.path.exists(input_ply):
@ -38,7 +40,8 @@ def ply_info(input_ply):
return {
'has_normals': has_normals,
'vertex_count': vertex_count,
'has_views': has_views
'has_views': has_views,
'header_lines': i + 1
}
@ -68,7 +71,7 @@ def split(input_point_cloud, outdir, filename_template, capacity, dims=None):
return [os.path.join(outdir, f) for f in os.listdir(outdir)]
def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, verbose=False, max_concurrency=1):
def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, boundary=None, verbose=False, max_concurrency=1):
"""
Filters a point cloud
"""
@ -76,98 +79,31 @@ def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=
log.ODM_ERROR("{} does not exist. The program will now exit.".format(input_point_cloud))
sys.exit(1)
if (standard_deviation <= 0 or meank <= 0) and sample_radius <= 0:
log.ODM_INFO("Skipping point cloud filtering")
# if using the option `--pc-filter 0`, we need copy input_point_cloud
shutil.copy(input_point_cloud, output_point_cloud)
return
filters = []
args = [
'--input "%s"' % input_point_cloud,
'--output "%s"' % output_point_cloud,
'--concurrency %s' % max_concurrency,
'--verbose' if verbose else '',
]
if sample_radius > 0:
log.ODM_INFO("Sampling points around a %sm radius" % sample_radius)
filters.append('sample')
args.append('--radius %s' % sample_radius)
if standard_deviation > 0 and meank > 0:
log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation))
filters.append('outlier')
args.append('--meank %s' % meank)
args.append('--std %s' % standard_deviation)
if boundary is not None:
log.ODM_INFO("Boundary {}".format(boundary))
fd, boundary_json_file = tempfile.mkstemp(suffix='.boundary.json')
os.close(fd)
with open(boundary_json_file, 'w') as f:
f.write(as_geojson(boundary))
args.append('--boundary "%s"' % boundary_json_file)
if len(filters) > 0:
filters.append('range')
info = ply_info(input_point_cloud)
dims = "x=float,y=float,z=float,"
if info['has_normals']:
dims += "nx=float,ny=float,nz=float,"
dims += "red=uchar,blue=uchar,green=uchar"
if info['has_views']:
dims += ",views=uchar"
if info['vertex_count'] == 0:
log.ODM_ERROR("Cannot read vertex count for {}".format(input_point_cloud))
sys.exit(1)
# Do we need to split this?
VERTEX_THRESHOLD = 250000
should_split = max_concurrency > 1 and info['vertex_count'] > VERTEX_THRESHOLD*2
if should_split:
partsdir = os.path.join(os.path.dirname(output_point_cloud), "parts")
if os.path.exists(partsdir):
log.ODM_WARNING("Removing existing directory %s" % partsdir)
shutil.rmtree(partsdir)
point_cloud_submodels = split(input_point_cloud, partsdir, "part.ply", capacity=VERTEX_THRESHOLD, dims=dims)
def run_filter(pcs):
# Recurse
filter(pcs['path'], io.related_file_path(pcs['path'], postfix="_filtered"),
standard_deviation=standard_deviation,
meank=meank,
sample_radius=sample_radius,
verbose=verbose,
max_concurrency=1)
# Filter
parallel_map(run_filter, [{'path': p} for p in point_cloud_submodels], max_concurrency)
# Merge
log.ODM_INFO("Merging %s point cloud chunks to %s" % (len(point_cloud_submodels), output_point_cloud))
filtered_pcs = [io.related_file_path(pcs, postfix="_filtered") for pcs in point_cloud_submodels]
#merge_ply(filtered_pcs, output_point_cloud, dims)
fast_merge_ply(filtered_pcs, output_point_cloud)
if os.path.exists(partsdir):
shutil.rmtree(partsdir)
else:
# Process point cloud (or a point cloud submodel) in a single step
filterArgs = {
'inputFile': input_point_cloud,
'outputFile': output_point_cloud,
'stages': " ".join(filters),
'dims': dims
}
cmd = ("pdal translate -i \"{inputFile}\" "
"-o \"{outputFile}\" "
"{stages} "
"--writers.ply.sized_types=false "
"--writers.ply.storage_mode=\"little endian\" "
"--writers.ply.dims=\"{dims}\" "
"").format(**filterArgs)
if 'sample' in filters:
cmd += "--filters.sample.radius={} ".format(sample_radius)
if 'outlier' in filters:
cmd += ("--filters.outlier.method=\"statistical\" "
"--filters.outlier.mean_k={} "
"--filters.outlier.multiplier={} ").format(meank, standard_deviation)
if 'range' in filters:
# Remove outliers
cmd += "--filters.range.limits=\"Classification![7:7]\" "
system.run(cmd)
system.run('"%s" %s' % (context.fpcfilter_path, " ".join(args)))
if not os.path.exists(output_point_cloud):
log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))

View file

@ -27,4 +27,4 @@ def add_pseudo_georeferencing(geotiff):
dst_ds = None
except Exception as e:
log.ODM_WARNING("Cannot add psuedo georeferencing to %s (%s), skipping..." % (geotiff, str(e)))
log.ODM_WARNING("Cannot add pseudo georeferencing to %s (%s), skipping..." % (geotiff, str(e)))

View file

@ -341,6 +341,10 @@ class Task:
if os.path.exists(self.path("gcp_list.txt")):
images.append(self.path("gcp_list.txt"))
# Add GEO (optional)
if os.path.exists(self.path("geo.txt")):
images.append(self.path("geo.txt"))
# Add seed file
images.append(seed_file)

Binary file not shown. Before: 5.8 KiB. After: 5.6 KiB.

View file

@ -46,6 +46,10 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset
[0, 0, 0, 1]])
raster = None
pseudo = True
# Couldn't get an SRS?
if utm_srs is None:
return None
crstrans = transformer(CRS.from_proj4(utm_srs), CRS.from_epsg("4326"))

View file

@ -1,4 +1,5 @@
from opendm import log
from opendm.thermal_tools import dji_unpack
import cv2
def resize_to_match(image, match_photo = None):
@ -17,16 +18,16 @@ def resize_to_match(image, match_photo = None):
interpolation=cv2.INTER_LANCZOS4)
return image
def dn_to_temperature(photo, image):
def dn_to_temperature(photo, image, dataset_tree):
"""
Convert Digital Number values to temperature (C) values
:param photo ODM_Photo
:param image numpy array containing image data
:param resize_to_photo ODM_Photo that photo should be resized to (to match its dimensions)
:param dataset_tree path to original source image to read data using PIL for DJI thermal photos
:return numpy array with temperature (C) image values
"""
image = image.astype("float32")
# Handle thermal bands
if photo.is_thermal():
@ -34,12 +35,18 @@ def dn_to_temperature(photo, image):
# The following will work for MicaSense Altum cameras
# but not necessarily for others
if photo.camera_make == "MicaSense" and photo.camera_model == "Altum":
image = image.astype("float32")
image -= (273.15 * 100.0) # Convert Kelvin to Celsius
image *= 0.01
return image
elif photo.camera_make == "DJI" and photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
image = dji_unpack.extract_temperatures_dji(photo, image, dataset_tree)
image = image.astype("float32")
return image
else:
log.ODM_WARNING("Unsupported camera [%s %s], thermal band will have digital numbers." % (photo.camera_make, photo.camera_model))
else:
image = image.astype("float32")
log.ODM_WARNING("Tried to radiometrically calibrate a non-thermal image with temperature values (%s)" % photo.filename)
return image
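A worked sketch of the Altum branch above: raw DNs store temperature in centi-Kelvin, so a DN of 30315 becomes (30315 - 27315) * 0.01 = 30.0 °C.

import numpy as np
image = np.array([30315], dtype="float32")
image -= 273.15 * 100.0
image *= 0.01
print(image[0])  # 30.0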

View file

@ -0,0 +1,50 @@
from PIL import Image
import numpy as np
from opendm import system
from opendm import log
from opendm.thermal_tools.thermal_utils import sensor_vals_to_temp
def extract_temperatures_dji(photo, image, dataset_tree):
"""Extracts the DJI-encoded thermal image as 2D floating-point numpy array with temperatures in degC.
The raw sensor values are obtained using the sample binaries provided in the official Thermal SDK by DJI.
The executable file is run and generates a 16 bit unsigned RAW image with Little Endian byte order.
Link to DJI Forum post: https://forum.dji.com/forum.php?mod=redirect&goto=findpost&ptid=230321&pid=2389016
"""
# Hardcoded metadata with mean values
# Added to support extracting RJPEG thermal data from the DJI M2EA
meta = {
"Emissivity": 0.95,
"ObjectDistance": 50, #This is mean value of flights for better results. Need to be changed later, or improved by bypassing options from task broker
"AtmosphericTemperature": 20,
"ReflectedApparentTemperature": 30,
"IRWindowTemperature": 20,
"IRWindowTransmission": 1,
"RelativeHumidity": 40,
"PlanckR1": 21106.77,
"PlanckB": 1501,
"PlanckF": 1,
"PlanckO": -7340,
"PlanckR2": 0.012545258,
}
if photo.camera_model == "MAVIC2-ENTERPRISE-ADVANCED":
# Adding support for MAVIC2-ENTERPRISE-ADVANCED Camera images
im = Image.open(f"{dataset_tree}/{photo.filename}")
# concatenate APP3 chunks of data
a = im.applist[3][1]
for i in range(4, 14):
a += im.applist[i][1]
# create image from bytes
try:
img = Image.frombytes("I;16L", (640, 512), a)
except ValueError as e:
log.ODM_ERROR("Error during extracting temperature values for file %s : %s" % photo.filename, e)
else:
log.ODM_DEBUG("Only DJI M2EA currently supported, please wait for new updates")
return image
# Extract raw sensor values from generated image into numpy array
raw_sensor_np = np.array(img)
## extracting the temperatures from thermal images
thermal_np = sensor_vals_to_temp(raw_sensor_np, **meta)
return thermal_np

View file

@ -0,0 +1,271 @@
"""
THIS IS WIP, DON'T USE THIS FILE, IT IS HERE FOR FURTHER IMPROVEMENT
Tools for extracting thermal data from FLIR images.
Derived from https://bitbucket.org/nimmerwoner/flyr/src/master/
"""
import os
from io import BufferedIOBase, BytesIO
from typing import BinaryIO, Dict, Optional, Tuple, Union
import numpy as np
from PIL import Image
# Constants
SEGMENT_SEP = b"\xff"
APP1_MARKER = b"\xe1"
MAGIC_FLIR_DEF = b"FLIR\x00"
CHUNK_APP1_BYTES_COUNT = len(APP1_MARKER)
CHUNK_LENGTH_BYTES_COUNT = 2
CHUNK_MAGIC_BYTES_COUNT = len(MAGIC_FLIR_DEF)
CHUNK_SKIP_BYTES_COUNT = 1
CHUNK_NUM_BYTES_COUNT = 1
CHUNK_TOT_BYTES_COUNT = 1
CHUNK_PARTIAL_METADATA_LENGTH = CHUNK_APP1_BYTES_COUNT + CHUNK_LENGTH_BYTES_COUNT + CHUNK_MAGIC_BYTES_COUNT
CHUNK_METADATA_LENGTH = (
CHUNK_PARTIAL_METADATA_LENGTH + CHUNK_SKIP_BYTES_COUNT + CHUNK_NUM_BYTES_COUNT + CHUNK_TOT_BYTES_COUNT
)
def unpack(path_or_stream: Union[str, BinaryIO]) -> np.ndarray:
"""Unpacks the FLIR image, meaning that it will return the thermal data embedded in the image.
Parameters
----------
path_or_stream : Union[str, BinaryIO]
Either a path (string) to a FLIR file, or a byte stream such as
BytesIO or file opened as `open(file_path, "rb")`.
Returns
-------
FlyrThermogram
When successful, a FlyrThermogram object containing thermogram data.
"""
if isinstance(path_or_stream, str) and os.path.isfile(path_or_stream):
with open(path_or_stream, "rb") as flirh:
return unpack(flirh)
elif isinstance(path_or_stream, BufferedIOBase):
stream = path_or_stream
flir_app1_stream = extract_flir_app1(stream)
flir_records = parse_flir_app1(flir_app1_stream)
raw_np = parse_thermal(flir_app1_stream, flir_records)
return raw_np
else:
raise ValueError("Incorrect input")
def extract_flir_app1(stream: BinaryIO) -> BinaryIO:
"""Extracts the FLIR APP1 bytes.
Parameters
---------
stream : BinaryIO
A full bytes stream of a JPEG file, expected to be a FLIR file.
Raises
------
ValueError
When the file is invalid in one of the following ways, a
ValueError is thrown.
* File is not a JPEG
* A FLIR chunk number occurs more than once
* The total chunks count is inconsistent over multiple chunks
* No APP1 segments are successfully parsed
Returns
-------
BinaryIO
A bytes stream of the APP1 FLIR segments
"""
# Check JPEG-ness
_ = stream.read(2)
chunks_count: Optional[int] = None
chunks: Dict[int, bytes] = {}
while True:
b = stream.read(1)
if b == b"":
break
if b != SEGMENT_SEP:
continue
parsed_chunk = parse_flir_chunk(stream, chunks_count)
if not parsed_chunk:
continue
chunks_count, chunk_num, chunk = parsed_chunk
chunk_exists = chunks.get(chunk_num, None) is not None
if chunk_exists:
raise ValueError("Invalid FLIR: duplicate chunk number")
chunks[chunk_num] = chunk
# Encountered all chunks, break out of loop to process found metadata
if chunk_num == chunks_count:
break
if chunks_count is None:
raise ValueError("Invalid FLIR: no metadata encountered")
flir_app1_bytes = b""
for chunk_num in range(chunks_count + 1):
flir_app1_bytes += chunks[chunk_num]
flir_app1_stream = BytesIO(flir_app1_bytes)
flir_app1_stream.seek(0)
return flir_app1_stream
def parse_flir_chunk(stream: BinaryIO, chunks_count: Optional[int]) -> Optional[Tuple[int, int, bytes]]:
"""Parse flir chunk."""
# Parse the chunk header. Headers are as follows (definition with example):
#
# \xff\xe1<length: 2 bytes>FLIR\x00\x01<chunk nr: 1 byte><chunk count: 1 byte>
# \xff\xe1\xff\xfeFLIR\x00\x01\x01\x0b
#
# Meaning: Exif APP1, 65534 long, FLIR chunk 1 out of 12
marker = stream.read(CHUNK_APP1_BYTES_COUNT)
length_bytes = stream.read(CHUNK_LENGTH_BYTES_COUNT)
length = int.from_bytes(length_bytes, "big")
length -= CHUNK_METADATA_LENGTH
magic_flir = stream.read(CHUNK_MAGIC_BYTES_COUNT)
if not (marker == APP1_MARKER and magic_flir == MAGIC_FLIR_DEF):
# Seek back to just after byte b and continue searching for chunks
stream.seek(-len(marker) - len(length_bytes) - len(magic_flir), 1)
return None
stream.seek(1, 1) # skip 1 byte, unsure what it is for
chunk_num = int.from_bytes(stream.read(CHUNK_NUM_BYTES_COUNT), "big")
chunks_tot = int.from_bytes(stream.read(CHUNK_TOT_BYTES_COUNT), "big")
# Remember total chunks to verify metadata consistency
if chunks_count is None:
chunks_count = chunks_tot
if ( # Check whether chunk metadata is consistent
chunks_tot is None or chunk_num < 0 or chunk_num > chunks_tot or chunks_tot != chunks_count
):
raise ValueError(f"Invalid FLIR: inconsistent total chunks, should be 0 or greater, but is {chunks_tot}")
return chunks_tot, chunk_num, stream.read(length + 1)
def parse_thermal(stream: BinaryIO, records: Dict[int, Tuple[int, int, int, int]]) -> np.ndarray:
"""Parse thermal."""
RECORD_IDX_RAW_DATA = 1
raw_data_md = records[RECORD_IDX_RAW_DATA]
_, _, raw_data = parse_raw_data(stream, raw_data_md)
return raw_data
def parse_flir_app1(stream: BinaryIO) -> Dict[int, Tuple[int, int, int, int]]:
"""Parse flir app1."""
# 0x00 - string[4] file format ID = "FFF\0"
# 0x04 - string[16] file creator: seen "\0","MTX IR\0","CAMCTRL\0"
# 0x14 - int32u file format version = 100
# 0x18 - int32u offset to record directory
# 0x1c - int32u number of entries in record directory
# 0x20 - int32u next free index ID = 2
# 0x24 - int16u swap pattern = 0 (?)
# 0x28 - int16u[7] spares
# 0x34 - int32u[2] reserved
# 0x3c - int32u checksum
# 1. Read 0x40 bytes and verify that the contents equal AFF\0 or FFF\0
_ = stream.read(4)
# 2. Read FLIR record directory metadata (ref 3)
stream.seek(16, 1)
_ = int.from_bytes(stream.read(4), "big")
record_dir_offset = int.from_bytes(stream.read(4), "big")
record_dir_entries_count = int.from_bytes(stream.read(4), "big")
stream.seek(28, 1)
_ = int.from_bytes(stream.read(4), "big")
# 3. Read record directory (which is a FLIR record entry repeated
# `record_dir_entries_count` times)
stream.seek(record_dir_offset)
record_dir_stream = BytesIO(stream.read(32 * record_dir_entries_count))
# First parse the record metadata
record_details: Dict[int, Tuple[int, int, int, int]] = {}
for record_nr in range(record_dir_entries_count):
record_dir_stream.seek(0)
details = parse_flir_record_metadata(stream, record_nr)
if details:
record_details[details[1]] = details
# Then parse the actual records
# for (entry_idx, type, offset, length) in record_details:
# parse_record = record_parsers[type]
# stream.seek(offset)
# record = BytesIO(stream.read(length + 36)) # + 36 needed to find end
# parse_record(record, offset, length)
return record_details
def parse_flir_record_metadata(stream: BinaryIO, record_nr: int) -> Optional[Tuple[int, int, int, int]]:
"""Parse flir record metadata."""
# FLIR record entry (ref 3):
# 0x00 - int16u record type
# 0x02 - int16u record subtype: RawData 1=BE, 2=LE, 3=PNG; 1 for other record types
# 0x04 - int32u record version: seen 0x64,0x66,0x67,0x68,0x6f,0x104
# 0x08 - int32u index id = 1
# 0x0c - int32u record offset from start of FLIR data
# 0x10 - int32u record length
# 0x14 - int32u parent = 0 (?)
# 0x18 - int32u object number = 0 (?)
# 0x1c - int32u checksum: 0 for no checksum
entry = 32 * record_nr
stream.seek(entry)
record_type = int.from_bytes(stream.read(2), "big")
if record_type < 1:
return None
_ = int.from_bytes(stream.read(2), "big")
_ = int.from_bytes(stream.read(4), "big")
_ = int.from_bytes(stream.read(4), "big")
record_offset = int.from_bytes(stream.read(4), "big")
record_length = int.from_bytes(stream.read(4), "big")
_ = int.from_bytes(stream.read(4), "big")
_ = int.from_bytes(stream.read(4), "big")
_ = int.from_bytes(stream.read(4), "big")
return (entry, record_type, record_offset, record_length)
def parse_raw_data(stream: BinaryIO, metadata: Tuple[int, int, int, int]):
"""Parse raw data."""
(_, _, offset, length) = metadata
stream.seek(offset)
stream.seek(2, 1)
width = int.from_bytes(stream.read(2), "little")
height = int.from_bytes(stream.read(2), "little")
stream.seek(offset + 32)
# Read the bytes with the raw thermal data and decode using PIL
thermal_bytes = stream.read(length)
thermal_stream = BytesIO(thermal_bytes)
thermal_img = Image.open(thermal_stream)
thermal_np = np.array(thermal_img)
# Check shape
if thermal_np.shape != (height, width):
msg = "Invalid FLIR: metadata's width and height don't match thermal data's actual width\
and height ({} vs ({}, {})"
msg = msg.format(thermal_np.shape, height, width)
raise ValueError(msg)
# FLIR PNG data is in the wrong byte order, fix that
fix_byte_order = np.vectorize(lambda x: (x >> 8) + ((x & 0x00FF) << 8))
thermal_np = fix_byte_order(thermal_np)
return width, height, thermal_np
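Note that np.vectorize loops in Python, element by element. For the 16-bit data that FLIR's embedded PNG decodes to, a single byteswap is equivalent and much faster; a sketch, assuming uint16 input:

import numpy as np

def fix_byte_order_fast(thermal_np):
    # same as (x >> 8) + ((x & 0x00FF) << 8), applied to all elements at once
    return thermal_np.astype(np.uint16).byteswap()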


@ -0,0 +1,139 @@
"""Thermal Image manipulation utilities."""
"""Based on https://github.com/detecttechnologies/thermal_base"""
import numpy as np
def sensor_vals_to_temp(
raw,
Emissivity=1.0,
ObjectDistance=1,
AtmosphericTemperature=20,
ReflectedApparentTemperature=20,
IRWindowTemperature=20,
IRWindowTransmission=1,
RelativeHumidity=50,
PlanckR1=21106.77,
PlanckB=1501,
PlanckF=1,
PlanckO=-7340,
PlanckR2=0.012545258,
**kwargs,):
"""Convert raw values from the thermographic sensor sensor to temperatures in °C. Tested for Flir and DJI cams."""
# this calculation has been ported to python from https://github.com/gtatters/Thermimage/blob/master/R/raw2temp.R
# a detailed explanation of what is going on here can be found there
# constants
ATA1 = 0.006569
ATA2 = 0.01262
ATB1 = -0.002276
ATB2 = -0.00667
ATX = 1.9
# transmission through window (calibrated)
emiss_wind = 1 - IRWindowTransmission
refl_wind = 0
# transmission through the air
h2o = (RelativeHumidity / 100) * np.exp(
1.5587
+ 0.06939 * (AtmosphericTemperature)
- 0.00027816 * (AtmosphericTemperature) ** 2
+ 0.00000068455 * (AtmosphericTemperature) ** 3
)
tau1 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (1 - ATX) * np.exp(
-np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o))
)
tau2 = ATX * np.exp(-np.sqrt(ObjectDistance / 2) * (ATA1 + ATB1 * np.sqrt(h2o))) + (1 - ATX) * np.exp(
-np.sqrt(ObjectDistance / 2) * (ATA2 + ATB2 * np.sqrt(h2o))
)
# radiance from the environment
raw_refl1 = PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)) - PlanckO
# Reflected component
raw_refl1_attn = (1 - Emissivity) / Emissivity * raw_refl1
# Emission from atmosphere 1
raw_atm1 = (
PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF)) - PlanckO
)
# attenuation for atmospheric 1 emission
raw_atm1_attn = (1 - tau1) / Emissivity / tau1 * raw_atm1
# Emission from window due to its own temp
raw_wind = (
PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (IRWindowTemperature + 273.15)) - PlanckF)) - PlanckO
)
# Component due to window emissivity
raw_wind_attn = (
emiss_wind / Emissivity / tau1 / IRWindowTransmission * raw_wind
)
# Reflection from window due to external objects
raw_refl2 = (
PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (ReflectedApparentTemperature + 273.15)) - PlanckF)) - PlanckO
)
# component due to window reflectivity
raw_refl2_attn = (
refl_wind / Emissivity / tau1 / IRWindowTransmission * raw_refl2
)
# Emission from atmosphere 2
raw_atm2 = (
PlanckR1 / (PlanckR2 * (np.exp(PlanckB / (AtmosphericTemperature + 273.15)) - PlanckF)) - PlanckO
)
# attenuation for atmospheric 2 emission
raw_atm2_attn = (
(1 - tau2) / Emissivity / tau1 / IRWindowTransmission / tau2 * raw_atm2
)
raw_obj = (
raw / Emissivity / tau1 / IRWindowTransmission / tau2
- raw_atm1_attn
- raw_atm2_attn
- raw_wind_attn
- raw_refl1_attn
- raw_refl2_attn
)
val_to_log = PlanckR1 / (PlanckR2 * (raw_obj + PlanckO)) + PlanckF
if any(val_to_log.ravel() < 0):
raise Exception("Image seems to be corrupted")
# temperature from radiance
return PlanckB / np.log(val_to_log) - 273.15
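A usage sketch for the conversion above; the raw frame and calibration values are placeholders, in practice the Planck constants and environmental parameters come from the image's EXIF metadata:

import numpy as np

raw = np.full((512, 640), 13500, dtype=np.float64)  # fake sensor frame
temps = sensor_vals_to_temp(
    raw,
    Emissivity=0.95,
    ObjectDistance=10,
    AtmosphericTemperature=20,
    ReflectedApparentTemperature=20,
)
print(round(float(temps.mean()), 1))  # mean temperature in °C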
def parse_from_exif_str(temp_str):
"""String to float parser."""
# we assume degrees celsius for temperature, metres for length
if isinstance(temp_str, str):
return float(temp_str.split()[0])
return float(temp_str)
def normalize_temp_matrix(thermal_np):
"""Normalize a temperature matrix to the 0-255 uint8 image range."""
num = thermal_np - np.amin(thermal_np)
den = np.amax(thermal_np) - np.amin(thermal_np)
thermal_np = num / den
return thermal_np
def clip_temp_to_roi(thermal_np, thermal_roi_values):
"""
Given an RoI within a temperature matrix, this function clips the temperature values in the entire thermal image.
Temperature values above and below the max/min temperatures within the RoI are clipped to said max/min.
Args:
thermal_np (np.ndarray): Floating point array containing the temperature matrix.
thermal_roi_values (np.ndarray / list): Any iterable containing the temperature values within the RoI.
Returns:
np.ndarray: The clipped temperature matrix.
"""
maximum = np.amax(thermal_roi_values)
minimum = np.amin(thermal_roi_values)
thermal_np[thermal_np > maximum] = maximum
thermal_np[thermal_np < minimum] = minimum
return thermal_np
def scale_with_roi(thermal_np, thermal_roi_values):
"""Alias for clip_temp_to_roi, to be deprecated in the future."""
return clip_temp_to_roi(thermal_np, thermal_roi_values)
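A sketch tying the two helpers together to produce an 8-bit preview scaled to a region of interest (the array and RoI below are illustrative):

import numpy as np

thermal = np.random.uniform(10.0, 90.0, (480, 640))  # fake temperature matrix
roi_vals = thermal[100:200, 100:200]                 # hypothetical RoI
clipped = clip_temp_to_roi(thermal, roi_vals)
preview = (normalize_temp_matrix(clipped) * 255).astype(np.uint8)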


@ -1,12 +1,12 @@
0% 255 0 255
10% 128 0 255
20% 0 0 255
30% 0 128 255
40% 0 255 255
50% 0 255 128
60% 0 255 0
70% 128 255 0
80% 255 255 0
90% 255 128 0
100% 255 0 0
0% 68 1 84
10% 72 36 117
20% 64 67 135
30% 52 95 141
40% 41 120 142
50% 32 144 141
60% 34 168 132
70% 67 191 112
80% 122 210 81
90% 188 223 39
100% 253 231 37
nv 0 0 0 0


@ -1405,7 +1405,7 @@ class GDAL2Tiles(object):
# Not for 'raster' profile
self.scaledquery = True
# How big should be query window be for scaling down
# Later on reset according the chosen resampling algorightm
# Later on reset according the chosen resampling algorithm
self.querysize = 4 * self.tilesize
# Should we use Read on the input file for generating overview tiles?


@ -25,7 +25,6 @@ class ODM_Reconstruction(object):
self.photos = photos
self.georef = None
self.gcp = None
self.geo_file = None
self.multi_camera = self.detect_multi_camera()
def detect_multi_camera(self):
@ -69,7 +68,7 @@ class ODM_Reconstruction(object):
return self.georef is not None
def has_gcp(self):
return self.is_georeferenced() and self.gcp is not None
return self.is_georeferenced() and self.gcp is not None and self.gcp.exists()
def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, output_model_txt_geo, rerun=False):
if not io.file_exists(output_coords_file) or not io.file_exists(output_gcp_file) or rerun:
@ -158,6 +157,12 @@ class ODM_Reconstruction(object):
def get_proj_srs(self):
if self.is_georeferenced():
return self.georef.proj4()
def get_proj_offset(self):
if self.is_georeferenced():
return (self.georef.utm_east_offset, self.georef.utm_north_offset)
else:
return (None, None)
def get_photo(self, filename):
for p in self.photos:
@ -235,6 +240,7 @@ class ODM_Tree(object):
self.opensfm_reconstruction = os.path.join(self.opensfm, 'reconstruction.json')
self.opensfm_reconstruction_nvm = os.path.join(self.opensfm, 'undistorted/reconstruction.nvm')
self.opensfm_geocoords_reconstruction = os.path.join(self.opensfm, 'reconstruction.geocoords.json')
self.opensfm_topocentric_reconstruction = os.path.join(self.opensfm, 'reconstruction.topocentric.json')
# OpenMVS
self.openmvs_model = os.path.join(self.openmvs, 'scene_dense_dense_filtered.ply')
@ -359,6 +365,13 @@ class ODM_Stage:
progressbc.send_update(self.previous_stages_progress() +
(self.delta_progress() / 100.0) * float(progress))
def last_stage(self):
if self.next_stage:
return self.next_stage.last_stage()
else:
return self
def process(self, args, outputs):
raise NotImplementedError


@ -1,28 +1,38 @@
import os, shutil
from opendm import log
from opendm.photo import find_largest_photo_dim
from opendm.photo import find_largest_photo_dims
from osgeo import gdal
from opendm.loghelpers import double_quote
def get_depthmap_resolution(args, photos):
if 'depthmap_resolution_is_set' in args:
# Legacy
log.ODM_WARNING("Legacy option --depthmap-resolution (this might be removed in a future version). Use --pc-quality instead.")
# Override pc-quality
return int(args.depthmap_resolution)
else:
max_dim = find_largest_photo_dim(photos)
max_dims = find_largest_photo_dims(photos)
min_dim = 320 # Never go lower than this
pc_quality_scale = {
'ultra': 0.5,
'high': 0.25,
'medium': 0.125,
'low': 0.0675,
'lowest': 0.03375
}
if max_dims is not None:
w, h = max_dims
max_dim = max(w, h)
if max_dim > 0:
return max(min_dim, int(max_dim * pc_quality_scale[args.pc_quality]))
megapixels = (w * h) / 1e6
multiplier = 1
if megapixels < 6:
multiplier = 2
elif megapixels > 42:
multiplier = 0.5
pc_quality_scale = {
'ultra': 0.5,
'high': 0.25,
'medium': 0.125,
'low': 0.0675,
'lowest': 0.03375
}
return max(min_dim, int(max_dim * pc_quality_scale[args.pc_quality] * multiplier))
else:
log.ODM_WARNING("Cannot compute max image dimensions, going with default depthmap_resolution of 640")
return 640 # Sensible default
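To see the new multiplier logic at work, a worked example with a hypothetical image size:

w, h = 5472, 3648    # ~20 megapixels: between 6 and 42, so multiplier = 1
max_dim = max(w, h)  # 5472
# 'medium' quality: max(320, int(5472 * 0.125 * 1)) = 684 px depthmap resolution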
@ -83,3 +93,12 @@ def copy_paths(paths, destination, rerun):
elif os.path.isdir(p):
shutil.copytree(p, dst_path)
log.ODM_INFO("Copying %s --> %s" % (p, dst_path))
def rm_r(path):
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
elif os.path.exists(path):
os.remove(path)
except:
log.ODM_WARNING("Cannot remove %s" % path)

opendm/vendor/__init__.py vendored 100644

opendm/vendor/gdal_fillnodata.py vendored 100644

@ -0,0 +1,216 @@
#!/usr/bin/env python3
# ******************************************************************************
# $Id: gdal_fillnodata.py 124baa7f71f15396a661014a81b6c5b0c82c8004 2020-10-14 17:29:39 +0300 Idan Miara $
#
# Project: GDAL Python Interface
# Purpose: Application for filling nodata areas in a raster by interpolation
# Author: Frank Warmerdam, warmerdam@pobox.com
#
# ******************************************************************************
# Copyright (c) 2008, Frank Warmerdam
# Copyright (c) 2009-2011, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import sys
from osgeo import gdal
def CopyBand(srcband, dstband):
for line in range(srcband.YSize):
line_data = srcband.ReadRaster(0, line, srcband.XSize, 1)
dstband.WriteRaster(0, line, srcband.XSize, 1, line_data,
buf_type=srcband.DataType)
def Usage():
print("""
gdal_fillnodata [-q] [-md max_distance] [-si smooth_iterations]
[-o name=value] [-b band]
srcfile [-nomask] [-mask filename] [-of format] [-co name=value]* [dstfile]
""")
sys.exit(1)
def main(argv):
max_distance = 100
smoothing_iterations = 0
options = []
quiet_flag = 0
src_filename = None
src_band = 1
dst_filename = None
frmt = 'GTiff'
creation_options = []
mask = 'default'
gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
sys.exit(0)
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-of' or arg == '-f':
i = i + 1
frmt = argv[i]
elif arg == '-co':
i = i + 1
creation_options.append(argv[i])
elif arg == '-q' or arg == '-quiet':
quiet_flag = 1
elif arg == '-si':
i = i + 1
smoothing_iterations = int(argv[i])
elif arg == '-b':
i = i + 1
src_band = int(argv[i])
elif arg == '-md':
i = i + 1
max_distance = float(argv[i])
elif arg == '-nomask':
mask = 'none'
elif arg == '-mask':
i = i + 1
mask = argv[i]
elif arg[:2] == '-h':
Usage()
elif src_filename is None:
src_filename = argv[i]
elif dst_filename is None:
dst_filename = argv[i]
else:
Usage()
i = i + 1
if src_filename is None:
Usage()
# =============================================================================
# Verify we have next gen bindings with the sievefilter method.
# =============================================================================
try:
gdal.FillNodata
except AttributeError:
print('')
print('gdal.FillNodata() not available. You are likely using "old gen"')
print('bindings or an older version of the next gen bindings.')
print('')
sys.exit(1)
# =============================================================================
# Open source file
# =============================================================================
if dst_filename is None:
src_ds = gdal.Open(src_filename, gdal.GA_Update)
else:
src_ds = gdal.Open(src_filename, gdal.GA_ReadOnly)
if src_ds is None:
print('Unable to open %s' % src_filename)
sys.exit(1)
srcband = src_ds.GetRasterBand(src_band)
if mask == 'default':
maskband = srcband.GetMaskBand()
elif mask == 'none':
maskband = None
else:
mask_ds = gdal.Open(mask)
maskband = mask_ds.GetRasterBand(1)
# =============================================================================
# Create output file if one is specified.
# =============================================================================
if dst_filename is not None:
drv = gdal.GetDriverByName(frmt)
dst_ds = drv.Create(dst_filename, src_ds.RasterXSize, src_ds.RasterYSize, 1,
srcband.DataType, creation_options)
wkt = src_ds.GetProjection()
if wkt != '':
dst_ds.SetProjection(wkt)
gt = src_ds.GetGeoTransform(can_return_null=True)
if gt:
dst_ds.SetGeoTransform(gt)
dstband = dst_ds.GetRasterBand(1)
CopyBand(srcband, dstband)
ndv = srcband.GetNoDataValue()
if ndv is not None:
dstband.SetNoDataValue(ndv)
color_interp = srcband.GetColorInterpretation()
dstband.SetColorInterpretation(color_interp)
if color_interp == gdal.GCI_PaletteIndex:
color_table = srcband.GetColorTable()
dstband.SetColorTable(color_table)
else:
dstband = srcband
# =============================================================================
# Invoke algorithm.
# =============================================================================
if quiet_flag:
prog_func = None
else:
prog_func = gdal.TermProgress_nocb
result = gdal.FillNodata(dstband, maskband,
max_distance, smoothing_iterations, options,
callback=prog_func)
src_ds = None
dst_ds = None
mask_ds = None
return result
if __name__ == '__main__':
sys.exit(main(sys.argv))
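The same fill operation can also be driven from Python directly; a sketch, assuming a writable single-band GeoTIFF named dem.tif:

from osgeo import gdal

ds = gdal.Open("dem.tif", gdal.GA_Update)
band = ds.GetRasterBand(1)
gdal.FillNodata(band, band.GetMaskBand(),
                maxSearchDist=100, smoothingIterations=0)
ds = None  # close the dataset and flush changes to disk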


@ -1,8 +1,8 @@
FROM ubuntu:20.04 AS builder
FROM ubuntu:21.04 AS builder
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/src/opensfm" \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
# Prepare directories
@ -21,11 +21,11 @@ RUN bash configure.sh clean
### Use a second image for the final asset to reduce the number and
# size of the layers.
FROM ubuntu:20.04
FROM ubuntu:21.04
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/src/opensfm" \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
WORKDIR /code
@ -40,7 +40,9 @@ COPY --from=builder /usr/local /usr/local
# the -dev packages to save space!
RUN bash configure.sh installruntimedepsonly \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
&& bash run.sh --help \
&& bash -c "eval $(python3 /code/opendm/context.py) && python3 -c 'from opensfm import io, pymap'"
# Entry point
ENTRYPOINT ["python3", "/code/run.py"]


@ -1,2 +0,0 @@
silx>=0.12.0
pyopencl==2021.1.1


@ -11,21 +11,21 @@ laspy==1.7.0
lxml==4.6.1
matplotlib==3.3.3
networkx==2.5
numpy==1.19.4
Pillow==8.0.1
numpy==1.21.1
Pillow==8.3.2
vmem==1.0.1
pyodm==1.5.6
pyodm==1.5.8
pyproj==3.0.0.post1
Pysolar==0.9
pytz==2020.4
PyYAML==5.1
rasterio==1.1.8 ; sys_platform == 'linux' or sys_platform == 'darwin'
rasterio==1.2.3 ; sys_platform == 'linux' or sys_platform == 'darwin'
https://github.com/OpenDroneMap/windows-deps/raw/main/rasterio-1.2.3-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
https://github.com/OpenDroneMap/windows-deps/raw/main/GDAL-3.2.3-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
repoze.lru==0.7
scikit-learn==0.23.2
scikit-image==0.17.2
scikit-learn==0.24.2
scikit-image==0.18.3
scipy==1.5.4
xmltodict==0.12.0
fpdf2==2.2.0rc2
fpdf2==2.4.6
Shapely==1.7.1

run.py

@ -6,22 +6,28 @@ if sys.version_info.major < 3:
print("Ups! ODM needs to run with Python 3. It seems you launched it with Python 2. Try using: python3 run.py ... ")
sys.exit(1)
import os
from opendm import log
from opendm import config
from opendm import system
from opendm import io
from opendm.progress import progressbc
from opendm.utils import double_quote, get_processing_results_paths
from opendm.utils import get_processing_results_paths, rm_r
from opendm.loghelpers import args_to_dict
import os
from stages.odm_app import ODMApp
def odm_version():
try:
with open("VERSION") as f:
return f.read().split("\n")[0].strip()
except:
return "?"
if __name__ == '__main__':
args = config.config()
log.ODM_INFO('Initializing ODM - %s' % system.now())
log.ODM_INFO('Initializing ODM %s - %s' % (odm_version(), system.now()))
# Print args
args_dict = args_to_dict(args)
@ -41,20 +47,19 @@ if __name__ == '__main__':
# If user asks to rerun everything, delete all of the existing progress directories.
if args.rerun_all:
log.ODM_INFO("Rerun all -- Removing old data")
os.system("rm -rf " +
" ".join([double_quote(os.path.join(args.project_path, p)) for p in get_processing_results_paths()] + [
double_quote(os.path.join(args.project_path, "odm_meshing")),
double_quote(os.path.join(args.project_path, "opensfm")),
double_quote(os.path.join(args.project_path, "odm_texturing_25d")),
double_quote(os.path.join(args.project_path, "odm_filterpoints")),
double_quote(os.path.join(args.project_path, "submodels")),
]))
for d in [os.path.join(args.project_path, p) for p in get_processing_results_paths()] + [
os.path.join(args.project_path, "odm_meshing"),
os.path.join(args.project_path, "opensfm"),
os.path.join(args.project_path, "odm_texturing_25d"),
os.path.join(args.project_path, "odm_filterpoints"),
os.path.join(args.project_path, "submodels")]:
rm_r(d)
app = ODMApp(args)
retcode = app.execute()
# Do not show ASCII art for local submodels runs
if retcode == 0 and not "submodels/submodel_" in args.project_path:
if retcode == 0 and not "submodels" in args.project_path:
log.ODM_INFO('MMMMMMMMMMMNNNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNNNMMMMMMMMMMM')
log.ODM_INFO('MMMMMMdo:..---../sNMMMMMMMMMMMMMMMMMMMMMMMMMMNs/..---..:odMMMMMM')
log.ODM_INFO('MMMMy-.odNMMMMMNy/`/mMMMMMMMMMMMMMMMMMMMMMMm/`/hNMMMMMNdo.-yMMMM')


@ -61,6 +61,7 @@ parts:
- libspqr2
- libssl1.1
- libusb-1.0-0
- proj-data
- procps
- python3
- python3-distutils


@ -0,0 +1,228 @@
name: opendronemap
adopt-info: odm
grade: stable
confinement: strict
base: core21
summary: Command line toolkit for processing aerial drone imagery
description: >
An open source command line toolkit for processing aerial drone imagery. ODM turns simple 2D images into:
* Classified Point Clouds
* 3D Textured Models
* Georeferenced Orthorectified Imagery
* Georeferenced Digital Elevation Models
The application is available for Windows, Mac and Linux and it works from the command line, making it ideal for power users, scripts and for integration with other software.
# The UbuntuGIS PPA only has i386 and amd64 packages so we can't build any
# other architectures. Therefore let's limit to those here.
architectures:
- build-on: i386
run-on: i386
- build-on: amd64
run-on: amd64
package-repositories:
- type: apt
ppa: ubuntugis/ubuntugis-unstable
parts:
prereqs:
source: .
plugin: nil
build-packages:
- build-essential
- cmake
- gdal-bin
- gfortran # to build scipy
- git
- libboost-log-dev
- libgdal-dev
- libgeotiff-dev
- libjsoncpp-dev
- libssl-dev
- libusb-1.0-0-dev
- ninja-build
- pkg-config
- python3-dev
- python3-gdal
- python3-pip
- python3-setuptools
- python3-wheel
- rsync
- swig3.0
stage-packages:
- gdal-bin
- libboost-log1.74.0
- libgdal28
- libgeotiff5
- libjsoncpp24
- libspqr2
- libssl1.1
- libusb-1.0-0
- proj-data
- procps
- python3
- python3-distutils
- python3-gdal
- python3-pkg-resources # required base package for core20
- python3-requests # required base package for core20
- python3-setuptools
stage:
# remove deb-based numpy because it conflicts with our pip-installed version
- -usr/lib/python3/dist-packages/numpy
opencv:
source: .
plugin: nil
build-packages:
- libavcodec-dev
- libavformat-dev
- libeigen3-dev
- libflann-dev
- libgtk2.0-dev
- libjpeg-dev
- liblapack-dev
- libopenjpip7
- libpng-dev
- libproj-dev
- libswscale-dev
- libtbb-dev
- libtiff-dev
- libxext-dev
- proj-bin
stage-packages:
- libavcodec58
- libavformat58
- libflann1.9
- libgtk2.0-0
- libjpeg-turbo8
- libopenjpip7
- liblapack3
- libpng16-16
- libproj19
- libswscale5
- libtbb2
- libtiff5
- libwebpdemux2
- libxext6
openmvs:
source: .
plugin: nil
build-packages:
- libcgal-dev
- libboost-program-options-dev
stage-packages:
- libboost-program-options1.74.0
opensfm:
source: .
plugin: nil
build-packages:
- libboost-date-time-dev
- libboost-filesystem-dev
- libboost-iostreams-dev
- libboost-python-dev
- libboost-regex-dev
- libboost-serialization-dev
- libboost-system-dev
- libboost-thread-dev
- libgoogle-glog-dev
- libsuitesparse-dev
stage-packages:
- libamd2
- libboost-date-time1.74.0
- libboost-filesystem1.74.0
- libboost-iostreams1.74.0
- libboost-python1.74.0
- libboost-regex1.74.0
- libboost-serialization1.74.0
- libboost-system1.74.0
- libboost-thread1.74.0
- libcamd2
- libccolamd2
- libcholmod3
- libcolamd2
- libcxsparse3
- libgoogle-glog0v5
- libsuitesparseconfig5
odm:
after:
- prereqs
- opencv
- opensfm
- openmvs
source: .
plugin: nil # We will script everything ourselves
build-environment:
# Set Python location to build host's system so that we can
# use system libraries while building the Snap Package
- PYTHONHOME: /usr
# Set the location for pip to install requirements into inside
# the Snap package
- PYTHONUSERBASE: $SNAPCRAFT_PART_INSTALL
override-build: |
snapcraftctl set-version $(cat VERSION)
# Portable build
test -f /usr/bin/gcc_real || mv -v /usr/bin/gcc /usr/bin/gcc_real
test -f /usr/bin/gcc || cp -v ./docker/gcc /usr/bin/gcc
test -f /usr/bin/g++_real || mv -v /usr/bin/g++ /usr/bin/g++_real
test -f /usr/bin/g++ || cp -v ./docker/g++ /usr/bin/g++
pip3 install --user -r requirements.txt
# Build the SuperBuild libraries
mkdir -p SuperBuild/build
cd SuperBuild/build
cmake -G Ninja ..
cmake --build . --parallel 1
rsync -av --exclude .git \
$SNAPCRAFT_PART_BUILD/ $SNAPCRAFT_PART_INSTALL/odm/
chmod -R u=rwX,go=rX $PYTHONUSERBASE/lib/python*
stage:
# strip the temporary build files and sources
- -odm/SuperBuild/build
- -odm/SuperBuild/download
- -odm/SuperBuild/src
prime:
# remove any static-libraries
- -**/*.a
# remove any header files
- -**/*.h
# remove any left-over temporary compiled 'object' files
- -**/*.o
build-snaps:
- cmake
snap-specifics:
source: snap/local
plugin: dump
snapcraft-preload:
source: https://github.com/sergiusens/snapcraft-preload.git
plugin: cmake
cmake-parameters:
- -DCMAKE_INSTALL_PREFIX=/
build-packages:
- on amd64:
- gcc-multilib
- g++-multilib
apps:
opendronemap:
command: odm/run.sh
command-chain:
- bin/snapcraft-preload # Fixes multiprocessing python module
environment:
# Ensure libraries are found
LD_LIBRARY_PATH: $SNAP/usr/lib/$SNAPCRAFT_ARCH_TRIPLET/blas:$SNAP/usr/lib/$SNAPCRAFT_ARCH_TRIPLET/lapack:$LD_LIBRARY_PATH
plugs:
- home
- network
- network-bind
- removable-media


@ -4,12 +4,13 @@ import json
from opendm import context
from opendm import io
from opendm import types
from opendm.photo import PhotoCorruptedException
from opendm import log
from opendm import system
from opendm.geo import GeoFile
from shutil import copyfile
from opendm import progress
from opendm import boundary
def save_images_database(photos, database_file):
with open(database_file, 'w') as f:
@ -109,13 +110,16 @@ class ODMLoadDatasetStage(types.ODM_Stage):
with open(tree.dataset_list, 'w') as dataset_list:
log.ODM_INFO("Loading %s images" % len(path_files))
for f in path_files:
p = types.ODM_Photo(f)
p.set_mask(find_mask(f, masks))
photos += [p]
dataset_list.write(photos[-1].filename + '\n')
try:
p = types.ODM_Photo(f)
p.set_mask(find_mask(f, masks))
photos += [p]
dataset_list.write(photos[-1].filename + '\n')
except PhotoCorruptedException:
log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f))
# Check if a geo file is available
if tree.odm_geo_file is not None and os.path.exists(tree.odm_geo_file):
if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file):
log.ODM_INFO("Found image geolocation file")
gf = GeoFile(tree.odm_geo_file)
updated = 0
@ -126,6 +130,20 @@ class ODMLoadDatasetStage(types.ODM_Stage):
updated += 1
log.ODM_INFO("Updated %s image positions" % updated)
# GPSDOP override if we have GPS accuracy information (such as RTK)
if 'gps_accuracy_is_set' in args:
log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
for p in photos:
p.override_gps_dop(args.gps_accuracy)
# Override projection type
if args.camera_lens != "auto":
log.ODM_INFO("Setting camera lens to %s for all images" % args.camera_lens)
for p in photos:
p.override_camera_projection(args.camera_lens)
# Save image database for faster restart
save_images_database(photos, images_database_file)
else:
@ -154,3 +172,19 @@ class ODMLoadDatasetStage(types.ODM_Stage):
reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj))
outputs['reconstruction'] = reconstruction
# Try to load boundaries
if args.boundary:
if reconstruction.is_georeferenced():
outputs['boundary'] = boundary.load_boundary(args.boundary, reconstruction.get_proj_srs())
else:
args.boundary = None
log.ODM_WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)")
# If sfm-algorithm is triangulation, check if photos have OPK
if args.sfm_algorithm == 'triangulation':
for p in photos:
if not p.has_opk():
log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename)
args.sfm_algorithm = 'incremental'
break


@ -6,12 +6,20 @@ from opendm import system
from opendm import context
from opendm import types
from opendm.multispectral import get_primary_band_name
from opendm.photo import find_largest_photo_dim
class ODMMvsTexStage(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
reconstruction = outputs['reconstruction']
max_dim = find_largest_photo_dim(reconstruction.photos)
max_texture_size = 8 * 1024 # default
if max_dim > 8000:
log.ODM_INFO("Large input images (%s pixels), increasing maximum texture size." % max_dim)
max_texture_size *= 3
class nonloc:
runs = []
@ -92,6 +100,7 @@ class ODMMvsTexStage(types.ODM_Stage):
'keepUnseenFaces': keepUnseenFaces,
'toneMapping': self.params.get('tone_mapping'),
'nadirMode': nadir,
'maxTextureSize': '--max_texture_size=%s' % max_texture_size,
'nvm_file': r['nvm_file'],
'intermediate': '--no_intermediate_results' if (r['labeling_file'] or not reconstruction.multi_camera) else '',
'labelingFile': '-L "%s"' % r['labeling_file'] if r['labeling_file'] else ''
@ -113,7 +122,8 @@ class ODMMvsTexStage(types.ODM_Stage):
'{skipLocalSeamLeveling} '
'{keepUnseenFaces} '
'{nadirMode} '
'{labelingFile} '.format(**kwargs))
'{labelingFile} '
'{maxTextureSize} '.format(**kwargs))
# Backward compatibility: copy odm_textured_model_geo.mtl to odm_textured_model.mtl
# for certain older WebODM clients which expect an odm_textured_model.mtl
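For reference, the texture-size arithmetic above for a hypothetical 12000 px wide input:

max_dim = 12000                  # > 8000, so the texture budget triples
max_texture_size = 8 * 1024 * 3  # = 24576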


@ -18,6 +18,8 @@ from stages.odm_filterpoints import ODMFilterPoints
from stages.splitmerge import ODMSplitStage, ODMMergeStage
from stages.odm_report import ODMReport
from stages.odm_postprocess import ODMPostProcess
class ODMApp:
def __init__(self, args):
@ -61,7 +63,9 @@ class ODMApp:
max_concurrency=args.max_concurrency,
verbose=args.verbose)
orthophoto = ODMOrthoPhotoStage('odm_orthophoto', args, progress=98.0)
report = ODMReport('odm_report', args, progress=100.0)
report = ODMReport('odm_report', args, progress=99.0)
postprocess = ODMPostProcess('odm_postprocess', args, progress=100.0)
# Normal pipeline
self.first_stage = dataset
@ -82,7 +86,8 @@ class ODMApp:
.connect(georeferencing) \
.connect(dem) \
.connect(orthophoto) \
.connect(report)
.connect(report) \
.connect(postprocess)
def execute(self):
try:
@ -101,10 +106,10 @@ class ODMApp:
code = e.errorCode
log.logger.log_json_stage_error(str(e), code, stack_trace)
if code == 139 or code == 134 or code == 1:
if code == 139 or code == 134 or code == 1 or code == 3221225477:
# Segfault
log.ODM_ERROR("Uh oh! Processing stopped because of strange values in the reconstruction. This is often a sign that the input data has some issues or the software cannot deal with it. Have you followed best practices for data acquisition? See https://docs.opendronemap.org/flying.html")
elif code == 137:
log.ODM_ERROR("Uh oh! Processing stopped because of strange values in the reconstruction. This is often a sign that the input data has some issues or the software cannot deal with it. Have you followed best practices for data acquisition? See https://docs.opendronemap.org/flying/")
elif code == 137 or code == 3221226505:
log.ODM_ERROR("Whoops! You ran out of memory! Add more RAM to your computer, if you're using docker configure it to use more memory, for WSL2 make use of .wslconfig (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig), resize your images, lower the quality settings or process the images using a cloud provider (e.g. https://webodm.net).")
elif code == 132:
log.ODM_ERROR("Oh no! It looks like your CPU is not supported (is it fairly old?). You can still use ODM, but you will need to build your own docker image. See https://github.com/OpenDroneMap/ODM#build-from-source")


@ -13,6 +13,7 @@ from opendm import pseudogeo
from opendm.tiles.tiler import generate_dem_tiles
from opendm.cogeo import convert_to_cogeo
class ODMDEMStage(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
@ -28,11 +29,15 @@ class ODMDEMStage(types.ODM_Stage):
ignore_resolution = True
pseudo_georeference = True
# It is probably not reasonable to have accurate DEMs at the same resolution as the source photos, so reduce it
# by a factor!
gsd_scaling = 2.0
resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction,
gsd_error_estimate=-1,
ignore_gsd=args.ignore_gsd,
ignore_resolution=ignore_resolution,
has_gcp=reconstruction.has_gcp())
gsd_scaling=gsd_scaling,
ignore_gsd=args.ignore_gsd,
ignore_resolution=ignore_resolution and args.ignore_gsd,
has_gcp=reconstruction.has_gcp())
log.ODM_INFO('Classify: ' + str(args.pc_classify))
log.ODM_INFO('Create DSM: ' + str(args.dsm))
@ -106,14 +111,14 @@ class ODMDEMStage(types.ODM_Stage):
dem_geotiff_path = os.path.join(odm_dem_root, "{}.tif".format(product))
bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
if args.crop > 0:
if args.crop > 0 or args.boundary:
# Crop DEM
Cropper.crop(bounds_file_path, dem_geotiff_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)
if args.dem_euclidean_map:
unfilled_dem_path = io.related_file_path(dem_geotiff_path, postfix=".unfilled")
if args.crop > 0:
if args.crop > 0 or args.boundary:
# Crop unfilled DEM
Cropper.crop(bounds_file_path, unfilled_dem_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)


@ -6,6 +6,8 @@ from opendm import system
from opendm import context
from opendm import point_cloud
from opendm import types
from opendm import gsd
from opendm.boundary import boundary_offset, compute_boundary_from_shots
class ODMFilterPoints(types.ODM_Stage):
def process(self, args, outputs):
@ -21,12 +23,33 @@ class ODMFilterPoints(types.ODM_Stage):
else:
inputPointCloud = tree.openmvs_model
# Check if we need to compute boundary
if args.auto_boundary:
if reconstruction.is_georeferenced():
if not 'boundary' in outputs:
avg_gsd = gsd.opensfm_reconstruction_average_gsd(tree.opensfm_reconstruction)
outputs['boundary'] = compute_boundary_from_shots(tree.opensfm_reconstruction, avg_gsd * 20, reconstruction.get_proj_offset()) # 20 is arbitrary
if outputs['boundary'] is None:
log.ODM_WARNING("Cannot compute boundary from camera shots")
else:
log.ODM_WARNING("--auto-boundary set but so is --boundary, will use --boundary")
else:
log.ODM_WARNING("Not a georeferenced reconstruction, will ignore --auto-boundary")
point_cloud.filter(inputPointCloud, tree.filtered_point_cloud,
standard_deviation=args.pc_filter,
sample_radius=args.pc_sample,
boundary=boundary_offset(outputs.get('boundary'), reconstruction.get_proj_offset()),
verbose=args.verbose,
max_concurrency=args.max_concurrency)
# Quick check
info = point_cloud.ply_info(tree.filtered_point_cloud)
if info["vertex_count"] == 0:
extra_msg = ''
if 'boundary' in outputs:
extra_msg = '. Also, since you used a boundary setting, make sure that the boundary polygon you specified covers the reconstruction area correctly.'
raise system.ExitException("Uh oh! We ended up with an empty point cloud. This means that the reconstruction did not succeed. Have you followed best practices for data acquisition? See https://docs.opendronemap.org/flying/%s" % extra_msg)
else:
log.ODM_WARNING('Found a valid point cloud file in: %s' %
tree.filtered_point_cloud)


@ -1,21 +1,107 @@
import os
import struct
import pipes
import fiona
import fiona.crs
import json
from collections import OrderedDict
from pyproj import CRS
from opendm import io
from opendm import log
from opendm import types
from opendm import system
from opendm import context
from opendm import location
from opendm.cropper import Cropper
from opendm import point_cloud
from opendm.multispectral import get_primary_band_name
from opendm.osfm import OSFMContext
from opendm.boundary import as_polygon, export_to_bounds_files
class ODMGeoreferencingStage(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
reconstruction = outputs['reconstruction']
# Export GCP information if available
gcp_export_file = tree.path("odm_georeferencing", "ground_control_points.gpkg")
gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
gcp_geojson_export_file = tree.path("odm_georeferencing", "ground_control_points.geojson")
if reconstruction.has_gcp() and (not io.file_exists(gcp_export_file) or self.rerun()):
octx = OSFMContext(tree.opensfm)
gcps = octx.ground_control_points(reconstruction.georef.proj4())
if len(gcps):
gcp_schema = {
'geometry': 'Point',
'properties': OrderedDict([
('id', 'str'),
('observations_count', 'int'),
('observations_list', 'str'),
('error_x', 'float'),
('error_y', 'float'),
('error_z', 'float'),
])
}
# Write GeoPackage
with fiona.open(gcp_export_file, 'w', driver="GPKG",
crs=fiona.crs.from_string(reconstruction.georef.proj4()),
schema=gcp_schema) as f:
for gcp in gcps:
f.write({
'geometry': {
'type': 'Point',
'coordinates': gcp['coordinates'],
},
'properties': OrderedDict([
('id', gcp['id']),
('observations_count', len(gcp['observations'])),
('observations_list', ",".join([obs['shot_id'] for obs in gcp['observations']])),
('error_x', gcp['error'][0]),
('error_y', gcp['error'][1]),
('error_z', gcp['error'][2]),
])
})
# Write GML
try:
system.run('ogr2ogr -of GML "{}" "{}"'.format(gcp_gml_export_file, gcp_export_file))
except Exception as e:
log.ODM_WARNING("Cannot generate ground control points GML file: %s" % str(e))
# Write GeoJSON
geojson = {
'type': 'FeatureCollection',
'features': []
}
from_srs = CRS.from_proj4(reconstruction.georef.proj4())
to_srs = CRS.from_epsg(4326)
transformer = location.transformer(from_srs, to_srs)
for gcp in gcps:
properties = gcp.copy()
del properties['coordinates']
geojson['features'].append({
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': transformer.TransformPoint(*gcp['coordinates']),
},
'properties': properties
})
with open(gcp_geojson_export_file, 'w') as f:
f.write(json.dumps(geojson, indent=4))
else:
log.ODM_WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file)
if not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun():
cmd = ('pdal translate -i "%s" -o \"%s\"' % (tree.filtered_point_cloud, tree.odm_georeferencing_model_laz))
stages = ["ferry"]
@ -35,6 +121,12 @@ class ODMGeoreferencingStage(types.ODM_Stage):
'--writers.las.offset_z=0',
'--writers.las.a_srs="%s"' % reconstruction.georef.proj4()
]
if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file):
log.ODM_INFO("Embedding GCP info in point cloud")
params += [
'--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODM_GCP\\\", \\\"description\\\": \\\"Ground Control Points (GML)\\\"}"' % gcp_gml_export_file.replace(os.sep, "/")
]
system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params))
@ -45,7 +137,7 @@ class ODMGeoreferencingStage(types.ODM_Stage):
cropper = Cropper(tree.odm_georeferencing, 'odm_georeferenced_model')
if args.fast_orthophoto:
decimation_step = 10
decimation_step = 4
else:
decimation_step = 40
@ -60,6 +152,14 @@ class ODMGeoreferencingStage(types.ODM_Stage):
except:
log.ODM_WARNING("Cannot calculate crop bounds! We will skip cropping")
args.crop = 0
if 'boundary' in outputs and args.crop == 0:
log.ODM_INFO("Using boundary JSON as cropping area")
bounds_base, _ = os.path.splitext(tree.odm_georeferencing_model_laz)
bounds_json = bounds_base + ".bounds.geojson"
bounds_gpkg = bounds_base + ".bounds.gpkg"
export_to_bounds_files(outputs['boundary'], reconstruction.get_proj_srs(), bounds_json, bounds_gpkg)
else:
log.ODM_INFO("Converting point cloud (non-georeferenced)")
system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params))
@ -68,7 +168,7 @@ class ODMGeoreferencingStage(types.ODM_Stage):
else:
log.ODM_WARNING('Found a valid georeferenced model in: %s'
% tree.odm_georeferencing_model_laz)
if args.optimize_disk_space and io.file_exists(tree.odm_georeferencing_model_laz) and io.file_exists(tree.filtered_point_cloud):
os.remove(tree.filtered_point_cloud)


@ -44,7 +44,7 @@ class ODMeshingStage(types.ODM_Stage):
log.ODM_INFO('Writing ODM 2.5D Mesh file in: %s' % tree.odm_25dmesh)
ortho_resolution = gsd.cap_resolution(args.orthophoto_resolution, tree.opensfm_reconstruction,
ignore_gsd=args.ignore_gsd,
ignore_resolution=not reconstruction.is_georeferenced(),
ignore_resolution=(not reconstruction.is_georeferenced()) and args.ignore_gsd,
has_gcp=reconstruction.has_gcp()) / 100.0
dsm_multiplier = max(1.0, gsd.rounded_gsd(tree.opensfm_reconstruction, default_value=4, ndigits=3, ignore_gsd=args.ignore_gsd))
@ -56,10 +56,9 @@ class ODMeshingStage(types.ODM_Stage):
dsm_radius = dsm_resolution * math.sqrt(2)
# Sparse point clouds benefit from using
# a larger interpolation radius --> fewer holes
if args.fast_orthophoto:
dsm_radius *= 2
dsm_resolution *= 8
log.ODM_INFO('ODM 2.5D DSM resolution: %s' % dsm_resolution)
@ -72,7 +71,7 @@ class ODMeshingStage(types.ODM_Stage):
verbose=self.params.get('verbose'),
available_cores=args.max_concurrency,
method='poisson' if args.fast_orthophoto else 'gridded',
smooth_dsm=not args.fast_orthophoto)
smooth_dsm=True)
else:
log.ODM_WARNING('Found a valid ODM 2.5D Mesh file in: %s' %
tree.odm_25dmesh)


@ -13,6 +13,7 @@ from opendm.utils import double_quote
from opendm import pseudogeo
from opendm.multispectral import get_primary_band_name
class ODMOrthoPhotoStage(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
@ -22,19 +23,16 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
# define paths and create working directories
system.mkdir_p(tree.odm_orthophoto)
if args.skip_orthophoto:
log.ODM_WARNING("--skip-orthophoto is set, no orthophoto will be generated")
return
if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
gsd_error_estimate = 0.1
ignore_resolution = False
if not reconstruction.is_georeferenced():
# Match DEMs
gsd_error_estimate = -3
ignore_resolution = True
resolution = 1.0 / (gsd.cap_resolution(args.orthophoto_resolution, tree.opensfm_reconstruction,
gsd_error_estimate=gsd_error_estimate,
ignore_gsd=args.ignore_gsd,
ignore_resolution=ignore_resolution,
has_gcp=reconstruction.has_gcp()) / 100.0)
ignore_gsd=args.ignore_gsd,
ignore_resolution=(not reconstruction.is_georeferenced()) and args.ignore_gsd,
has_gcp=reconstruction.has_gcp()) / 100.0)
# odm_orthophoto definitions
kwargs = {


@ -0,0 +1,51 @@
import os
from osgeo import gdal
from opendm import io
from opendm import log
from opendm import types
from opendm.utils import copy_paths, get_processing_results_paths
class ODMPostProcess(types.ODM_Stage):
def process(self, args, outputs):
tree = outputs['tree']
reconstruction = outputs['reconstruction']
log.ODM_INFO("Post Processing")
if not outputs['large']:
# TODO: support for split-merge?
# Embed GCP info in 2D results via
# XML metadata fields
gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file):
skip_embed_gcp = False
gcp_xml = ""
with open(gcp_gml_export_file) as f:
gcp_xml = f.read()
for product in [tree.odm_orthophoto_tif,
tree.path("odm_dem", "dsm.tif"),
tree.path("odm_dem", "dtm.tif")]:
if os.path.isfile(product):
ds = gdal.Open(product)
if ds is not None:
if ds.GetMetadata('xml:GROUND_CONTROL_POINTS') is None or self.rerun():
ds.SetMetadata(gcp_xml, 'xml:GROUND_CONTROL_POINTS')
ds = None
log.ODM_INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % product)
else:
skip_embed_gcp = True
log.ODM_WARNING("Already embedded ground control point information")
break
else:
log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % product)
if args.copy_to:
try:
copy_paths([os.path.join(args.project_path, p) for p in get_processing_results_paths()], args.copy_to, self.rerun())
except Exception as e:
log.ODM_WARNING("Cannot copy to %s: %s" % (args.copy_to, str(e)))


@ -14,7 +14,7 @@ from opendm.point_cloud import export_info_json
from opendm.cropper import Cropper
from opendm.orthophoto import get_orthophoto_vars, get_max_memory, generate_png
from opendm.tiles.tiler import generate_colored_hillshade
from opendm.utils import get_raster_stats, copy_paths, get_processing_results_paths
from opendm.utils import get_raster_stats
def hms(seconds):
h = seconds // 3600
@ -28,8 +28,8 @@ def hms(seconds):
return '{}s'.format(round(s, 0))
def generate_point_cloud_stats(input_point_cloud, pc_info_file):
if not os.path.exists(pc_info_file):
def generate_point_cloud_stats(input_point_cloud, pc_info_file, rerun=False):
if not os.path.exists(pc_info_file) or rerun:
export_info_json(input_point_cloud, pc_info_file)
if os.path.exists(pc_info_file):
@ -89,7 +89,7 @@ class ODMReport(types.ODM_Stage):
# pc_info_file should have been generated by cropper
pc_info_file = os.path.join(tree.odm_georeferencing, "odm_georeferenced_model.info.json")
odm_stats['point_cloud_statistics'] = generate_point_cloud_stats(tree.odm_georeferencing_model_laz, pc_info_file)
odm_stats['point_cloud_statistics'] = generate_point_cloud_stats(tree.odm_georeferencing_model_laz, pc_info_file, self.rerun())
else:
ply_pc = os.path.join(tree.odm_filterpoints, "point_cloud.ply")
if os.path.exists(ply_pc):
@ -97,7 +97,7 @@ class ODMReport(types.ODM_Stage):
views_dimension = "views"
pc_info_file = os.path.join(tree.odm_filterpoints, "point_cloud.info.json")
odm_stats['point_cloud_statistics'] = generate_point_cloud_stats(ply_pc, pc_info_file)
odm_stats['point_cloud_statistics'] = generate_point_cloud_stats(ply_pc, pc_info_file, self.rerun())
else:
log.ODM_WARNING("No point cloud found")
@ -152,7 +152,7 @@ class ODMReport(types.ODM_Stage):
overlap_color_map = os.path.join(report_assets, "overlap_color_map.txt")
bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
if args.crop > 0 and os.path.isfile(bounds_file_path):
if (args.crop > 0 or args.boundary) and os.path.isfile(bounds_file_path):
Cropper.crop(bounds_file_path, diagram_tiff, get_orthophoto_vars(args), keep_original=False)
system.run("gdaldem color-relief \"{}\" \"{}\" \"{}\" -of PNG -alpha".format(diagram_tiff, overlap_color_map, diagram_png))
@ -196,10 +196,3 @@ class ODMReport(types.ODM_Stage):
log.ODM_WARNING("Cannot generate overlap diagram, point cloud stats missing")
octx.export_report(os.path.join(tree.odm_report, "report.pdf"), odm_stats, self.rerun())
# TODO: does this warrant a new stage?
if args.copy_to:
try:
copy_paths([os.path.join(args.project_path, p) for p in get_processing_results_paths()], args.copy_to, self.rerun())
except Exception as e:
log.ODM_WARNING("Cannot copy to %s: %s" % (args.copy_to, str(e)))


@ -1,4 +1,4 @@
import shutil, os, glob, math
import shutil, os, glob, math, sys
from opendm import log
from opendm import io
@ -6,6 +6,7 @@ from opendm import system
from opendm import context
from opendm import point_cloud
from opendm import types
from opendm.gpu import has_gpu
from opendm.utils import get_depthmap_resolution
from opendm.osfm import OSFMContext
from opendm.multispectral import get_primary_band_name
@ -45,8 +46,10 @@ class ODMOpenMVSStage(types.ODM_Stage):
if not io.dir_exists(depthmaps_dir):
os.mkdir(depthmaps_dir)
depthmap_resolution = get_depthmap_resolution(args, photos)
log.ODM_INFO("Depthmap resolution set to: %spx" % depthmap_resolution)
if outputs["undist_image_max_size"] <= depthmap_resolution:
resolution_level = 0
else:
@ -55,30 +58,45 @@ class ODMOpenMVSStage(types.ODM_Stage):
log.ODM_INFO("Running dense reconstruction. This might take a while.")
log.ODM_INFO("Estimating depthmaps")
number_views_fuse = 2
densify_ini_file = os.path.join(tree.openmvs, 'config.ini')
with open(densify_ini_file, 'w+') as f:
f.write("Optimize = 0\n") # Disable depth-maps re-filtering
config = [
" --resolution-level %s" % int(resolution_level),
"--min-resolution %s" % depthmap_resolution,
"--min-resolution %s" % depthmap_resolution,
"--max-resolution %s" % int(outputs['undist_image_max_size']),
"--max-threads %s" % args.max_concurrency,
"--number-views-fuse 2",
"--number-views-fuse %s" % number_views_fuse,
'-w "%s"' % depthmaps_dir,
"-v 0"
]
gpu_config = []
if not has_gpu():
gpu_config.append("--cuda-device -1")
if args.pc_tile:
config.append("--fusion-mode 1")
if not args.pc_geometric:
config.append("--geometric-iters 0")
system.run('%s "%s" %s' % (context.omvs_densify_path,
openmvs_scene_file,
' '.join(config)))
def run_densify():
system.run('"%s" "%s" %s' % (context.omvs_densify_path,
openmvs_scene_file,
' '.join(config + gpu_config)))
try:
run_densify()
except system.SubprocessException as e:
# If the GPU was enabled and the program failed,
# try to run it again without GPU
if e.errorCode == 1 and len(gpu_config) == 0:
log.ODM_WARNING("OpenMVS failed with GPU, is your graphics card driver up to date? Falling back to CPU.")
gpu_config.append("--cuda-device -1")
run_densify()
else:
raise e
self.update_progress(85)
files_to_remove = []
@ -86,15 +104,20 @@ class ODMOpenMVSStage(types.ODM_Stage):
if args.pc_tile:
log.ODM_INFO("Computing sub-scenes")
subscene_densify_ini_file = os.path.join(tree.openmvs, 'subscene-config.ini')
with open(subscene_densify_ini_file, 'w+') as f:
f.write("Optimize = 0\n")
config = [
"--sub-scene-area 660000",
"--max-threads %s" % args.max_concurrency,
'-w "%s"' % depthmaps_dir,
"-v 0",
]
system.run('%s "%s" %s' % (context.omvs_densify_path,
system.run('"%s" "%s" %s' % (context.omvs_densify_path,
openmvs_scene_file,
' '.join(config)))
' '.join(config + gpu_config)))
scene_files = glob.glob(os.path.join(tree.openmvs, "scene_[0-9][0-9][0-9][0-9].mvs"))
if len(scene_files) == 0:
@ -106,10 +129,11 @@ class ODMOpenMVSStage(types.ODM_Stage):
for sf in scene_files:
p, _ = os.path.splitext(sf)
scene_ply_unfiltered = p + "_dense.ply"
scene_ply = p + "_dense_dense_filtered.ply"
scene_dense_mvs = p + "_dense.mvs"
files_to_remove += [scene_ply, sf, scene_dense_mvs]
files_to_remove += [scene_ply, sf, scene_dense_mvs, scene_ply_unfiltered]
scene_ply_files.append(scene_ply)
if not io.file_exists(scene_ply) or self.rerun():
@ -118,18 +142,26 @@ class ODMOpenMVSStage(types.ODM_Stage):
'--resolution-level %s' % int(resolution_level),
'--min-resolution %s' % depthmap_resolution,
'--max-resolution %s' % int(outputs['undist_image_max_size']),
'--dense-config-file "%s"' % densify_ini_file,
'--number-views-fuse 2',
'--dense-config-file "%s"' % subscene_densify_ini_file,
'--number-views-fuse %s' % number_views_fuse,
'--max-threads %s' % args.max_concurrency,
'-w "%s"' % depthmaps_dir,
'-v 0',
]
if not args.pc_geometric:
config.append("--geometric-iters 0")
try:
system.run('%s "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config)))
system.run('"%s" "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config + gpu_config)))
# Filter
system.run('%s "%s" --filter-point-cloud -1 -v 0' % (context.omvs_densify_path, scene_dense_mvs))
if args.pc_filter > 0:
system.run('"%s" "%s" --filter-point-cloud -1 -v 0 %s' % (context.omvs_densify_path, scene_dense_mvs, ' '.join(gpu_config)))
else:
# Just rename
log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply))
os.rename(scene_ply_unfiltered, scene_ply)
except:
log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf)
@ -152,15 +184,21 @@ class ODMOpenMVSStage(types.ODM_Stage):
fast_merge_ply(scene_ply_files, tree.openmvs_model)
else:
# Filter all at once
if os.path.exists(scene_dense):
config = [
"--filter-point-cloud -1",
'-i "%s"' % scene_dense,
"-v 0"
]
system.run('%s %s' % (context.omvs_densify_path, ' '.join(config)))
if args.pc_filter > 0:
if os.path.exists(scene_dense):
config = [
"--filter-point-cloud -1",
'-i "%s"' % scene_dense,
"-v 0"
]
system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config)))
else:
raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
else:
raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
# Just rename
scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply')
log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model))
os.rename(scene_dense_ply, tree.openmvs_model)
# TODO: add support for image masks


@ -30,7 +30,7 @@ class ODMOpenSfMStage(types.ODM_Stage):
octx = OSFMContext(tree.opensfm)
octx.setup(args, tree.dataset_raw, reconstruction=reconstruction, rerun=self.rerun())
octx.extract_metadata(self.rerun())
octx.photos_to_metadata(photos, self.rerun())
self.update_progress(20)
octx.feature_matching(self.rerun())
self.update_progress(30)
@ -40,7 +40,7 @@ class ODMOpenSfMStage(types.ODM_Stage):
def cleanup_disk_space():
if args.optimize_disk_space:
for folder in ["features", "matches", "exif", "reports"]:
for folder in ["features", "matches", "reports"]:
folder_path = octx.path(folder)
if os.path.exists(folder_path):
if os.path.islink(folder_path):
@ -68,14 +68,11 @@ class ODMOpenSfMStage(types.ODM_Stage):
self.update_progress(75)
# We now switch to a geographic CRS
geocoords_flag_file = octx.path("exported_geocoords.txt")
if reconstruction.is_georeferenced() and (not io.file_exists(geocoords_flag_file) or self.rerun()):
if reconstruction.is_georeferenced() and (not io.file_exists(tree.opensfm_topocentric_reconstruction) or self.rerun()):
octx.run('export_geocoords --reconstruction --proj "%s" --offset-x %s --offset-y %s' %
(reconstruction.georef.proj4(), reconstruction.georef.utm_east_offset, reconstruction.georef.utm_north_offset))
# Destructive
shutil.move(tree.opensfm_reconstruction, tree.opensfm_topocentric_reconstruction)
shutil.move(tree.opensfm_geocoords_reconstruction, tree.opensfm_reconstruction)
octx.touch(geocoords_flag_file)
else:
log.ODM_WARNING("Will skip exporting %s" % tree.opensfm_geocoords_reconstruction)
@ -116,7 +113,7 @@ class ODMOpenSfMStage(types.ODM_Stage):
def radiometric_calibrate(shot_id, image):
photo = reconstruction.get_photo(shot_id)
if photo.is_thermal():
return thermal.dn_to_temperature(photo, image)
return thermal.dn_to_temperature(photo, image, tree.dataset_raw)
else:
return multispectral.dn_to_reflectance(photo, image, use_sun_sensor=args.radiometric_calibration=="camera+sun")

Show file

@ -52,7 +52,7 @@ class ODMSplitStage(types.ODM_Stage):
]
octx.setup(args, tree.dataset_raw, reconstruction=reconstruction, append_config=config, rerun=self.rerun())
octx.extract_metadata(self.rerun())
octx.photos_to_metadata(photos, self.rerun())
self.update_progress(5)
@ -89,7 +89,13 @@ class ODMSplitStage(types.ODM_Stage):
io.copy(submodel_gcp_file, os.path.abspath(sp_octx.path("gcp_list.txt")))
else:
log.ODM_INFO("No GCP will be copied for %s, not enough images in the submodel are referenced by the GCP" % sp_octx.name())
# Copy GEO file if needed (one for each submodel project directory)
if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file):
geo_dst_path = os.path.abspath(sp_octx.path("..", "geo.txt"))
io.copy(tree.odm_geo_file, geo_dst_path)
log.ODM_INFO("Copied GEO file to %s" % geo_dst_path)
# Reconstruct each submodel
log.ODM_INFO("Dataset has been split into %s submodels. Reconstructing each submodel..." % len(submodel_paths))
self.update_progress(25)
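The geo.txt being propagated here follows ODM's image geolocation file format: a projection header followed by one line per image (name, x, y, optional z). A minimal example with hypothetical values:

EPSG:4326
DJI_0028.JPG -91.9942096 46.8425252 198.609
DJI_0029.JPG -91.9938293 46.8424584 199.102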
@ -105,71 +111,6 @@ class ODMSplitStage(types.ODM_Stage):
self.update_progress(50)
# TODO: this is currently not working and needs a champion to fix it
# https://community.opendronemap.org/t/filenotfound-error-cameras-json/6047/2
# resplit_done_file = octx.path('resplit_done.txt')
# if not io.file_exists(resplit_done_file) and bool(args.split_multitracks):
# submodels = mds.get_submodel_paths()
# i = 0
# for s in submodels:
# template = octx.path("../aligned_submodels/submodel_%04d")
# with open(s+"/reconstruction.json", "r") as f:
# j = json.load(f)
# for k in range(0, len(j)):
# v = j[k]
# path = template % i
# #Create the submodel path up to opensfm
# os.makedirs(path+"/opensfm")
# os.makedirs(path+"/images")
# #symlinks for common data
# images = os.listdir(octx.path("../images"))
# for image in images:
# os.symlink("../../../images/"+image, path+"/images/"+image)
# os.symlink("../../../opensfm/exif", path+"/opensfm/exif")
# os.symlink("../../../opensfm/features", path+"/opensfm/features")
# os.symlink("../../../opensfm/matches", path+"/opensfm/matches")
# os.symlink("../../../opensfm/reference_lla.json", path+"/opensfm/reference_lla.json")
# os.symlink("../../../opensfm/camera_models.json", path+"/opensfm/camera_models.json")
# shutil.copy(s+"/../cameras.json", path+"/cameras.json")
# shutil.copy(s+"/../images.json", path+"/images.json")
# with open(octx.path("config.yaml")) as f:
# doc = yaml.safe_load(f)
# dmcv = "depthmap_min_consistent_views"
# if dmcv in doc:
# if len(v["shots"]) < doc[dmcv]:
# doc[dmcv] = len(v["shots"])
# print("WARNING: Reduced "+dmcv+" to accommodate short track")
# with open(path+"/opensfm/config.yaml", "w") as f:
# yaml.dump(doc, f)
# #We need the original tracks file for the visualsfm export, since
# #there may still be point matches between the tracks
# shutil.copy(s+"/tracks.csv", path+"/opensfm/tracks.csv")
# #Create our new reconstruction file with only the relevant track
# with open(path+"/opensfm/reconstruction.json", "w") as o:
# json.dump([v], o)
# #Create image lists
# with open(path+"/opensfm/image_list.txt", "w") as o:
# o.writelines(list(map(lambda x: "../images/"+x+'\n', v["shots"].keys())))
# with open(path+"/img_list.txt", "w") as o:
# o.writelines(list(map(lambda x: x+'\n', v["shots"].keys())))
# i+=1
# os.rename(octx.path("../submodels"), octx.path("../unaligned_submodels"))
# os.rename(octx.path("../aligned_submodels"), octx.path("../submodels"))
# octx.touch(resplit_done_file)
mds = metadataset.MetaDataSet(tree.opensfm)
submodel_paths = [os.path.abspath(p) for p in mds.get_submodel_paths()]
@ -332,7 +273,7 @@ class ODMMergeStage(types.ODM_Stage):
if io.file_exists(dem_file):
# Crop
if args.crop > 0:
if args.crop > 0 or args.boundary:
Cropper.crop(merged_bounds_file, dem_file, dem_vars, keep_original=not args.optimize_disk_space)
log.ODM_INFO("Created %s" % dem_file)
@ -367,8 +308,9 @@ class ODMMergeStage(types.ODM_Stage):
else:
log.ODM_WARNING("Found merged shots.geojson in %s" % tree.odm_report)
# Stop the pipeline short! We're done.
self.next_stage = None
# Stop the pipeline short by skipping to the postprocess stage.
# Afterwards, we're done.
self.next_stage = self.last_stage()
else:
log.ODM_INFO("Normal dataset, nothing to merge.")
self.progress = 0.0
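The early exit now jumps to the tail of the pipeline instead of terminating it outright, so postprocessing still runs. A minimal sketch of the stage-chain mechanics this relies on (simplified from, not identical to, ODM's types.ODM_Stage):

class Stage:
    def __init__(self, name):
        self.name = name
        self.next_stage = None

    def connect(self, stage):
        # Link stages into a singly linked pipeline; return the tail for chaining
        self.next_stage = stage
        return stage

    def last_stage(self):
        # Walk to the end of the chain, i.e. the final postprocess stage
        s = self
        while s.next_stage is not None:
            s = s.next_stage
        return s

dataset = Stage("dataset")
dataset.connect(Stage("split")).connect(Stage("merge")).connect(Stage("postprocess"))
# Setting self.next_stage = self.last_stage() inside a stage skips every
# remaining stage except the final one.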

Show file

@ -23,6 +23,7 @@ if [ "$1" = "--setup" ]; then
echo "Adding $2 to /etc/shadow"
echo "$2:x:14871::::::" >> /etc/shadow
echo "$2 ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
echo "odm ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
echo "echo '' && echo '' && echo '' && echo '###################################' && echo 'ODM Dev Environment Ready. Hack on!' && echo '###################################' && echo '' && cd /code" > $HOME/.bashrc
# Install qt creator
@ -81,12 +82,20 @@ fi
if hash docker 2>/dev/null; then
has_docker="YES"
fi
if hash nvidia-smi 2>/dev/null; then
has_nvidia_smi="YES"
fi
if [ "$has_docker" != "YES" ]; then
echo "You need to install docker before running this script."
exit 1
fi
IMAGE_SET=NO
if [[ ! -z $IMAGE ]]; then
IMAGE_SET=YES
fi
export PORT="${PORT:=3000}"
export QTC="${QTC:=NO}"
export IMAGE="${IMAGE:=opendronemap/nodeodm}"
@ -119,11 +128,18 @@ fi
USER_ID=$(id -u)
GROUP_ID=$(id -g)
USER=$(id -un)
GPU_FLAG=""
GPU_FLAGS=""
if [[ "$GPU" != "NO" ]]; then
GPU_FLAG="--gpus all"
if [[ "$IMAGE_SET" = "NO" ]]; then
IMAGE="$IMAGE:gpu"
fi
GPU_FLAGS="--gpus all"
if [[ "$has_nvidia_smi" = "YES" ]]; then
GPU_FLAGS="$GPU_FLAGS --device /dev/nvidia0 --device /dev/nvidia-uvm --device /dev/nvidia-uvm-tools --device /dev/nvidia-modeset --device /dev/nvidiactl"
fi
fi
xhost + || true
docker run -ti --entrypoint bash --name odmdev -v $(pwd):/code -v "$DATA":/datasets -p $PORT:3000 $GPU_FLAG --privileged -e DISPLAY -e LANG=C.UTF-8 -e LC_ALL=C.UTF-8 -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v="$HOME/.odm-dev-home:/home/$USER" $IMAGE -c "/code/start-dev-env.sh --setup $USER $USER_ID $GROUP_ID $QTC"
docker run -ti --entrypoint bash --name odmdev --user root -v $(pwd):/code -v "$DATA":/datasets -p $PORT:3000 $GPU_FLAGS --privileged -e DISPLAY -e LANG=C.UTF-8 -e LC_ALL=C.UTF-8 -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v="$HOME/.odm-dev-home:/home/$USER" $IMAGE -c "/code/start-dev-env.sh --setup $USER $USER_ID $GROUP_ID $QTC"
exit 0

Show file

@ -48,4 +48,4 @@ set PATH=%VIRTUAL_ENV%\Scripts;%PATH%
if defined _OLD_CODEPAGE (
"%SystemRoot%\System32\chcp.com" %_OLD_CODEPAGE% > nul
set _OLD_CODEPAGE=
)
)