Mirror of https://github.com/OpenDroneMap/ODM
commit 12fcdb6550
@ -130,7 +130,6 @@ RUN rm -rf \
    /code/SuperBuild/build/opencv \
    /code/SuperBuild/download \
    /code/SuperBuild/src/ceres \
-   /code/SuperBuild/src/exiv2lib \
    /code/SuperBuild/src/mvstexturing \
    /code/SuperBuild/src/opencv \
    /code/SuperBuild/src/opengv \
@ -109,7 +109,7 @@ or

    python run.py --rerun-from odm_meshing project-name

- The options for rerunning are: 'resize', 'opensfm', 'slam', 'smvs', 'odm_meshing', 'mvs_texturing', 'odm_georeferencing', 'odm_orthophoto'
+ The options for rerunning are: 'resize', 'opensfm', 'slam', 'mve', 'odm_meshing', 'mvs_texturing', 'odm_georeferencing', 'odm_orthophoto'
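With smvs replaced by mve in this commit, an interrupted pipeline would now be resumed from the new dense reconstruction stage, for example (the project name is a placeholder, following the README's own convention):

    python run.py --rerun-from mve project-name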
### View Results
@ -199,7 +199,7 @@ If you want to get all intermediate outputs, run the following command:
        -v "$(pwd)/odm_orthophoto:/code/odm_orthophoto" \
        -v "$(pwd)/odm_texturing:/code/odm_texturing" \
        -v "$(pwd)/opensfm:/code/opensfm" \
-       -v "$(pwd)/smvs:/code/smvs" \
+       -v "$(pwd)/mve:/code/mve" \
        opendronemap/odm
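For reference, a full invocation with the updated mount list might look as follows; the `images` input mount is an assumption based on standard ODM usage and is not part of this hunk:

    docker run -it --rm \
        -v "$(pwd)/images:/code/images" \
        -v "$(pwd)/odm_orthophoto:/code/odm_orthophoto" \
        -v "$(pwd)/odm_texturing:/code/odm_texturing" \
        -v "$(pwd)/opensfm:/code/opensfm" \
        -v "$(pwd)/mve:/code/mve" \
        opendronemap/odm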
To pass in custom parameters to the run.py script, simply pass it as arguments to the `docker run` command. For example:
@ -112,7 +112,6 @@ set(custom_libs OpenGV
                Catkin
                Ecto
                LASzip
-               Exiv2
                PDAL
                MvsTexturing
)
@ -128,11 +127,11 @@ foreach(lib ${custom_libs})
    SETUP_EXTERNAL_PROJECT_CUSTOM(${lib})
endforeach()

- ## Add smvs Build
+ ## Add mve Build

externalproject_add(mve
    GIT_REPOSITORY    https://github.com/simonfuhrmann/mve.git
-   GIT_TAG           fb942b4458dbf8490c9a4c6b81b9b9f57c593c0f
+   GIT_TAG           4328bb4d1666113e919213daea4fe6b2ff9be588
    UPDATE_COMMAND    ""
    SOURCE_DIR        ${SB_SOURCE_DIR}/elibs/mve
    CONFIGURE_COMMAND ""
@ -141,18 +140,6 @@ externalproject_add(mve
    INSTALL_COMMAND   ""
)

- externalproject_add(smvs
-     DEPENDS           mve
-     GIT_REPOSITORY    https://github.com/flanggut/smvs.git
-     GIT_TAG           6a7d0c095aa66ab98c5b285c2bc04e34d8993353
-     UPDATE_COMMAND    ""
-     SOURCE_DIR        ${SB_SOURCE_DIR}/elibs/smvs
-     CONFIGURE_COMMAND ""
-     BUILD_IN_SOURCE   1
-     BUILD_COMMAND     make
-     INSTALL_COMMAND   ""
- )

externalproject_add(poissonrecon
    GIT_REPOSITORY    https://github.com/mkazhdan/PoissonRecon.git
    GIT_TAG           ce5005ae3094d902d551a65a8b3131e06f45e7cf
@ -1,20 +0,0 @@
set(_proj_name exiv2lib)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

ExternalProject_Add(${_proj_name}
  PREFIX            ${_SB_BINARY_DIR}
  TMP_DIR           ${_SB_BINARY_DIR}/tmp
  STAMP_DIR         ${_SB_BINARY_DIR}/stamp
  DOWNLOAD_DIR      ${SB_DOWNLOAD_DIR}/${_proj_name}
  URL               https://github.com/Exiv2/exiv2/archive/0.27.tar.gz
  SOURCE_DIR        ${SB_SOURCE_DIR}/${_proj_name}
  CMAKE_ARGS
    -DCMAKE_INSTALL_PREFIX=${SB_INSTALL_DIR}
    -DCMAKE_INSTALL_LIBDIR=lib
  BUILD_IN_SOURCE   ON
  BUILD_COMMAND     make exiv2lib
  INSTALL_DIR       ${SB_INSTALL_DIR}
  LOG_DOWNLOAD      OFF
  LOG_CONFIGURE     OFF
  LOG_BUILD         OFF
)
VERSION
@ -1 +1 @@
- 0.4.1
+ 0.5.0
@ -1,67 +0,0 @@
FROM phusion/baseimage

# Env variables
ENV DEBIAN_FRONTEND noninteractive

#Install dependencies
#Required Requisites
RUN add-apt-repository -y ppa:ubuntugis/ppa
RUN add-apt-repository -y ppa:george-edison55/cmake-3.x
RUN apt-get update -y

# All packages (Will install much faster)
RUN apt-get install --no-install-recommends -y git cmake python-pip build-essential software-properties-common python-software-properties libgdal-dev gdal-bin libgeotiff-dev \
    libgtk2.0-dev libavcodec-dev libavformat-dev libswscale-dev python-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libflann-dev \
    libproj-dev libxext-dev liblapack-dev libeigen3-dev libvtk6-dev python-networkx libgoogle-glog-dev libsuitesparse-dev libboost-filesystem-dev libboost-iostreams-dev \
    libboost-regex-dev libboost-python-dev libboost-date-time-dev libboost-thread-dev python-pyproj python-empy python-nose python-pyside \
    liblas-bin python-matplotlib libatlas-base-dev swig2.0 python-wheel libboost-log-dev libjsoncpp-dev python-gdal

RUN apt-get remove libdc1394-22-dev
RUN pip install --upgrade pip
RUN pip install setuptools
RUN pip install -U PyYAML exifread gpxpy xmltodict catkin-pkg appsettings https://github.com/OpenDroneMap/gippy/archive/numpyfix.zip loky shapely scipy numpy==1.15.4 pyproj psutil repoze.lru

ENV PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python2.7/dist-packages"
ENV PYTHONPATH="$PYTHONPATH:/code/SuperBuild/src/opensfm"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

# Prepare directories

RUN mkdir /code
WORKDIR /code

# Copy repository files
COPY CMakeLists.txt /code/CMakeLists.txt
COPY configure.sh /code/configure.sh
COPY /modules/ /code/modules/
COPY /opendm/ /code/opendm/
COPY /patched_files/ /code/patched_files/
COPY run.py /code/run.py
COPY run.sh /code/run.sh
COPY /scripts/ /code/scripts/
COPY /SuperBuild/cmake/ /code/SuperBuild/cmake/
COPY /SuperBuild/CMakeLists.txt /code/SuperBuild/CMakeLists.txt
COPY docker.settings.yaml /code/settings.yaml
COPY VERSION /code/VERSION

# Replace g++ and gcc with our own scripts
COPY /docker/ /code/docker/
RUN mv -v /usr/bin/gcc /usr/bin/gcc_real && mv -v /usr/bin/g++ /usr/bin/g++_real && cp -v /code/docker/gcc /usr/bin/gcc && cp -v /code/docker/g++ /usr/bin/g++

#Compile code in SuperBuild and root directories

RUN cd SuperBuild && mkdir build && cd build && cmake .. && make -j$(nproc) && cd ../.. && mkdir build && cd build && cmake .. && make -j$(nproc)

RUN apt-get -y remove libgl1-mesa-dri git cmake python-pip build-essential
RUN apt-get install -y libvtk6-dev

# Cleanup APT
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Clean Superbuild

RUN rm -rf /code/SuperBuild/download /code/SuperBuild/src/opencv /code/SuperBuild/src/pcl /code/SuperBuild/src/pdal /code/SuperBuild/src/opengv /code/SuperBuild/src/mvstexturing /code/SuperBuild/src/ceres /code/SuperBuild/build/opencv /code/SuperBuild/src/exiv2lib

# Entry point
ENTRYPOINT ["python", "/code/run.py", "code"]
@ -1,3 +1,3 @@
- The g++ and gcc scripts in this directory are used to replace the real g++ and gcc programs so that compilation across all projects (including dependencies) uses the -march=core2 flag, which allows us to build a docker image compatible with most Intel based CPUs.
+ The g++ and gcc scripts in this directory are used to replace the real g++ and gcc programs so that compilation across all projects (including dependencies) uses the -march=nehalem flag, which allows us to build a docker image compatible with most Intel based CPUs.

- Without the -march=core2 flag, a docker image will contain binaries that are optimized for the machine that built the image, and will not run on older machines.
+ Without the -march=nehalem flag, a docker image will contain binaries that are optimized for the machine that built the image, and will not run on older machines.
@ -9,4 +9,4 @@ do
    fi
done

- /usr/bin/g++_real -march=core2 $args
+ /usr/bin/g++_real -march=nehalem $args
@ -9,4 +9,4 @@ do
    fi
done

- /usr/bin/gcc_real -march=core2 $args
+ /usr/bin/gcc_real -march=nehalem $args
@ -4,7 +4,6 @@ if (NOT CMAKE_BUILD_TYPE)
endif()

# Add ODM sub-modules
- add_subdirectory(odm_extract_utm)
add_subdirectory(odm_georef)
add_subdirectory(odm_orthophoto)
add_subdirectory(odm_cleanmesh)
@ -1,21 +0,0 @@
project(odm_extract_utm)
cmake_minimum_required(VERSION 2.8)

set(PROJ4_INCLUDE_DIR "/usr/include/" CACHE "PROJ4_INCLUDE_DIR" "Path to the proj4 inlcude directory")

find_library(PROJ4_LIBRARY "libproj.so" PATHS "/usr/lib" "/usr/lib/x86_64-linux-gnu")
find_library(EXIV2_LIBRARY "libexiv2.so.27" PATHS "${PROJECT_SOURCE_DIR}/../../SuperBuild/install/lib")
include_directories("${PROJECT_SOURCE_DIR}/../../SuperBuild/install/include")

# Add compiler options.
add_definitions(-Wall -Wextra)

# Add source directory
aux_source_directory("./src" SRC_LIST)

# Add exectuteable
add_executable(${PROJECT_NAME} ${SRC_LIST})

# Link
target_link_libraries(${PROJECT_NAME} ${PROJ4_LIBRARY} ${EXIV2_LIBRARY})
@ -1,31 +0,0 @@
#include "Logger.hpp"


Logger::Logger(bool isPrintingInCout) : isPrintingInCout_(isPrintingInCout)
{

}

Logger::~Logger()
{

}

void Logger::printToFile(std::string filePath)
{
    std::ofstream file(filePath.c_str(), std::ios::binary);
    file << logStream_.str();
    file.close();
}

bool Logger::isPrintingInCout() const
{
    return isPrintingInCout_;
}

void Logger::setIsPrintingInCout(bool isPrintingInCout)
{
    isPrintingInCout_ = isPrintingInCout;
}
@ -1,68 +0,0 @@
#pragma once

// STL
#include <string>
#include <sstream>
#include <fstream>
#include <iostream>

/*!
 * \brief The Logger class is used to store program messages in a log file.
 * \details By using the << operator while printInCout is set, the class writes both to
 * cout and to file, if the flag is not set, output is written to file only.
 */
class Logger
{
public:
    /*!
     * \brief Logger Contains functionality for printing and displaying log information.
     * \param printInCout Flag toggling if operator << also writes to cout.
     */
    Logger(bool isPrintingInCout = true);

    /*!
     * \brief Destructor.
     */
    ~Logger();

    /*!
     * \brief print Prints the contents of the log to file.
     * \param filePath Path specifying where to write the log.
     */
    void printToFile(std::string filePath);

    /*!
     * \brief isPrintingInCout Check if console printing flag is set.
     * \return Console printing flag.
     */
    bool isPrintingInCout() const;

    /*!
     * \brief setIsPrintingInCout Set console printing flag.
     * \param isPrintingInCout Value, if true, messages added to the log are also printed in cout.
     */
    void setIsPrintingInCout(bool isPrintingInCout);

    /*!
     * Operator for printing messages to log and in the standard output stream if desired.
     */
    template<class T>
    friend Logger& operator<< (Logger &log, T t)
    {
        // If console printing is enabled.
        if (log.isPrintingInCout_)
        {
            std::cout << t;
            std::cout.flush();
        }
        // Write to log.
        log.logStream_ << t;

        return log;
    }

private:
    bool isPrintingInCout_;         /*!< If flag is set, log is printed in cout and written to the log. */

    std::stringstream logStream_;   /*!< Stream for storing the log. */
};
@ -1,353 +0,0 @@
// STL
#include <iostream>
#include <fstream>
#include <string>
#include <cstring>
#include <iomanip>
#include <vector>
#include <cstdlib>
#include <stdio.h>
#include <ctype.h>
#include <sstream>
#include <math.h>
#include <exiv2/exiv2.hpp>

// Proj4
#include <proj_api.h>

// This
#include "UtmExtractor.hpp"

UtmExtractor::UtmExtractor() : log_(false)
{
    logFile_ = "odm_extracting_utm_log.txt";
}

UtmExtractor::~UtmExtractor()
{
}



int UtmExtractor::run(int argc, char **argv)
{
    if (argc <= 1)
    {
        printHelp();
        return EXIT_SUCCESS;
    }

    try
    {
        parseArguments(argc, argv);
        extractUtm();
    }
    catch (const UtmExtractorException& e)
    {
        log_.setIsPrintingInCout(true);
        log_ << e.what() << "\n";
        log_.printToFile(logFile_);
        log_ << "For more detailed information, see log file." << "\n";
        return EXIT_FAILURE;
    }
    catch (const std::runtime_error& e)
    {
        log_.setIsPrintingInCout(true);
        log_ << "Error in OdmExtractUtm:\n";
        log_ << e.what() << "\n";
        log_.printToFile(logFile_);
        log_ << "For more detailed information, see log file." << "\n";
        return EXIT_FAILURE;
    }
    catch (...)
    {
        log_.setIsPrintingInCout(true);
        log_ << "Unknown error in OdmExtractUtm:\n";
        log_.printToFile(logFile_);
        log_ << "For more detailed information, see log file." << "\n";
        return EXIT_FAILURE;
    }

    log_.printToFile(logFile_);
    return EXIT_SUCCESS;
}

void UtmExtractor::parseArguments(int argc, char **argv)
{
    for(int argIndex = 1; argIndex < argc; ++argIndex)
    {
        // The argument to be parsed
        std::string argument = std::string(argv[argIndex]);
        if (argument == "-help")
        {
            printHelp();
        }
        else if (argument == "-verbose")
        {
            log_.setIsPrintingInCout(true);
        }
        else if (argument == "-imageListFile")
        {
            ++argIndex;
            if (argIndex >= argc)
            {
                throw UtmExtractorException("Missing argument for '" + argument + "'.");
            }
            imageListFileName_ = std::string(argv[argIndex]);
            std::ifstream testFile(imageListFileName_.c_str(), std::ios_base::binary);
            if (!testFile.is_open())
            {
                throw UtmExtractorException("Argument '" + argument + "' has a bad value (file not accessible).");
            }
            log_ << "imageListFile was set to: " << imageListFileName_ << "\n";
        }
        else if (argument == "-imagesPath")
        {
            ++argIndex;
            if (argIndex >= argc)
            {
                throw UtmExtractorException("Missing argument for '" + argument + "'.");
            }
            std::stringstream ss(argv[argIndex]);
            ss >> imagesPath_;
            if (ss.bad())
            {
                throw UtmExtractorException("Argument '" + argument + "' has a bad value. (wrong type)");
            }
            log_ << "imagesPath was set to: " << imagesPath_ << "\n";
        }
        else if (argument == "-outputCoordFile")
        {
            ++argIndex;
            if (argIndex >= argc)
            {
                throw UtmExtractorException("Missing argument for '" + argument + "'.");
            }
            std::stringstream ss(argv[argIndex]);
            ss >> outputCoordFileName_;
            if (ss.bad())
            {
                throw UtmExtractorException("Argument '" + argument + "' has a bad value. (wrong type)");
            }
            log_ << "outputCoordFile was set to: " << outputCoordFileName_ << "\n";
        }
        else if (argument == "-logFile")
        {
            ++argIndex;
            if (argIndex >= argc)
            {
                throw UtmExtractorException("Missing argument for '" + argument + "'.");
            }
            std::stringstream ss(argv[argIndex]);
            ss >> logFile_;
            if (ss.bad())
            {
                throw UtmExtractorException("Argument '" + argument + "' has a bad value. (wrong type)");
            }
            log_ << "logFile_ was set to: " << logFile_ << "\n";
        }
        else
        {
            printHelp();
            throw UtmExtractorException("Unrecognised argument '" + argument + "'.");
        }
    }

}

void UtmExtractor::extractUtm()
{
    // Open file listing all used camera images
    std::ifstream imageListStream(imageListFileName_.c_str());
    if (!imageListStream.good()) {
        throw UtmExtractorException("Failed to open " + imageListFileName_ + " for reading.");
    }

    // Traverse images
    int utmZone = 99; // for auto-select
    char hemisphere;
    std::string imageFilename;
    std::vector<Coord> coords;
    while (getline(imageListStream, imageFilename)) {

        // Read image and load metadata
        Exiv2::Image::AutoPtr image = Exiv2::ImageFactory::open(imagesPath_ + "/" + imageFilename);
        if (image.get() == 0) {
            std::string error(imageFilename);
            error += ": Image cannot be read";
            throw std::runtime_error(error.c_str());
        }
        else {
            image->readMetadata();

            Exiv2::ExifData &exifData = image->exifData();
            if (exifData.empty()) {
                std::string error(imageFilename);
                error += ": No Exif data found in the file";
                throw std::runtime_error(error.c_str());
            }

            // Parse exif data for positional data
            double lon, lat, alt = 0.0;

            parsePosition(exifData, lon, lat, alt);

            if (lon == 0.0 || lat == 0.0 || alt == 0.0) {
                std::string error("Failed parsing GPS position for " + imageFilename);
                throw UtmExtractorException(error);
            }
            // Convert to UTM
            double x, y, z = 0.0;
            convert(lon, lat, alt, x, y, z, utmZone, hemisphere);
            if (x == 0.0 || y == 0.0 || z == 0.0) {
                std::string error("Failed to convert GPS position to UTM for " + imageFilename);
                throw UtmExtractorException(error);
            }
            coords.push_back(Coord(x, y, z));
        }
    }
    imageListStream.close();

    // Calculate average
    double dx = 0.0, dy = 0.0;
    double num = static_cast<double>(coords.size());
    for (std::vector<Coord>::iterator iter = coords.begin(); iter != coords.end(); ++iter) {
        dx += iter->x/num;
        dy += iter->y/num;
    }

    dx = floor(dx);
    dy = floor(dy);

    // Open output file
    std::ofstream outputCoordStream(outputCoordFileName_.c_str());
    if (!outputCoordStream.good()) {
        throw UtmExtractorException("Failed to open " + outputCoordFileName_ + " for writing.");
    }
    outputCoordStream.precision(10);

    // Write coordinate file
    outputCoordStream << "WGS84 UTM " << utmZone << hemisphere << std::endl;
    outputCoordStream << dx << " " << dy << std::endl;
    for (std::vector<Coord>::iterator iter = coords.begin(); iter != coords.end(); ++iter) {
        outputCoordStream << (iter->x - dx) << " " << (iter->y - dy) << " " << iter->z << std::endl;
    }

    outputCoordStream.close();

}

void UtmExtractor::convert(const double &lon, const double &lat, const double &alt, double &x, double &y, double &z, int &utmZone, char &hemisphere)
{
    // Create WGS84 longitude/latitude coordinate system
    projPJ pjLatLon = pj_init_plus("+proj=latlong +datum=WGS84");
    if (!pjLatLon) {
        throw UtmExtractorException("Couldn't create WGS84 coordinate system with PROJ.4.");
    }

    // Calculate UTM zone if it's set to magic 99
    // NOTE: Special UTM cases in Norway/Svalbard not supported here
    if (utmZone == 99) {
        utmZone = ((static_cast<int>(floor((lon + 180.0)/6.0)) % 60) + 1);
        if (lat < 0)
            hemisphere = 'S';
        else
            hemisphere = 'N';
    }

    std::ostringstream ostr;
    ostr << utmZone;
    if (hemisphere == 'S')
        ostr << " +south";

    // Create UTM coordinate system
    projPJ pjUtm = pj_init_plus(("+proj=utm +datum=WGS84 +zone=" + ostr.str()).c_str());
    if (!pjUtm) {
        throw UtmExtractorException("Couldn't create UTM coordinate system with PROJ.4.");
    }

    // Convert to radians
    x = lon * DEG_TO_RAD;
    y = lat * DEG_TO_RAD;
    z = alt;

    // Transform
    int res = pj_transform(pjLatLon, pjUtm, 1, 1, &x, &y, &z);
    if (res != 0) {
        throw UtmExtractorException("Failed to transform coordinates");
    }
}

void UtmExtractor::parsePosition(Exiv2::ExifData &exifData, double &lon, double &lat, double &alt)
{
    Exiv2::Exifdatum& latitudeTag = exifData["Exif.GPSInfo.GPSLatitude"];
    Exiv2::Exifdatum& latitudeRef = exifData["Exif.GPSInfo.GPSLatitudeRef"];
    Exiv2::Exifdatum& longitudeTag = exifData["Exif.GPSInfo.GPSLongitude"];
    Exiv2::Exifdatum& longitudeRef = exifData["Exif.GPSInfo.GPSLongitudeRef"];
    Exiv2::Exifdatum& altitudeTag = exifData["Exif.GPSInfo.GPSAltitude"];
    Exiv2::Exifdatum& altitudeRef = exifData["Exif.GPSInfo.GPSAltitudeRef"];

    // Latitude: parse into a double
    if (latitudeTag.count() < 3)
        throw UtmExtractorException("Image is missing GPS Latitude data");
    else {
        Exiv2::URational rLat[] = {latitudeTag.toRational(0), latitudeTag.toRational(1), latitudeTag.toRational(2)};
        bool south = (strcmp(latitudeRef.toString().c_str(), "S") == 0);
        double degrees, minutes, seconds;

        degrees = (double)rLat[0].first / (double)rLat[0].second;
        minutes = (double)rLat[1].first / (double)rLat[1].second / 60.0;
        seconds = (double)rLat[2].first / (double)rLat[2].second / 3600.0;
        lat = (south ? -1 : 1) * (degrees + minutes + seconds);
    }

    // Longitude
    if (longitudeTag.count() < 3)
        throw UtmExtractorException("Image is missing GPS Longitude data");
    else {
        Exiv2::URational rLon[] = {longitudeTag.toRational(0), longitudeTag.toRational(1), longitudeTag.toRational(2)};
        bool west = (strcmp(longitudeRef.toString().c_str(), "W") == 0);
        double degrees, minutes, seconds;

        degrees = (double)rLon[0].first / (double)rLon[0].second;
        minutes = (double)rLon[1].first / (double)rLon[1].second / 60.0;
        seconds = (double)rLon[2].first / (double)rLon[2].second / 3600.0;
        lon = (west ? -1 : 1) * (degrees + minutes + seconds);
    }

    // Altitude
    if (altitudeTag.count() < 1)
        throw UtmExtractorException("Image is missing GPS Altitude data");
    else {
        Exiv2::URational rAlt = altitudeTag.toRational(0);
        bool below = (altitudeRef.count() >= 1 && altitudeRef.toLong() == 1);
        alt = (below ? -1 : 1) * (double) rAlt.first / (double) rAlt.second;
    }
}

void UtmExtractor::printHelp()
{
    log_.setIsPrintingInCout(true);

    log_ << "Purpose:\n";
    log_ << "Create a coordinate file containing the GPS positions of all cameras to be used later in the ODM toolchain for automatic georeferecing.\n";

    log_ << "Usage:\n";
    log_ << "The program requires paths to a image list file, a image folder path and an output textfile to store the results.\n";

    log_ << "The following flags are available:\n";
    log_ << "Call the program with flag \"-help\", or without parameters to print this message, or check any generated log file.\n";
    log_ << "Call the program with flag \"-verbose\", to print log messages in the standard output.\n\n";

    log_ << "Parameters are specified as: \"-<argument name> <argument>\", (without <>), and the following parameters are configurable:\n";
    log_ << "\"-imageListFile <path>\" (mandatory)\n";
    log_ << "Path to the list containing the image names used in the bundle.out file.\n";

    log_ << "\"-imagesPath <path>\" (mandatory)\n";
    log_ << "Path folder containing all images in the imageListFile.\n";

    log_ << "\"-outputCoordFile <path>\" (mandatory)\n";
    log_ << "Path output textfile.\n";

    log_.setIsPrintingInCout(false);
}
@ -1,98 +0,0 @@
#pragma once

// Logging
#include "Logger.hpp"
#include <exiv2/exiv2.hpp>


/*!
 * \breif The Coord struct Class used in UtmExtractor to extract GPS positions from images and ODM output
 */
struct Coord
{
    double x, y, z;
    Coord(double ix, double iy, double iz) : x(ix), y(iy), z(iz) {}
};

class UtmExtractor
{
public:
    UtmExtractor();
    ~UtmExtractor();

    /*!
     * \brief run Runs the texturing functionality using the provided input arguments.
     *            For a list of the accepted arguments, please see the main page documentation or
     *            call the program with parameter "-help".
     * \param argc Application argument count.
     * \param argv Argument values.
     * \return 0 if successful.
     */
    int run (int argc, char **argv);

private:

    /*!
     * \brief parseArguments Parses command line arguments.
     * \param argc Application argument count.
     * \param argv Argument values.
     */
    void parseArguments(int argc, char **argv);

    /*!
     * \breif extractUtm Performs the extraction of coordinates inside the run function.
     */
    void extractUtm();

    /*!
     * /brief Static method that converts a WGS84 longitude/latitude coordinate in decimal degrees to UTM.
     *
     * \param lon The longitude in decimal degrees (negative if western hemisphere).
     * \param lat The latitude in decimal degrees (negative if southern hemisphere).
     * \param alt The altitude in meters.
     * \param x Output parameter, the easting UTM value in meters.
     * \param y Output parameter, the northing UTM value in meters.
     * \param utmZone Input or output parameter, the UTM zone. Set to 99 for automatic selection.
     * \param hemisphere Input or output parameter, 'N' for norther hemisphere, 'S' for southern. Automatically selected if utmZone is 99.
     *
     * \returns True if successful (otherwise output parameters are 0)
     */
    static void convert(const double &lon, const double &lat, const double &alt, double &x, double &y, double &z, int &utmZone, char &hemisphere);

    /*!
     * \brief Static method that parses a GPS position from jhead data.
     *
     * \param jheadDataStream Jhead data stream with EXIF information.
     * \param lon Output parameter, the longitude in decimal degrees.
     * \param lat Output parameter, the latitude in decimal degrees.
     * \param alt Output parameter, the altitude in meters.
     *
     * \returns True if successful (otherwise output parameters are 0)
     */
    static void parsePosition(Exiv2::ExifData &exifData, double &lon, double &lat, double &alt);

    /*!
     * \brief printHelp Prints help, explaining usage. Can be shown by calling the program with arguments: "-help".
     */
    void printHelp();

    std::string imageListFileName_;    /**< Path to the image list. */
    std::string outputCoordFileName_;  /**< Path to the file to store the output textfile. */
    std::string imagesPath_;           /**< Path to the folder with all images in the image list. */

    Logger log_;                       /**< Logging object. */
    std::string logFile_;              /**< Path to store the log file. */

};

class UtmExtractorException : public std::exception
{
public:
    UtmExtractorException() : message("Error in OdmExtractUtm") {}
    UtmExtractorException(std::string msgInit) : message("Error in OdmExtractUtm:\n" + msgInit) {}
    ~UtmExtractorException() throw() {}
    virtual const char* what() const throw() {return message.c_str(); }

private:
    std::string message;    /**< The error message. */
};
@ -1,9 +0,0 @@


#include "UtmExtractor.hpp"

int main (int argc, char **argv)
{
    UtmExtractor utmExtractor;
    return utmExtractor.run(argc, argv);
}
@ -1,4 +1,5 @@
#include <iostream>
+ #include <algorithm>
#include <pdal/filters/OutlierFilter.hpp>
#include <pdal/filters/RangeFilter.hpp>
#include "CmdLineParser.h"
@ -13,12 +14,13 @@ cmdLineParameter< char* >
    OutputFile( "outputFile" );
cmdLineParameter< float >
    StandardDeviation( "sd" ) ,
-   MeanK ( "meank" );
+   MeanK ( "meank" ) ,
+   Confidence ( "confidence" );
cmdLineReadable
    Verbose( "verbose" );

cmdLineReadable* params[] = {
-   &InputFile , &OutputFile , &StandardDeviation, &MeanK, &Verbose ,
+   &InputFile , &OutputFile , &StandardDeviation, &MeanK, &Confidence, &Verbose ,
    NULL
};
@ -28,6 +30,7 @@ void help(char *ex){
    << "\t -" << OutputFile.name << " <output PLY point cloud>" << std::endl
    << "\t [-" << StandardDeviation.name << " <standard deviation threshold>]" << std::endl
    << "\t [-" << MeanK.name << " <mean number of neighbors >]" << std::endl
+   << "\t [-" << Confidence.name << " <lower bound filter for confidence property>]" << std::endl

    << "\t [-" << Verbose.name << "]" << std::endl;
    exit(EXIT_FAILURE);
@ -65,13 +68,31 @@ int main(int argc, char **argv) {
    pdal::FloatPlyReader plyReader;
    plyReader.setOptions(inPlyOpts);

+   pdal::RangeFilter confidenceFilter;
+   if (Confidence.set){
+       pdal::Options confidenceFilterOpts;
+       float confidenceValue = std::min(1.0f, std::max(Confidence.value, 0.0f));
+       std::ostringstream confidenceLimit;
+       confidenceLimit << "confidence[" << confidenceValue << ":1]";
+       confidenceFilterOpts.add("limits", confidenceLimit.str());
+
+       confidenceFilter.setInput(plyReader);
+       confidenceFilter.setOptions(confidenceFilterOpts);
+   }

    pdal::Options outlierOpts;
    outlierOpts.add("method", "statistical");
    outlierOpts.add("mean_k", MeanK.value);
    outlierOpts.add("multiplier", StandardDeviation.value);

    pdal::OutlierFilter outlierFilter;
-   outlierFilter.setInput(plyReader);
+   if (Confidence.set){
+       logWriter("Filtering confidence\n");
+       outlierFilter.setInput(confidenceFilter);
+   }else{
+       outlierFilter.setInput(plyReader);
+   }
    outlierFilter.setOptions(outlierOpts);

    pdal::Options rangeOpts;
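Combining the flags registered above with the arguments that point_cloud.py (later in this commit) passes to this module, a sketch of an invocation using the new confidence filter might be (file names hypothetical):

    odm_filterpoints -inputFile dense.ply -outputFile filtered.ply \
        -sd 2.5 -meank 16 -confidence 0.6 -verbose

Here `-confidence 0.6` keeps only points whose confidence property lies in [0.6, 1] before the statistical outlier filter runs.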
@ -9,18 +9,3 @@ def get_max_memory(minimum = 5, use_at_most = 0.5):
    """
    return max(minimum, (100 - virtual_memory().percent) * use_at_most)

-
- def get_max_concurrency_for_dem(available_cores, input_file, use_at_most = 0.8):
-     """
-     DEM generation requires ~2x the input file size of memory per available core.
-     :param available_cores number of cores available (return value will never exceed this value)
-     :param input_file path to input file
-     :use_at_most use at most this fraction of the available memory when calculating a concurrency value. 0.9 = assume that we can only use 90% of available memory.
-     :return maximum number of cores recommended to use for DEM processing.
-     """
-     memory_available = virtual_memory().available * use_at_most
-     file_size = os.path.getsize(input_file)
-     memory_required_per_core = max(1, file_size * 2)
-
-     return min(available_cores, max(1, int(memory_available) / int(memory_required_per_core)))
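As a worked example of the heuristic the removed function implemented (all numbers hypothetical):

    # DEM generation needs roughly 2x the input file size of memory per core.
    available_cores = 8
    memory_available = 16e9 * 0.8          # 16 GB of RAM, use at most 80%
    file_size = 3e9                        # 3 GB input point cloud
    memory_required_per_core = max(1, file_size * 2)

    workers = min(available_cores,
                  max(1, int(memory_available) // int(memory_required_per_core)))
    # workers == 2: only 2 of the 8 cores can be fed without exhausting memory

The new create_dem in this commit drops this heuristic and instead retries tile generation single-threaded when a multi-threaded run fails (see opendm/dem/commands.py below).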
@ -7,7 +7,7 @@ from appsettings import SettingsParser
import sys

# parse arguments
- processopts = ['dataset', 'opensfm', 'slam', 'smvs',
+ processopts = ['dataset', 'opensfm', 'slam', 'mve', 'odm_filterpoints',
               'odm_meshing', 'odm_25dmeshing', 'mvs_texturing', 'odm_georeferencing',
               'odm_dem', 'odm_orthophoto']
@ -171,6 +171,15 @@ def config():
                        help='Run local bundle adjustment for every image added to the reconstruction and a global '
                             'adjustment every 100 images. Speeds up reconstruction for very large datasets.')

+   parser.add_argument('--mve-confidence',
+                       metavar='<float: 0 <= x <= 1>',
+                       type=float,
+                       default=0.60,
+                       help=('Discard points that have less than a certain confidence threshold. '
+                             'This only affects dense reconstructions performed with MVE. '
+                             'Higher values discard more points. '
+                             'Default: %(default)s'))
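A hypothetical invocation of the new flag, following the README's command style (higher values discard more points):

    python run.py --mve-confidence 0.75 project-name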

    parser.add_argument('--use-3dmesh',
                        action='store_true',
                        default=False,
@ -194,41 +203,6 @@ def config():
                        'resizes images when necessary, resulting in faster processing and '
                        'lower memory usage. Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality.')

-   parser.add_argument('--smvs-alpha',
-                       metavar='<float>',
-                       default=1.0,
-                       type=float,
-                       help='Regularization parameter, a higher alpha leads to '
-                            'smoother surfaces. Default: %(default)s')
-
-   parser.add_argument('--smvs-output-scale',
-                       metavar='<positive integer>',
-                       default=1,
-                       type=int,
-                       help='The scale of the optimization - the '
-                            'finest resolution of the bicubic patches will have the'
-                            ' size of the respective power of 2 (e.g. 2 will '
-                            'optimize patches covering down to 4x4 pixels). '
-                            'Default: %(default)s')
-
-   parser.add_argument('--smvs-enable-shading',
-                       action='store_true',
-                       default=False,
-                       help='Use shading-based optimization. This model cannot '
-                            'handle complex scenes. Try to supply linear images to '
-                            'the reconstruction pipeline that are not tone mapped '
-                            'or altered as this can also have very negative effects '
-                            'on the reconstruction. If you have simple JPGs with SRGB '
-                            'gamma correction you can remove it with the --smvs-gamma-srgb '
-                            'option. Default: %(default)s')
-
-   parser.add_argument('--smvs-gamma-srgb',
-                       action='store_true',
-                       default=False,
-                       help='Apply inverse SRGB gamma correction. To be used '
-                            'with --smvs-enable-shading when you have simple JPGs with '
-                            'SRGB gamma correction. Default: %(default)s')

    parser.add_argument('--mesh-size',
                        metavar='<positive integer>',
                        default=100000,
@ -252,7 +226,7 @@ def config():
                             'and default value: %(default)s'))

    parser.add_argument('--mesh-point-weight',
-                       metavar='<interpolation weight>',
+                       metavar='<positive float>',
                        default=4,
                        type=float,
                        help=('This floating point value specifies the importance'
@ -305,6 +279,38 @@ def config():
                             '\nDefault: '
                             '%(default)s')

+   parser.add_argument('--smrf-scalar',
+                       metavar='<positive float>',
+                       type=float,
+                       default=1.25,
+                       help='Simple Morphological Filter elevation scalar parameter. '
+                            '\nDefault: '
+                            '%(default)s')
+
+   parser.add_argument('--smrf-slope',
+                       metavar='<positive float>',
+                       type=float,
+                       default=0.15,
+                       help='Simple Morphological Filter slope parameter (rise over run). '
+                            '\nDefault: '
+                            '%(default)s')
+
+   parser.add_argument('--smrf-threshold',
+                       metavar='<positive float>',
+                       type=float,
+                       default=0.5,
+                       help='Simple Morphological Filter elevation threshold parameter (meters). '
+                            '\nDefault: '
+                            '%(default)s')
+
+   parser.add_argument('--smrf-window',
+                       metavar='<positive float>',
+                       type=float,
+                       default=18.0,
+                       help='Simple Morphological Filter window radius parameter (meters). '
+                            '\nDefault: '
+                            '%(default)s')
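These four defaults map one-to-one onto PDAL's SMRF filter options, as assembled by run_pdaltranslate_smrf later in this commit; the equivalent standalone command would be roughly (file names hypothetical):

    pdal translate -i input.las -o classified.las smrf \
        --filters.smrf.scalar=1.25 \
        --filters.smrf.slope=0.15 \
        --filters.smrf.threshold=0.5 \
        --filters.smrf.window=18.0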

    parser.add_argument('--texturing-data-term',
                        metavar='<string>',
                        default='gmi',
@ -385,14 +391,14 @@ def config():
    parser.add_argument('--dtm',
                        action='store_true',
                        default=False,
-                       help='Use this tag to build a DTM (Digital Terrain Model, ground only) using a progressive '
-                            'morphological filter. Check the --dem* parameters for fine tuning.')
+                       help='Use this tag to build a DTM (Digital Terrain Model, ground only) using a simple '
+                            'morphological filter. Check the --dem* and --smrf* parameters for finer tuning.')

    parser.add_argument('--dsm',
                        action='store_true',
                        default=False,
                        help='Use this tag to build a DSM (Digital Surface Model, ground + objects) using a progressive '
-                            'morphological filter. Check the --dem* parameters for fine tuning.')
+                            'morphological filter. Check the --dem* parameters for finer tuning.')

    parser.add_argument('--dem-gapfill-steps',
                        metavar='<positive integer>',
@ -22,15 +22,15 @@ opensfm_path = os.path.join(superbuild_path, "src/opensfm")
# define orb_slam2 path
orb_slam2_path = os.path.join(superbuild_path, "src/orb_slam2")

- # define smvs join_paths
+ # define mve join_paths
makescene_path = os.path.join(superbuild_path, 'src', 'elibs', 'mve', 'apps', 'makescene', 'makescene') #TODO: don't install in source
- smvs_path = os.path.join(superbuild_path, 'src', 'elibs', 'smvs', 'app', 'smvsrecon')
+ dmrecon_path = os.path.join(superbuild_path, 'src', 'elibs', 'mve', 'apps', 'dmrecon', 'dmrecon')
+ scene2pset_path = os.path.join(superbuild_path, 'src', 'elibs', 'mve', 'apps', 'scene2pset', 'scene2pset')

poisson_recon_path = os.path.join(superbuild_path, 'src', 'PoissonRecon', 'Bin', 'Linux', 'PoissonRecon')
dem2mesh_path = os.path.join(superbuild_path, 'src', 'dem2mesh', 'dem2mesh')
dem2points_path = os.path.join(superbuild_path, 'src', 'dem2points', 'dem2points')


# define mvstex path
mvstex_path = os.path.join(superbuild_path, "install/bin/texrecon")
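A rough sketch of how these MVE binaries chain together (this is not code from the commit; the flags follow MVE's documented conventions and should be treated as assumptions):

    from opendm import context, system

    def mve_densify(sfm_output, mve_scene_dir):
        # makescene converts SfM output into an MVE scene directory
        system.run('%s "%s" "%s"' % (context.makescene_path, sfm_output, mve_scene_dir))
        # dmrecon computes per-view depth maps (-s sets the reconstruction scale)
        system.run('%s -s2 "%s"' % (context.dmrecon_path, mve_scene_dir))
        # scene2pset fuses the depth maps into a dense point cloud (PLY)
        system.run('%s -F2 "%s" "%s/mve_dense.ply"' % (context.scene2pset_path, mve_scene_dir, mve_scene_dir))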
@ -1,128 +1,240 @@ (old and new lines appear interleaved in this hunk; +/- markers were lost in the mirror and are not reconstructed here)
import os, glob
import os
import sys
import gippy
import numpy
from scipy import ndimage
import math
import time
from opendm.system import run
from opendm import point_cloud
from opendm.concurrency import get_max_memory
from scipy import ndimage, signal
from datetime import datetime
from opendm import log
from loky import get_reusable_executor
from functools import partial
try:
    import Queue as queue
except:
    import queue
import threading

from . import pdal

def classify(lasFile, slope=0.15, cellsize=1, maxWindowSize=18, verbose=False):
def classify(lasFile, scalar, slope, threshold, window, verbose=False):
    start = datetime.now()

    try:
        pdal.run_pdaltranslate_smrf(lasFile, lasFile, slope, cellsize, maxWindowSize, verbose)
        pdal.run_pdaltranslate_smrf(lasFile, lasFile, scalar, slope, threshold, window, verbose)
    except:
        raise Exception("Error creating classified file %s" % fout)

    log.ODM_INFO('Created %s in %s' % (os.path.relpath(lasFile), datetime.now() - start))
    return lasFile

error = None

def create_dems(filenames, demtype, radius=['0.56'], gapfill=False,
                outdir='', suffix='', resolution=0.1, max_workers=None, **kwargs):
    """ Create DEMS for multiple radii, and optionally gapfill """
    fouts = []

    create_dem_for_radius = partial(create_dem,
                                    filenames, demtype,
                                    outdir=outdir, suffix=suffix, resolution=resolution, **kwargs)

    with get_reusable_executor(max_workers=max_workers, timeout=None) as e:
        fouts = list(e.map(create_dem_for_radius, radius))

    fnames = {}
    # convert from list of dicts, to dict of lists
    for product in fouts[0].keys():
        fnames[product] = [f[product] for f in fouts]
    fouts = fnames
def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True,
               outdir='', resolution=0.1, max_workers=1, max_tile_size=2048,
               verbose=False, decimation=None):
    """ Create DEM from multiple radii, and optionally gapfill """
    global error
    error = None

    # gapfill all products
    _fouts = {}
    if gapfill:
        for product in fouts.keys():
            # output filename
            fout = os.path.join(outdir, '%s%s.tif' % (demtype, suffix))
            gap_fill(fouts[product], fout)
            _fouts[product] = fout
    start = datetime.now()

    if not os.path.exists(outdir):
        log.ODM_INFO("Creating %s" % outdir)
        os.mkdir(outdir)

    extent = point_cloud.get_extent(input_point_cloud)
    log.ODM_INFO("Point cloud bounds are [minx: %s, maxx: %s] [miny: %s, maxy: %s]" % (extent['minx'], extent['maxx'], extent['miny'], extent['maxy']))
    ext_width = extent['maxx'] - extent['minx']
    ext_height = extent['maxy'] - extent['miny']

    final_dem_resolution = (int(math.ceil(ext_width / float(resolution))),
                            int(math.ceil(ext_height / float(resolution))))
    final_dem_pixels = final_dem_resolution[0] * final_dem_resolution[1]

    num_splits = int(max(1, math.ceil(math.log(math.ceil(final_dem_pixels / float(max_tile_size * max_tile_size)))/math.log(2))))
    num_tiles = num_splits * num_splits
    log.ODM_INFO("DEM resolution is %s, max tile size is %s, will split DEM generation into %s tiles" % (final_dem_resolution, max_tile_size, num_tiles))

    tile_bounds_width = ext_width / float(num_splits)
    tile_bounds_height = ext_height / float(num_splits)

    tiles = []

    for r in radiuses:
        minx = extent['minx']

        for x in range(num_splits):
            miny = extent['miny']
            if x == num_splits - 1:
                maxx = extent['maxx']
            else:
                maxx = minx + tile_bounds_width

            for y in range(num_splits):
                if y == num_splits - 1:
                    maxy = extent['maxy']
                else:
                    maxy = miny + tile_bounds_height

                filename = os.path.join(os.path.abspath(outdir), '%s_r%s_x%s_y%s.tif' % (dem_type, r, x, y))

                tiles.append({
                    'radius': r,
                    'bounds': {
                        'minx': minx,
                        'maxx': maxx,
                        'miny': miny,
                        'maxy': maxy
                    },
                    'filename': filename
                })

                miny = maxy
            minx = maxx

    # Sort tiles by increasing radius
    tiles.sort(key=lambda t: float(t['radius']), reverse=True)

    def process_one(q):
        log.ODM_INFO("Generating %s (%s, radius: %s, resolution: %s)" % (q['filename'], output_type, q['radius'], resolution))

        d = pdal.json_gdal_base(q['filename'], output_type, q['radius'], resolution, q['bounds'])

        if dem_type == 'dsm':
            d = pdal.json_add_classification_filter(d, 2, equality='max')
        elif dem_type == 'dtm':
            d = pdal.json_add_classification_filter(d, 2)

        if decimation is not None:
            d = pdal.json_add_decimation_filter(d, decimation)

        pdal.json_add_readers(d, [input_point_cloud])
        pdal.run_pipeline(d, verbose=verbose)

    def worker():
        global error

        while True:
            (num, q) = pq.get()
            if q is None or error is not None:
                pq.task_done()
                break

            try:
                process_one(q)
            except Exception as e:
                error = e
            finally:
                pq.task_done()

    if max_workers > 1:
        use_single_thread = False
        pq = queue.PriorityQueue()
        threads = []
        for i in range(max_workers):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)

        for t in tiles:
            pq.put((i, t.copy()))

        def stop_workers():
            for i in range(len(threads)):
                pq.put((-1, None))
            for t in threads:
                t.join()

        # block until all tasks are done
        try:
            while pq.unfinished_tasks > 0:
                time.sleep(0.5)
        except KeyboardInterrupt:
            print("CTRL+C terminating...")
            stop_workers()
            sys.exit(1)

        stop_workers()

        if error is not None:
            # Try to reprocess using a single thread
            # in case this was a memory error
            log.ODM_WARNING("DEM processing failed with multiple threads, let's retry with a single thread...")
            use_single_thread = True
    else:
        # only return single filename (first radius run)
        for product in fouts.keys():
            _fouts[product] = fouts[product][0]
        use_single_thread = True

    return _fouts
    if use_single_thread:
        # Boring, single thread processing
        for q in tiles:
            process_one(q)

    output_file = "%s.tif" % dem_type
    output_path = os.path.abspath(os.path.join(outdir, output_file))

def create_dem(filenames, demtype, radius, decimation=None,
               products=['idw'], outdir='', suffix='', verbose=False, resolution=0.1):
    """ Create DEM from collection of LAS files """
    start = datetime.now()
    # filename based on demtype, radius, and optional suffix
    bname = os.path.join(os.path.abspath(outdir), '%s_r%s%s' % (demtype, radius, suffix))
    ext = 'tif'

    fouts = {o: bname + '.%s.%s' % (o, ext) for o in products}
    prettyname = os.path.relpath(bname) + ' [%s]' % (' '.join(products))

    log.ODM_INFO('Creating %s from %s files' % (prettyname, len(filenames)))
    # JSON pipeline
    json = pdal.json_gdal_base(bname, products, radius, resolution)
    # Verify tile results
    for t in tiles:
        if not os.path.exists(t['filename']):
            raise Exception("Error creating %s, %s failed to be created" % (output_file, t['filename']))

    if demtype == 'dsm':
        json = pdal.json_add_classification_filter(json, 2, equality='max')
    elif demtype == 'dtm':
        json = pdal.json_add_classification_filter(json, 2)
    # Create virtual raster
    vrt_path = os.path.abspath(os.path.join(outdir, "merged.vrt"))
    run('gdalbuildvrt "%s" "%s"' % (vrt_path, '" "'.join(map(lambda t: t['filename'], tiles))))

    if decimation is not None:
        json = pdal.json_add_decimation_filter(json, decimation)
    geotiff_path = os.path.abspath(os.path.join(outdir, 'merged.tif'))

    pdal.json_add_readers(json, filenames)
    # Build GeoTIFF
    kwargs = {
        'max_memory': get_max_memory(),
        'threads': max_workers if max_workers else 'ALL_CPUS',
        'vrt': vrt_path,
        'geotiff': geotiff_path
    }

    pdal.run_pipeline(json, verbose=verbose)
    if gapfill:
        run('gdal_fillnodata.py '
            '-co NUM_THREADS={threads} '
            '--config GDAL_CACHEMAX {max_memory}% '
            '-b 1 '
            '-of GTiff '
            '{vrt} {geotiff}'.format(**kwargs))
    else:
        run('gdal_translate '
            '-co NUM_THREADS={threads} '
            '--config GDAL_CACHEMAX {max_memory}% '
            '{vrt} {geotiff}'.format(**kwargs))

    post_process(geotiff_path, output_path)
    os.remove(geotiff_path)

    if os.path.exists(vrt_path): os.remove(vrt_path)
    for t in tiles:
        if os.path.exists(t['filename']): os.remove(t['filename'])

    # verify existence of fout
    exists = True
    for f in fouts.values():
        if not os.path.exists(f):
            exists = False
    if not exists:
        raise Exception("Error creating dems: %s" % ' '.join(fouts))

    log.ODM_INFO('Completed %s in %s' % (prettyname, datetime.now() - start))
    return fouts
    log.ODM_INFO('Completed %s in %s' % (output_file, datetime.now() - start))


def gap_fill(filenames, fout):
    """ Gap fill from higher radius DTMs, then fill remainder with interpolation """
def post_process(geotiff_path, output_path, smoothing_iterations=1):
    """ Apply median smoothing """
    start = datetime.now()

    if len(filenames) == 0:
        raise Exception('No filenames provided!')
    if not os.path.exists(geotiff_path):
        raise Exception('File %s does not exist!' % geotiff_path)

    log.ODM_INFO('Starting gap-filling with nearest interpolation...')
    filenames = sorted(filenames)
    log.ODM_INFO('Starting post processing (smoothing)...')

    imgs = map(gippy.GeoImage, filenames)
    nodata = imgs[0][0].nodata()
    arr = imgs[0][0].read()

    for i in range(1, len(imgs)):
        locs = numpy.where(arr == nodata)
        arr[locs] = imgs[i][0].read()[locs]

    # Nearest neighbor interpolation at bad points
    indices = ndimage.distance_transform_edt(arr == nodata,
                                             return_distances=False,
                                             return_indices=True)
    arr = arr[tuple(indices)]
    img = gippy.GeoImage(geotiff_path)
    nodata = img[0].nodata()
    arr = img[0].read()

    # Median filter (careful, changing the value 5 might require tweaking)
    # the lines below. There's another numpy function that takes care of
    # these edge cases, but it's slower.
    from scipy import signal
    arr = signal.medfilt(arr, 5)
    for i in range(smoothing_iterations):
        log.ODM_INFO("Smoothing iteration %s" % str(i + 1))
        arr = signal.medfilt(arr, 5)

    # Fill corner points with nearest value
    if arr.shape >= (4, 4):
@ -131,13 +243,17 @@ def gap_fill(filenames, fout):
        arr[-1][:2] = arr[-2][0] = arr[-2][1]
        arr[-1][-2:] = arr[-2][-1] = arr[-2][-2]

+   # Median filter leaves a bunch of zeros in nodata areas
+   locs = numpy.where(arr == 0.0)
+   arr[locs] = nodata

    # write output
-   imgout = gippy.GeoImage.create_from(imgs[0], fout)
+   imgout = gippy.GeoImage.create_from(img, output_path)
    imgout.set_nodata(nodata)
    imgout[0].write(arr)
-   fout = imgout.filename()
+   output_path = imgout.filename()
    imgout = None

-   log.ODM_INFO('Completed gap-filling to create %s in %s' % (os.path.relpath(fout), datetime.now() - start))
+   log.ODM_INFO('Completed post processing to create %s in %s' % (os.path.relpath(output_path), datetime.now() - start))

-   return fout
+   return output_path
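As a worked example of the tile-splitting arithmetic in the new create_dem above (extent and resolution hypothetical):

    import math

    max_tile_size = 2048
    resolution = 0.1                        # meters per pixel
    ext_width, ext_height = 1000.0, 800.0   # point cloud extent in meters

    final_dem_pixels = math.ceil(ext_width / resolution) * math.ceil(ext_height / resolution)
    # 10000 * 8000 = 80,000,000 pixels vs. 2048 * 2048 (~4.2M) pixels per tile

    num_splits = int(max(1, math.ceil(math.log(math.ceil(final_dem_pixels / float(max_tile_size * max_tile_size))) / math.log(2))))
    num_tiles = num_splits * num_splits
    print(num_splits, num_tiles)            # 5 splits per axis -> 25 tiles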
@ -48,24 +48,23 @@ def json_base():
    return {'pipeline': []}


- def json_gdal_base(fout, output, radius, resolution=1):
+ def json_gdal_base(filename, output_type, radius, resolution=1, bounds=None):
    """ Create initial JSON for PDAL pipeline containing a Writer element """
    json = json_base()

-   if len(output) > 1:
-       # TODO: we might want to create a multiband raster with max/min/idw
-       # in the future
-       print "More than 1 output, will only create {0}".format(output[0])
-       output = [output[0]]

-   json['pipeline'].insert(0, {
+   d = {
        'type': 'writers.gdal',
        'resolution': resolution,
        'radius': radius,
-       'filename': '{0}.{1}.tif'.format(fout, output[0]),
-       'output_type': output[0],
+       'filename': filename,
+       'output_type': output_type,
        'data_type': 'float'
-   })
+   }

+   if bounds is not None:
+       d['bounds'] = "([%s,%s],[%s,%s])" % (bounds['minx'], bounds['maxx'], bounds['miny'], bounds['maxy'])

+   json['pipeline'].insert(0, d)

    return json
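Assuming the new signature above, a call like the one create_dem makes per tile:

    d = json_gdal_base("dsm_r0.56_x0_y0.tif", "max", 0.56, 0.1,
                       {'minx': 0, 'maxx': 100, 'miny': 0, 'maxy': 80})

...produces a pipeline whose writer stage looks like:

    {
        'type': 'writers.gdal',
        'resolution': 0.1,
        'radius': 0.56,
        'filename': 'dsm_r0.56_x0_y0.tif',
        'output_type': 'max',
        'data_type': 'float',
        'bounds': '([0,100],[0,80])'
    }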
@ -155,7 +154,6 @@ def run_pipeline(json, verbose=False):
    cmd = [
        'pdal',
        'pipeline',
-       '--nostream',
        '-i %s' % jsonfile
    ]
    if verbose:
@ -165,7 +163,7 @@
    os.remove(jsonfile)


- def run_pdaltranslate_smrf(fin, fout, slope, cellsize, maxWindowSize, verbose=False):
+ def run_pdaltranslate_smrf(fin, fout, scalar, slope, threshold, window, verbose=False):
    """ Run PDAL translate """
    cmd = [
        'pdal',
@ -173,11 +171,11 @@ def run_pdaltranslate_smrf(fin, fout, slope, cellsize, maxWindowSize, verbose=Fa
        '-i %s' % fin,
        '-o %s' % fout,
        'smrf',
-       '--filters.smrf.cell=%s' % cellsize,
+       '--filters.smrf.scalar=%s' % scalar,
        '--filters.smrf.slope=%s' % slope,
+       '--filters.smrf.threshold=%s' % threshold,
+       '--filters.smrf.window=%s' % window,
    ]
-   if maxWindowSize is not None:
-       cmd.append('--filters.smrf.window=%s' % maxWindowSize)

    if verbose:
        print ' '.join(cmd)
@ -0,0 +1,86 @@
import math
from opendm import log
from pyproj import Proj

def extract_utm_coords(photos, images_path, output_coords_file):
    """
    Create a coordinate file containing the GPS positions of all cameras
    to be used later in the ODM toolchain for automatic georeferecing
    :param photos ([ODM_Photo]) list of photos
    :param images_path (str) path to dataset images
    :param output_coords_file (str) path to output coordinates file
    :return None
    """
    if len(photos) == 0:
        raise Exception("No input images, cannot create coordinates file of GPS positions")

    utm_zone = None
    hemisphere = None
    coords = []
    reference_photo = None
    for photo in photos:
        if photo.latitude is None or photo.longitude is None or photo.altitude is None:
            log.ODM_ERROR("Failed parsing GPS position for %s, skipping" % photo.filename)
            continue

        if utm_zone is None:
            utm_zone, hemisphere = get_utm_zone_and_hemisphere_from(photo.longitude, photo.latitude)

        try:
            coord = convert_to_utm(photo.longitude, photo.latitude, photo.altitude, utm_zone, hemisphere)
        except:
            raise Exception("Failed to convert GPS position to UTM for %s" % photo.filename)

        coords.append(coord)

    if utm_zone is None:
        raise Exception("No images seem to have GPS information")

    # Calculate average
    dx = 0.0
    dy = 0.0
    num = float(len(coords))
    for coord in coords:
        dx += coord[0] / num
        dy += coord[1] / num

    dx = int(math.floor(dx))
    dy = int(math.floor(dy))

    # Open output file
    with open(output_coords_file, "w") as f:
        f.write("WGS84 UTM %s%s\n" % (utm_zone, hemisphere))
        f.write("%s %s\n" % (dx, dy))
        for coord in coords:
            f.write("%s %s %s\n" % (coord[0] - dx, coord[1] - dy, coord[2]))


def get_utm_zone_and_hemisphere_from(lon, lat):
    """
    Calculate the UTM zone and hemisphere that a longitude/latitude pair falls on
    :param lon longitude
    :param lat latitude
    :return [utm_zone, hemisphere]
    """
    utm_zone = (int(math.floor((lon + 180.0)/6.0)) % 60) + 1
    hemisphere = 'S' if lat < 0 else 'N'
    return [utm_zone, hemisphere]

def convert_to_utm(lon, lat, alt, utm_zone, hemisphere):
    """
    Convert longitude, latitude and elevation values to UTM
    :param lon longitude
    :param lat latitude
    :param alt altitude
    :param utm_zone UTM zone number
    :param hemisphere one of 'N' or 'S'
    :return [x,y,z] UTM coordinates
    """
    if hemisphere == 'N':
        p = Proj(proj='utm',zone=utm_zone,ellps='WGS84', preserve_units=True)
    else:
        p = Proj(proj='utm',zone=utm_zone,ellps='WGS84', preserve_units=True, south=True)

    x,y = p(lon, lat)
    return [x, y, alt]
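A small usage sketch of the helpers above (the opendm.gps module path is an assumption; the diff does not show the new file's name):

    from opendm.gps import get_utm_zone_and_hemisphere_from, convert_to_utm

    zone, hemi = get_utm_zone_and_hemisphere_from(-122.3, 47.6)
    print(zone, hemi)   # 10 N  (zone = floor((-122.3 + 180) / 6) % 60 + 1 = 10)
    print(convert_to_utm(-122.3, 47.6, 100.0, zone, hemi))  # [easting, northing, 100.0]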
@ -5,7 +5,6 @@ from opendm.dem import commands
from opendm import system
from opendm import log
from opendm import context
- from opendm.concurrency import get_max_concurrency_for_dem
from scipy import signal, ndimage
import numpy as np
@ -24,16 +23,16 @@ def create_25dmesh(inPointCloud, outMesh, dsm_radius=0.07, dsm_resolution=0.05,
|
|||
|
||||
log.ODM_INFO('Creating DSM for 2.5D mesh')
|
||||
|
||||
commands.create_dems(
|
||||
[inPointCloud],
|
||||
commands.create_dem(
|
||||
inPointCloud,
|
||||
'mesh_dsm',
|
||||
radius=map(str, radius_steps),
|
||||
output_type='max',
|
||||
radiuses=map(str, radius_steps),
|
||||
gapfill=True,
|
||||
outdir=tmp_directory,
|
||||
resolution=dsm_resolution,
|
||||
products=['max'],
|
||||
verbose=verbose,
|
||||
max_workers=get_max_concurrency_for_dem(available_cores, inPointCloud)
|
||||
max_workers=available_cores
|
||||
)
|
||||
|
||||
if method == 'gridded':
|
||||
|
|
|
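Read together, the hunk above replaces the multi-product create_dems call with a single-product create_dem. A consolidated sketch of the new call shape, with argument values as placeholders and keyword names taken from the diff:

commands.create_dem(point_cloud_path,          # input point cloud (placeholder path)
                    'mesh_dsm',
                    output_type='max',
                    radiuses=['0.07'],          # search radii, passed as strings
                    gapfill=True,
                    outdir=tmp_directory,
                    resolution=0.05,
                    verbose=False,
                    max_workers=4)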
@@ -1,54 +1,97 @@
import os, sys
import os, sys, shutil, tempfile, json
from opendm import system
from opendm import log
from opendm import context
from opendm.system import run

def filter(pointCloudPath, standard_deviation=2.5, meank=16, verbose=False):
def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, confidence=None, verbose=False):
    """
    Filters a point cloud in place (it will replace the input file with the filtered result).
    Filters a point cloud
    """
    if standard_deviation <= 0 or meank <= 0:
        log.ODM_INFO("Skipping point cloud filtering")
        return

    log.ODM_INFO("Filtering point cloud (statistical, meanK {}, standard deviation {})".format(meank, standard_deviation))
    if confidence:
        log.ODM_INFO("Keeping only points with > %s confidence" % confidence)

    if not os.path.exists(pointCloudPath):
        log.ODM_ERROR("{} does not exist, cannot filter point cloud. The program will now exit.".format(pointCloudPath))
    if not os.path.exists(input_point_cloud):
        log.ODM_ERROR("{} does not exist, cannot filter point cloud. The program will now exit.".format(input_point_cloud))
        sys.exit(1)

    filter_program = os.path.join(context.odm_modules_path, 'odm_filterpoints')
    if not os.path.exists(filter_program):
        log.ODM_WARNING("{} program not found. Will skip filtering, but this installation should be fixed.".format(filter_program))
        shutil.copy(input_point_cloud, output_point_cloud)
        return

    pc_path, pc_filename = os.path.split(pointCloudPath)
    # pc_path = path/to
    # pc_filename = pointcloud.ply

    basename, ext = os.path.splitext(pc_filename)
    # basename = pointcloud
    # ext = .ply

    tmpPointCloud = os.path.join(pc_path, "{}.tmp{}".format(basename, ext))

    filterArgs = {
        'bin': filter_program,
        'inputFile': pointCloudPath,
        'outputFile': tmpPointCloud,
        'inputFile': input_point_cloud,
        'outputFile': output_point_cloud,
        'sd': standard_deviation,
        'meank': meank,
        'verbose': '--verbose' if verbose else '',
        'verbose': '-verbose' if verbose else '',
        'confidence': '-confidence %s' % confidence if confidence else '',
    }

    system.run('{bin} -inputFile {inputFile} '
               '-outputFile {outputFile} '
               '-sd {sd} '
               '-meank {meank} {verbose} '.format(**filterArgs))
               '-meank {meank} {confidence} {verbose} '.format(**filterArgs))

    # Remove input file, swap temp file
    if os.path.exists(tmpPointCloud):
        os.remove(pointCloudPath)
        os.rename(tmpPointCloud, pointCloudPath)
    else:
        log.ODM_WARNING("{} not found, filtering has failed.".format(tmpPointCloud))
    if not os.path.exists(output_point_cloud):
        log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))

def get_extent(input_point_cloud):
    fd, json_file = tempfile.mkstemp(suffix='.json')
    os.close(fd)

    # Get point cloud extent
    fallback = False

    # We know PLY files do not have --summary support
    if input_point_cloud.lower().endswith(".ply"):
        fallback = True
        run('pdal info {0} > {1}'.format(input_point_cloud, json_file))

    try:
        if not fallback:
            run('pdal info --summary {0} > {1}'.format(input_point_cloud, json_file))
    except:
        fallback = True
        run('pdal info {0} > {1}'.format(input_point_cloud, json_file))

    bounds = {}
    with open(json_file, 'r') as f:
        result = json.loads(f.read())

        if not fallback:
            summary = result.get('summary')
            if summary is None: raise Exception("Cannot compute summary for %s (summary key missing)" % input_point_cloud)
            bounds = summary.get('bounds')
        else:
            stats = result.get('stats')
            if stats is None: raise Exception("Cannot compute bounds for %s (stats key missing)" % input_point_cloud)
            bbox = stats.get('bbox')
            if bbox is None: raise Exception("Cannot compute bounds for %s (bbox key missing)" % input_point_cloud)
            native = bbox.get('native')
            if native is None: raise Exception("Cannot compute bounds for %s (native key missing)" % input_point_cloud)
            bounds = native.get('bbox')

    if bounds is None: raise Exception("Cannot compute bounds for %s (bounds key missing)" % input_point_cloud)

    if bounds.get('maxx', None) is None or \
        bounds.get('minx', None) is None or \
        bounds.get('maxy', None) is None or \
        bounds.get('miny', None) is None or \
        bounds.get('maxz', None) is None or \
        bounds.get('minz', None) is None:
        raise Exception("Cannot compute bounds for %s (invalid keys) %s" % (input_point_cloud, str(bounds)))

    os.remove(json_file)
    return bounds
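A usage sketch of the new point_cloud API above; the file paths and confidence value are hypothetical:

from opendm import point_cloud

point_cloud.filter('mve/mve_dense_point_cloud.ply', 'odm_filterpoints/point_cloud.ply',
                   standard_deviation=2.5, meank=16, confidence=0.6, verbose=False)
bounds = point_cloud.get_extent('odm_filterpoints/point_cloud.ply')
print(bounds['minx'], bounds['maxx'])   # dict also carries miny/maxy, minz/maxz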
@@ -17,14 +17,16 @@ def get_ccd_widths():
    return dict(zip(map(string.lower, sensor_data.keys()), sensor_data.values()))


def run(cmd, env_paths=[context.superbuild_bin_path]):
def run(cmd, env_paths=[context.superbuild_bin_path], env_vars={}):
    """Run a system command"""
    log.ODM_DEBUG('running %s' % cmd)

    env = None
    env = os.environ.copy()
    if len(env_paths) > 0:
        env = os.environ.copy()
        env["PATH"] = env["PATH"] + ":" + ":".join(env_paths)

    for k in env_vars:
        env[k] = str(env_vars[k])

    retcode = subprocess.call(cmd, shell=True, env=env)
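The new env_vars parameter lets callers forward environment variables to the child process, for example to cap OpenMP threads; the command string below is illustrative:

system.run('dmrecon -s2 scene/', env_vars={'OMP_NUM_THREADS': 4})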
@@ -246,14 +246,14 @@ class ODM_Tree(object):
        # whole reconstruction process.
        self.dataset_raw = io.join_paths(self.root_path, 'images')
        self.opensfm = io.join_paths(self.root_path, 'opensfm')
        self.smvs = io.join_paths(self.root_path, 'smvs')
        self.mve = io.join_paths(self.root_path, 'mve')
        self.odm_meshing = io.join_paths(self.root_path, 'odm_meshing')
        self.odm_texturing = io.join_paths(self.root_path, 'odm_texturing')
        self.odm_25dtexturing = io.join_paths(self.root_path, 'odm_texturing_25d')
        self.odm_georeferencing = io.join_paths(self.root_path, 'odm_georeferencing')
        self.odm_25dgeoreferencing = io.join_paths(self.root_path, 'odm_25dgeoreferencing')
        self.odm_filterpoints = io.join_paths(self.root_path, 'odm_filterpoints')
        self.odm_orthophoto = io.join_paths(self.root_path, 'odm_orthophoto')
        self.odm_pdal = io.join_paths(self.root_path, 'pdal')

        # important files paths

@@ -271,12 +271,15 @@ class ODM_Tree(object):
        self.opensfm_model = io.join_paths(self.opensfm, 'depthmaps/merged.ply')
        self.opensfm_transformation = io.join_paths(self.opensfm, 'geocoords_transformation.txt')

        # smvs
        self.smvs_model = io.join_paths(self.smvs, 'smvs_dense_point_cloud.ply')
        # mve
        self.mve_model = io.join_paths(self.mve, 'mve_dense_point_cloud.ply')
        self.mve_path = io.join_paths(self.opensfm, 'mve')
        self.mve_image_list = io.join_paths(self.mve_path, 'list.txt')
        self.mve_bundle = io.join_paths(self.mve_path, 'bundle/bundle.out')
        self.mve_views = io.join_paths(self.smvs, 'views')
        self.mve_views = io.join_paths(self.mve, 'views')

        # filter points
        self.filtered_point_cloud = io.join_paths(self.odm_filterpoints, "point_cloud.ply")

        # odm_meshing
        self.odm_mesh = io.join_paths(self.odm_meshing, 'odm_mesh.ply')
@@ -0,0 +1,148 @@
FROM phusion/baseimage

# Env variables
ENV DEBIAN_FRONTEND noninteractive

# Install dependencies and required requisites
RUN apt-get update -y \
  && apt-get install -y \
    software-properties-common \
  && add-apt-repository -y ppa:ubuntugis/ppa \
  && add-apt-repository -y ppa:george-edison55/cmake-3.x \
  && apt-get update -y

# All packages (Will install much faster)
RUN apt-get install --no-install-recommends -y \
  build-essential \
  cmake \
  gdal-bin \
  git \
  libatlas-base-dev \
  libavcodec-dev \
  libavformat-dev \
  libboost-date-time-dev \
  libboost-filesystem-dev \
  libboost-iostreams-dev \
  libboost-log-dev \
  libboost-python-dev \
  libboost-regex-dev \
  libboost-thread-dev \
  libeigen3-dev \
  libflann-dev \
  libgdal-dev \
  libgeotiff-dev \
  libgoogle-glog-dev \
  libgtk2.0-dev \
  libjasper-dev \
  libjpeg-dev \
  libjsoncpp-dev \
  liblapack-dev \
  liblas-bin \
  libpng-dev \
  libproj-dev \
  libsuitesparse-dev \
  libswscale-dev \
  libtbb2 \
  libtbb-dev \
  libtiff-dev \
  libvtk6-dev \
  libxext-dev \
  python-dev \
  python-empy \
  python-gdal \
  python-matplotlib \
  python-networkx \
  python-nose \
  python-pip \
  python-pyproj \
  python-pyside \
  python-software-properties \
  python-wheel \
  swig2.0

RUN apt-get remove libdc1394-22-dev
RUN pip install --upgrade pip
RUN pip install setuptools
RUN pip install -U \
  appsettings \
  catkin-pkg \
  exifread \
  gpxpy \
  loky \
  numpy==1.15.4 \
  psutil \
  pyproj \
  PyYAML \
  repoze.lru \
  scipy \
  shapely \
  xmltodict \
  https://github.com/OpenDroneMap/gippy/archive/numpyfix.zip

ENV PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python2.7/dist-packages"
ENV PYTHONPATH="$PYTHONPATH:/code/SuperBuild/src/opensfm"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

# Prepare directories
RUN mkdir /code
WORKDIR /code

# Copy repository files
COPY CMakeLists.txt /code/CMakeLists.txt
COPY configure.sh /code/configure.sh
COPY /modules/ /code/modules/
COPY /opendm/ /code/opendm/
COPY /patched_files/ /code/patched_files/
COPY run.py /code/run.py
COPY run.sh /code/run.sh
COPY /scripts/ /code/scripts/
COPY /SuperBuild/cmake/ /code/SuperBuild/cmake/
COPY /SuperBuild/CMakeLists.txt /code/SuperBuild/CMakeLists.txt
COPY docker.settings.yaml /code/settings.yaml
COPY VERSION /code/VERSION

# Replace g++ and gcc with our own scripts
COPY /docker/ /code/docker/
RUN mv -v /usr/bin/gcc /usr/bin/gcc_real \
  && mv -v /usr/bin/g++ /usr/bin/g++_real \
  && cp -v /code/docker/gcc /usr/bin/gcc \
  && cp -v /code/docker/g++ /usr/bin/g++

# Compile code in SuperBuild and root directories
RUN cd SuperBuild \
  && mkdir build \
  && cd build \
  && cmake .. \
  && make -j$(nproc) \
  && cd ../.. \
  && mkdir build \
  && cd build \
  && cmake .. \
  && make -j$(nproc)

RUN apt-get -y remove \
  git \
  build-essential \
  cmake \
  libgl1-mesa-dri \
  python-pip

RUN apt-get install -y libvtk6-dev

# Cleanup APT
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Clean Superbuild
RUN rm -rf \
  /code/SuperBuild/build/opencv \
  /code/SuperBuild/download \
  /code/SuperBuild/src/ceres \
  /code/SuperBuild/src/mvstexturing \
  /code/SuperBuild/src/opencv \
  /code/SuperBuild/src/opengv \
  /code/SuperBuild/src/pcl \
  /code/SuperBuild/src/pdal

# Entry point
ENTRYPOINT ["python", "/code/run.py", "code"]

2
run.py
@@ -33,7 +33,7 @@ if __name__ == '__main__':
                   + args.project_path + "/odm_orthophoto "
                   + args.project_path + "/odm_texturing "
                   + args.project_path + "/opensfm "
                   + args.project_path + "/smvs")
                   + args.project_path + "/mve")

    # create an instance of my App BlackBox
    # internally configure all tasks
@@ -7,6 +7,7 @@ from opendm import io
from opendm import types
from opendm import log
from opendm import system
from opendm import location
from shutil import copyfile

def save_images_database(photos, database_file):

@@ -116,24 +117,10 @@ class ODMLoadDatasetCell(ecto.Cell):
        if tree.odm_georeferencing_gcp:
            outputs.reconstruction = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_gcp)
        else:
            verbose = '-verbose' if self.params.verbose else ''
            # Generate UTM from images
            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'imgs': tree.dataset_raw,
                'imgs_list': tree.dataset_list,
                'coords': tree.odm_georeferencing_coords,
                'log': tree.odm_georeferencing_utm_log,
                'verbose': verbose
            }

            # run UTM extraction binary
            try:
                if not io.file_exists(tree.odm_georeferencing_coords) or rerun_cell:
                    system.run('{bin}/odm_extract_utm -imagesPath {imgs}/ '
                               '-imageListFile {imgs_list} -outputCoordFile {coords} {verbose} '
                               '-logFile {log}'.format(**kwargs))
                    location.extract_utm_coords(photos, tree.dataset_raw, tree.odm_georeferencing_coords)
                else:
                    log.ODM_INFO("Coordinates file already exists: %s" % tree.odm_georeferencing_coords)
            except:
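For reference, extract_utm_coords (added earlier in this commit) writes a plain-text coordinate file of the shape below; the numbers are illustrative:

# WGS84 UTM 15N          <- datum, zone and hemisphere
# 576539 5188450         <- truncated average easting/northing (local origin)
# -12.3 4.7 198.0        <- per-image offsets from that origin, plus altitude
# ...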
@@ -0,0 +1,144 @@
import ecto, shutil, os, glob, math

from opendm import log
from opendm import io
from opendm import system
from opendm import context
from opendm import point_cloud

class ODMMveCell(ecto.Cell):
    def declare_io(self, params, inputs, outputs):
        inputs.declare("tree", "Struct with paths", [])
        inputs.declare("args", "The application arguments.", {})
        inputs.declare("reconstruction", "ODMReconstruction", [])
        outputs.declare("reconstruction", "list of ODMReconstructions", [])

    def process(self, inputs, outputs):
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running MVE Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            return ecto.QUIT

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'mve') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'mve' in args.rerun_from)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or rerun_cell:
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and rerun_cell:
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
                io.copy(tree.opensfm_image_list, tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exist before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.mve), env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

            dmrecon_config = [
                "-s%s" % mve_output_scale,
                "--progress=silent",
                "--local-neighbors=2",
                "--force",
            ]

            # Run MVE's dmrecon
            log.ODM_INFO(' ')
            log.ODM_INFO(' ,*/** ')
            log.ODM_INFO(' ,*@%*/@%* ')
            log.ODM_INFO(' ,/@%******@&*. ')
            log.ODM_INFO(' ,*@&*********/@&* ')
            log.ODM_INFO(' ,*@&**************@&* ')
            log.ODM_INFO(' ,/@&******************@&*. ')
            log.ODM_INFO(' ,*@&*********************/@&* ')
            log.ODM_INFO(' ,*@&**************************@&*. ')
            log.ODM_INFO(' ,/@&******************************&&*, ')
            log.ODM_INFO(' ,*&&**********************************@&*. ')
            log.ODM_INFO(' ,*@&**************************************@&*. ')
            log.ODM_INFO(' ,*@&***************#@@@@@@@@@%****************&&*, ')
            log.ODM_INFO(' .*&&***************&@@@@@@@@@@@@@@****************@@*. ')
            log.ODM_INFO(' .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*. ')
            log.ODM_INFO(' .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*, ')
            log.ODM_INFO(' .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*. ')
            log.ODM_INFO(' .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*. ')
            log.ODM_INFO(' .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*, ')
            log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(, ')
            log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&* ')
            log.ODM_INFO(' *#@/*******************************@@@@@***&@&**********************&@*, ')
            log.ODM_INFO(' *#@#******************************&@@@***@#*********************&@*, ')
            log.ODM_INFO(' */@#*****************************@@@************************@@*. ')
            log.ODM_INFO(' *#@/***************************/@@/*********************%@*, ')
            log.ODM_INFO(' *#@#**************************#@@%******************%@*, ')
            log.ODM_INFO(' */@#*************************(@@@@@@@&%/********&@*. ')
            log.ODM_INFO(' *(@(*********************************/%@@%**%@*, ')
            log.ODM_INFO(' *(@%************************************%@** ')
            log.ODM_INFO(' **@%********************************&@*, ')
            log.ODM_INFO(' *(@(****************************%@/* ')
            log.ODM_INFO(' ,(@%************************#@/* ')
            log.ODM_INFO(' ,*@%********************&@/, ')
            log.ODM_INFO(' */@#****************#@/* ')
            log.ODM_INFO(' ,/@&************#@/* ')
            log.ODM_INFO(' ,*@&********%@/, ')
            log.ODM_INFO(' */@#****(@/* ')
            log.ODM_INFO(' ,/@@@@(* ')
            log.ODM_INFO(' .**, ')
            log.ODM_INFO('')
            log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
            log.ODM_INFO(" Process is running")
            system.run('%s %s %s' % (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve), env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            scene2pset_config = [
                "-F%s" % mve_output_scale
            ]

            # run scene2pset
            system.run('%s %s "%s" "%s"' % (context.scene2pset_path, ' '.join(scene2pset_config), tree.mve, tree.mve_model), env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                            tree.mve_model)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'MVE')

        log.ODM_INFO('Running ODM MVE Cell - Finished')
        return ecto.OK if args.end_with != 'mve' else ecto.QUIT
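The output-scale computation above halves the largest image along each axis once per scale step, so each step divides the pixel count by four; a quick worked example (image size and depthmap resolution are illustrative):

import math

max_width, max_height = 4000, 3000                   # largest image in the set
max_pixels = 640 * 640                               # depthmap_resolution = 640
ratio = float(max_width * max_height) / max_pixels   # ~29.3
mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))  # -> 3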
@@ -8,13 +8,14 @@ from opendm import system

from dataset import ODMLoadDatasetCell
from run_opensfm import ODMOpenSfMCell
from smvs import ODMSmvsCell
from mve import ODMMveCell
from odm_slam import ODMSlamCell
from odm_meshing import ODMeshingCell
from mvstex import ODMMvsTexCell
from odm_georeferencing import ODMGeoreferencingCell
from odm_orthophoto import ODMOrthoPhotoCell
from odm_dem import ODMDEMCell
from odm_filterpoints import ODMFilterPoints


class ODMApp(ecto.BlackBox):

@@ -47,13 +48,11 @@ class ODMApp(ecto.BlackBox):
                 fixed_camera_params=p.args.use_fixed_camera_params,
                 hybrid_bundle_adjustment=p.args.use_hybrid_bundle_adjustment),
             'slam': ODMSlamCell(),
             'smvs': ODMSmvsCell(alpha=p.args.smvs_alpha,
                                 max_pixels=p.args.depthmap_resolution*p.args.depthmap_resolution,
                                 threads=p.args.max_concurrency,
                                 output_scale=p.args.smvs_output_scale,
                                 shading=p.args.smvs_enable_shading,
                                 gamma_srgb=p.args.smvs_gamma_srgb,
                                 verbose=p.args.verbose),

             'mve': ODMMveCell(),

             'filterpoints': ODMFilterPoints(),

             'meshing': ODMeshingCell(max_vertex=p.args.mesh_size,
                                      oct_tree=p.args.mesh_octree_depth,
                                      samples=p.args.mesh_samples,

@@ -99,13 +98,6 @@ class ODMApp(ecto.BlackBox):
        if p.args.video:
            return self.slam_connections(p)

        # define initial task
        # TODO: What is this?
        # initial_task = p.args['start_with']
        # initial_task_id = config.processopts.index(initial_task)

        # define the connections like you would for the plasm

        # load the dataset
        connections = [self.tree[:] >> self.dataset['tree'],
                       self.args[:] >> self.dataset['args']]

@@ -116,21 +108,25 @@ class ODMApp(ecto.BlackBox):
                        self.dataset['reconstruction'] >> self.opensfm['reconstruction']]

        if p.args.use_opensfm_dense or p.args.fast_orthophoto:
            # create odm mesh from opensfm point cloud
            connections += [self.tree[:] >> self.meshing['tree'],
                            self.args[:] >> self.meshing['args'],
                            self.opensfm['reconstruction'] >> self.meshing['reconstruction']]
            # filter points from opensfm point cloud
            connections += [self.tree[:] >> self.filterpoints['tree'],
                            self.args[:] >> self.filterpoints['args'],
                            self.opensfm['reconstruction'] >> self.filterpoints['reconstruction']]
        else:
            # run smvs
            # run mve
            connections += [self.tree[:] >> self.mve['tree'],
                            self.args[:] >> self.mve['args'],
                            self.opensfm['reconstruction'] >> self.mve['reconstruction']]

            connections += [self.tree[:] >> self.smvs['tree'],
                            self.args[:] >> self.smvs['args'],
                            self.opensfm['reconstruction'] >> self.smvs['reconstruction']]
            # filter points from mve point cloud
            connections += [self.tree[:] >> self.filterpoints['tree'],
                            self.args[:] >> self.filterpoints['args'],
                            self.mve['reconstruction'] >> self.filterpoints['reconstruction']]

        # create odm mesh from smvs point cloud
        connections += [self.tree[:] >> self.meshing['tree'],
                        self.args[:] >> self.meshing['args'],
                        self.smvs['reconstruction'] >> self.meshing['reconstruction']]
        # create mesh
        connections += [self.tree[:] >> self.meshing['tree'],
                        self.args[:] >> self.meshing['args'],
                        self.filterpoints['reconstruction'] >> self.meshing['reconstruction']]

        # create odm texture
        connections += [self.tree[:] >> self.texturing['tree'],

@@ -161,14 +157,14 @@ class ODMApp(ecto.BlackBox):
        connections += [self.tree[:] >> self.slam['tree'],
                        self.args[:] >> self.slam['args']]

        connections += [self.tree[:] >> self.smvs['tree'],
                        self.args[:] >> self.smvs['args'],
                        self.slam['reconstruction'] >> self.smvs['reconstruction']]
        connections += [self.tree[:] >> self.mve['tree'],
                        self.args[:] >> self.mve['args'],
                        self.slam['reconstruction'] >> self.mve['reconstruction']]

        # create odm mesh
        connections += [self.tree[:] >> self.meshing['tree'],
                        self.args[:] >> self.meshing['args'],
                        self.smvs['reconstruction'] >> self.meshing['reconstruction']]
                        self.mve['reconstruction'] >> self.meshing['reconstruction']]

        # create odm texture
        connections += [self.tree[:] >> self.texturing['tree'],
@@ -9,7 +9,6 @@ from opendm import types
from opendm import gsd
from opendm.dem import commands
from opendm.cropper import Cropper
from opendm.concurrency import get_max_concurrency_for_dem

class ODMDEMCell(ecto.Cell):
    def declare_params(self, params):

@@ -44,8 +43,6 @@ class ODMDEMCell(ecto.Cell):
        log.ODM_INFO('Create DTM: ' + str(args.dtm))
        log.ODM_INFO('DEM input file {0} found: {1}'.format(tree.odm_georeferencing_model_laz, str(las_model_found)))

        slope, cellsize = (0.15, 1)

        # define paths and create working directories
        odm_dem_root = tree.path('odm_dem')
        if not io.dir_exists(odm_dem_root):

@@ -57,15 +54,19 @@ class ODMDEMCell(ecto.Cell):
            if not io.file_exists(pc_classify_marker) or rerun_cell:
                log.ODM_INFO("Classifying {} using Simple Morphological Filter".format(tree.odm_georeferencing_model_laz))
                commands.classify(tree.odm_georeferencing_model_laz,
                                  slope,
                                  cellsize,
                                  args.smrf_scalar,
                                  args.smrf_slope,
                                  args.smrf_threshold,
                                  args.smrf_window,
                                  verbose=args.verbose
                                  )

                with open(pc_classify_marker, 'w') as f:
                    f.write('Classify: smrf\n')
                    f.write('Slope: {}\n'.format(slope))
                    f.write('Cellsize: {}\n'.format(cellsize))
                    f.write('Scalar: {}\n'.format(args.smrf_scalar))
                    f.write('Slope: {}\n'.format(args.smrf_slope))
                    f.write('Threshold: {}\n'.format(args.smrf_threshold))
                    f.write('Window: {}\n'.format(args.smrf_window))

        # Do we need to process anything here?
        if (args.dsm or args.dtm) and las_model_found:

@@ -86,16 +87,17 @@ class ODMDEMCell(ecto.Cell):
                radius_steps.append(radius_steps[-1] * 2)  # 2 is arbitrary, maybe there's a better value?

            for product in products:
                commands.create_dems(
                    [tree.odm_georeferencing_model_laz],
                commands.create_dem(
                    tree.odm_georeferencing_model_laz,
                    product,
                    radius=map(str, radius_steps),
                    gapfill=True,
                    output_type='idw' if product == 'dtm' else 'max',
                    radiuses=map(str, radius_steps),
                    gapfill=args.dem_gapfill_steps > 0,
                    outdir=odm_dem_root,
                    resolution=resolution / 100.0,
                    decimation=args.dem_decimation,
                    verbose=args.verbose,
                    max_workers=get_max_concurrency_for_dem(args.max_concurrency, tree.odm_georeferencing_model_laz)
                    max_workers=args.max_concurrency
                )

            if args.crop > 0:
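The radius_steps growth referenced above doubles the search radius once per gapfill step; an illustrative trace (starting radius and step count are hypothetical):

radius_steps = [0.5]                 # hypothetical starting radius, in meters
for _ in range(3):                   # e.g. --dem-gapfill-steps 3
    radius_steps.append(radius_steps[-1] * 2)
# radius_steps -> [0.5, 1.0, 2.0, 4.0]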
@@ -0,0 +1,59 @@
import ecto, os

from opendm import log
from opendm import io
from opendm import system
from opendm import context
from opendm import point_cloud

class ODMFilterPoints(ecto.Cell):
    def declare_io(self, params, inputs, outputs):
        inputs.declare("tree", "Struct with paths", [])
        inputs.declare("args", "The application arguments.", {})
        inputs.declare("reconstruction", "ODMReconstruction", [])
        outputs.declare("reconstruction", "list of ODMReconstructions", [])

    def process(self, inputs, outputs):
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running ODM FilterPoints Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'odm_filterpoints') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'odm_filterpoints' in args.rerun_from)
        if not os.path.exists(tree.odm_filterpoints): system.mkdir_p(tree.odm_filterpoints)

        # check if reconstruction was done before
        if not io.file_exists(tree.filtered_point_cloud) or rerun_cell:
            if args.fast_orthophoto:
                inputPointCloud = os.path.join(tree.opensfm, 'reconstruction.ply')
            elif args.use_opensfm_dense:
                inputPointCloud = tree.opensfm_model
            else:
                inputPointCloud = tree.mve_model

            confidence = None
            if not args.use_opensfm_dense and not args.fast_orthophoto:
                confidence = args.mve_confidence

            point_cloud.filter(inputPointCloud, tree.filtered_point_cloud, standard_deviation=args.pc_filter, confidence=confidence, verbose=args.verbose)
        else:
            log.ODM_WARNING('Found a valid point cloud file in: %s' %
                            tree.filtered_point_cloud)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'FilterPoints')

        log.ODM_INFO('Running ODM FilterPoints Cell - Finished')
        return ecto.OK if args.end_with != 'odm_filterpoints' else ecto.QUIT
@@ -83,6 +83,7 @@ class ODMGeoreferencingCell(ecto.Cell):
            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'input_pc_file': tree.filtered_point_cloud,
                'bundle': tree.opensfm_bundle,
                'imgs': tree.dataset_raw,
                'imgs_list': tree.opensfm_bundle_list,

@@ -98,13 +99,6 @@ class ODMGeoreferencingCell(ecto.Cell):
                'verbose': verbose
            }

            if args.fast_orthophoto:
                kwargs['input_pc_file'] = os.path.join(tree.opensfm, 'reconstruction.ply')
            elif args.use_opensfm_dense:
                kwargs['input_pc_file'] = tree.opensfm_model
            else:
                kwargs['input_pc_file'] = tree.smvs_model

            if transformPointCloud:
                kwargs['pc_params'] = '-inputPointCloudFile {input_pc_file} -outputPointCloudFile {output_pc_file}'.format(**kwargs)

@@ -114,7 +108,7 @@ class ODMGeoreferencingCell(ecto.Cell):
                    log.ODM_WARNING('NO SRS: The output point cloud will not have a SRS.')
            else:
                kwargs['pc_params'] = ''


            # Check to see if the GCP file exists

            if not self.params.use_exif and (self.params.gcp_file or tree.odm_georeferencing_gcp):
@@ -49,18 +49,12 @@ class ODMeshingCell(ecto.Cell):
                     (args.rerun_from is not None and
                      'odm_meshing' in args.rerun_from)

        infile = tree.smvs_model
        if args.fast_orthophoto:
            infile = os.path.join(tree.opensfm, 'reconstruction.ply')
        elif args.use_opensfm_dense:
            infile = tree.opensfm_model

        # Create full 3D model unless --skip-3dmodel is set
        if not args.skip_3dmodel:
            if not io.file_exists(tree.odm_mesh) or rerun_cell:
                log.ODM_DEBUG('Writing ODM Mesh file in: %s' % tree.odm_mesh)

                mesh.screened_poisson_reconstruction(infile,
                mesh.screened_poisson_reconstruction(tree.filtered_point_cloud,
                    tree.odm_mesh,
                    depth=self.params.oct_tree,
                    samples=self.params.samples,

@@ -97,7 +91,7 @@ class ODMeshingCell(ecto.Cell):

                log.ODM_DEBUG('ODM 2.5D DSM resolution: %s' % dsm_resolution)

                mesh.create_25dmesh(infile, tree.odm_25dmesh,
                mesh.create_25dmesh(tree.filtered_point_cloud, tree.odm_25dmesh,
                    dsm_radius=dsm_radius,
                    dsm_resolution=dsm_resolution,
                    depth=self.params.oct_tree,
@@ -87,8 +87,11 @@ class ODMOpenSfMCell(ecto.Cell):

            if has_alt:
                log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: True")
                config.append("use_altitude_tag: yes")
                config.append("align_method: naive")
            else:
                config.append("align_method: orientation_prior")
                config.append("align_orientation_prior: vertical")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_DEBUG("Enabling hybrid bundle adjustment")

@@ -172,9 +175,6 @@ class ODMOpenSfMCell(ecto.Cell):
            if args.fast_orthophoto:
                system.run('PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))

                # Filter
                point_cloud.filter(os.path.join(tree.opensfm, 'reconstruction.ply'), standard_deviation=args.pc_filter, verbose=args.verbose)
            elif args.use_opensfm_dense:
                # Undistort images at full scale in JPG
                # (TODO: we could compare the size of the PNGs if they are < than depthmap_resolution

@@ -183,9 +183,6 @@ class ODMOpenSfMCell(ecto.Cell):
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
                system.run('PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))

                # Filter
                point_cloud.filter(tree.opensfm_model, standard_deviation=args.pc_filter, verbose=args.verbose)
        else:
            log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                            tree.opensfm_reconstruction)
110
scripts/smvs.py
@@ -1,110 +0,0 @@
import ecto, shutil, os, glob

from opendm import log
from opendm import io
from opendm import system
from opendm import context
from opendm import point_cloud


class ODMSmvsCell(ecto.Cell):
    def declare_params(self, params):
        params.declare("threads", "max number of threads", context.num_cores)
        params.declare("alpha", "Regularization parameter", 1)
        params.declare("max_pixels", "max pixels for reconstruction", 1700000)
        params.declare("output_scale", "scale of optimization", 2)
        params.declare("shading", "Enable shading-aware model", False)
        params.declare("gamma_srgb", "Apply inverse SRGB gamma correction", False)
        params.declare("verbose", "Increase debug level", False)

    def declare_io(self, params, inputs, outputs):
        inputs.declare("tree", "Struct with paths", [])
        inputs.declare("args", "The application arguments.", {})
        inputs.declare("reconstruction", "ODMReconstruction", [])
        outputs.declare("reconstruction", "list of ODMReconstructions", [])

    def process(self, inputs, outputs):

        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running SMVS Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start SMVS')
            return ecto.QUIT

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'smvs') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'smvs' in args.rerun_from)

        # check if reconstruction was done before
        if not io.file_exists(tree.smvs_model) or rerun_cell:
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and rerun_cell:
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
                io.copy(tree.opensfm_image_list, tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exist before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.smvs):
                shutil.rmtree(tree.smvs)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.smvs))

            # config
            config = [
                "-t%s" % self.params.threads,
                "-a%s" % self.params.alpha,
                "--max-pixels=%s" % int(self.params.max_pixels),
                "-o%s" % self.params.output_scale,
                "--debug-lvl=%s" % ('1' if self.params.verbose else '0'),
                "%s" % '-S' if self.params.shading else '',
                "%s" % '-g' if self.params.gamma_srgb and self.params.shading else '',
                "--force" if rerun_cell else ''
            ]

            # run smvs
            system.run('%s %s %s' % (context.smvs_path, ' '.join(config), tree.smvs))

            # find and rename the output file for simplicity
            smvs_files = glob.glob(os.path.join(tree.smvs, 'smvs-*'))
            smvs_files.sort(key=os.path.getmtime)  # sort by last modified date
            if len(smvs_files) > 0:
                old_file = smvs_files[-1]
                if not (io.rename_file(old_file, tree.smvs_model)):
                    log.ODM_WARNING("File %s does not exist, cannot be renamed. " % old_file)

                # Filter
                point_cloud.filter(tree.smvs_model, standard_deviation=args.pc_filter, verbose=args.verbose)
            else:
                log.ODM_WARNING("Cannot find a valid point cloud (smvs-XX.ply) in %s. Check the console output for errors." % tree.smvs)
        else:
            log.ODM_WARNING('Found a valid SMVS reconstruction file in: %s' %
                            tree.smvs_model)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'SMVS')

        log.ODM_INFO('Running ODM SMVS Cell - Finished')
        return ecto.OK if args.end_with != 'smvs' else ecto.QUIT
@@ -5,7 +5,7 @@
# or --force-ccd n will have to be set in the command line (if you need to)

# This line is really important to set up properly
project_path: '' # Example: '/home/user/ODMProjects
project_path: '' # Example: '/home/user/ODMProjects'

# The rest of the settings will default to the values set unless you uncomment and change them
#resize_to: 2048