Merge branch 'master' into image-load-process

Former-commit-id: c413629fc3
pull/1161/head
Dakota Benjamin 2017-01-17 14:35:49 -05:00 committed by GitHub
commit 42a8aa2788
20 changed files with 973 additions and 26 deletions

View file

@@ -12,5 +12,7 @@ set(CMAKE_PREFIX_PATH "${CMAKE_CURRENT_SOURCE_DIR}/SuperBuild/install")
# move binaries to the same bin directory
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
option(ODM_BUILD_SLAM "Build SLAM module" OFF)
# Add ODM sub-modules
add_subdirectory(modules)

View file

@@ -23,7 +23,7 @@ So far, it does Point Clouds, Digital Surface Models, Textured Digital Surface M
## QUICKSTART
OpenDroneMap can run natively on Ubuntu 14.04 or later; see [Build and Run Using Docker](#build-and-run-using-docker) for running on Windows / macOS. A Vagrant VM is also available: https://github.com/OpenDroneMap/odm_vagrant.
*Support for Ubuntu 12.04 is currently BROKEN with the addition of OpenSfM and Ceres-Solver. It is likely to remain broken unless a champion is found to fix it.*
@@ -124,26 +124,37 @@ has equivalent procedures for Mac OS X and Windows. See [docs.docker.com](https://docs.docker.com)
OpenDroneMap is Dockerized, meaning you can use containerization to build and run it without tampering with the configuration of libraries and packages already
installed on your machine. Docker software is free to install and use in this context. If you don't have it installed,
see the [Docker Ubuntu installation tutorial](https://docs.docker.com/engine/installation/linux/ubuntulinux/) and follow the
instructions through "Create a Docker group". Once Docker is installed, the fastest way to use OpenDroneMap is to run a pre-built image by typing:
docker run -it --rm -v $(pwd)/images:/code/images -v $(pwd)/odm_orthophoto:/code/odm_orthophoto -v $(pwd)/odm_texturing:/code/odm_texturing opendronemap/opendronemap
If you want to build your own Docker image from sources, type:
docker build -t my_odm_image .
docker run -it --rm -v $(pwd)/images:/code/images -v $(pwd)/odm_orthophoto:/code/odm_orthophoto -v $(pwd)/odm_texturing:/code/odm_texturing my_odm_image
Using this method, the containerized ODM will process the images in the OpenDroneMap/images directory and output results
to the OpenDroneMap/odm_orthophoto and OpenDroneMap/odm_texturing directories as described in the **Viewing Results** section.
If you want to view other results outside the Docker image simply add which directories you're interested in to the run command in the same pattern
established above. For example, if you're interested in the dense cloud results generated by PMVS and in the orthophoto,
simply use the following `docker run` command after building the image:
docker run -it --rm -v $(pwd)/images:/code/images -v $(pwd)/pmvs:/code/pmvs -v $(pwd)/odm_orthophoto:/code/odm_orthophoto my_odm_image
To pass in custom parameters to the run.py script, simply pass it as arguments to the `docker run` command. For example:
docker run -it --rm -v $(pwd)/images:/code/images -v $(pwd)/odm_orthophoto:/code/odm_orthophoto -v $(pwd)/odm_texturing:/code/odm_texturing opendronemap/opendronemap --resize-to 1800 --force-ccd 6.16
## User Interface
A web interface and API to OpenDroneMap is currently under active development in the [WebODM](https://github.com/OpenDroneMap/WebODM) repository.
## Video Support
Currently we have an experimental feature that uses ORB_SLAM to render a textured mesh from video. It is only supported on Ubuntu 14.04 on machines with X11 support. See the [wiki](https://github.com/OpenDroneMap/OpenDroneMap/wiki/Reconstruction-from-Video) for details on installation and use.
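The SLAM path is driven by two new run.py flags added in this changeset, `--video` and `--slam-config` (see the argument-parser hunk further down). A minimal invocation might look like this, with file names illustrative:

python run.py --video flight.mp4 --slam-config orb_slam_config.yaml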
## Examples
Coming soon...

View file

@@ -11,6 +11,8 @@ set(CMAKE_MODULE_PATH ${SB_ROOT_DIR}/cmake)
include(ExternalProject)
include(ExternalProject-Setup)
option(ODM_BUILD_SLAM "Build SLAM module" OFF)
################################
# Setup SuperBuild directories #
@@ -105,6 +107,13 @@ set(custom_libs OpenGV
MvsTexturing
)
# Dependencies of the SLAM module
if(ODM_BUILD_SLAM)
list(APPEND custom_libs
Pangolin
ORB_SLAM2)
endif()
foreach(lib ${custom_libs})
SETUP_EXTERNAL_PROJECT_CUSTOM(${lib})
endforeach()

View file

@@ -0,0 +1,78 @@
set(_proj_name orb_slam2)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
DEPENDS opencv pangolin
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
URL https://github.com/paulinus/ORB_SLAM2/archive/7c11f186a53a75560cd17352d327b0bc127a82de.zip
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_COMMAND ""
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)
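# ORB_SLAM2 vendors DBoW2 and g2o under Thirdparty/ and expects both to be
# built before its own configure step runs; the custom steps below use
# DEPENDEES/DEPENDERS to splice each Thirdparty configure/build pair between
# this project's download and configure steps.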
# DBoW2
set(DBoW2_BINARY_DIR "${SB_BINARY_DIR}/DBoW2")
file(MAKE_DIRECTORY "${DBoW2_BINARY_DIR}")
ExternalProject_Add_Step(${_proj_name} build_DBoW2
COMMAND make -j2
DEPENDEES configure_DBoW2
DEPENDERS configure
WORKING_DIRECTORY ${DBoW2_BINARY_DIR}
ALWAYS 1
)
ExternalProject_Add_Step(${_proj_name} configure_DBoW2
COMMAND ${CMAKE_COMMAND} <SOURCE_DIR>/Thirdparty/DBoW2
-DOpenCV_DIR=${SB_INSTALL_DIR}/share/OpenCV
-DCMAKE_BUILD_TYPE=Release
DEPENDEES download
DEPENDERS build_DBoW2
WORKING_DIRECTORY ${DBoW2_BINARY_DIR}
ALWAYS 1
)
# g2o
set(g2o_BINARY_DIR "${SB_BINARY_DIR}/g2o")
file(MAKE_DIRECTORY "${g2o_BINARY_DIR}")
ExternalProject_Add_Step(${_proj_name} build_g2o
COMMAND make -j2
DEPENDEES configure_g2o
DEPENDERS configure
WORKING_DIRECTORY ${g2o_BINARY_DIR}
ALWAYS 1
)
ExternalProject_Add_Step(${_proj_name} configure_g2o
COMMAND ${CMAKE_COMMAND} <SOURCE_DIR>/Thirdparty/g2o
-DCMAKE_BUILD_TYPE=Release
DEPENDEES download
DEPENDERS build_g2o
WORKING_DIRECTORY ${g2o_BINARY_DIR}
ALWAYS 1
)
# Uncompress Vocabulary
ExternalProject_Add_Step(${_proj_name} uncompress_vocabulary
COMMAND tar -xf ORBvoc.txt.tar.gz
DEPENDEES download
DEPENDERS configure
WORKING_DIRECTORY <SOURCE_DIR>/Vocabulary
ALWAYS 1
)

View file

@@ -27,9 +27,9 @@ ExternalProject_Add(${_proj_name}
-DBUILD_opencv_photo=ON
-DBUILD_opencv_legacy=ON
-DBUILD_opencv_python=ON
-DWITH_FFMPEG=${ODM_BUILD_SLAM}
-DWITH_CUDA=OFF
-DWITH_GTK=${ODM_BUILD_SLAM}
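# FFMPEG (video decoding) and GTK (HighGUI windows) are only needed by the
# SLAM video pipeline, so both options now track ODM_BUILD_SLAM.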
-DWITH_VTK=OFF
-DWITH_EIGEN=OFF
-DWITH_OPENNI=OFF
@@ -57,4 +57,4 @@ ExternalProject_Add(${_proj_name}
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View file

@@ -0,0 +1,29 @@
set(_proj_name pangolin)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
URL https://github.com/paulinus/Pangolin/archive/b7c66570b336e012bf3124e2a7411d417a1d35f7.zip
URL_MD5 9b7938d1045d26b27a637b663e647aef
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DCPP11_NO_BOOST=1
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View file

@@ -4,4 +4,6 @@ add_subdirectory(odm_georef)
add_subdirectory(odm_meshing)
add_subdirectory(odm_orthophoto)
add_subdirectory(odm_texturing)
if (ODM_BUILD_SLAM)
add_subdirectory(odm_slam)
endif ()

View file

@@ -1141,9 +1141,40 @@ void Georef::createGeoreferencedModelFromExifData()
void Georef::chooseBestGCPTriplet(size_t &gcp0, size_t &gcp1, size_t &gcp2)
{
size_t numThreads = boost::thread::hardware_concurrency();
boost::thread_group threads;
std::vector<GeorefBestTriplet*> triplets;
for(size_t t = 0; t < numThreads; ++t)
{
GeorefBestTriplet* triplet = new GeorefBestTriplet();
triplets.push_back(triplet);
threads.create_thread(boost::bind(&Georef::findBestGCPTriplet, this, boost::ref(triplet->t_), boost::ref(triplet->s_), boost::ref(triplet->p_), t, numThreads, boost::ref(triplet->err_)));
}
threads.join_all();
double minTotError = std::numeric_limits<double>::infinity();
for(size_t t = 0; t<numThreads; t++)
{
GeorefBestTriplet* triplet = triplets[t];
if(minTotError > triplet->err_)
{
minTotError = triplet->err_;
gcp0 = triplet->t_;
gcp1 = triplet->s_;
gcp2 = triplet->p_;
}
delete triplet;
}
log_ << "Mean georeference error " << minTotError / static_cast<double>(gcps_.size()) << '\n';
}
void Georef::findBestGCPTriplet(size_t &gcp0, size_t &gcp1, size_t &gcp2, size_t offset, size_t stride, double &minTotError)
{
minTotError = std::numeric_limits<double>::infinity();
for(size_t t = offset; t < gcps_.size(); t+=stride)
{
if (gcps_[t].use_)
{
@@ -1180,14 +1211,47 @@ void Georef::chooseBestGCPTriplet(size_t &gcp0, size_t &gcp1, size_t &gcp2)
}
}
}
log_ << "Mean georeference error " << minTotError / static_cast<double>(cameras_.size()) << '\n';
log_ << '[' << offset+1 << " of " << stride << "] Mean georeference error " << minTotError / static_cast<double>(gcps_.size());
log_ << " (" << gcp0 << ", " << gcp1 << ", " << gcp2 << ")\n";
}
void Georef::chooseBestCameraTriplet(size_t &cam0, size_t &cam1, size_t &cam2)
{
size_t numThreads = boost::thread::hardware_concurrency();
boost::thread_group threads;
std::vector<GeorefBestTriplet*> triplets;
for(size_t t = 0; t < numThreads; ++t)
{
GeorefBestTriplet* triplet = new GeorefBestTriplet();
triplets.push_back(triplet);
threads.create_thread(boost::bind(&Georef::findBestCameraTriplet, this, boost::ref(triplet->t_), boost::ref(triplet->s_), boost::ref(triplet->p_), t, numThreads, boost::ref(triplet->err_)));
}
threads.join_all();
double minTotError = std::numeric_limits<double>::infinity();
for(size_t t = 0; t<numThreads; t++)
{
GeorefBestTriplet* triplet = triplets[t];
if(minTotError > triplet->err_)
{
minTotError = triplet->err_;
cam0 = triplet->t_;
cam1 = triplet->s_;
cam2 = triplet->p_;
}
delete triplet;
}
log_ << "Mean georeference error " << minTotError / static_cast<double>(cameras_.size()) << '\n';
}
void Georef::findBestCameraTriplet(size_t &cam0, size_t &cam1, size_t &cam2, size_t offset, size_t stride, double &minTotError)
{
minTotError = std::numeric_limits<double>::infinity();
for(size_t t = offset; t < cameras_.size(); t+=stride)
{
for(size_t s = t; s < cameras_.size(); ++s)
{
@@ -1215,8 +1279,9 @@ void Georef::chooseBestCameraTriplet(size_t &cam0, size_t &cam1, size_t &cam2)
}
}
}
log_ << "Mean georeference error " << minTotError / static_cast<double>(cameras_.size()) << '\n';
log_ << '[' << offset+1 << " of " << stride << "] Mean georeference error " << minTotError / static_cast<double>(cameras_.size());
log_ << " (" << cam0 << ", " << cam1 << ", " << cam2 << ")\n";
}
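The parallelization above is plain offset/stride partitioning: worker i scans first indices i, i+N, i+2N, ..., keeps its own minimum, and the main thread reduces the per-worker minima. A minimal Python sketch of the same pattern (illustrative only, not part of the codebase):

from concurrent.futures import ThreadPoolExecutor
from itertools import combinations

def best_triplet_partition(items, error_fn, offset, stride):
    # Best (error, triplet) among triplets whose first index is
    # offset, offset + stride, offset + 2*stride, ...
    best_err, best = float('inf'), None
    for t in range(offset, len(items), stride):
        for s, p in combinations(range(t + 1, len(items)), 2):
            err = error_fn(items[t], items[s], items[p])
            if err < best_err:
                best_err, best = err, (t, s, p)
    return best_err, best

def choose_best_triplet(items, error_fn, workers=4):
    # Each worker scans a disjoint set of first indices; the main
    # thread then reduces the per-worker minima, as the C++ above does.
    with ThreadPoolExecutor(workers) as pool:
        futures = [pool.submit(best_triplet_partition, items, error_fn, w, workers)
                   for w in range(workers)]
    return min((f.result() for f in futures), key=lambda r: r[0])

Note that in CPython threads only pay off here if error_fn releases the GIL; for a pure-Python error function, swapping in ProcessPoolExecutor gives real parallelism.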
void Georef::printGeorefSystem()

View file

@@ -111,6 +111,17 @@ struct GeorefCamera
friend std::ostream& operator<<(std::ostream &os, const GeorefCamera &cam);
};
/*!
* \brief The GeorefBestTriplet struct is used to store the best triplet found.
*/
struct GeorefBestTriplet
{
size_t t_; /**< First ordinate of the best triplet found. **/
size_t s_; /**< Second ordinate of the best triplet found. **/
size_t p_; /**< Third ordinate of the best triplet found. **/
double err_; /**< Error of this triplet. **/
};
/*!
* \brief The Georef class is used to transform a mesh into a georeferenced system.
* The class reads camera positions from a bundle file.
@@ -196,10 +207,20 @@ private:
*/
void chooseBestGCPTriplet(size_t &gcp0, size_t &gcp1, size_t &gcp2);
/*!
* \brief findBestGCPTriplet Partitioned version of chooseBestGCPTriplet.
*/
void findBestGCPTriplet(size_t &gcp0, size_t &gcp1, size_t &gcp2, size_t offset, size_t stride, double &minTotError);
/*!
* \brief chooseBestCameraTriplet Chooses the best triplet of cameras to use when making the model georeferenced.
*/
void chooseBestCameraTriplet(size_t &cam0, size_t &cam1, size_t &cam2);
/*!
* \brief findBestCameraTriplet Partitioned version of chooseBestCameraTriplet.
*/
void findBestCameraTriplet(size_t &cam0, size_t &cam1, size_t &cam2, size_t offset, size_t stride, double &minTotError);
/*!
* \brief printGeorefSystem Prints a file containing information about the georeference system, next to the output file.

View file

@@ -0,0 +1,41 @@
project(odm_slam)
cmake_minimum_required(VERSION 2.8)
# Set the OpenCV directory to the path specified with option -DOPENCV_DIR="path"
set(OPENCV_DIR "OPENCV_DIR-NOTFOUND" CACHE PATH "Path to the opencv installation directory")
# Add compiler options.
add_definitions(-Wall -Wextra)
# Find pcl at the location specified by PCL_DIR
find_package(PCL 1.7 HINTS "${PCL_DIR}/share/pcl-1.7" REQUIRED)
# Find OpenCV at the default location
find_package(OpenCV HINTS "${OPENCV_DIR}" REQUIRED)
# Only link with required opencv modules.
set(OpenCV_LIBS opencv_core opencv_imgproc opencv_highgui)
# Add the Eigen and OpenCV include dirs.
# Necessary since the PCL_INCLUDE_DIR variable set by find_package is broken.
include_directories(${EIGEN_ROOT})
include_directories(${OpenCV_INCLUDE_DIRS})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -std=c++11")
set(PANGOLIN_ROOT ${CMAKE_BINARY_DIR}/../SuperBuild/install)
set(ORB_SLAM_ROOT ${CMAKE_BINARY_DIR}/../SuperBuild/src/orb_slam2)
include_directories(${ORB_SLAM_ROOT})
include_directories(${ORB_SLAM_ROOT}/include)
link_directories(${PANGOLIN_ROOT}/lib)
link_directories(${ORB_SLAM_ROOT}/lib)
# Add source directory
aux_source_directory("./src" SRC_LIST)
# Add executable
add_executable(${PROJECT_NAME} ${SRC_LIST})
target_link_libraries(odm_slam ${OpenCV_LIBS} ORB_SLAM2 pangolin)

View file

@@ -0,0 +1,98 @@
#include <iostream>
#include <unistd.h> // usleep()
#include <opencv2/opencv.hpp>
#include <System.h>
#include <Converter.h>
void SaveKeyFrameTrajectory(ORB_SLAM2::Map *map, const string &filename, const string &tracksfile) {
std::cout << std::endl << "Saving keyframe trajectory to " << filename << " ..." << std::endl;
vector<ORB_SLAM2::KeyFrame*> vpKFs = map->GetAllKeyFrames();
sort(vpKFs.begin(), vpKFs.end(), ORB_SLAM2::KeyFrame::lId);
std::ofstream f;
f.open(filename.c_str());
f << fixed;
std::ofstream fpoints;
fpoints.open(tracksfile.c_str());
fpoints << fixed;
for(size_t i = 0; i < vpKFs.size(); i++) {
ORB_SLAM2::KeyFrame* pKF = vpKFs[i];
if(pKF->isBad())
continue;
cv::Mat R = pKF->GetRotation().t();
vector<float> q = ORB_SLAM2::Converter::toQuaternion(R);
cv::Mat t = pKF->GetCameraCenter();
f << setprecision(6) << pKF->mTimeStamp << setprecision(7) << " " << t.at<float>(0) << " " << t.at<float>(1) << " " << t.at<float>(2)
<< " " << q[0] << " " << q[1] << " " << q[2] << " " << q[3] << std::endl;
for (auto point : pKF->GetMapPoints()) {
auto coords = point->GetWorldPos();
fpoints << setprecision(6)
<< pKF->mTimeStamp
<< " " << point->mnId
<< setprecision(7)
<< " " << coords.at<float>(0, 0)
<< " " << coords.at<float>(1, 0)
<< " " << coords.at<float>(2, 0)
<< std::endl;
}
}
f.close();
fpoints.close();
std::cout << std::endl << "trajectory saved!" << std::endl;
}
int main(int argc, char **argv) {
if(argc != 4) {
std::cerr << std::endl <<
"Usage: " << argv[0] << " vocabulary settings video" <<
std::endl;
return 1;
}
cv::VideoCapture cap(argv[3]);
if(!cap.isOpened()) {
std::cerr << "Failed to load video: " << argv[3] << std::endl;
return -1;
}
ORB_SLAM2::System SLAM(argv[1], argv[2], ORB_SLAM2::System::MONOCULAR, true);
usleep(10 * 1e6);
std::cout << "Start processing video ..." << std::endl;
double T = 0.1; // Seconds between frames
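// This assumed frame period must match the hardcoded T = 0.1 in
// orb_slam_to_opensfm.py, which maps timestamps back to frame ids.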
cv::Mat im;
int num_frames = cap.get(CV_CAP_PROP_FRAME_COUNT);
for(int ni = 0;; ++ni){
std::cout << "processing frame " << ni << "/" << num_frames << std::endl;
// Get frame
bool res = false;
for (int trial = 0; !res && trial < 20; ++trial) {
std::cout << "trial " << trial << std::endl;
res = cap.read(im);
}
if(!res) break;
double timestamp = ni * T;
SLAM.TrackMonocular(im, timestamp);
//usleep(int(T * 1e6));
}
SLAM.Shutdown();
SaveKeyFrameTrajectory(SLAM.GetMap(), "KeyFrameTrajectory.txt", "MapPoints.txt");
return 0;
}

View file

@@ -0,0 +1,152 @@
#!/usr/bin/env python
import argparse
import sys
import numpy as np
import cv2
class Calibrator:
"""Camera calibration using a chessboard pattern."""
def __init__(self, pattern_width, pattern_height, motion_threshold=0.05):
"""Init the calibrator.
The parameter motion_threshold determines the minimal motion required
to add a new frame to the calibration data, as a ratio of image width.
"""
self.pattern_size = (pattern_width, pattern_height)
self.motion_threshold = motion_threshold
self.pattern_points = np.array([
(i, j, 0.0)
for j in range(pattern_height)
for i in range(pattern_width)
], dtype=np.float32)
self.object_points = []
self.image_points = []
def process_image(self, image, window_name):
"""Find corners of an image and store them internally."""
if len(image.shape) == 3:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray = image
h, w = gray.shape
self.image_size = (w, h)
found, corners = cv2.findChessboardCorners(gray, self.pattern_size)
if found:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
self._add_points(corners.reshape(-1, 2))
if window_name:
cv2.drawChessboardCorners(image, self.pattern_size, corners, found)
cv2.imshow(window_name, image)
return found
def calibrate(self):
"""Run calibration using points extracted by process_image."""
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
self.object_points, self.image_points, self.image_size, None, None)
return rms, camera_matrix, dist_coefs.ravel()
def _add_points(self, image_points):
if self.image_points:
delta = np.fabs(image_points - self.image_points[-1]).max()
should_add = (delta > self.image_size[0] * self.motion_threshold)
else:
should_add = True
if should_add:
self.image_points.append(image_points)
self.object_points.append(self.pattern_points)
def video_frames(filename):
"""Yield frames in a video."""
cap = cv2.VideoCapture(filename)
while True:
ret, frame = cap.read()
if ret:
yield frame
else:
break
cap.release()
def orb_slam_calibration_config(camera_matrix, dist_coefs):
"""String with calibration parameters in orb_slam config format."""
lines = [
"# Camera calibration and distortion parameters (OpenCV)",
"Camera.fx: {}".format(camera_matrix[0, 0]),
"Camera.fy: {}".format(camera_matrix[1, 1]),
"Camera.cx: {}".format(camera_matrix[0, 2]),
"Camera.cy: {}".format(camera_matrix[1, 2]),
"",
"Camera.k1: {}".format(dist_coefs[0]),
"Camera.k2: {}".format(dist_coefs[1]),
"Camera.p1: {}".format(dist_coefs[2]),
"Camera.p2: {}".format(dist_coefs[3]),
"Camera.k3: {}".format(dist_coefs[4]),
]
return "\n".join(lines)
def parse_arguments():
parser = argparse.ArgumentParser(
description="Camera calibration from video of a chessboard.")
parser.add_argument(
'video',
help="video of the checkerboard")
parser.add_argument(
'--output',
default='calibration',
help="base name for the output files")
parser.add_argument(
'--size',
default='8x6',
help="size of the chessboard")
parser.add_argument(
'--visual',
action='store_true',
help="display images while calibrating")
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
pattern_size = [int(i) for i in args.size.split('x')]
calibrator = Calibrator(pattern_size[0], pattern_size[1])
window_name = None
if args.visual:
window_name = 'Chessboard detection'
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
print "kept\tcurrent\tchessboard found"
for i, frame in enumerate(video_frames(args.video)):
found = calibrator.process_image(frame, window_name)
print "{}\t{}\t{} \r".format(
len(calibrator.image_points), i, found),
sys.stdout.flush()
if args.visual:
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
rms, camera_matrix, dist_coefs = calibrator.calibrate()
print
print "RMS:", rms
print
print orb_slam_calibration_config(camera_matrix, dist_coefs)
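For reference, the same pipeline can also be driven programmatically with the pieces defined above (a minimal sketch; the video file name is made up):

calibrator = Calibrator(8, 6)  # 8x6 inner corners, as with --size 8x6
for frame in video_frames('chessboard.mp4'):
    calibrator.process_image(frame, None)  # None: skip the preview window
rms, camera_matrix, dist_coefs = calibrator.calibrate()
print orb_slam_calibration_config(camera_matrix, dist_coefs)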

View file

@@ -0,0 +1,196 @@
import argparse
import json
import os
import yaml
import cv2
import numpy as np
from opensfm import transformations as tf
from opensfm.io import mkdir_p
SCALE = 50
def parse_orb_slam2_config_file(filename):
'''
Parse ORB_SLAM2 config file.
Parsing manually since neither pyyaml nor cv2.FileStorage seem to work.
'''
res = {}
with open(filename) as fin:
lines = fin.readlines()
for line in lines:
line = line.strip()
if line and line[0] != '#' and ':' in line:
key, value = line.split(':', 1)
res[key.strip()] = value.strip()
return res
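The format being parsed is OpenCV's YAML dialect; a typical settings fragment looks like the one below (values shown only for illustration, in the style of ORB_SLAM2's example configs). The leading %YAML:1.0 directive is not standard YAML, which is one reason ordinary parsers struggle with these files:

%YAML:1.0
# Camera calibration and distortion parameters (OpenCV)
Camera.fx: 517.306408
Camera.fy: 516.469215
Camera.cx: 318.643040
Camera.cy: 255.313989
Camera.k1: 0.262383
Camera.k2: -0.953104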
def camera_from_config(video_filename, config_filename):
'''
Create an OpenSfM camera definition from an ORB_SLAM2 config
'''
config = parse_orb_slam2_config_file(config_filename)
fx = float(config['Camera.fx'])
fy = float(config['Camera.fy'])
cx = float(config['Camera.cx'])
cy = float(config['Camera.cy'])
k1 = float(config['Camera.k1'])
k2 = float(config['Camera.k2'])
p1 = float(config['Camera.p1'])
p2 = float(config['Camera.p2'])
width, height = get_video_size(video_filename)
size = max(width, height)
return {
'width': width,
'height': height,
'focal': np.sqrt(fx * fy) / size,
'k1': k1,
'k2': k2
}
def shot_id_from_timestamp(timestamp):
T = 0.1 # TODO(pau) get this from config
i = int(round(timestamp / T))
return 'frame{0:06d}.png'.format(i)
def shots_from_trajectory(trajectory_filename):
'''
Create opensfm shots from an orb_slam2/TUM trajectory
'''
shots = {}
with open(trajectory_filename) as fin:
lines = fin.readlines()
for line in lines:
a = map(float, line.split())
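# TUM trajectory format: timestamp tx ty tz qx qy qz qw, i.e. the camera
# center c and a camera-to-world quaternion q.
# tf.quaternion_matrix expects (w, x, y, z), hence the reordering below;
# the transpose and t = -R.dot(c) convert to the world-to-camera pose.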
timestamp = a[0]
c = np.array(a[1:4])
q = np.array(a[4:8])
R = tf.quaternion_matrix([q[3], q[0], q[1], q[2]])[:3, :3].T
t = -R.dot(c) * SCALE
shot = {
'camera': 'slamcam',
'rotation': list(cv2.Rodrigues(R)[0].flat),
'translation': list(t.flat),
'created_at': timestamp,
}
shots[shot_id_from_timestamp(timestamp)] = shot
return shots
def points_from_map_points(filename):
points = {}
with open(filename) as fin:
lines = fin.readlines()
for line in lines:
words = line.split()
point_id = words[1]
coords = map(float, words[2:5])
coords = [SCALE * i for i in coords]
points[point_id] = {
'coordinates': coords,
'color': [100, 0, 200]
}
return points
def tracks_from_map_points(filename):
tracks = []
with open(filename) as fin:
lines = fin.readlines()
for line in lines:
words = line.split()
timestamp = float(words[0])
shot_id = shot_id_from_timestamp(timestamp)
point_id = words[1]
row = [shot_id, point_id, point_id, '0', '0', '0', '0', '0']
tracks.append('\t'.join(row))
return '\n'.join(tracks)
def get_video_size(video):
cap = cv2.VideoCapture(video)
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
cap.release()
return width, height
def extract_keyframes_from_video(video, reconstruction):
'''
Reads video and extracts a frame for each shot in reconstruction
'''
image_path = 'images'
mkdir_p(image_path)
T = 0.1 # TODO(pau) get this from config
cap = cv2.VideoCapture(video)
video_idx = 0
shot_ids = sorted(reconstruction['shots'].keys())
for shot_id in shot_ids:
shot = reconstruction['shots'][shot_id]
timestamp = shot['created_at']
keyframe_idx = int(round(timestamp / T))
while video_idx <= keyframe_idx:
for i in range(20):
ret, frame = cap.read()
if ret:
break
else:
print 'retrying'
if not ret:
raise RuntimeError(
'Could not find keyframe {} in video'.format(shot_id))
if video_idx == keyframe_idx:
cv2.imwrite(os.path.join(image_path, shot_id), frame)
video_idx += 1
cap.release()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Convert ORB_SLAM2 output to OpenSfM')
parser.add_argument(
'video',
help='the tracked video file')
parser.add_argument(
'trajectory',
help='the trajectory file')
parser.add_argument(
'points',
help='the map points file')
parser.add_argument(
'config',
help='config file with camera calibration')
args = parser.parse_args()
r = {
'cameras': {},
'shots': {},
'points': {},
}
r['cameras']['slamcam'] = camera_from_config(args.video, args.config)
r['shots'] = shots_from_trajectory(args.trajectory)
r['points'] = points_from_map_points(args.points)
tracks = tracks_from_map_points(args.points)
with open('reconstruction.json', 'w') as fout:
json.dump([r], fout, indent=4)
with open('tracks.csv', 'w') as fout:
fout.write(tracks)
extract_keyframes_from_video(args.video, r)
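The ODMSlamCell below drives this script with the files written by odm_slam (KeyFrameTrajectory.txt and MapPoints.txt) plus the original video and SLAM settings, e.g.:

python orb_slam_to_opensfm.py flight.mp4 KeyFrameTrajectory.txt MapPoints.txt orb_slam_config.yaml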

View file

@@ -0,0 +1,53 @@
#!/usr/bin/env python
import argparse
import os
import cv2
import numpy as np
import opensfm.dataset as dataset
import opensfm.io as io
def opencv_calibration_matrix(width, height, focal):
'''Calibration matrix as used by OpenCV and PMVS
'''
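# OpenSfM stores the focal length normalized by the larger image dimension,
# so convert back to pixels here.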
f = focal * max(width, height)
return np.matrix([[f, 0, 0.5 * (width - 1)],
[0, f, 0.5 * (height - 1)],
[0, 0, 1.0]])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Undistort images')
parser.add_argument('dataset', help='path to the dataset to be processed')
parser.add_argument('--output', help='output folder for the undistorted images')
args = parser.parse_args()
data = dataset.DataSet(args.dataset)
if args.output:
output_path = args.output
else:
output_path = os.path.join(data.data_path, 'undistorted')
print "Undistorting images from dataset [%s] to dir [%s]" % (data.data_path, output_path)
io.mkdir_p(output_path)
reconstructions = data.load_reconstruction()
for h, reconstruction in enumerate(reconstructions):
print "undistorting reconstruction", h
for image in reconstruction['shots']:
print "undistorting image", image
shot = reconstruction["shots"][image]
original_image = data.image_as_array(image)[:,:,::-1]
camera = reconstruction['cameras'][shot['camera']]
original_h, original_w = original_image.shape[:2]
K = opencv_calibration_matrix(original_w, original_h, camera['focal'])
k1 = camera["k1"]
k2 = camera["k2"]
undistorted_image = cv2.undistort(original_image, K, np.array([k1, k2, 0, 0]))
new_image_path = os.path.join(output_path, image.split('/')[-1])
cv2.imwrite(new_image_path, undistorted_image)

View file

@@ -2,7 +2,7 @@ import argparse
import context
# parse arguments
processopts = ['resize', 'opensfm', 'slam', 'cmvs', 'pmvs',
'odm_meshing', 'mvs_texturing', 'odm_georeferencing',
'odm_orthophoto']
@@ -60,6 +60,14 @@ def config():
choices=processopts,
help=('Can be one of:' + ' | '.join(processopts)))
parser.add_argument('--video',
metavar='<string>',
help='Path to the video file to process')
parser.add_argument('--slam-config',
metavar='<string>',
help='Path to config file for orb-slam')
parser.add_argument('--force-focal',
metavar='<positive float>',
type=float,
@@ -114,6 +122,13 @@ def config():
'images based on GPS exif data. Set to 0 to skip '
'pre-matching. Default: %(default)s')
parser.add_argument('--opensfm-processes',
metavar='<positive integer>',
default=context.num_cores,
type=int,
help=('The maximum number of processes to use in dense '
'reconstruction. Default: %(default)s'))
parser.add_argument('--use-opensfm-pointcloud',
action='store_true',
default=False,

View file

@@ -18,6 +18,9 @@ sys.path.append(pyopencv_path)
opensfm_path = os.path.join(superbuild_path, "src/opensfm")
ccd_widths_path = os.path.join(opensfm_path, 'opensfm/data/sensor_data.json')
# define orb_slam2 path
orb_slam2_path = os.path.join(superbuild_path, "src/orb_slam2")
# define pmvs path
cmvs_path = os.path.join(superbuild_path, "install/bin/cmvs")
cmvs_opts_path = os.path.join(superbuild_path, "install/bin/genOption")
@@ -32,6 +35,7 @@ pdal_path = os.path.join(superbuild_path, 'build/pdal/bin')
# define odm modules path
odm_modules_path = os.path.join(root_path, "build/bin")
odm_modules_src_path = os.path.join(root_path, "modules")
# Define supported image extensions
supported_extensions = {'.jpg','.jpeg','.png'}

View file

@@ -378,6 +378,8 @@ class ODM_Tree(object):
self.odm_meshing_log = io.join_paths(self.odm_meshing, 'odm_meshing_log.txt')
# texturing
self.odm_texturing_undistorted_image_path = io.join_paths(
self.odm_texturing, 'undistorted')
self.odm_textured_model_obj = io.join_paths(
self.odm_texturing, 'odm_textured_model.obj')
self.odm_textured_model_mtl = io.join_paths(

View file

@@ -9,6 +9,7 @@ from opendm import system
from dataset import ODMLoadDatasetCell
from resize import ODMResizeCell
from opensfm import ODMOpenSfMCell
from odm_slam import ODMSlamCell
from pmvs import ODMPmvsCell
from cmvs import ODMCmvsCell
from odm_meshing import ODMeshingCell
@@ -43,9 +44,10 @@ class ODMApp(ecto.BlackBox):
'opensfm': ODMOpenSfMCell(use_exif_size=False,
feature_process_size=p.args.resize_to,
feature_min_frames=p.args.min_num_features,
processes=p.args.opensfm_processes,
matching_gps_neighbors=p.args.matcher_neighbors,
matching_gps_distance=p.args.matcher_distance),
'slam': ODMSlamCell(),
'cmvs': ODMCmvsCell(max_images=p.args.cmvs_maxImages),
'pmvs': ODMPmvsCell(level=p.args.pmvs_level,
csize=p.args.pmvs_csize,
@@ -87,13 +89,15 @@ class ODMApp(ecto.BlackBox):
b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))
def connections(self, _p):
if _p.args.video:
return self.slam_connections(_p)
# define initial task
# TODO: What is this?
# initial_task = _p.args['start_with']
# initial_task_id = config.processopts.index(initial_task)
# define the connections like you would for the plasm
# connections = []
# load the dataset
connections = [self.tree[:] >> self.dataset['tree']]
@@ -146,3 +150,33 @@ class ODMApp(ecto.BlackBox):
self.georeferencing['reconstruction'] >> self.orthophoto['reconstruction']]
return connections
def slam_connections(self, _p):
"""Get connections used when running from video instead of images."""
connections = []
# run slam cell
connections += [self.tree[:] >> self.slam['tree'],
self.args[:] >> self.slam['args']]
# run cmvs
connections += [self.tree[:] >> self.cmvs['tree'],
self.args[:] >> self.cmvs['args'],
self.slam['reconstruction'] >> self.cmvs['reconstruction']]
# run pmvs
connections += [self.tree[:] >> self.pmvs['tree'],
self.args[:] >> self.pmvs['args'],
self.cmvs['reconstruction'] >> self.pmvs['reconstruction']]
# create odm mesh
connections += [self.tree[:] >> self.meshing['tree'],
self.args[:] >> self.meshing['args'],
self.pmvs['reconstruction'] >> self.meshing['reconstruction']]
# create odm texture
connections += [self.tree[:] >> self.texturing['tree'],
self.args[:] >> self.texturing['args'],
self.meshing['reconstruction'] >> self.texturing['reconstruction']]
return connections

scripts/odm_slam.py (new file, mode 100644, 111 lines)

View file

@@ -0,0 +1,111 @@
"""Cell to run odm_slam."""
import os
import ecto
from opendm import log
from opendm import io
from opendm import system
from opendm import context
class ODMSlamCell(ecto.Cell):
"""Run odm_slam on a video and export to opensfm format."""
def declare_params(self, params):
"""Cell parameters."""
pass
def declare_io(self, params, inputs, outputs):
"""Cell inputs and outputs."""
inputs.declare("tree", "Struct with paths", [])
inputs.declare("args", "The application arguments.", {})
outputs.declare("reconstruction", "list of ODMReconstructions", [])
def process(self, inputs, outputs):
"""Run the cell."""
log.ODM_INFO('Running ODM Slam Cell')
# get inputs
tree = self.inputs.tree
args = self.inputs.args
if not args.video:
log.ODM_ERROR('No video provided')
return ecto.QUIT
video = os.path.join(tree.root_path, args.video)
slam_config = os.path.join(tree.root_path, args.slam_config)
# create working directories
system.mkdir_p(tree.opensfm)
system.mkdir_p(tree.pmvs)
vocabulary = os.path.join(context.orb_slam2_path,
'Vocabulary/ORBvoc.txt')
orb_slam_cmd = os.path.join(context.odm_modules_path, 'odm_slam')
trajectory = os.path.join(tree.opensfm, 'KeyFrameTrajectory.txt')
map_points = os.path.join(tree.opensfm, 'MapPoints.txt')
# check if we rerun cell or not
rerun_cell = args.rerun == 'slam'
# check if slam was run before
if not io.file_exists(trajectory) or rerun_cell:
# run slam binary
system.run(' '.join([
'cd {} &&'.format(tree.opensfm),
orb_slam_cmd,
vocabulary,
slam_config,
video,
]))
else:
log.ODM_WARNING('Found a valid slam trajectory in: {}'.format(
trajectory))
# check if trajectory was exported to opensfm before
if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
# convert slam to opensfm
system.run(' '.join([
'cd {} &&'.format(tree.opensfm),
'PYTHONPATH={}:{}'.format(context.pyopencv_path,
context.opensfm_path),
'python',
os.path.join(context.odm_modules_src_path,
'odm_slam/src/orb_slam_to_opensfm.py'),
video,
trajectory,
map_points,
slam_config,
]))
# link opensfm images to resized images
os.symlink(tree.opensfm + '/images', tree.dataset_resize)
else:
log.ODM_WARNING('Found a valid OpenSfM file in: {}'.format(
tree.opensfm_reconstruction))
# check if reconstruction was exported to bundler before
if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
# convert back to bundler's format
system.run(
'PYTHONPATH={} {}/bin/export_bundler {}'.format(
context.pyopencv_path, context.opensfm_path, tree.opensfm))
else:
log.ODM_WARNING(
'Found a valid Bundler file in: {}'.format(
tree.opensfm_reconstruction))
# check if reconstruction was exported to pmvs before
if not io.file_exists(tree.pmvs_visdat) or rerun_cell:
# run PMVS converter
system.run(
'PYTHONPATH={} {}/bin/export_pmvs {} --output {}'.format(
context.pyopencv_path, context.opensfm_path, tree.opensfm,
tree.pmvs))
else:
log.ODM_WARNING('Found a valid CMVS file in: {}'.format(
tree.pmvs_visdat))
log.ODM_INFO('Running ODM Slam Cell - Finished')
return ecto.OK if args.end_with != 'odm_slam' else ecto.QUIT

View file

@@ -1,3 +1,5 @@
import os
import ecto
from opendm import log
@@ -43,6 +45,28 @@ class ODMTexturingCell(ecto.Cell):
(args.rerun_from is not None and
'odm_texturing' in args.rerun_from)
# Undistort radial distortion
if not os.path.isdir(tree.odm_texturing_undistorted_image_path) or rerun_cell:
system.run(' '.join([
'cd {} &&'.format(tree.opensfm),
'PYTHONPATH={}:{}'.format(context.pyopencv_path,
context.opensfm_path),
'python',
os.path.join(context.odm_modules_src_path,
'odm_slam/src/undistort_radial.py'),
'--output',
tree.odm_texturing_undistorted_image_path,
tree.opensfm,
]))
system.run(
'PYTHONPATH=%s %s/bin/export_bundler %s' %
(context.pyopencv_path, context.opensfm_path, tree.opensfm))
else:
log.ODM_WARNING(
'Found a valid Bundler file in: %s' %
(tree.opensfm_reconstruction))
if not io.file_exists(tree.odm_textured_model_obj) or rerun_cell:
log.ODM_DEBUG('Writing ODM Textured file in: %s'
% tree.odm_textured_model_obj)
@@ -52,7 +76,7 @@
'bin': context.odm_modules_path,
'out_dir': tree.odm_texturing,
'bundle': tree.opensfm_bundle,
'imgs_path': tree.odm_texturing_undistorted_image_path,
'imgs_list': tree.opensfm_bundle_list,
'model': tree.odm_mesh,
'log': tree.odm_texuring_log,