Mirror of https://github.com/OpenDroneMap/ODM
Compare commits
No commits in common. "master" and "v3.5.5" have entirely different histories.
@@ -9,7 +9,7 @@ on:
 jobs:
   build:
-    runs-on: windows-2022
+    runs-on: windows-2019
     steps:
     - name: Checkout
       uses: actions/checkout@v2
@@ -35,4 +35,3 @@ venv/
 python38/
 dist/
 innosetup/
-.DS_Store
@@ -26,8 +26,7 @@ Run ODM by placing some images (JPEGs, TIFFs or DNGs) in a folder named “images”
 ```bash
 # Windows
 docker run -ti --rm -v c:/Users/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
-```
-```bash
+
 # Mac/Linux
 docker run -ti --rm -v /home/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
 ```
@@ -95,7 +94,7 @@ run C:\Users\youruser\datasets\project [--additional --parameters --here]
 ODM has support for doing SIFT feature extraction on a GPU, which is about 2x faster than the CPU on a typical consumer laptop. To use this feature, you need to use the `opendronemap/odm:gpu` docker image instead of `opendronemap/odm` and you need to pass the `--gpus all` flag:
 
 ```
-docker run -ti --rm -v c:/Users/youruser/datasets:/datasets --gpus all opendronemap/odm:gpu --project-path /datasets project --feature-type sift
+docker run -ti --rm -v c:/Users/youruser/datasets:/datasets --gpus all opendronemap/odm:gpu --project-path /datasets project
 ```
 
 When you run ODM, if the GPU is recognized, in the first few lines of output you should see:
@@ -184,7 +184,7 @@ set(custom_libs OpenSfM
 
 externalproject_add(mve
   GIT_REPOSITORY  https://github.com/OpenDroneMap/mve.git
-  GIT_TAG         356
+  GIT_TAG         290
   UPDATE_COMMAND  ""
   SOURCE_DIR      ${SB_SOURCE_DIR}/mve
   CMAKE_ARGS      ${WIN32_CMAKE_ARGS} ${APPLE_CMAKE_ARGS}
VERSION
@@ -1 +1 @@
-3.5.6
+3.5.5
@@ -58,7 +58,7 @@ ensure_prereqs() {
     if [[ "$UBUNTU_VERSION" == *"20.04"* ]]; then
         echo "Enabling PPA for Ubuntu GIS"
         sudo $APT_GET install -y -qq --no-install-recommends software-properties-common
-        sudo add-apt-repository ppa:ubuntugis/ppa
+        sudo add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
         sudo $APT_GET update
     fi
 
@@ -1,19 +0,0 @@
-# Fix Ply
-
-Use to translate a modified ply into a compatible format for subsequent steps in ODM. Via Jaime Chacoff, https://community.opendronemap.org/t/edited-point-cloud-with-cloudcompare-wont-rerun-from-odm-meshing/21449/6
-
-The basic idea is to process through ODM until the point cloud is created, use a 3rd party tool, like CloudCompare to edit the point cloud, and then continue processing in OpenDroneMap.
-
-This useful bit of python will convert the PLY exported from CloudCompare back into a compatible format for continued processing in OpenDroneMap.
-
-1. Run project in WebODM and add this to your settings: `end-with: odm-filterpoints`
-1. Once complete, go to your NodeODM container and copy `/var/www/data/[Task ID]/odm-filterpoints` directory
-1. Open CloudCompare and from `odm-filterpoints` directory you've copied, open `point_cloud.ply`
-1. In the box that pops up, add a scalar field `vertex - views`
-1. To see the actual colours again - select the point cloud, then in properties change colours from "Scalar field" to "RGB"
-1. Make your changes to the point cloud
-1. Compute normals (Edit > Normals > Compute)
-1. Save PLY file as ASCII
-1. Run Python file above to fix PLY file and convert to binary
-1. Copy `odm_filterpoints` directory (or just `point_cloud.ply`) back into NodeODM container
-1. Restart project in WebODM "From Meshing" (don't forget to edit settings to remove `end-with: odm-filterpoints` or it's not going to do anything).
@@ -1,68 +0,0 @@
-import os
-import logging
-from plyfile import PlyData, PlyElement
-import numpy as np
-
-# Configure logging
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
-
-def pcd_ascii_to_binary_ply(ply_file: str, binary_ply: str) -> None:
-    """Converts ASCII PLY to binary, ensuring 'views' is present and of type uchar.
-    Raises ValueError if neither 'scalar_views' nor 'views' is found.
-    """
-
-    try:
-        logging.info(f"Reading ASCII PLY file: {ply_file}")
-        ply_data: PlyData = PlyData.read(ply_file)
-    except FileNotFoundError:
-        logging.error(f"File not found: {ply_file}")
-        return
-    except Exception as e:
-        logging.error(f"Error reading PLY file: {e}")
-        return
-
-    new_elements: list[PlyElement] = []
-
-    for element in ply_data.elements:
-        new_data = element.data.copy()
-
-        if 'scalar_views' in element.data.dtype.names:
-            new_data['views'] = new_data['scalar_views'].astype('u1')
-            del new_data['scalar_views']
-        elif 'views' in element.data.dtype.names:
-            new_data['views'] = new_data['views'].astype('u1')
-        else:
-            raise ValueError(f"Neither 'scalar_views' nor 'views' found - did you import them when opened the file in CloudCompare?")
-
-
-        new_element = PlyElement.describe(new_data, element.name)
-        new_elements.append(new_element)
-
-    new_ply_data = PlyData(new_elements, text=False)
-
-    try:
-        logging.info(f"Writing binary PLY file: {binary_ply}")
-        new_ply_data.write(binary_ply)
-    except Exception as e:
-        logging.error(f"Error writing PLY file: {e}")
-        return
-
-    logging.info("PLY conversion complete.")
-
-
-if __name__ == '__main__':
-
-    # Parameters
-    base: str = os.path.dirname(os.path.abspath(__file__))
-    ply_file: str = os.path.join(base, 'point_cloud_ascii.ply')
-    binary_ply_file: str = os.path.join(base, 'point_cloud.ply')
-
-    if not os.path.exists(ply_file):
-        logging.error(f"Input file not found: {ply_file}")
-        exit(1)  # Exit with error code
-
-    try:
-        pcd_ascii_to_binary_ply(ply_file, binary_ply_file)
-    except ValueError as e:
-        logging.error(f"PLY conversion failed: {e}")
-        exit(1)  # Exit with error code to indicate failure
@@ -1,64 +0,0 @@
-# Plugin Time-SIFT
-
-This script does Time-SIFT processing with ODM. Time-SIFT is a method for multi-temporal analysis without the need to co-registrate the data.
-
-> D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in a single SfM block allows 3-D change detection with almost exclusively image information, ISPRS Journal of Photogrammetry and Remote Sensing, Volume 146, 2018, Pages 495-506, ISSN 0924-2716, doi: 10.1016/j.isprsjprs.2018.10.016
-(https://doi.org/10.1016/j.isprsjprs.2018.10.016)
-
-## Requirements
-* ODM ! :-)
-* subprocess
-* json
-* os
-* shutil
-* pathlib
-* sys
-* argparse
-* textwrap
-
-## Usage
-
-### Provided example
-Download or clone [this repo](https://forge.inrae.fr/Denis.Feurer/timesift-odm-data-example.git) to get example data.
-
-Then execute
-```
-python Timesift_odm.py datasets --end-with odm_filterpoints
-```
-It should make the Time-SIFT processing on the downloaded example data, stopping after the filtered dense clouds step.
-
-In the destination dir, you should obtain new directories, ```0_before``` and ```1_after``` at the same level as the ```time-sift-block``` directory. These new directories contain all the results natively co-registered.
-
-You can then use [CloudCompare](https://cloudcompare.org/) to compute distance between the ```datasets/0_before/odm_filterpoints/point_cloud.ply``` and the ```datasets/1_after/odm_filterpoints/point_cloud.ply``` and obtain this image showing the difference between the two 3D surfaces. Here, two soil samples were excavated as can be seen on the image below.
-![]()
-
-### Your own data
-In your dataset directory (usually ```datasets```, but you can have chosen another name) you have to prepare a Time-SIFT project directory (default name : ```time-sift-block```, *can be tuned via a parameter*) that contains :
-* ```images/``` : a subdirectory with all images of all epochs. This directory name is fixed as it is the one expected by ODM
-* ```images_epochs.txt``` : a file that has the same format as the file used for the split and merge ODM function. This file name *can be tuned via a parameter*.
-
-The ```images_epochs.txt``` file has two columns, the first column contains image names and the second contains the epoch name as follows
-```
-DSC_0368.JPG 0_before
-DSC_0369.JPG 0_before
-DSC_0370.JPG 0_before
-DSC_0389.JPG 1_after
-DSC_0390.JPG 1_after
-DSC_0391.JPG 1_after
-```
-
-Your directory, before running the script, should look like this :
-```
-$PWD/datasets/
-└── time-sift-block/
-    ├── images/
-    └── images_epochs.txt
-```
-
-At the end of the script you obtain a directory by epoch (at the same level as the Time-SIFT project directory). Each directory is processed with images of each epoch and all results are natively co-registered due to the initial sfm step done with all images.
-```
-$PWD/datasets/
-├── 0_before/
-├── 1_after/
-└── time-sift-block/
-```
@@ -1,167 +0,0 @@
-# Script for Time-SIFT multi-temporal images alignment with ODM
-#
-# This is python script for ODM, based on the following publication :
-#
-# D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in a single SfM block allows 3-D change detection
-# with almost exclusively image information, ISPRS Journal of Photogrammetry and Remote Sensing, Volume 146, 2018,
-# Pages 495-506, ISSN 0924-2716, https://doi.org/10.1016/j.isprsjprs.2018.10.016.
-
-import subprocess
-import json
-import os
-import shutil
-from pathlib import Path
-import sys
-import argparse
-import textwrap
-
-def main(argv):
-    # Parsing and checking args
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\
-            Timesift_odm.py datasetdir [-t <timesift-dir>] [-i <imageepochs-file>] [<options passed to ODM>]
-
-            you can add options passed to ODM, for instance [--end-with odm_filterpoints] so that the final step is point clouds
-            these options are not checked for before the final runs of each epoch, so use it carefully
-            '''))
-    parser.add_argument('datasetdir', help='dataset directory')
-    parser.add_argument('-t', '--timesift-dir',
-                        help='Time-SIFT directory ; default value : "time-sift-block" # must be in the datasetdir')
-    parser.add_argument('-i', '--imageepochs-file',
-                        help='Text file describing epochs ; default value : "images_epochs.txt" # must be in the TIMESIFT_DIR ')
-    args, additional_options_to_rerun = parser.parse_known_args()
-    datasets_DIR = Path(args.datasetdir).absolute().as_posix()
-    if args.timesift_dir:
-        timesift_DIR = args.timesift_dir
-    else:
-        timesift_DIR = 'time-sift-block'
-    if args.imageepochs_file:
-        images_epochs_file = args.imageepochs_file
-    else:
-        images_epochs_file = 'images_epochs.txt'
-    if '-h' in sys.argv or '--help' in sys.argv:
-        parser.print_help()
-        sys.exit()
-    if additional_options_to_rerun:  # for instance, --end-with odm_filterpoints
-        print(f'[Time-SIFT] Options passed to ODM for the final steps: {additional_options_to_rerun}')
-        print(f'[Time-SIFT] \033[93mWARNING there is no check of these options done before the last ODM call\033[0m')
-
-    def check_path_args(var: Path):
-        if not var.exists():
-            print(
-                f'\033[91m[Time-SIFT] ERROR: the {var.as_posix()} directory does not exist. Exiting program\033[0m')
-            exit()
-
-    check_path_args(Path(datasets_DIR))
-    check_path_args(Path(datasets_DIR, timesift_DIR))
-    check_path_args(Path(datasets_DIR, timesift_DIR, images_epochs_file))
-
-    def clean_reconstruction_dict(subdict, key, images):
-        """
-        Delete subdict elements where the key do not match any name in the images list.
-        To create the {epoch} block with only images of this epoch
-        """
-        # The list of valid images is prepared by removing any extension (to be robust to the .tif added by ODM)
-        valid_images = {os.path.basename(image).split(os.extsep)[0] for image in images}
-        for item_key in list(subdict[key]):
-            image_name = os.path.basename(item_key).split(os.extsep)[0]
-            if image_name not in valid_images:
-                del subdict[key][item_key]
-
-    ### Read images.txt file and create a dict of images/epochs
-    images_epochs_dict = {}
-    with open(Path(datasets_DIR, timesift_DIR, images_epochs_file), 'r') as f:
-        for line in f:
-            line = line.strip()
-            if not line:
-                continue  # Empty lines are skipped
-            image, epoch = line.split()
-            if epoch not in images_epochs_dict:
-                images_epochs_dict[epoch] = []
-            images_epochs_dict[epoch].append(image)
-
-    ### Check for existing epochs directories before computing anything (these directories must be deleted by hand)
-    path_exists_error = False
-    for epoch in images_epochs_dict:
-        if Path(datasets_DIR, epoch).exists():
-            if path_exists_error:
-                print(f"sudo rm -rf {Path(datasets_DIR, epoch).as_posix()}")
-            else:
-                print(f'\033[91m[Time-SIFT] ERROR: {Path(datasets_DIR, epoch).as_posix()} already exists.\033[0m')
-                print(f"                  Other epochs probably also exist.")
-                print(
-                    f"                  The problem is \033[93mI CAN'T\033[0m delete it by myself, it requires root privileges.")
-                print(
-                    f"                  The good news is \033[92mYOU CAN\033[0m do it with the following command (be careful).")
-                print(f'\033[91m => Consider doing it (at your own risks). Exiting program\033[0m')
-                print(f"- Commands to copy/paste (I'm kind, I prepared all the necessary commands for you).")
-                print(f"sudo rm -rf {Path(datasets_DIR, epoch).as_posix()}")
-                path_exists_error = True
-    if path_exists_error:
-        exit()
-
-    ### LAUNCH global alignment (Time-SIFT multitemporal block)
-    try:
-        subprocess.run(['docker', 'run', '-i', '--rm', '-v', datasets_DIR + ':/datasets',
-                        'opendronemap/odm', '--project-path', '/datasets', timesift_DIR, '--end-with', 'opensfm'])
-    except:
-        print(f'\033[91m[Time-SIFT] ERROR: {sys.exc_info()[0]}\033[0m')
-        exit()
-    print('\033[92m[Time-SIFT] Sfm on multi-temporal block done\033[0m')
-
-    print('[Time-SIFT] Going to dense matching on all epochs...')
-    ### Loop on epochs for the dense matching
-    for epoch in images_epochs_dict:
-        #### We first duplicate the time-sift multitemporal block to save sfm results
-        shutil.copytree(Path(datasets_DIR, timesift_DIR),
-                        Path(datasets_DIR, epoch))
-
-        #### Reads the datasets/{epoch}/opensfm/undistorted/reconstruction.json file that has to be modified
-        with open(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'), mode="r",
-                  encoding="utf-8") as read_file:
-            reconstruction_dict = json.load(read_file)
-
-        #### Removes images in this json dict (we delete the shot and the rig_instances that do not correspond to this epoch)
-        images = images_epochs_dict[epoch]
-        clean_reconstruction_dict(reconstruction_dict[0], 'shots', images)
-        clean_reconstruction_dict(reconstruction_dict[0], 'rig_instances', images)
-
-        #### Makes a backup of the reconstruction.json file and writes the modified json
-        shutil.copy(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'),
-                    Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json.bak'))
-        with open(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'), mode="w",
-                  encoding="utf-8") as write_file:
-            json.dump(reconstruction_dict, write_file)
-
-        #### Launches dense matching from the good previous step, with possible options (e.g. => to stop at the point clouds)
-        command_rerun = ['docker', 'run', '-i', '--rm', '-v', datasets_DIR + ':/datasets',
-                         'opendronemap/odm',
-                         '--project-path', '/datasets', epoch,
-                         '--rerun-from', 'openmvs']
-        if additional_options_to_rerun:
-            print(f'[Time-SIFT] Epoch {epoch}: Rerun with additionnal options: {additional_options_to_rerun}')
-            command_rerun.extend(additional_options_to_rerun)
-        else:
-            print(f'[Time-SIFT] Epoch {epoch}: Default full rerun')
-        result = subprocess.run(command_rerun)
-        if result.returncode != 0:
-            print(f'\033[91m[Time-SIFT] ERROR in processing epoch {epoch}\033[0m')
-            print(f'{result=}')
-            exit(result.returncode)
-        print(f'\033[92m[Time-SIFT] Epoch {epoch} finished\033[0m')
-
-    print('§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§')
-    print('§§§ §§ §§§ §§§§§ §§§ §§§§§§§§§§ §§ §§ §§ §§§')
-    print('§§§§§ §§§§ §§§ §§§ §§§ §§§§§§§§§§§§§ §§§§§§ §§ §§§§§§§§ §§§§§')
-    print('§§§§§ §§§§ §§§ § §§§ §§§§ §§§§§ §§§§ §§ §§§§§§ §§§§§')
-    print('§§§§§ §§§§ §§§ §§§§§ §§§ §§§§§§§§§§§§§§§§§ §§ §§ §§§§§§§§ §§§§§')
-    print('§§§§§ §§§§ §§§ §§§§§ §§§ §§§§§§§§§ §§§ §§ §§§§§§§§ §§§§§')
-    print('§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§')
-    print('    \033[92mTime-SIFT with ODM finished, congrats !\033[0m Want to cite the method ?')
-    print('=> D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in ')
-    print('   a single SfM block allows 3-D change detection with almost exclusively')
-    print('   image information, ISPRS Journal of Photogrammetry and Remote Sensing,')
-    print('   2018, https://doi.org/10.1016/j.isprsjprs.2018.10.016                 ')
-
-if __name__ == "__main__":
-    main(sys.argv[1:])
@@ -1,8 +1,8 @@
-FROM nvidia/cuda:11.8.0-devel-ubuntu20.04 AS builder
+FROM nvidia/cuda:11.2.2-devel-ubuntu20.04 AS builder
 
 # Env variables
 ENV DEBIAN_FRONTEND=noninteractive \
-    PYTHONPATH="/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
+    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
     LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
 
 # Prepare directories
@@ -21,11 +21,12 @@ RUN bash configure.sh clean
 
 ### Use a second image for the final asset to reduce the number and
 # size of the layers.
-FROM nvidia/cuda:11.8.0-runtime-ubuntu20.04
+FROM nvidia/cuda:11.2.2-runtime-ubuntu20.04
+#FROM nvidia/cuda:11.2.0-devel-ubuntu20.04
 
 # Env variables
 ENV DEBIAN_FRONTEND=noninteractive \
-    PYTHONPATH="/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
+    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
     LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \
     PDAL_DRIVER_PATH="/code/SuperBuild/install/bin"
 
@@ -37,7 +38,7 @@ COPY --from=builder /code /code
 # Copy the Python libraries installed via pip from the builder
 COPY --from=builder /usr/local /usr/local
 #COPY --from=builder /usr/lib/x86_64-linux-gnu/libavcodec.so.58 /usr/lib/x86_64-linux-gnu/libavcodec.so.58
+RUN dpkg --remove cuda-compat-11-2
 RUN apt-get update -y \
  && apt-get install -y ffmpeg libtbb2
 # Install shared libraries that we depend on via APT, but *not*
@@ -5,7 +5,6 @@ import zipfile
 import time
 import sys
 import rawpy
-import cv2
 
 def read_image(img_path):
     if img_path[-4:].lower() in [".dng", ".raw", ".nef"]:
@@ -41,7 +41,6 @@ rerun_stages = {
     'geo': 'dataset',
     'gltf': 'mvs_texturing',
     'gps_accuracy': 'dataset',
-    'gps_z_offset': 'dataset',
     'help': None,
     'ignore_gsd': 'opensfm',
     'matcher_neighbors': 'opensfm',
@@ -786,12 +785,11 @@ def config(argv=None, parser=None):
                         action=StoreValue,
                         metavar='<positive integer>',
                         default=150,
-                        help='Radius of the overlap between submodels in meters. '
+                        help='Radius of the overlap between submodels. '
                              'After grouping images into clusters, images '
                              'that are closer than this radius to a cluster '
                              'are added to the cluster. This is done to ensure '
-                             'that neighboring submodels overlap. All images' \
-                             'need GPS information. Default: %(default)s')
+                             'that neighboring submodels overlap. Default: %(default)s')
 
     parser.add_argument('--split-image-groups',
                         metavar='<path string>',
@@ -847,16 +845,6 @@ def config(argv=None, parser=None):
                         'set accordingly. You can use this option to manually set it in case the reconstruction '
                         'fails. Lowering this option can sometimes help control bowling-effects over large areas. Default: %(default)s')
 
-    parser.add_argument('--gps-z-offset',
-                        type=float,
-                        action=StoreValue,
-                        metavar='<float>',
-                        default=0,
-                        help='Set a GPS offset in meters for the vertical axis (Z) '
-                             'by adding it to the altitude value of the GPS EXIF data. This does not change the value of any GCPs. '
-                             'This can be useful for example when adjusting from ellipsoidal to orthometric height. '
-                             'Default: %(default)s')
-
     parser.add_argument('--optimize-disk-space',
                         action=StoreTrue,
                         nargs=0,
@@ -1,5 +1,4 @@
 import os
-import subprocess
 import sys
 import rasterio
 import numpy
@@ -21,8 +20,6 @@ from opendm import log
 from .ground_rectification.rectify import run_rectification
 from . import pdal
 
-gdal_proximity = None
-
 try:
     # GDAL >= 3.3
     from osgeo_utils.gdal_proximity import main as gdal_proximity
@@ -30,13 +27,8 @@ except ModuleNotFoundError:
     # GDAL <= 3.2
     try:
         from osgeo.utils.gdal_proximity import main as gdal_proximity
-    except ModuleNotFoundError:
-        # GDAL <= 3.0
-        gdal_proximity_script = shutil.which("gdal_proximity.py")
-        if gdal_proximity_script is not None:
-            def gdal_proximity(args):
-                subprocess.run([gdal_proximity_script] + args[1:], check=True)
-
+    except:
+        pass
 
 def classify(lasFile, scalar, slope, threshold, window):
     start = datetime.now()
@@ -19,16 +19,13 @@ def has_popsift_and_can_handle_texsize(width, height):
             log.ODM_INFO("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor))
             return False
     except Exception as e:
-        log.ODM_WARNING(str(e))
+        log.ODM_INFO("Using CPU for feature extraction: %s" % str(e))
         return False
 
     try:
         from opensfm import pypopsift
-        if pypopsift.fits_texture(int(width * 1.02), int(height * 1.02)):
-            log.ODM_INFO("popsift can handle texture size %dx%d" % (width, height))
-            return True
-        else:
-            log.ODM_INFO("popsift cannot handle texture size %dx%d" % (width, height))
+        return pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
+    except (ModuleNotFoundError, ImportError):
         return False
     except Exception as e:
         log.ODM_WARNING(str(e))
@@ -84,12 +81,11 @@ def has_gpu(args):
             log.ODM_INFO("CUDA drivers detected")
             return True
         else:
-            log.ODM_INFO("No CUDA drivers detected")
+            log.ODM_INFO("No CUDA drivers detected, using CPU")
             return False
     else:
         if shutil.which('nvidia-smi') is not None:
             log.ODM_INFO("nvidia-smi detected")
             return True
         else:
-            log.ODM_INFO("No nvidia-smi detected")
             return False
@@ -5,7 +5,6 @@ import math
 from repoze.lru import lru_cache
 from opendm import log
 from opendm.shots import get_origin
-from scipy import spatial
 
 def rounded_gsd(reconstruction_json, default_value=None, ndigits=0, ignore_gsd=False):
     """
@@ -112,11 +111,15 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
     with open(reconstruction_json) as f:
         data = json.load(f)
 
+    # Calculate median height from sparse reconstruction
     reconstruction = data[0]
-    points = np.array([reconstruction['points'][pointId]['coordinates'] for pointId in reconstruction['points']])
-    tdpoints = points.copy()
-    tdpoints[:,2] = 0
-    tree = spatial.cKDTree(tdpoints)
+    point_heights = []
+    for pointId in reconstruction['points']:
+        point = reconstruction['points'][pointId]
+        point_heights.append(point['coordinates'][2])
 
+    ground_height = np.median(point_heights)
 
     gsds = []
     for shotImage in reconstruction['shots']:
@@ -130,13 +133,6 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
                 log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json)
                 return None
 
-            shot_origin[2] = 0
-            distances, neighbors = tree.query(
-                shot_origin, k=9
-            )
-
-            if len(distances) > 0:
-                ground_height = np.median(points[neighbors][:,2])
             gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
                                                        shot_height - ground_height,
                                                        camera['width']))
@@ -149,6 +145,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
 
     return None
 
+
 def calculate_gsd(sensor_width, flight_height, focal_length, image_width):
     """
     :param sensor_width in millimeters
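Note on the gsd.py hunks above: both sides feed `calculate_gsd_from_focal_ratio` a flight height above ground and an image width, and `calculate_gsd` documents its sensor width in millimeters. A minimal sketch of the standard ground-sampling-distance relation these helpers are built around is below; the function names and the cm-per-pixel convention are illustrative assumptions, not code taken from this diff.

```python
# Hedged sketch of the usual GSD relation (assumed units: mm for sensor width
# and focal length, metres for flight height above the ground surface).
def gsd_cm_per_px(sensor_width_mm, flight_height_m, focal_length_mm, image_width_px):
    # GSD = (sensor width * flight height) / (focal length * image width),
    # converted here from metres to centimetres per pixel.
    return (sensor_width_mm * flight_height_m * 100.0) / (focal_length_mm * image_width_px)

def gsd_from_focal_ratio(focal_ratio, flight_height_m, image_width_px):
    # With a focal ratio (focal length / sensor width) the sensor width cancels,
    # which matches how the hunk passes (shot_height - ground_height).
    return flight_height_m * 100.0 / (focal_ratio * image_width_px)

# Example: 13.2 mm sensor, 8.8 mm lens, 100 m above the median ground height,
# 5472 px wide image -> roughly 2.74 cm/px.
print(round(gsd_cm_per_px(13.2, 100, 8.8, 5472), 2))
```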
@@ -13,7 +13,6 @@ from rasterio.mask import mask
 from opendm import io
 from opendm.tiles.tiler import generate_orthophoto_tiles
 from opendm.cogeo import convert_to_cogeo
-from opendm.utils import add_raster_meta_tags
 from osgeo import gdal
 from osgeo import ogr
 
@@ -167,7 +166,7 @@ def generate_tfw(orthophoto_file):
         log.ODM_WARNING("Cannot create .tfw for %s: %s" % (orthophoto_file, str(e)))
 
 
-def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution, reconstruction, tree, embed_gcp_meta=False):
+def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution):
     if args.crop > 0 or args.boundary:
         Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha'])
 
@@ -180,8 +179,6 @@ def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_ti
     if args.orthophoto_kmz:
         generate_kmz(orthophoto_file)
 
-    add_raster_meta_tags(orthophoto_file, reconstruction, tree, embed_gcp_meta=embed_gcp_meta)
-
     if args.tiles:
         generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency, resolution)
 
@@ -296,8 +296,6 @@ class OSFMContext:
             config.append("matcher_type: %s" % osfm_matchers[matcher_type])
 
         # GPU acceleration?
-        if feature_type == "SIFT":
-            log.ODM_INFO("Checking for GPU as using SIFT for extracting features")
         if has_gpu(args) and max_dims is not None:
             w, h = max_dims
             if w > h:
@@ -307,12 +305,10 @@ class OSFMContext:
                 w = int((w / h) * feature_process_size)
                 h = int(feature_process_size)
 
-            if has_popsift_and_can_handle_texsize(w, h):
+            if has_popsift_and_can_handle_texsize(w, h) and feature_type == "SIFT":
                 log.ODM_INFO("Using GPU for extracting SIFT features")
                 feature_type = "SIFT_GPU"
                 self.gpu_sift_feature_extraction = True
-            else:
-                log.ODM_INFO("Using CPU for extracting SIFT features as texture size is too large or GPU SIFT is not available")
 
         config.append("feature_type: %s" % feature_type)
 
@@ -786,12 +786,6 @@ class ODM_Photo:
     def override_gps_dop(self, dop):
         self.gps_xy_stddev = self.gps_z_stddev = dop
 
-    def adjust_z_offset(self, z_offset):
-        if self.altitude is not None:
-            self.altitude += z_offset
-        else:
-            self.altitude = z_offset
-
     def override_camera_projection(self, camera_projection):
         if camera_projection in projections:
             self.camera_projection = camera_projection
@@ -22,12 +22,6 @@ RS_DATABASE = {
     'hasselblad l2d-20c': 16.6, # DJI Mavic 3 (not enterprise version)
 
     'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
-    'dji fc8482': lambda p: (
-        16 if p.get_capture_megapixels() < 12 else  # 12MP 16:9 mode (actual 9.1MP)
-        21 if p.get_capture_megapixels() < 20 else  # 12MP 4:3 mode (actual 12.2MP)
-        43 if p.get_capture_megapixels() < 45 else  # 48MP 16:9 mode (actual 36.6MP)
-        58                                          # 48MP 4:3 mode (actual 48.8MP)
-    ), # DJI Mini 4 Pro (readout varies by resolution and aspect ratio, image heights all different)
 
     'dji fc350': 30, # Inspire 1
 
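Note on the rollingshutter.py hunk above: `RS_DATABASE` values are either a constant readout time in milliseconds or a callable taking the photo, as in the removed `'dji fc8482'` entry. A hedged sketch of how such an entry can be resolved follows; the `readout_ms` helper and `FakePhoto` stub are illustrative, not ODM's actual lookup code.

```python
# Minimal sketch: resolve a rolling-shutter entry that may be a constant (ms)
# or a callable taking the photo, mirroring the entries shown in the diff.
RS_DATABASE = {
    'dji fc350': 30,  # constant readout time in milliseconds
    'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60,  # resolution-dependent
}

class FakePhoto:
    """Stand-in for ODM's photo object; only what the lambda needs."""
    def __init__(self, megapixels):
        self._mp = megapixels
    def get_capture_megapixels(self):
        return self._mp

def readout_ms(camera_key, photo, default=30):
    value = RS_DATABASE.get(camera_key, default)
    return value(photo) if callable(value) else value

print(readout_ms('dji fc3582', FakePhoto(12)))  # -> 26
print(readout_ms('dji fc350', FakePhoto(12)))   # -> 30
```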
@@ -39,7 +39,7 @@ def generate_colored_hillshade(geotiff):
 
         system.run('gdaldem color-relief "%s" "%s" "%s" -alpha -co ALPHA=YES' % (geotiff, relief_file, colored_dem))
         system.run('gdaldem hillshade "%s" "%s" -z 1.0 -s 1.0 -az 315.0 -alt 45.0' % (geotiff, hillshade_dem))
-        system.run('"%s" "%s" "%s" "%s" "%s"' % (sys.executable, hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))
+        system.run('%s "%s" "%s" "%s" "%s"' % (sys.executable, hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))
 
         return outputs
     except Exception as e:
@@ -1,12 +1,8 @@
 import os, shutil
 import numpy as np
 import json
-import rasterio
-from osgeo import gdal
-from datetime import datetime
 
 from opendm import log
-from opendm.photo import find_largest_photo_dims, find_mean_utc_time
+from opendm.photo import find_largest_photo_dims
 from osgeo import gdal
 from opendm.arghelpers import double_quote
 
@@ -118,41 +114,3 @@ def np_to_json(arr):
 
 def np_from_json(json_dump):
     return np.asarray(json.loads(json_dump))
-
-def add_raster_meta_tags(raster, reconstruction, tree, embed_gcp_meta=True):
-    try:
-        if os.path.isfile(raster):
-            mean_capture_time = find_mean_utc_time(reconstruction.photos)
-            mean_capture_dt = None
-            if mean_capture_time is not None:
-                mean_capture_dt = datetime.fromtimestamp(mean_capture_time).strftime('%Y:%m:%d %H:%M:%S') + '+00:00'
-
-            log.ODM_INFO("Adding TIFFTAGs to {}".format(raster))
-            with rasterio.open(raster, 'r+') as rst:
-                if mean_capture_dt is not None:
-                    rst.update_tags(TIFFTAG_DATETIME=mean_capture_dt)
-                rst.update_tags(TIFFTAG_SOFTWARE='ODM {}'.format(log.odm_version()))
-
-            if embed_gcp_meta:
-                # Embed GCP info in 2D results via
-                # XML metadata fields
-                gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
-
-                if reconstruction.has_gcp() and os.path.isfile(gcp_gml_export_file):
-                    gcp_xml = ""
-
-                    with open(gcp_gml_export_file) as f:
-                        gcp_xml = f.read()
-
-                    ds = gdal.Open(raster)
-                    if ds is not None:
-                        if ds.GetMetadata('xml:GROUND_CONTROL_POINTS') is None or self.rerun():
-                            ds.SetMetadata(gcp_xml, 'xml:GROUND_CONTROL_POINTS')
-                            ds = None
-                            log.ODM_INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % raster)
-                        else:
-                            log.ODM_WARNING("Already embedded ground control point information")
-                    else:
-                        log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % raster)
-    except Exception as e:
-        log.ODM_WARNING("Cannot write raster meta tags to %s: %s" % (raster, str(e)))
@@ -33,7 +33,7 @@ xmltodict==0.12.0
 fpdf2==2.4.6
 Shapely==1.7.1
 onnxruntime==1.12.1
-pygltflib==1.16.5
+pygltflib==1.15.3
 codem==0.24.0
 trimesh==3.17.1
 pandas==1.5.2
@@ -234,9 +234,9 @@ class ODMLoadDatasetStage(types.ODM_Stage):
                         item['p'].set_mask(os.path.basename(mask_file))
                         log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
                     else:
-                        log.ODM_WARNING("Cannot generate mask for %s" % item['file'])
+                        log.ODM_WARNING("Cannot generate mask for %s" % img)
                 except Exception as e:
-                    log.ODM_WARNING("Cannot generate mask for %s: %s" % (item['file'], str(e)))
+                    log.ODM_WARNING("Cannot generate mask for %s: %s" % (img, str(e)))
 
             parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency)
 
@@ -338,10 +338,3 @@ class ODMLoadDatasetStage(types.ODM_Stage):
             if args.rolling_shutter and not reconstruction.is_georeferenced():
                 log.ODM_WARNING("Reconstruction is not georeferenced, disabling rolling shutter correction")
                 args.rolling_shutter = False
-
-            # GPS Z offset
-            if 'gps_z_offset_is_set' in args:
-                log.ODM_INFO("Adjusting GPS Z offset by %s for all images" % args.gps_z_offset)
-
-                for p in photos:
-                    p.adjust_z_offset(args.gps_z_offset)
@@ -12,8 +12,6 @@ from opendm.cropper import Cropper
 from opendm import pseudogeo
 from opendm.tiles.tiler import generate_dem_tiles
 from opendm.cogeo import convert_to_cogeo
-from opendm.utils import add_raster_meta_tags
-
 
 class ODMDEMStage(types.ODM_Stage):
     def process(self, args, outputs):
@@ -89,8 +87,6 @@ class ODMDEMStage(types.ODM_Stage):
                 if pseudo_georeference:
                     pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)
 
-                add_raster_meta_tags(dem_geotiff_path, reconstruction, tree, embed_gcp_meta=not outputs['large'])
-
                 if args.tiles:
                     generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency, resolution)
 
@@ -132,8 +132,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
         else:
             log.ODM_INFO("Not a submodel run, skipping mask raster generation")
 
-        orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles, resolution,
-                                         reconstruction, tree, not outputs["large"])
+        orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles, resolution)
 
         # Generate feathered orthophoto also
         if args.orthophoto_cutline and submodel_run:
@@ -1,8 +1,12 @@
 import os
+import rasterio
 
+from datetime import datetime
+from osgeo import gdal
 from opendm import io
 from opendm import log
 from opendm import types
+from opendm import photo
 from opendm.utils import copy_paths, get_processing_results_paths
 from opendm.ogctiles import build_3dtiles
 
@@ -13,6 +17,54 @@ class ODMPostProcess(types.ODM_Stage):
 
         log.ODM_INFO("Post Processing")
 
+        rasters = [tree.odm_orthophoto_tif,
+                   tree.path("odm_dem", "dsm.tif"),
+                   tree.path("odm_dem", "dtm.tif")]
+
+        mean_capture_time = photo.find_mean_utc_time(reconstruction.photos)
+        mean_capture_dt = None
+        if mean_capture_time is not None:
+            mean_capture_dt = datetime.fromtimestamp(mean_capture_time).strftime('%Y:%m:%d %H:%M:%S') + '+00:00'
+
+        # Add TIFF tags
+        for product in rasters:
+            if os.path.isfile(product):
+                log.ODM_INFO("Adding TIFFTAGs to {}".format(product))
+                with rasterio.open(product, 'r+') as rst:
+                    if mean_capture_dt is not None:
+                        rst.update_tags(TIFFTAG_DATETIME=mean_capture_dt)
+                    rst.update_tags(TIFFTAG_SOFTWARE='ODM {}'.format(log.odm_version()))
+
+        # GCP info
+        if not outputs['large']:
+            # TODO: support for split-merge?
+
+            # Embed GCP info in 2D results via
+            # XML metadata fields
+            gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
+
+            if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file):
+                skip_embed_gcp = False
+                gcp_xml = ""
+
+                with open(gcp_gml_export_file) as f:
+                    gcp_xml = f.read()
+
+                for product in rasters:
+                    if os.path.isfile(product):
+                        ds = gdal.Open(product)
+                        if ds is not None:
+                            if ds.GetMetadata('xml:GROUND_CONTROL_POINTS') is None or self.rerun():
+                                ds.SetMetadata(gcp_xml, 'xml:GROUND_CONTROL_POINTS')
+                                ds = None
+                                log.ODM_INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % product)
+                            else:
+                                skip_embed_gcp = True
+                                log.ODM_WARNING("Already embedded ground control point information")
+                                break
+                        else:
+                            log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % product)
+
         if getattr(args, '3d_tiles'):
             build_3dtiles(args, tree, reconstruction, self.rerun())
 
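Note on the odm_postprocess.py hunk above: the added block writes `TIFFTAG_DATETIME` and `TIFFTAG_SOFTWARE` tags and, when GCPs were used, an `xml:GROUND_CONTROL_POINTS` metadata domain. A small, hedged way to read those back from a finished raster is sketched below; the file path is only an example, and this is standard rasterio/GDAL usage rather than code from this diff.

```python
import rasterio
from osgeo import gdal

path = "odm_orthophoto/odm_orthophoto.tif"  # example output path

# Default-domain TIFF tags written by the post-processing stage
with rasterio.open(path) as src:
    tags = src.tags()
    print(tags.get("TIFFTAG_DATETIME"), tags.get("TIFFTAG_SOFTWARE"))

# GCP metadata, if it was embedded, lives in its own metadata domain
ds = gdal.Open(path)
gcp_xml = ds.GetMetadata("xml:GROUND_CONTROL_POINTS")
print("GCP metadata present:", bool(gcp_xml))
```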
@@ -13,7 +13,7 @@ from opendm.cropper import Cropper
 from opendm.remote import LocalRemoteExecutor
 from opendm.shots import merge_geojson_shots, merge_cameras
 from opendm import point_cloud
-from opendm.utils import double_quote, add_raster_meta_tags
+from opendm.utils import double_quote
 from opendm.tiles.tiler import generate_dem_tiles
 from opendm.cogeo import convert_to_cogeo
 from opendm import multispectral
@@ -263,8 +263,7 @@ class ODMMergeStage(types.ODM_Stage):
 
                 orthophoto_vars = orthophoto.get_orthophoto_vars(args)
                 orthophoto.merge(all_orthos_and_ortho_cuts, tree.odm_orthophoto_tif, orthophoto_vars)
-                orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles, args.orthophoto_resolution,
-                                                 reconstruction, tree, False)
+                orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles, args.orthophoto_resolution)
             elif len(all_orthos_and_ortho_cuts) == 1:
                 # Simply copy
                 log.ODM_WARNING("A single orthophoto/cutline pair was found between all submodels.")
@@ -306,8 +305,6 @@ class ODMMergeStage(types.ODM_Stage):
                     if args.tiles:
                         generate_dem_tiles(dem_file, tree.path("%s_tiles" % human_name.lower()), args.max_concurrency, args.dem_resolution)
 
-                    add_raster_meta_tags(dem_file, reconstruction, tree, embed_gcp_meta=False)
-
                     if args.cog:
                         convert_to_cogeo(dem_file, max_workers=args.max_concurrency)
                     else:
@@ -1,18 +1,14 @@
 set ODMBASE=%~dp0
 set VIRTUAL_ENV=%ODMBASE%venv
-IF "%~1"=="" (set WRITABLE_VIRTUAL_ENV=%VIRTUAL_ENV%) ELSE (set WRITABLE_VIRTUAL_ENV=%~1)
-mkdir "%WRITABLE_VIRTUAL_ENV%"
+set PYENVCFG=%VIRTUAL_ENV%\pyvenv.cfg
+set SBBIN=%ODMBASE%SuperBuild\install\bin
 
 rem Hot-patching pyvenv.cfg
-set PYENVCFG=%WRITABLE_VIRTUAL_ENV%\pyvenv.cfg
-echo home = %VIRTUAL_ENV%\Scripts> "%PYENVCFG%"
+echo home = %ODMBASE%venv\Scripts> "%PYENVCFG%"
 echo include-system-site-packages = false>> "%PYENVCFG%"
 
 rem Hot-patching cv2 extension configs
-set SBBIN=%ODMBASE%SuperBuild\install\bin
-set CV2=%WRITABLE_VIRTUAL_ENV%\Lib\site-packages\cv2
-mkdir "%CV2%"
-echo BINARIES_PATHS = [r"%SBBIN%"] + BINARIES_PATHS> "%CV2%\config.py"
-echo PYTHON_EXTENSIONS_PATHS = [r'''%VIRTUAL_ENV%\lib\site-packages\cv2\python-3.8'''] + PYTHON_EXTENSIONS_PATHS> "%CV2%\config-3.8.py"
+echo BINARIES_PATHS = [r"%SBBIN%"] + BINARIES_PATHS> venv\Lib\site-packages\cv2\config.py
+echo PYTHON_EXTENSIONS_PATHS = [r'''%VIRTUAL_ENV%\lib\site-packages\cv2\python-3.8'''] + PYTHON_EXTENSIONS_PATHS> venv\Lib\site-packages\cv2\config-3.8.py
 
 cls