Compare commits


No commits in common. "master" and "v3.5.3" have entirely different histories.

51 changed files with 182 additions and 793 deletions

View file

@@ -13,7 +13,6 @@ jobs:
       with:
         ghToken: ${{ secrets.GITHUB_TOKEN }}
         openAI: ${{ secrets.OPENAI_TOKEN }}
-        model: gpt-4o-2024-08-06
         filter: |
           - "#"
         variables: |

View file

@@ -9,7 +9,7 @@ on:
 jobs:
   build:
-    runs-on: windows-2022
+    runs-on: windows-2019
     steps:
     - name: Checkout
       uses: actions/checkout@v2
@@ -49,7 +49,7 @@ jobs:
       run: |
         python configure.py dist --code-sign-cert-path $env:CODE_SIGN_CERT_PATH
     - name: Upload Setup File
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@v2
       with:
         name: Setup
        path: dist\*.exe

View file

@@ -81,7 +81,7 @@ jobs:
       run: |
         python configure.py dist
     - name: Upload Setup File
-      uses: actions/upload-artifact@v4
+      uses: actions/upload-artifact@v2
       with:
         name: Setup
        path: dist\*.exe

.gitignore (vendored)

View file

@@ -34,5 +34,4 @@ vcpkg/
 venv/
 python38/
 dist/
 innosetup/
-.DS_Store

View file

@@ -15,19 +15,12 @@ If you would rather not type commands in a shell and are looking for a friendly
 ## Quickstart

-The easiest way to run ODM is via docker. To install docker, see [docs.docker.com](https://docs.docker.com). Once you have docker installed and [working](https://docs.docker.com/get-started/#test-docker-installation), you can get ODM by running from a Command Prompt / Terminal:
-
-```bash
-docker pull opendronemap/odm
-```
-
-Run ODM by placing some images (JPEGs, TIFFs or DNGs) in a folder named “images” (for example `C:\Users\youruser\datasets\project\images` or `/home/youruser/datasets/project/images`) and simply run from a Command Prompt / Terminal:
+The easiest way to run ODM on is via docker. To install docker, see [docs.docker.com](https://docs.docker.com). Once you have docker installed and [working](https://docs.docker.com/get-started/#test-docker-installation), you can run ODM by placing some images (JPEGs or TIFFs) in a folder named “images” (for example `C:\Users\youruser\datasets\project\images` or `/home/youruser/datasets/project/images`) and simply run from a Command Prompt / Terminal:

 ```bash
 # Windows
 docker run -ti --rm -v c:/Users/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
-```
-
-```bash
+
 # Mac/Linux
 docker run -ti --rm -v /home/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
 ```
@@ -95,7 +88,7 @@ run C:\Users\youruser\datasets\project [--additional --parameters --here]
 ODM has support for doing SIFT feature extraction on a GPU, which is about 2x faster than the CPU on a typical consumer laptop. To use this feature, you need to use the `opendronemap/odm:gpu` docker image instead of `opendronemap/odm` and you need to pass the `--gpus all` flag:

 ```
-docker run -ti --rm -v c:/Users/youruser/datasets:/datasets --gpus all opendronemap/odm:gpu --project-path /datasets project --feature-type sift
+docker run -ti --rm -v c:/Users/youruser/datasets:/datasets --gpus all opendronemap/odm:gpu --project-path /datasets project
 ```

 When you run ODM, if the GPU is recognized, in the first few lines of output you should see:

View file

@@ -184,7 +184,7 @@ set(custom_libs OpenSfM
 externalproject_add(mve
   GIT_REPOSITORY https://github.com/OpenDroneMap/mve.git
-  GIT_TAG 356
+  GIT_TAG 290
   UPDATE_COMMAND ""
   SOURCE_DIR ${SB_SOURCE_DIR}/mve
   CMAKE_ARGS ${WIN32_CMAKE_ARGS} ${APPLE_CMAKE_ARGS}
@@ -244,7 +244,7 @@ externalproject_add(dem2points
 externalproject_add(odm_orthophoto
   DEPENDS opencv
   GIT_REPOSITORY https://github.com/OpenDroneMap/odm_orthophoto.git
-  GIT_TAG 355
+  GIT_TAG 353
   PREFIX ${SB_BINARY_DIR}/odm_orthophoto
   SOURCE_DIR ${SB_SOURCE_DIR}/odm_orthophoto
   CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}

View file

@@ -1,7 +1,7 @@
 set(_proj_name obj2tiles)
 set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

-set(OBJ2TILES_VERSION v1.0.13)
+set(OBJ2TILES_VERSION v1.0.12)
 set(OBJ2TILES_EXT "")
 set(OBJ2TILES_ARCH "Linux64")
@@ -9,7 +9,7 @@ if (WIN32)
   set(OBJ2TILES_ARCH "Win64")
   set(OBJ2TILES_EXT ".exe")
 elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
-  set(OBJ2TILES_ARCH "LinuxArm64")
+  set(OBJ2TILES_ARCH "LinuxArm")
 elseif(APPLE)
   set(OBJ2TILES_ARCH "Osx64")
 endif()

View file

@@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name}
   #--Download step--------------
   DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
   GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS
-  GIT_TAG 355
+  GIT_TAG 320
   #--Update/Patch step----------
   UPDATE_COMMAND ""
   #--Configure step-------------

View file

@@ -25,7 +25,7 @@ ExternalProject_Add(${_proj_name}
   #--Download step--------------
   DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
   GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
-  GIT_TAG 355
+  GIT_TAG 352
   #--Update/Patch step----------
   UPDATE_COMMAND git submodule update --init --recursive
   #--Configure step-------------

View file

@@ -1 +1 @@
-3.5.6
+3.5.3

View file

@@ -58,7 +58,7 @@ ensure_prereqs() {
     if [[ "$UBUNTU_VERSION" == *"20.04"* ]]; then
         echo "Enabling PPA for Ubuntu GIS"
         sudo $APT_GET install -y -qq --no-install-recommends software-properties-common
-        sudo add-apt-repository ppa:ubuntugis/ppa
+        sudo add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
         sudo $APT_GET update
     fi

View file

@@ -1,4 +1,5 @@
 import bpy
+import materials_utils

 def loadMesh(file):

View file

@@ -1,13 +0,0 @@
-# DEM Blending
-
-Blend sets of DEMs by calculating euclidean distance to null values and weighting the combination of elevation models. Based on the split-merge tool within ODM.
-
-Requirements:
-* Directory full of images to blend together
-* NoData should be coded as a value of -9999
-
-## Usage
-
-```BASH
-docker run -ti --rm -v /home/youruser/folder_with_dems:/input --entrypoint /code/contrib/dem-blend/dem-blend.py opendronemap/odm /input
-```
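
For intuition, the distance weighting described above can be sketched in a few lines. This is a simplified illustration (assuming two co-registered DEM arrays that use -9999 as NoData, per the README), not the actual `merge.euclidean_merge_dems` implementation that `dem-blend.py` (next file) calls:

```python
# Simplified sketch of euclidean-distance-weighted DEM blending (illustration only).
import numpy as np
from scipy.ndimage import distance_transform_edt

NODATA = -9999

def blend_dems(dem_a, dem_b):
    """Blend two co-registered DEM arrays, weighting each cell by its
    euclidean distance (in pixels) to the nearest NoData cell, so that
    each model fades out smoothly toward its own data gaps."""
    dist_a = distance_transform_edt(dem_a != NODATA)  # 0 on NoData cells
    dist_b = distance_transform_edt(dem_b != NODATA)
    total = dist_a + dist_b
    out = np.full(dem_a.shape, NODATA, dtype=np.float32)
    valid = total > 0  # keep cells that are valid in at least one input
    out[valid] = (dem_a[valid] * dist_a[valid] + dem_b[valid] * dist_b[valid]) / total[valid]
    return out
```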

View file

@@ -1,30 +0,0 @@
-#!/usr/bin/env python3
-# Authors: Piero Toffanin, Stephen Mather
-# License: AGPLv3
-
-import os
-import glob
-import sys
-sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
-
-import argparse
-from opendm.dem import merge
-
-parser = argparse.ArgumentParser(description='Merge and blend DEMs using OpenDroneMap\'s approach.')
-parser.add_argument('input_dems',
-                type=str,
-                help='Path to input dems (.tif)')
-
-args = parser.parse_args()
-
-if not os.path.exists(args.input_dems):
-    print("%s does not exist" % args.input_dems)
-    exit(1)
-
-output_dem = os.path.join(args.input_dems, 'merged_blended_dem.tif')
-input_dem_path = os.path.join(args.input_dems, '*.tif')
-input_dems = glob.glob(input_dem_path)
-
-merge.euclidean_merge_dems(input_dems
-                        ,output_dem=output_dem
-                        )

View file

@@ -1,19 +0,0 @@
-# Fix Ply
-
-Use to translate a modified ply into a compatible format for subsequent steps in ODM. Via Jaime Chacoff, https://community.opendronemap.org/t/edited-point-cloud-with-cloudcompare-wont-rerun-from-odm-meshing/21449/6
-
-The basic idea is to process through ODM until the point cloud is created, use a 3rd party tool, like CloudCompare, to edit the point cloud, and then continue processing in OpenDroneMap.
-
-This useful bit of python will convert the PLY exported from CloudCompare back into a compatible format for continued processing in OpenDroneMap.
-
-1. Run project in WebODM and add this to your settings: `end-with: odm-filterpoints`
-1. Once complete, go to your NodeODM container and copy the `/var/www/data/[Task ID]/odm-filterpoints` directory
-1. Open CloudCompare and, from the `odm-filterpoints` directory you've copied, open `point_cloud.ply`
-1. In the box that pops up, add a scalar field `vertex - views`
-1. To see the actual colours again, select the point cloud, then in properties change colours from "Scalar field" to "RGB"
-1. Make your changes to the point cloud
-1. Compute normals (Edit > Normals > Compute)
-1. Save the PLY file as ASCII
-1. Run the Python file above to fix the PLY file and convert it to binary
-1. Copy the `odm_filterpoints` directory (or just `point_cloud.ply`) back into the NodeODM container
-1. Restart the project in WebODM "From Meshing" (don't forget to edit settings to remove `end-with: odm-filterpoints` or it's not going to do anything).
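
As the `__main__` block of the script (next file) shows, it reads `point_cloud_ascii.ply` from its own directory and writes `point_cloud.ply` beside it. Calling the conversion function directly on your own paths might look like this (the module name and paths here are hypothetical examples):

```python
# Hypothetical usage; the module name and paths are examples, not from the repo.
from fix_ply import pcd_ascii_to_binary_ply  # the function defined in the script below

pcd_ascii_to_binary_ply(
    "odm_filterpoints/point_cloud_ascii.ply",  # ASCII PLY saved from CloudCompare
    "odm_filterpoints/point_cloud.ply",        # binary PLY that ODM expects
)
```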

View file

@@ -1,68 +0,0 @@
-import os
-import logging
-from plyfile import PlyData, PlyElement
-import numpy as np
-
-# Configure logging
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
-
-def pcd_ascii_to_binary_ply(ply_file: str, binary_ply: str) -> None:
-    """Converts ASCII PLY to binary, ensuring 'views' is present and of type uchar.
-
-    Raises ValueError if neither 'scalar_views' nor 'views' is found.
-    """
-    try:
-        logging.info(f"Reading ASCII PLY file: {ply_file}")
-        ply_data: PlyData = PlyData.read(ply_file)
-    except FileNotFoundError:
-        logging.error(f"File not found: {ply_file}")
-        return
-    except Exception as e:
-        logging.error(f"Error reading PLY file: {e}")
-        return
-
-    new_elements: list[PlyElement] = []
-    for element in ply_data.elements:
-        new_data = element.data.copy()
-        if 'scalar_views' in element.data.dtype.names:
-            new_data['views'] = new_data['scalar_views'].astype('u1')
-            del new_data['scalar_views']
-        elif 'views' in element.data.dtype.names:
-            new_data['views'] = new_data['views'].astype('u1')
-        else:
-            raise ValueError(f"Neither 'scalar_views' nor 'views' found - did you import them when opened the file in CloudCompare?")
-        new_element = PlyElement.describe(new_data, element.name)
-        new_elements.append(new_element)
-
-    new_ply_data = PlyData(new_elements, text=False)
-    try:
-        logging.info(f"Writing binary PLY file: {binary_ply}")
-        new_ply_data.write(binary_ply)
-    except Exception as e:
-        logging.error(f"Error writing PLY file: {e}")
-        return
-
-    logging.info("PLY conversion complete.")
-
-if __name__ == '__main__':
-    # Parameters
-    base: str = os.path.dirname(os.path.abspath(__file__))
-    ply_file: str = os.path.join(base, 'point_cloud_ascii.ply')
-    binary_ply_file: str = os.path.join(base, 'point_cloud.ply')
-
-    if not os.path.exists(ply_file):
-        logging.error(f"Input file not found: {ply_file}")
-        exit(1)  # Exit with error code
-
-    try:
-        pcd_ascii_to_binary_ply(ply_file, binary_ply_file)
-    except ValueError as e:
-        logging.error(f"PLY conversion failed: {e}")
-        exit(1)  # Exit with error code to indicate failure

View file

@@ -4,7 +4,7 @@ import os
 import PIL
-from PIL import Image
+from PIL import Image, ExifTags
 import shutil

View file

@@ -12,6 +12,7 @@ import numpy as np
 import numpy.ma as ma
 import multiprocessing
 import argparse
+import functools
 from skimage.draw import line
 from opensfm import dataset

View file

@@ -1,64 +0,0 @@
-# Plugin Time-SIFT
-
-This script does Time-SIFT processing with ODM. Time-SIFT is a method for multi-temporal analysis without the need to co-registrate the data.
-
-> D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in a single SfM block allows 3-D change detection with almost exclusively image information, ISPRS Journal of Photogrammetry and Remote Sensing, Volume 146, 2018, Pages 495-506, ISSN 0924-2716, doi: 10.1016/j.isprsjprs.2018.10.016
-(https://doi.org/10.1016/j.isprsjprs.2018.10.016)
-
-## Requirements
-* ODM ! :-)
-* subprocess
-* json
-* os
-* shutil
-* pathlib
-* sys
-* argparse
-* textwrap
-
-## Usage
-
-### Provided example
-Download or clone [this repo](https://forge.inrae.fr/Denis.Feurer/timesift-odm-data-example.git) to get example data. Then execute
-```
-python Timesift_odm.py datasets --end-with odm_filterpoints
-```
-
-It should make the Time-SIFT processing on the downloaded example data, stopping after the filtered dense clouds step.
-
-In the destination dir, you should obtain new directories, ```0_before``` and ```1_after```, at the same level as the ```time-sift-block``` directory. These new directories contain all the results natively co-registered.
-
-You can then use [CloudCompare](https://cloudcompare.org/) to compute distance between the ```datasets/0_before/odm_filterpoints/point_cloud.ply``` and the ```datasets/1_after/odm_filterpoints/point_cloud.ply``` and obtain this image showing the difference between the two 3D surfaces. Here, two soil samples were excavated, as can be seen in the image below.
-
-![](https://forge.inrae.fr/Denis.Feurer/timesift-odm-data-example/-/raw/main/Example.png?ref_type=heads)
-
-### Your own data
-In your dataset directory (usually ```datasets```, but you can have chosen another name) you have to prepare a Time-SIFT project directory (default name: ```time-sift-block```, *can be tuned via a parameter*) that contains:
-* ```images/```: a subdirectory with all images of all epochs. This directory name is fixed, as it is the one expected by ODM.
-* ```images_epochs.txt```: a file that has the same format as the file used for the split and merge ODM function. This file name *can be tuned via a parameter*.
-
-The ```images_epochs.txt``` file has two columns; the first column contains image names and the second contains the epoch name, as follows:
-```
-DSC_0368.JPG 0_before
-DSC_0369.JPG 0_before
-DSC_0370.JPG 0_before
-DSC_0389.JPG 1_after
-DSC_0390.JPG 1_after
-DSC_0391.JPG 1_after
-```
-
-Your directory, before running the script, should look like this:
-```
-$PWD/datasets/
-└── time-sift-block/
-    ├── images/
-    └── images_epochs.txt
-```
-
-At the end of the script you obtain a directory per epoch (at the same level as the Time-SIFT project directory). Each directory is processed with the images of its epoch, and all results are natively co-registered due to the initial sfm step done with all images.
-```
-$PWD/datasets/
-├── 0_before/
-├── 1_after/
-└── time-sift-block/
-```

View file

@@ -1,167 +0,0 @@
-# Script for Time-SIFT multi-temporal images alignment with ODM
-#
-# This is python script for ODM, based on the following publication :
-#
-# D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in a single SfM block allows 3-D change detection
-# with almost exclusively image information, ISPRS Journal of Photogrammetry and Remote Sensing, Volume 146, 2018,
-# Pages 495-506, ISSN 0924-2716, https://doi.org/10.1016/j.isprsjprs.2018.10.016.
-
-import subprocess
-import json
-import os
-import shutil
-from pathlib import Path
-import sys
-import argparse
-import textwrap
-
-def main(argv):
-    # Parsing and checking args
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\
-            Timesift_odm.py datasetdir [-t <timesift-dir>] [-i <imageepochs-file>] [<options passed to ODM>]
-            you can add options passed to ODM, for instance [--end-with odm_filterpoints] so that the final step is point clouds
-            these options are not checked for before the final runs of each epoch, so use it carefully
-            '''))
-    parser.add_argument('datasetdir', help='dataset directory')
-    parser.add_argument('-t', '--timesift-dir',
-                        help='Time-SIFT directory ; default value : "time-sift-block" # must be in the datasetdir')
-    parser.add_argument('-i', '--imageepochs-file',
-                        help='Text file describing epochs ; default value : "images_epochs.txt" # must be in the TIMESIFT_DIR ')
-    args, additional_options_to_rerun = parser.parse_known_args()
-    datasets_DIR = Path(args.datasetdir).absolute().as_posix()
-    if args.timesift_dir:
-        timesift_DIR = args.timesift_dir
-    else:
-        timesift_DIR = 'time-sift-block'
-    if args.imageepochs_file:
-        images_epochs_file = args.imageepochs_file
-    else:
-        images_epochs_file = 'images_epochs.txt'
-    if '-h' in sys.argv or '--help' in sys.argv:
-        parser.print_help()
-        sys.exit()
-    if additional_options_to_rerun:  # for instance, --end-with odm_filterpoints
-        print(f'[Time-SIFT] Options passed to ODM for the final steps: {additional_options_to_rerun}')
-        print(f'[Time-SIFT] \033[93mWARNING there is no check of these options done before the last ODM call\033[0m')
-
-    def check_path_args(var: Path):
-        if not var.exists():
-            print(
-                f'\033[91m[Time-SIFT] ERROR: the {var.as_posix()} directory does not exist. Exiting program\033[0m')
-            exit()
-
-    check_path_args(Path(datasets_DIR))
-    check_path_args(Path(datasets_DIR, timesift_DIR))
-    check_path_args(Path(datasets_DIR, timesift_DIR, images_epochs_file))
-
-    def clean_reconstruction_dict(subdict, key, images):
-        """
-        Delete subdict elements where the key do not match any name in the images list.
-        To create the {epoch} block with only images of this epoch
-        """
-        # The list of valid images is prepared by removing any extension (to be robust to the .tif added by ODM)
-        valid_images = {os.path.basename(image).split(os.extsep)[0] for image in images}
-        for item_key in list(subdict[key]):
-            image_name = os.path.basename(item_key).split(os.extsep)[0]
-            if image_name not in valid_images:
-                del subdict[key][item_key]
-
-    ### Read images.txt file and create a dict of images/epochs
-    images_epochs_dict = {}
-    with open(Path(datasets_DIR, timesift_DIR, images_epochs_file), 'r') as f:
-        for line in f:
-            line = line.strip()
-            if not line:
-                continue  # Empty lines are skipped
-            image, epoch = line.split()
-            if epoch not in images_epochs_dict:
-                images_epochs_dict[epoch] = []
-            images_epochs_dict[epoch].append(image)
-
-    ### Check for existing epochs directories before computing anything (these directories must be deleted by hand)
-    path_exists_error = False
-    for epoch in images_epochs_dict:
-        if Path(datasets_DIR, epoch).exists():
-            if path_exists_error:
-                print(f"sudo rm -rf {Path(datasets_DIR, epoch).as_posix()}")
-            else:
-                print(f'\033[91m[Time-SIFT] ERROR: {Path(datasets_DIR, epoch).as_posix()} already exists.\033[0m')
-                print(f"  Other epochs probably also exist.")
-                print(
-                    f"  The problem is \033[93mI CAN'T\033[0m delete it by myself, it requires root privileges.")
-                print(
-                    f"  The good news is \033[92mYOU CAN\033[0m do it with the following command (be careful).")
-                print(f'\033[91m  => Consider doing it (at your own risks). Exiting program\033[0m')
-                print(f"- Commands to copy/paste (I'm kind, I prepared all the necessary commands for you).")
-                print(f"sudo rm -rf {Path(datasets_DIR, epoch).as_posix()}")
-                path_exists_error = True
-    if path_exists_error:
-        exit()
-
-    ### LAUNCH global alignment (Time-SIFT multitemporal block)
-    try:
-        subprocess.run(['docker', 'run', '-i', '--rm', '-v', datasets_DIR + ':/datasets',
-                        'opendronemap/odm', '--project-path', '/datasets', timesift_DIR, '--end-with', 'opensfm'])
-    except:
-        print(f'\033[91m[Time-SIFT] ERROR: {sys.exc_info()[0]}\033[0m')
-        exit()
-    print('\033[92m[Time-SIFT] Sfm on multi-temporal block done\033[0m')
-    print('[Time-SIFT] Going to dense matching on all epochs...')
-
-    ### Loop on epochs for the dense matching
-    for epoch in images_epochs_dict:
-        #### We first duplicate the time-sift multitemporal block to save sfm results
-        shutil.copytree(Path(datasets_DIR, timesift_DIR),
-                        Path(datasets_DIR, epoch))
-        #### Reads the datasets/{epoch}/opensfm/undistorted/reconstruction.json file that has to be modified
-        with open(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'), mode="r",
-                  encoding="utf-8") as read_file:
-            reconstruction_dict = json.load(read_file)
-        #### Removes images in this json dict (we delete the shot and the rig_instances that do not correspond to this epoch)
-        images = images_epochs_dict[epoch]
-        clean_reconstruction_dict(reconstruction_dict[0], 'shots', images)
-        clean_reconstruction_dict(reconstruction_dict[0], 'rig_instances', images)
-        #### Makes a backup of the reconstruction.json file and writes the modified json
-        shutil.copy(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'),
-                    Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json.bak'))
-        with open(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'), mode="w",
-                  encoding="utf-8") as write_file:
-            json.dump(reconstruction_dict, write_file)
-        #### Launches dense matching from the good previous step, with possible options (e.g. => to stop at the point clouds)
-        command_rerun = ['docker', 'run', '-i', '--rm', '-v', datasets_DIR + ':/datasets',
-                         'opendronemap/odm',
-                         '--project-path', '/datasets', epoch,
-                         '--rerun-from', 'openmvs']
-        if additional_options_to_rerun:
-            print(f'[Time-SIFT] Epoch {epoch}: Rerun with additionnal options: {additional_options_to_rerun}')
-            command_rerun.extend(additional_options_to_rerun)
-        else:
-            print(f'[Time-SIFT] Epoch {epoch}: Default full rerun')
-        result = subprocess.run(command_rerun)
-        if result.returncode != 0:
-            print(f'\033[91m[Time-SIFT] ERROR in processing epoch {epoch}\033[0m')
-            print(f'{result=}')
-            exit(result.returncode)
-        print(f'\033[92m[Time-SIFT] Epoch {epoch} finished\033[0m')
-
-    print('§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§')
-    print('§§§ §§ §§§ §§§§§ §§§ §§§§§§§§§§ §§ §§ §§ §§§')
-    print('§§§§§ §§§§ §§§ §§§ §§§ §§§§§§§§§§§§§ §§§§§§ §§ §§§§§§§§ §§§§§')
-    print('§§§§§ §§§§ §§§ § §§§ §§§§ §§§§§ §§§§ §§ §§§§§§ §§§§§')
-    print('§§§§§ §§§§ §§§ §§§§§ §§§ §§§§§§§§§§§§§§§§§ §§ §§ §§§§§§§§ §§§§§')
-    print('§§§§§ §§§§ §§§ §§§§§ §§§ §§§§§§§§§ §§§ §§ §§§§§§§§ §§§§§')
-    print('§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§')
-    print(' \033[92mTime-SIFT with ODM finished, congrats !\033[0m Want to cite the method ?')
-    print('=> D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in ')
-    print('   a single SfM block allows 3-D change detection with almost exclusively')
-    print('   image information, ISPRS Journal of Photogrammetry and Remote Sensing,')
-    print('   2018, https://doi.org/10.1016/j.isprsjprs.2018.10.016 ')
-
-if __name__ == "__main__":
-    main(sys.argv[1:])

View file

@@ -1,8 +1,8 @@
-FROM nvidia/cuda:11.8.0-devel-ubuntu20.04 AS builder
+FROM nvidia/cuda:11.2.2-devel-ubuntu20.04 AS builder

 # Env variables
 ENV DEBIAN_FRONTEND=noninteractive \
-    PYTHONPATH="/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
+    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
     LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

 # Prepare directories
@@ -21,11 +21,12 @@ RUN bash configure.sh clean
 ### Use a second image for the final asset to reduce the number and
 # size of the layers.
-FROM nvidia/cuda:11.8.0-runtime-ubuntu20.04
+FROM nvidia/cuda:11.2.2-runtime-ubuntu20.04
+#FROM nvidia/cuda:11.2.0-devel-ubuntu20.04

 # Env variables
 ENV DEBIAN_FRONTEND=noninteractive \
-    PYTHONPATH="/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
+    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
     LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \
     PDAL_DRIVER_PATH="/code/SuperBuild/install/bin"
@@ -37,7 +38,6 @@ COPY --from=builder /code /code

 # Copy the Python libraries installed via pip from the builder
 COPY --from=builder /usr/local /usr/local
 #COPY --from=builder /usr/lib/x86_64-linux-gnu/libavcodec.so.58 /usr/lib/x86_64-linux-gnu/libavcodec.so.58
-
 RUN apt-get update -y \
     && apt-get install -y ffmpeg libtbb2

 # Install shared libraries that we depend on via APT, but *not*

View file

@@ -4,25 +4,6 @@ from opendm import log
 import zipfile
 import time
 import sys
-import rawpy
-import cv2
-
-def read_image(img_path):
-    if img_path[-4:].lower() in [".dng", ".raw", ".nef"]:
-        try:
-            with rawpy.imread(img_path) as r:
-                img = r.postprocess(output_bps=8, use_camera_wb=True, use_auto_wb=False)
-        except:
-            return None
-    else:
-        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
-        if img is None:
-            return None
-
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
-    return img
-
 def get_model(namespace, url, version, name = "model.onnx"):
     version = version.replace(".", "_")

View file

@@ -5,7 +5,6 @@ import cv2
 import os
 import onnxruntime as ort
 from opendm import log
-from opendm.ai import read_image
 from threading import Lock

 mutex = Lock()
@@ -74,7 +73,11 @@ class BgFilter():
         return output

     def run_img(self, img_path, dest):
-        img = read_image(img_path)
+        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
+        if img is None:
+            return None
+
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         mask = self.get_mask(img)

         img_name = os.path.basename(img_path)

View file

@@ -41,7 +41,6 @@ rerun_stages = {
     'geo': 'dataset',
     'gltf': 'mvs_texturing',
     'gps_accuracy': 'dataset',
-    'gps_z_offset': 'dataset',
     'help': None,
     'ignore_gsd': 'opensfm',
     'matcher_neighbors': 'opensfm',
@@ -128,7 +127,7 @@ def url_string(string):
         r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
         r'(?::\d+)?'  # optional port
         r'(?:/?|[/?]\S+)$', re.IGNORECASE)

     if re.match(regex, string) is None:
         raise argparse.ArgumentTypeError("%s is not a valid URL. The URL must be in the format: http(s)://host[:port]/[?token=]" % string)
     return string
@@ -165,7 +164,7 @@ def config(argv=None, parser=None):
         parser = SettingsParser(description='ODM is a command line toolkit to generate maps, point clouds, 3D models and DEMs from drone, balloon or kite images.',
                                 usage='%s [options] <dataset name>' % usage_bin,
                                 yaml_file=open(context.settings_path))

     parser.add_argument('--project-path',
                         metavar='<path>',
                         action=StoreValue,
@@ -214,7 +213,7 @@ def config(argv=None, parser=None):
                              'More features can be useful for finding more matches between images, '
                              'potentially allowing the reconstruction of areas with little overlap or insufficient features. '
                              'More features also slow down processing. Default: %(default)s'))

     parser.add_argument('--feature-type',
                         metavar='<string>',
                         action=StoreValue,
@@ -223,7 +222,7 @@ def config(argv=None, parser=None):
                         help=('Choose the algorithm for extracting keypoints and computing descriptors. '
                               'Can be one of: %(choices)s. Default: '
                               '%(default)s'))

     parser.add_argument('--feature-quality',
                         metavar='<string>',
                         action=StoreValue,
@@ -232,7 +231,7 @@ def config(argv=None, parser=None):
                         help=('Set feature extraction quality. Higher quality generates better features, but requires more memory and takes longer. '
                               'Can be one of: %(choices)s. Default: '
                               '%(default)s'))

     parser.add_argument('--matcher-type',
                         metavar='<string>',
                         action=StoreValue,
@@ -248,7 +247,7 @@ def config(argv=None, parser=None):
                         default=0,
                         type=int,
                         help='Perform image matching with the nearest images based on GPS exif data. Set to 0 to match by triangulation. Default: %(default)s')

     parser.add_argument('--matcher-order',
                         metavar='<positive integer>',
                         action=StoreValue,
@@ -332,7 +331,7 @@ def config(argv=None, parser=None):
                         nargs=0,
                         default=False,
                         help='Automatically compute image masks using AI to remove the sky. Experimental. Default: %(default)s')

     parser.add_argument('--bg-removal',
                         action=StoreTrue,
                         nargs=0,
@@ -350,19 +349,19 @@ def config(argv=None, parser=None):
                         nargs=0,
                         default=False,
                         help='Skip generation of a full 3D model. This can save time if you only need 2D results such as orthophotos and DEMs. Default: %(default)s')

     parser.add_argument('--skip-report',
                         action=StoreTrue,
                         nargs=0,
                         default=False,
                         help='Skip generation of PDF report. This can save time if you don\'t need a report. Default: %(default)s')

     parser.add_argument('--skip-orthophoto',
                         action=StoreTrue,
                         nargs=0,
                         default=False,
                         help='Skip generation of the orthophoto. This can save time if you only need 3D results or DEMs. Default: %(default)s')

     parser.add_argument('--ignore-gsd',
                         action=StoreTrue,
                         nargs=0,
@@ -372,13 +371,13 @@ def config(argv=None, parser=None):
                              'Ordinarily, GSD estimates are used to cap the maximum resolution of image outputs and resizes images when necessary, resulting in faster processing and lower memory usage. '
                              'Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. '
                              'Never set --ignore-gsd to true unless you are positive you need it, and even then: do not use it. Default: %(default)s')

     parser.add_argument('--no-gpu',
                         action=StoreTrue,
                         nargs=0,
                         default=False,
                         help='Do not use GPU acceleration, even if it\'s available. Default: %(default)s')

     parser.add_argument('--mesh-size',
                         metavar='<positive integer>',
                         action=StoreValue,
@@ -463,7 +462,7 @@ def config(argv=None, parser=None):
                         nargs=0,
                         default=False,
                         help='Export the georeferenced point cloud in CSV format. Default: %(default)s')

     parser.add_argument('--pc-las',
                         action=StoreTrue,
                         nargs=0,
@@ -489,7 +488,7 @@ def config(argv=None, parser=None):
                         default=5,
                         help='Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering. '
                              'Default: %(default)s')

     parser.add_argument('--pc-sample',
                         metavar='<positive float>',
                         action=StoreValue,
@@ -520,7 +519,7 @@ def config(argv=None, parser=None):
                         default=0.15,
                         help='Simple Morphological Filter slope parameter (rise over run). '
                              'Default: %(default)s')

     parser.add_argument('--smrf-threshold',
                         metavar='<positive float>',
                         action=StoreValue,
@@ -528,7 +527,7 @@ def config(argv=None, parser=None):
                         default=0.5,
                         help='Simple Morphological Filter elevation threshold parameter (meters). '
                              'Default: %(default)s')

     parser.add_argument('--smrf-window',
                         metavar='<positive float>',
                         action=StoreValue,
@@ -587,7 +586,7 @@ def config(argv=None, parser=None):
                               'EPSG:<code> or <+proj definition>\n'
                               'image_name geo_x geo_y geo_z [yaw (degrees)] [pitch (degrees)] [roll (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n'
                               'Default: %(default)s'))

     parser.add_argument('--align',
                         metavar='<path string>',
                         action=StoreValue,
@@ -643,7 +642,7 @@ def config(argv=None, parser=None):
                         type=int,
                         help='Decimate the points before generating the DEM. 1 is no decimation (full quality). '
                              '100 decimates ~99%% of the points. Useful for speeding up generation of DEM results in very large datasets. Default: %(default)s')

     parser.add_argument('--dem-euclidean-map',
                         action=StoreTrue,
                         nargs=0,
@@ -676,13 +675,13 @@ def config(argv=None, parser=None):
                         default=False,
                         help='Set this parameter if you want to generate a PNG rendering of the orthophoto. '
                              'Default: %(default)s')

     parser.add_argument('--orthophoto-kmz',
                         action=StoreTrue,
                         nargs=0,
                         default=False,
                         help='Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. '
                              'Default: %(default)s')

     parser.add_argument('--orthophoto-compression',
                         metavar='<string>',
@@ -691,7 +690,7 @@ def config(argv=None, parser=None):
                         choices=['JPEG', 'LZW', 'PACKBITS', 'DEFLATE', 'LZMA', 'NONE'],
                         default='DEFLATE',
                         help='Set the compression to use for orthophotos. Can be one of: %(choices)s. Default: %(default)s')

     parser.add_argument('--orthophoto-cutline',
                         action=StoreTrue,
                         nargs=0,
@@ -730,7 +729,7 @@ def config(argv=None, parser=None):
                         action=StoreValue,
                         metavar='<positive integer>',
                         default=0,
                         help='Override the rolling shutter readout time for your camera sensor (in milliseconds), instead of using the rolling shutter readout database. '
                              'Note that not all cameras are present in the database. Set to 0 to use the database value. '
                              'Default: %(default)s')
@@ -769,7 +768,7 @@ def config(argv=None, parser=None):
                         default=4000,
                         metavar='<positive integer>',
                         help='The maximum output resolution of extracted video frames in pixels. Default: %(default)s')

     parser.add_argument('--split',
                         type=int,
                         action=StoreValue,
@@ -786,12 +785,11 @@ def config(argv=None, parser=None):
                         action=StoreValue,
                         metavar='<positive integer>',
                         default=150,
-                        help='Radius of the overlap between submodels in meters. '
+                        help='Radius of the overlap between submodels. '
                              'After grouping images into clusters, images '
                              'that are closer than this radius to a cluster '
                              'are added to the cluster. This is done to ensure '
-                             'that neighboring submodels overlap. All images' \
-                             'need GPS information. Default: %(default)s')
+                             'that neighboring submodels overlap. Default: %(default)s')

     parser.add_argument('--split-image-groups',
                         metavar='<path string>',
@@ -835,7 +833,7 @@ def config(argv=None, parser=None):
                         help=('Use images\' GPS exif data for reconstruction, even if there are GCPs present.'
                               'This flag is useful if you have high precision GPS measurements. '
                               'If there are no GCPs, this flag does nothing. Default: %(default)s'))

     parser.add_argument('--gps-accuracy',
                         type=float,
                         action=StoreValue,
@@ -846,17 +844,7 @@ def config(argv=None, parser=None):
                              'with high precision GPS information (RTK), this value will be automatically '
                              'set accordingly. You can use this option to manually set it in case the reconstruction '
                              'fails. Lowering this option can sometimes help control bowling-effects over large areas. Default: %(default)s')

-    parser.add_argument('--gps-z-offset',
-                        type=float,
-                        action=StoreValue,
-                        metavar='<float>',
-                        default=0,
-                        help='Set a GPS offset in meters for the vertical axis (Z) '
-                             'by adding it to the altitude value of the GPS EXIF data. This does not change the value of any GCPs. '
-                             'This can be useful for example when adjusting from ellipsoidal to orthometric height. '
-                             'Default: %(default)s')
-
     parser.add_argument('--optimize-disk-space',
                         action=StoreTrue,
                         nargs=0,
@@ -880,7 +868,7 @@ def config(argv=None, parser=None):
                         default="auto",
                         type=str,
                         help=('When processing multispectral datasets, you can specify the name of the primary band that will be used for reconstruction. '
                               'It\'s recommended to choose a band which has sharp details and is in focus. '
                               'Default: %(default)s'))

     parser.add_argument('--skip-band-alignment',
@@ -935,5 +923,5 @@ def config(argv=None, parser=None):
     except exceptions.NodeConnectionError as e:
         log.ODM_ERROR("Cluster node seems to be offline: %s" % str(e))
         sys.exit(1)

     return args

View file

@@ -40,7 +40,7 @@ odm_orthophoto_path = os.path.join(superbuild_bin_path, "odm_orthophoto")
 settings_path = os.path.join(root_path, 'settings.yaml')

 # Define supported image extensions
-supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp', '.raw', '.dng', '.nef'}
+supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp'}
 supported_video_extensions = {'.mp4', '.mov', '.lrv', '.ts'}

 # Define the number of cores

View file

@@ -1,5 +1,4 @@
 import os
-import subprocess
 import sys
 import rasterio
 import numpy
@@ -21,8 +20,6 @@ from opendm import log
 from .ground_rectification.rectify import run_rectification
 from . import pdal

-gdal_proximity = None
-
 try:
     # GDAL >= 3.3
     from osgeo_utils.gdal_proximity import main as gdal_proximity
@@ -30,13 +27,8 @@ except ModuleNotFoundError:
     # GDAL <= 3.2
     try:
         from osgeo.utils.gdal_proximity import main as gdal_proximity
-    except ModuleNotFoundError:
-        # GDAL <= 3.0
-        gdal_proximity_script = shutil.which("gdal_proximity.py")
-        if gdal_proximity_script is not None:
-            def gdal_proximity(args):
-                subprocess.run([gdal_proximity_script] + args[1:], check=True)
+    except:
+        pass

 def classify(lasFile, scalar, slope, threshold, window):
     start = datetime.now()
@@ -271,4 +263,4 @@ def get_dem_radius_steps(stats_file, steps, resolution, multiplier = 1.0):
     for _ in range(steps - 1):
         radius_steps.append(radius_steps[-1] * math.sqrt(2))

     return radius_steps

View file

@@ -1,6 +1,6 @@
 from PIL import Image
 import cv2
-import rawpy
 from opendm import log
+
 Image.MAX_IMAGE_PIXELS = None
@@ -9,18 +9,12 @@ def get_image_size(file_path, fallback_on_error=True):
     """
     Return (width, height) for a given img file
     """
     try:
-        if file_path[-4:].lower() in [".dng", ".raw", ".nef"]:
-            with rawpy.imread(file_path) as img:
-                s = img.sizes
-                width, height = s.raw_width, s.raw_height
-        else:
-            with Image.open(file_path) as img:
-                width, height = img.size
+        with Image.open(file_path) as img:
+            width, height = img.size
     except Exception as e:
         if fallback_on_error:
-            log.ODM_WARNING("Cannot read %s with image library, fallback to cv2: %s" % (file_path, str(e)))
+            log.ODM_WARNING("Cannot read %s with PIL, fallback to cv2: %s" % (file_path, str(e)))
             img = cv2.imread(file_path)
             width = img.shape[1]
             height = img.shape[0]

View file

@@ -6,7 +6,6 @@ import numpy as np
 import pygltflib
 from opendm import system
 from opendm import io
-from opendm import log

 warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)

View file

@@ -19,17 +19,14 @@ def has_popsift_and_can_handle_texsize(width, height):
             log.ODM_INFO("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor))
             return False
     except Exception as e:
-        log.ODM_WARNING(str(e))
+        log.ODM_INFO("Using CPU for feature extraction: %s" % str(e))
         return False

     try:
         from opensfm import pypopsift
-        if pypopsift.fits_texture(int(width * 1.02), int(height * 1.02)):
-            log.ODM_INFO("popsift can handle texture size %dx%d" % (width, height))
-            return True
-        else:
-            log.ODM_INFO("popsift cannot handle texture size %dx%d" % (width, height))
-            return False
+        return pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
+    except (ModuleNotFoundError, ImportError):
+        return False
     except Exception as e:
         log.ODM_WARNING(str(e))
         return False
@@ -84,12 +81,11 @@ def has_gpu(args):
             log.ODM_INFO("CUDA drivers detected")
             return True
         else:
-            log.ODM_INFO("No CUDA drivers detected")
+            log.ODM_INFO("No CUDA drivers detected, using CPU")
             return False
     else:
         if shutil.which('nvidia-smi') is not None:
             log.ODM_INFO("nvidia-smi detected")
             return True
         else:
-            log.ODM_INFO("No nvidia-smi detected")
             return False

View file

@@ -5,7 +5,6 @@ import math
 from repoze.lru import lru_cache
 from opendm import log
 from opendm.shots import get_origin
-from scipy import spatial

 def rounded_gsd(reconstruction_json, default_value=None, ndigits=0, ignore_gsd=False):
     """
@@ -112,12 +111,16 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
     with open(reconstruction_json) as f:
         data = json.load(f)

+    # Calculate median height from sparse reconstruction
     reconstruction = data[0]
-    points = np.array([reconstruction['points'][pointId]['coordinates'] for pointId in reconstruction['points']])
-    tdpoints = points.copy()
-    tdpoints[:,2] = 0
-    tree = spatial.cKDTree(tdpoints)
+    point_heights = []
+
+    for pointId in reconstruction['points']:
+        point = reconstruction['points'][pointId]
+        point_heights.append(point['coordinates'][2])
+
+    ground_height = np.median(point_heights)

     gsds = []
     for shotImage in reconstruction['shots']:
         shot = reconstruction['shots'][shotImage]
@@ -129,17 +132,10 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
             if not focal_ratio:
                 log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json)
                 return None

-            shot_origin[2] = 0
-            distances, neighbors = tree.query(
-                shot_origin, k=9
-            )
-
-            if len(distances) > 0:
-                ground_height = np.median(points[neighbors][:,2])
-
-                gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
-                                                           shot_height - ground_height,
-                                                           camera['width']))
+            gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
+                                                       shot_height - ground_height,
+                                                       camera['width']))

     if len(gsds) > 0:
         mean = np.mean(gsds)
@@ -149,6 +145,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
         return None

 def calculate_gsd(sensor_width, flight_height, focal_length, image_width):
     """
     :param sensor_width in millimeters
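
Both sides of this change feed `shot_height - ground_height` (the flight height above ground) into the same GSD calculation; master estimates `ground_height` locally per shot from the 9 nearest sparse points in XY (via the cKDTree), while v3.5.3 used a single global median. For reference, a worked example of the standard formula that `calculate_gsd` implements, per its docstring (all numbers here are made up):

```python
# Illustrative values only; the docstring above specifies sensor width in millimeters.
sensor_width_mm = 13.2   # assumed 1-inch sensor
focal_length_mm = 8.8    # assumed lens
image_width_px = 5472    # assumed image width
flight_height_m = 100.0  # shot_height - ground_height

# GSD (cm/px) = (sensor width * flight height * 100) / (focal length * image width)
gsd_cm = (sensor_width_mm * flight_height_m * 100.0) / (focal_length_mm * image_width_px)
print(round(gsd_cm, 2))  # ~2.74 cm/px
```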

View file

@@ -6,7 +6,6 @@ import datetime
 import dateutil.parser
 import shutil
 import multiprocessing
-from repoze.lru import lru_cache

 from opendm.arghelpers import double_quote, args_to_dict
 from vmem import virtual_memory
@@ -31,7 +30,6 @@ else:

 lock = threading.Lock()

-@lru_cache(maxsize=None)
 def odm_version():
     with open(os.path.join(os.path.dirname(__file__), "..", "VERSION")) as f:
         return f.read().split("\n")[0].strip()

View file

@@ -273,10 +273,7 @@ def compute_band_maps(multi_camera, primary_band):
                 # Quick check
                 if filename_without_band == p.filename:
                     raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly." % p.filename)

-                if not filename_without_band in filename_map:
-                    raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly, check that your images have the appropriate CaptureUUID XMP tag and that no images are missing." % p.filename)
-
                 s2p[p.filename] = filename_map[filename_without_band]

                 if band['name'] != band_name:

View file

@@ -13,9 +13,7 @@ from rasterio.mask import mask
 from opendm import io
 from opendm.tiles.tiler import generate_orthophoto_tiles
 from opendm.cogeo import convert_to_cogeo
-from opendm.utils import add_raster_meta_tags
 from osgeo import gdal
-from osgeo import ogr

 def get_orthophoto_vars(args):
@@ -45,50 +43,33 @@ def generate_png(orthophoto_file, output_file=None, outsize=None):
         output_file = base + '.png'

     # See if we need to select top three bands
-    params = []
+    bandparam = ""

-    try:
-        gtif = gdal.Open(orthophoto_file)
+    gtif = gdal.Open(orthophoto_file)
+    if gtif.RasterCount > 4:
         bands = []
         for idx in range(1, gtif.RasterCount+1):
             bands.append(gtif.GetRasterBand(idx).GetColorInterpretation())
         bands = dict(zip(bands, range(1, len(bands)+1)))

-        if gtif.RasterCount >= 3:
+        try:
             red = bands.get(gdal.GCI_RedBand)
             green = bands.get(gdal.GCI_GreenBand)
             blue = bands.get(gdal.GCI_BlueBand)
             if red is None or green is None or blue is None:
-                params.append("-b 1 -b 2 -b 3")
-            else:
-                params.append("-b %s -b %s -b %s" % (red, green, blue))
-        elif gtif.RasterCount <= 2:
-            params.append("-b 1")
-
-        alpha = bands.get(gdal.GCI_AlphaBand)
-        if alpha is not None:
-            params.append("-b %s" % alpha)
-        else:
-            params.append("-a_nodata 0")
-
-        dtype = gtif.GetRasterBand(1).DataType
-        if dtype != gdal.GDT_Byte:
-            params.append("-ot Byte")
-            if gtif.RasterCount >= 3:
-                params.append("-scale_1 -scale_2 -scale_3")
-            elif gtif.RasterCount <= 2:
-                params.append("-scale_1")
-
-        gtif = None
-    except Exception as e:
-        log.ODM_WARNING("Cannot read orthophoto information for PNG generation: %s" % str(e))
+                raise Exception("Cannot find bands")
+            bandparam = "-b %s -b %s -b %s -a_nodata 0" % (red, green, blue)
+        except:
+            bandparam = "-b 1 -b 2 -b 3 -a_nodata 0"
+        gtif = None

+    osparam = ""
     if outsize is not None:
-        params.append("-outsize %s 0" % outsize)
+        osparam = "-outsize %s 0" % outsize

-    system.run('gdal_translate -of png "%s" "%s" %s '
-               '-co WORLDFILE=YES '
-               '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, " ".join(params), get_max_memory()))
+    system.run('gdal_translate -of png "%s" "%s" %s %s '
+               '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, osparam, bandparam, get_max_memory()))

 def generate_kmz(orthophoto_file, output_file=None, outsize=None):
     if output_file is None:
@@ -103,71 +84,8 @@ def generate_kmz(orthophoto_file, output_file=None, outsize=None):
     system.run('gdal_translate -of KMLSUPEROVERLAY -co FORMAT=PNG "%s" "%s" %s '
               '--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, bandparam, get_max_memory()))

-def generate_extent_polygon(orthophoto_file):
-    """Function to return the orthophoto extent as a polygon into a gpkg file
-
-    Args:
-        orthophoto_file (str): the path to orthophoto file
-    """
-    base, ext = os.path.splitext(orthophoto_file)
-    output_file = base + '_extent.dxf'
-
-    try:
-        gtif = gdal.Open(orthophoto_file)
-        srs = gtif.GetSpatialRef()
-        geoTransform = gtif.GetGeoTransform()
-
-        # calculate the coordinates
-        minx = geoTransform[0]
-        maxy = geoTransform[3]
-        maxx = minx + geoTransform[1] * gtif.RasterXSize
-        miny = maxy + geoTransform[5] * gtif.RasterYSize
-
-        # create polygon in wkt format
-        poly_wkt = "POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
-
-        # create vector file
-        # just the DXF to support AutoCAD users
-        # to load the geotiff raster correctly.
-        driver = ogr.GetDriverByName("DXF")
-        ds = driver.CreateDataSource(output_file)
-        layer = ds.CreateLayer("extent", srs, ogr.wkbPolygon)
-
-        # create the feature and set values
-        featureDefn = layer.GetLayerDefn()
-        feature = ogr.Feature(featureDefn)
-        feature.SetGeometry(ogr.CreateGeometryFromWkt(poly_wkt))
-
-        # add feature to layer
-        layer.CreateFeature(feature)
-
-        # save and close everything
-        feature = None
-        ds = None
-        gtif = None
-        log.ODM_INFO("Wrote %s" % output_file)
-    except Exception as e:
-        log.ODM_WARNING("Cannot create extent layer for %s: %s" % (orthophoto_file, str(e)))
-
-def generate_tfw(orthophoto_file):
-    base, ext = os.path.splitext(orthophoto_file)
-    tfw_file = base + '.tfw'
-
-    try:
-        with rasterio.open(orthophoto_file) as ds:
-            t = ds.transform
-            with open(tfw_file, 'w') as f:
-                # rasterio affine values taken by
-                # https://mharty3.github.io/til/GIS/raster-affine-transforms/
-                f.write("\n".join([str(v) for v in [t.a, t.d, t.b, t.e, t.c, t.f]]) + "\n")
-        log.ODM_INFO("Wrote %s" % tfw_file)
-    except Exception as e:
-        log.ODM_WARNING("Cannot create .tfw for %s: %s" % (orthophoto_file, str(e)))
-
-def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution, reconstruction, tree, embed_gcp_meta=False):
+def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution):
     if args.crop > 0 or args.boundary:
         Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha'])
@@ -180,17 +98,12 @@ def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution):
     if args.orthophoto_kmz:
         generate_kmz(orthophoto_file)

-    add_raster_meta_tags(orthophoto_file, reconstruction, tree, embed_gcp_meta=embed_gcp_meta)
-
     if args.tiles:
         generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency, resolution)

     if args.cog:
         convert_to_cogeo(orthophoto_file, max_workers=args.max_concurrency, compression=args.orthophoto_compression)

-    generate_extent_polygon(orthophoto_file)
-    generate_tfw(orthophoto_file)
-

 def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance=20, only_max_coords_feature=False):
     if not os.path.exists(input_raster):
         log.ODM_WARNING("Cannot mask raster, %s does not exist" % input_raster)

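Note: the `generate_tfw` helper removed above writes a world file from the rasterio affine in the order a, d, b, e, c, f. A minimal standalone sketch of that mapping (the transform values below are hypothetical; `Affine` comes from the `affine` package that rasterio exposes as `ds.transform`):

```python
# Sketch of the a, d, b, e, c, f ordering used by the removed generate_tfw:
# a world file lists the pixel x-size, the two rotation terms, the pixel
# y-size (negative for north-up rasters), then the origin x and y.
from affine import Affine

# Hypothetical north-up geotransform: 10 cm pixels, origin at (500000, 4600000)
t = Affine(0.10, 0.0, 500000.0,
           0.0, -0.10, 4600000.0)

print("\n".join(str(v) for v in [t.a, t.d, t.b, t.e, t.c, t.f]))
# -> 0.1, 0.0, 0.0, -0.1, 500000.0, 4600000.0 (one value per .tfw line)
```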
View file

@@ -296,23 +296,19 @@ class OSFMContext:
             config.append("matcher_type: %s" % osfm_matchers[matcher_type])

             # GPU acceleration?
-            if feature_type == "SIFT":
-                log.ODM_INFO("Checking for GPU as using SIFT for extracting features")
-                if has_gpu(args) and max_dims is not None:
-                    w, h = max_dims
-                    if w > h:
-                        h = int((h / w) * feature_process_size)
-                        w = int(feature_process_size)
-                    else:
-                        w = int((w / h) * feature_process_size)
-                        h = int(feature_process_size)
-
-                    if has_popsift_and_can_handle_texsize(w, h):
-                        log.ODM_INFO("Using GPU for extracting SIFT features")
-                        feature_type = "SIFT_GPU"
-                        self.gpu_sift_feature_extraction = True
-                    else:
-                        log.ODM_INFO("Using CPU for extracting SIFT features as texture size is too large or GPU SIFT is not available")
+            if has_gpu(args) and max_dims is not None:
+                w, h = max_dims
+                if w > h:
+                    h = int((h / w) * feature_process_size)
+                    w = int(feature_process_size)
+                else:
+                    w = int((w / h) * feature_process_size)
+                    h = int(feature_process_size)
+
+                if has_popsift_and_can_handle_texsize(w, h) and feature_type == "SIFT":
+                    log.ODM_INFO("Using GPU for extracting SIFT features")
+                    feature_type = "SIFT_GPU"
+                    self.gpu_sift_feature_extraction = True

             config.append("feature_type: %s" % feature_type)

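Note: both sides of the hunk above rescale the largest photo dimensions so the longer side equals `feature_process_size` before the popsift texture-size check. A minimal sketch of that aspect-preserving resize (the function name is illustrative, not ODM's):

```python
# Sketch of the aspect-preserving rescale used before checking whether
# popsift can handle the texture: the longer side is pinned to
# feature_process_size, the shorter side is scaled proportionally.
def scaled_dims(max_dims, feature_process_size):
    w, h = max_dims
    if w > h:
        return int(feature_process_size), int((h / w) * feature_process_size)
    return int((w / h) * feature_process_size), int(feature_process_size)

print(scaled_dims((8000, 6000), 2048))  # (2048, 1536)
print(scaled_dims((4000, 5000), 2048))  # (1638, 2048)
```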
View file

@@ -21,17 +21,6 @@ from opensfm.geo import ecef_from_lla

 projections = ['perspective', 'fisheye', 'fisheye_opencv', 'brown', 'dual', 'equirectangular', 'spherical']

-def find_mean_utc_time(photos):
-    utc_times = []
-    for p in photos:
-        if p.utc_time is not None:
-            utc_times.append(p.utc_time / 1000.0)
-    if len(utc_times) == 0:
-        return None
-
-    return np.mean(utc_times)
-
 def find_largest_photo_dims(photos):
     max_mp = 0
     max_dims = None

@@ -785,12 +774,6 @@ class ODM_Photo:
     def override_gps_dop(self, dop):
         self.gps_xy_stddev = self.gps_z_stddev = dop

-    def adjust_z_offset(self, z_offset):
-        if self.altitude is not None:
-            self.altitude += z_offset
-        else:
-            self.altitude = z_offset
-
     def override_camera_projection(self, camera_projection):
         if camera_projection in projections:

View file

@@ -327,7 +327,7 @@ def post_point_cloud_steps(args, tree, rerun=False):
                     tree.odm_georeferencing_model_laz,
                     tree.odm_georeferencing_model_las))
         else:
-            log.ODM_WARNING("Found existing LAS file %s" % tree.odm_georeferencing_model_las)
+            log.ODM_WARNING("Found existing LAS file %s" % tree.odm_georeferencing_xyz_file)

     # EPT point cloud output
     if args.pc_ept:

View file

@@ -22,13 +22,7 @@ RS_DATABASE = {
     'hasselblad l2d-20c': 16.6, # DJI Mavic 3 (not enterprise version)

     'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
-
-    'dji fc8482': lambda p: (
-            16 if p.get_capture_megapixels() < 12 else  # 12MP 16:9 mode (actual 9.1MP)
-            21 if p.get_capture_megapixels() < 20 else  # 12MP 4:3 mode (actual 12.2MP)
-            43 if p.get_capture_megapixels() < 45 else  # 48MP 16:9 mode (actual 36.6MP)
-            58                                          # 48MP 4:3 mode (actual 48.8MP)
-        ), # DJI Mini 4 Pro (readout varies by resolution and aspect ratio, image heights all different)

     'dji fc350': 30, # Inspire 1

     'dji mavic2-enterprise-advanced': 31, # DJI Mavic 2 Enterprise Advanced

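Note: `RS_DATABASE` values are either a fixed sensor readout time in milliseconds or a callable evaluated against the photo, so readout can vary with capture mode. A small standalone sketch of how such entries resolve (the lookup helper and photo stub are illustrative, not ODM code):

```python
# Sketch: resolve an RS_DATABASE entry that may be a constant or a callable.
class StubPhoto:
    def __init__(self, megapixels):
        self.megapixels = megapixels

    def get_capture_megapixels(self):
        return self.megapixels

RS_DATABASE = {
    'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60,
    'dji fc350': 30,
}

def readout_ms(camera_model, photo):
    value = RS_DATABASE.get(camera_model)
    return value(photo) if callable(value) else value

print(readout_ms('dji fc3582', StubPhoto(12)))  # 26 (12MP mode)
print(readout_ms('dji fc3582', StubPhoto(48)))  # 60 (48MP mode)
print(readout_ms('dji fc350', StubPhoto(12)))   # 30 (constant readout)
```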
View file

@@ -148,16 +148,3 @@ def merge_geojson_shots(geojson_shots_files, output_geojson_file):
     with open(output_geojson_file, "w") as f:
         f.write(json.dumps(result))
-
-def merge_cameras(cameras_json_files, output_cameras_file):
-    result = {}
-    for cameras_file in cameras_json_files:
-        with open(cameras_file, "r") as f:
-            cameras = json.loads(f.read())
-
-        for cam_id in cameras:
-            if not cam_id in result:
-                result[cam_id] = cameras[cam_id]
-
-    with open(output_cameras_file, "w") as f:
-        f.write(json.dumps(result))

View file

@@ -6,7 +6,6 @@ import os
 import onnxruntime as ort
 from .guidedfilter import guided_filter
 from opendm import log
-from opendm.ai import read_image
 from threading import Lock

 mutex = Lock()

@@ -73,7 +72,11 @@ class SkyFilter():

     def run_img(self, img_path, dest):
-        img = read_image(img_path)
+        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
+        if img is None:
+            return None
+
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         img = np.array(img / 255., dtype=np.float32)
         mask = self.get_mask(img)

View file

@@ -39,7 +39,7 @@ def generate_colored_hillshade(geotiff):
         system.run('gdaldem color-relief "%s" "%s" "%s" -alpha -co ALPHA=YES' % (geotiff, relief_file, colored_dem))
         system.run('gdaldem hillshade "%s" "%s" -z 1.0 -s 1.0 -az 315.0 -alt 45.0' % (geotiff, hillshade_dem))
-        system.run('"%s" "%s" "%s" "%s" "%s"' % (sys.executable, hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))
+        system.run('%s "%s" "%s" "%s" "%s"' % (sys.executable, hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))

         return outputs
     except Exception as e:

View file

@@ -1,12 +1,8 @@
 import os, shutil
 import numpy as np
 import json
-import rasterio
-from osgeo import gdal
-from datetime import datetime
 from opendm import log
-from opendm.photo import find_largest_photo_dims, find_mean_utc_time
+from opendm.photo import find_largest_photo_dims
 from osgeo import gdal
 from opendm.arghelpers import double_quote

@@ -117,42 +113,4 @@ def np_to_json(arr):
     return json.dumps(arr, cls=NumpyEncoder)

 def np_from_json(json_dump):
     return np.asarray(json.loads(json_dump))
-
-def add_raster_meta_tags(raster, reconstruction, tree, embed_gcp_meta=True):
-    try:
-        if os.path.isfile(raster):
-            mean_capture_time = find_mean_utc_time(reconstruction.photos)
-            mean_capture_dt = None
-            if mean_capture_time is not None:
-                mean_capture_dt = datetime.fromtimestamp(mean_capture_time).strftime('%Y:%m:%d %H:%M:%S') + '+00:00'
-
-            log.ODM_INFO("Adding TIFFTAGs to {}".format(raster))
-            with rasterio.open(raster, 'r+') as rst:
-                if mean_capture_dt is not None:
-                    rst.update_tags(TIFFTAG_DATETIME=mean_capture_dt)
-                rst.update_tags(TIFFTAG_SOFTWARE='ODM {}'.format(log.odm_version()))
-
-            if embed_gcp_meta:
-                # Embed GCP info in 2D results via
-                # XML metadata fields
-                gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
-
-                if reconstruction.has_gcp() and os.path.isfile(gcp_gml_export_file):
-                    gcp_xml = ""
-
-                    with open(gcp_gml_export_file) as f:
-                        gcp_xml = f.read()
-
-                    ds = gdal.Open(raster)
-                    if ds is not None:
-                        if ds.GetMetadata('xml:GROUND_CONTROL_POINTS') is None or self.rerun():
-                            ds.SetMetadata(gcp_xml, 'xml:GROUND_CONTROL_POINTS')
-                            ds = None
-                            log.ODM_INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % raster)
-                        else:
-                            log.ODM_WARNING("Already embedded ground control point information")
-                    else:
-                        log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % raster)
-    except Exception as e:
-        log.ODM_WARNING("Cannot write raster meta tags to %s: %s" % (raster, str(e)))

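Note: the removed `add_raster_meta_tags` stamps a mean capture time and a software tag onto the GeoTIFF with rasterio before optionally embedding GCP metadata. A minimal sketch of the tagging half (path, timestamp, and version string are hypothetical; unlike the original, this uses an explicitly UTC-aware datetime):

```python
# Sketch of the in-place TIFF tagging pattern from the removed helper:
# open the raster in r+ mode and update its tags.
from datetime import datetime, timezone
import rasterio

raster = "odm_orthophoto.tif"     # hypothetical path, assumed to exist
mean_capture_time = 1672938608.0  # hypothetical mean capture time (UNIX seconds)

dt = datetime.fromtimestamp(mean_capture_time, tz=timezone.utc)
with rasterio.open(raster, "r+") as rst:
    rst.update_tags(TIFFTAG_DATETIME=dt.strftime("%Y:%m:%d %H:%M:%S") + "+00:00")
    rst.update_tags(TIFFTAG_SOFTWARE="ODM x.y.z")  # version string is illustrative
```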
View file

@@ -120,7 +120,6 @@ class SrtFileParser:
         # <font size="36">SrtCnt : 1, DiffTime : 16ms
         # 2023-01-06 18:56:48,380,821
         # [iso : 3200] [shutter : 1/60.0] [fnum : 280] [ev : 0] [ct : 3925] [color_md : default] [focal_len : 240] [latitude: 0.000000] [longitude: 0.000000] [altitude: 0.000000] </font>
-        # </font>

         # DJI Mavic Mini
         # 1

@@ -165,10 +164,9 @@ class SrtFileParser:
             end = None

             for line in f:
-                # Remove html tags, spaces
-                line = re.sub('<[^<]+?>', '', line).strip()
-
-                if not line:
+                # Check if line is empty
+                if not line.strip():
                     if start is not None:
                         self.data.append({
                             "start": start,

@@ -195,6 +193,9 @@ class SrtFileParser:
                     continue

+                # Remove html tags
+                line = re.sub('<[^<]+?>', '', line)
+
                 # Search this "00:00:00,000 --> 00:00:00,016"
                 match = re.search("(\d{2}:\d{2}:\d{2},\d+) --> (\d{2}:\d{2}:\d{2},\d+)", line)
                 if match:

@@ -224,14 +225,14 @@ class SrtFileParser:
                     ("GPS \([\d\.\-]+,? ([\d\.\-]+),? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None),
                     ("RTK \([-+]?\d+\.\d+, (-?\d+\.\d+), -?\d+\)", lambda v: float(v) if v != 0 else None),
                 ], line)

                 longitude = match_single([
                     ("longitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                     ("longtitude : ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                     ("GPS \(([\d\.\-]+),? [\d\.\-]+,? [\d\.\-]+\)", lambda v: float(v) if v != 0 else None),
                     ("RTK \((-?\d+\.\d+), [-+]?\d+\.\d+, -?\d+\)", lambda v: float(v) if v != 0 else None),
                 ], line)

                 altitude = match_single([
                     ("altitude: ([\d\.\-]+)", lambda v: float(v) if v != 0 else None),
                     ("GPS \([\d\.\-]+,? [\d\.\-]+,? ([\d\.\-]+)\)", lambda v: float(v) if v != 0 else None),

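Note: both versions extract coordinates with `match_single`, a first-match-wins scan over (pattern, converter) pairs. A self-contained sketch of that helper's call shape (reimplemented here from how the hunk uses it; the real implementation lives elsewhere in ODM):

```python
# Sketch of a first-match-wins regex lookup matching how the parser calls
# match_single above: try each (pattern, converter) pair, return the first hit.
import re

def match_single(patterns, line):
    for pattern, convert in patterns:
        m = re.search(pattern, line)
        if m:
            return convert(m.group(1))
    return None

line = "[latitude: 46.842800] [longitude: -91.994000] [altitude: 198.400000]"
latitude = match_single([
    (r"latitude: ([\d\.\-]+)", float),
    (r"GPS \([\d\.\-]+,? ([\d\.\-]+),? [\d\.\-]+\)", float),
], line)
print(latitude)  # 46.8428
```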
View file

@@ -23,7 +23,6 @@ rasterio==1.2.3 ; sys_platform == 'linux'
 rasterio==1.3.6 ; sys_platform == 'darwin'
 https://github.com/OpenDroneMap/windows-deps/raw/main/rasterio-1.2.3-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
 https://github.com/OpenDroneMap/windows-deps/raw/main/GDAL-3.2.3-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
-odmrawpy==0.24.1
 repoze.lru==0.7
 scikit-learn==1.1.1
 Pywavelets==1.3.0

@@ -33,7 +32,7 @@ xmltodict==0.12.0
 fpdf2==2.4.6
 Shapely==1.7.1
 onnxruntime==1.12.1
-pygltflib==1.16.5
+pygltflib==1.15.3
 codem==0.24.0
 trimesh==3.17.1
 pandas==1.5.2

View file

@@ -187,9 +187,6 @@ class ODMLoadDatasetStage(types.ODM_Stage):
                         p.compute_opk()
                         updated += 1
                 log.ODM_INFO("Updated %s image positions" % updated)
-            # Warn if a file path is specified but it does not exist
-            elif tree.odm_geo_file is not None and not os.path.isfile(tree.odm_geo_file):
-                log.ODM_WARNING("Image geolocation file %s does not exist" % tree.odm_geo_file)

             # GPSDOP override if we have GPS accuracy information (such as RTK)
             if 'gps_accuracy_is_set' in args:

@@ -234,9 +231,9 @@ class ODMLoadDatasetStage(types.ODM_Stage):
                             item['p'].set_mask(os.path.basename(mask_file))
                             log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
                         else:
-                            log.ODM_WARNING("Cannot generate mask for %s" % item['file'])
+                            log.ODM_WARNING("Cannot generate mask for %s" % img)
                     except Exception as e:
-                        log.ODM_WARNING("Cannot generate mask for %s: %s" % (item['file'], str(e)))
+                        log.ODM_WARNING("Cannot generate mask for %s: %s" % (img, str(e)))

                 parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency)

@@ -338,10 +335,3 @@ class ODMLoadDatasetStage(types.ODM_Stage):
         if args.rolling_shutter and not reconstruction.is_georeferenced():
             log.ODM_WARNING("Reconstruction is not georeferenced, disabling rolling shutter correction")
             args.rolling_shutter = False
-
-        # GPS Z offset
-        if 'gps_z_offset_is_set' in args:
-            log.ODM_INFO("Adjusting GPS Z offset by %s for all images" % args.gps_z_offset)
-
-            for p in photos:
-                p.adjust_z_offset(args.gps_z_offset)

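Note: the deleted block applies a constant GPS Z offset to every photo through `ODM_Photo.adjust_z_offset`, which was removed in the photo.py hunk earlier. Its behavior reduces to two cases; a standalone sketch:

```python
# Sketch of the removed adjust_z_offset semantics: shift the parsed GPS
# altitude by a constant, or take the offset itself if no altitude was parsed.
def adjust_z_offset(altitude, z_offset):
    return altitude + z_offset if altitude is not None else z_offset

print(adjust_z_offset(102.5, -3.0))  # 99.5
print(adjust_z_offset(None, -3.0))   # -3.0
```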
View file

@@ -12,8 +12,6 @@ from opendm.cropper import Cropper
 from opendm import pseudogeo
 from opendm.tiles.tiler import generate_dem_tiles
 from opendm.cogeo import convert_to_cogeo
-from opendm.utils import add_raster_meta_tags

 class ODMDEMStage(types.ODM_Stage):
     def process(self, args, outputs):

@@ -88,8 +86,6 @@ class ODMDEMStage(types.ODM_Stage):
                         if pseudo_georeference:
                             pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)

-                        add_raster_meta_tags(dem_geotiff_path, reconstruction, tree, embed_gcp_meta=not outputs['large'])
-
                         if args.tiles:
                             generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency, resolution)

View file

@@ -6,7 +6,6 @@ import fiona
 import fiona.crs
 import json
 import zipfile
-import math
 from collections import OrderedDict
 from pyproj import CRS

@@ -126,35 +125,13 @@ class ODMGeoreferencingStage(types.ODM_Stage):
                 stages.append("transformation")
                 utmoffset = reconstruction.georef.utm_offset()

-                # Establish appropriate las scale for export
-                las_scale = 0.001
-                filtered_point_cloud_stats = tree.path("odm_filterpoints", "point_cloud_stats.json")
-
-                # Function that rounds to the nearest 10
-                # and then chooses the one below so our
-                # las scale is sensible
-                def powerr(r):
-                    return pow(10,round(math.log10(r))) / 10
-
-                if os.path.isfile(filtered_point_cloud_stats):
-                    try:
-                        with open(filtered_point_cloud_stats, 'r') as stats:
-                            las_stats = json.load(stats)
-                            spacing = powerr(las_stats['spacing'])
-                            log.ODM_INFO("las scale calculated as the minimum of 1/10 estimated spacing or %s, which ever is less." % las_scale)
-                            las_scale = min(spacing, 0.001)
-                    except Exception as e:
-                        log.ODM_WARNING("Cannot find file point_cloud_stats.json. Using default las scale: %s" % las_scale)
-                else:
-                    log.ODM_INFO("No point_cloud_stats.json found. Using default las scale: %s" % las_scale)
-
                 params += [
                     f'--filters.transformation.matrix="1 0 0 {utmoffset[0]} 0 1 0 {utmoffset[1]} 0 0 1 0 0 0 0 1"',
                     f'--writers.las.offset_x={reconstruction.georef.utm_east_offset}' ,
                     f'--writers.las.offset_y={reconstruction.georef.utm_north_offset}',
-                    f'--writers.las.scale_x={las_scale}',
-                    f'--writers.las.scale_y={las_scale}',
-                    f'--writers.las.scale_z={las_scale}',
+                    '--writers.las.scale_x=0.001',
+                    '--writers.las.scale_y=0.001',
+                    '--writers.las.scale_z=0.001',
                     '--writers.las.offset_z=0',
                     f'--writers.las.a_srs="{reconstruction.georef.proj4()}"' # HOBU this should maybe be WKT
                 ]

@@ -280,4 +257,3 @@ class ODMGeoreferencingStage(types.ODM_Stage):
                     os.remove(tree.filtered_point_cloud)

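Note: the removed LAS-scale logic snaps the estimated point spacing to the nearest power of ten, drops one decade, then takes the smaller of that value and 0.001. A worked standalone example of `powerr` (the function body is copied from the hunk above):

```python
# Worked example of powerr: round log10(spacing) to the nearest integer power
# of ten, then divide by 10 for one decade of headroom.
import math

def powerr(r):
    return pow(10, round(math.log10(r))) / 10

for spacing in (0.05, 0.2, 0.009):
    print(spacing, "->", powerr(spacing))
# 0.05  -> 0.01   (log10(0.05) is about -1.3, which rounds to -1: 10**-1 / 10)
# 0.2   -> 0.01
# 0.009 -> 0.001

las_scale = min(powerr(0.05), 0.001)  # the stage then caps at the default
print(las_scale)  # 0.001
```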
View file

@@ -132,8 +132,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
         else:
             log.ODM_INFO("Not a submodel run, skipping mask raster generation")

-        orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles, resolution,
-                                         reconstruction, tree, not outputs["large"])
+        orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles, resolution)

         # Generate feathered orthophoto also
         if args.orthophoto_cutline and submodel_run:

View file

@@ -1,5 +1,6 @@
 import os
+from osgeo import gdal

 from opendm import io
 from opendm import log
 from opendm import types

@@ -13,6 +14,37 @@ class ODMPostProcess(types.ODM_Stage):
         log.ODM_INFO("Post Processing")

+        if not outputs['large']:
+            # TODO: support for split-merge?
+
+            # Embed GCP info in 2D results via
+            # XML metadata fields
+            gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
+
+            if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file):
+                skip_embed_gcp = False
+                gcp_xml = ""
+
+                with open(gcp_gml_export_file) as f:
+                    gcp_xml = f.read()
+
+                for product in [tree.odm_orthophoto_tif,
+                                tree.path("odm_dem", "dsm.tif"),
+                                tree.path("odm_dem", "dtm.tif")]:
+                    if os.path.isfile(product):
+                        ds = gdal.Open(product)
+                        if ds is not None:
+                            if ds.GetMetadata('xml:GROUND_CONTROL_POINTS') is None or self.rerun():
+                                ds.SetMetadata(gcp_xml, 'xml:GROUND_CONTROL_POINTS')
+                                ds = None
+                                log.ODM_INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % product)
+                            else:
+                                skip_embed_gcp = True
+                                log.ODM_WARNING("Already embedded ground control point information")
+                                break
+                        else:
+                            log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % product)
+
         if getattr(args, '3d_tiles'):
             build_3dtiles(args, tree, reconstruction, self.rerun())

@@ -21,4 +53,3 @@ class ODMPostProcess(types.ODM_Stage):
                 copy_paths([os.path.join(args.project_path, p) for p in get_processing_results_paths()], args.copy_to, self.rerun())
             except Exception as e:
                 log.ODM_WARNING("Cannot copy to %s: %s" % (args.copy_to, str(e)))

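Note: the v3.5.3 side embeds the GCP GML directly into each product's `xml:GROUND_CONTROL_POINTS` metadata domain (master moved this into `add_raster_meta_tags`). A minimal sketch of the embedding (file names are hypothetical; this sketch opens the dataset in update mode, which the original code leaves implicit):

```python
# Sketch: write ground control point GML into a GeoTIFF's
# xml:GROUND_CONTROL_POINTS metadata domain with GDAL.
from osgeo import gdal

product = "odm_dem/dsm.tif"  # hypothetical product path, assumed to exist
with open("odm_georeferencing/ground_control_points.gml") as f:
    gcp_xml = f.read()

ds = gdal.Open(product, gdal.GA_Update)
if ds is not None:
    ds.SetMetadata(gcp_xml, "xml:GROUND_CONTROL_POINTS")
    ds = None  # dereference to flush metadata to disk
else:
    print("Cannot open %s for writing" % product)
```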
View file

@@ -63,13 +63,11 @@ class ODMOpenMVSStage(types.ODM_Stage):
             densify_ini_file = os.path.join(tree.openmvs, 'Densify.ini')
             subres_levels = 2 # The number of lower resolutions to process before estimating output resolution depthmap.
             filter_point_th = -20
-            min_resolution = 320 if args.pc_quality in ["low", "lowest"] else 640

             config = [
                 "--resolution-level %s" % int(resolution_level),
                 '--dense-config-file "%s"' % densify_ini_file,
                 "--max-resolution %s" % int(outputs['undist_image_max_size']),
-                "--min-resolution %s" % min_resolution,
                 "--max-threads %s" % args.max_concurrency,
                 "--number-views-fuse %s" % number_views_fuse,
                 "--sub-resolution-levels %s" % subres_levels,

View file

@@ -1,19 +1,23 @@
 import os
 import shutil
+import json
+import yaml
 from opendm import log
 from opendm.osfm import OSFMContext, get_submodel_argv, get_submodel_paths, get_all_submodel_paths
 from opendm import types
 from opendm import io
 from opendm import system
 from opendm import orthophoto
-from opendm.dem import utils
+from opendm.gcp import GCPFile
+from opendm.dem import pdal, utils
 from opendm.dem.merge import euclidean_merge_dems
 from opensfm.large import metadataset
 from opendm.cropper import Cropper
+from opendm.concurrency import get_max_memory
 from opendm.remote import LocalRemoteExecutor
-from opendm.shots import merge_geojson_shots, merge_cameras
+from opendm.shots import merge_geojson_shots
 from opendm import point_cloud
-from opendm.utils import double_quote, add_raster_meta_tags
+from opendm.utils import double_quote
 from opendm.tiles.tiler import generate_dem_tiles
 from opendm.cogeo import convert_to_cogeo
 from opendm import multispectral

@@ -263,8 +267,7 @@ class ODMMergeStage(types.ODM_Stage):
                     orthophoto_vars = orthophoto.get_orthophoto_vars(args)

                     orthophoto.merge(all_orthos_and_ortho_cuts, tree.odm_orthophoto_tif, orthophoto_vars)
-                    orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles, args.orthophoto_resolution,
-                                                     reconstruction, tree, False)
+                    orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles, args.orthophoto_resolution)
                 elif len(all_orthos_and_ortho_cuts) == 1:
                     # Simply copy
                     log.ODM_WARNING("A single orthophoto/cutline pair was found between all submodels.")

@@ -306,8 +309,6 @@ class ODMMergeStage(types.ODM_Stage):
                         if args.tiles:
                             generate_dem_tiles(dem_file, tree.path("%s_tiles" % human_name.lower()), args.max_concurrency, args.dem_resolution)

-                        add_raster_meta_tags(dem_file, reconstruction, tree, embed_gcp_meta=False)
-
                         if args.cog:
                             convert_to_cogeo(dem_file, max_workers=args.max_concurrency)
                     else:

@@ -336,15 +337,6 @@ class ODMMergeStage(types.ODM_Stage):
             else:
                 log.ODM_WARNING("Found merged shots.geojson in %s" % tree.odm_report)

-            # Merge cameras
-            cameras_json = tree.path("cameras.json")
-            if not io.file_exists(cameras_json) or self.rerun():
-                cameras_json_files = get_submodel_paths(tree.submodels_path, "cameras.json")
-                log.ODM_INFO("Merging %s cameras.json files" % len(cameras_json_files))
-                merge_cameras(cameras_json_files, cameras_json)
-            else:
-                log.ODM_WARNING("Found merged cameras.json in %s" % tree.root_path)
-
             # Stop the pipeline short by skipping to the postprocess stage.
             # Afterwards, we're done.
             self.next_stage = self.last_stage()

View file

@@ -1,18 +1,14 @@
 set ODMBASE=%~dp0
 set VIRTUAL_ENV=%ODMBASE%venv
-IF "%~1"=="" (set WRITABLE_VIRTUAL_ENV=%VIRTUAL_ENV%) ELSE (set WRITABLE_VIRTUAL_ENV=%~1)
-mkdir "%WRITABLE_VIRTUAL_ENV%"
+set PYENVCFG=%VIRTUAL_ENV%\pyvenv.cfg
+set SBBIN=%ODMBASE%SuperBuild\install\bin

 rem Hot-patching pyvenv.cfg
-set PYENVCFG=%WRITABLE_VIRTUAL_ENV%\pyvenv.cfg
-echo home = %VIRTUAL_ENV%\Scripts> "%PYENVCFG%"
+echo home = %ODMBASE%venv\Scripts> "%PYENVCFG%"
 echo include-system-site-packages = false>> "%PYENVCFG%"

 rem Hot-patching cv2 extension configs
-set SBBIN=%ODMBASE%SuperBuild\install\bin
-set CV2=%WRITABLE_VIRTUAL_ENV%\Lib\site-packages\cv2
-mkdir "%CV2%"
-echo BINARIES_PATHS = [r"%SBBIN%"] + BINARIES_PATHS> "%CV2%\config.py"
-echo PYTHON_EXTENSIONS_PATHS = [r'''%VIRTUAL_ENV%\lib\site-packages\cv2\python-3.8'''] + PYTHON_EXTENSIONS_PATHS> "%CV2%\config-3.8.py"
+echo BINARIES_PATHS = [r"%SBBIN%"] + BINARIES_PATHS> venv\Lib\site-packages\cv2\config.py
+echo PYTHON_EXTENSIONS_PATHS = [r'''%VIRTUAL_ENV%\lib\site-packages\cv2\python-3.8'''] + PYTHON_EXTENSIONS_PATHS> venv\Lib\site-packages\cv2\config-3.8.py

 cls