Compare commits

master..v3.5.3

No commits in common. "master" and "v3.5.3" have entirely different histories.

51 changed files with 182 additions and 793 deletions

View file

@@ -13,7 +13,6 @@ jobs:
with:
ghToken: ${{ secrets.GITHUB_TOKEN }}
openAI: ${{ secrets.OPENAI_TOKEN }}
model: gpt-4o-2024-08-06
filter: |
- "#"
variables: |

View file

@@ -9,7 +9,7 @@ on:
jobs:
build:
runs-on: windows-2022
runs-on: windows-2019
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -49,7 +49,7 @@ jobs:
run: |
python configure.py dist --code-sign-cert-path $env:CODE_SIGN_CERT_PATH
- name: Upload Setup File
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: Setup
path: dist\*.exe

View file

@@ -81,7 +81,7 @@ jobs:
run: |
python configure.py dist
- name: Upload Setup File
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: Setup
path: dist\*.exe

.gitignore vendored
View file

@@ -35,4 +35,3 @@ venv/
python38/
dist/
innosetup/
.DS_Store

View file

@@ -15,19 +15,12 @@ If you would rather not type commands in a shell and are looking for a friendly
## Quickstart
The easiest way to run ODM is via docker. To install docker, see [docs.docker.com](https://docs.docker.com). Once you have docker installed and [working](https://docs.docker.com/get-started/#test-docker-installation), you can get ODM by running from a Command Prompt / Terminal:
```bash
docker pull opendronemap/odm
```
Run ODM by placing some images (JPEGs, TIFFs or DNGs) in a folder named “images” (for example `C:\Users\youruser\datasets\project\images` or `/home/youruser/datasets/project/images`) and simply run from a Command Prompt / Terminal:
The easiest way to run ODM is via docker. To install docker, see [docs.docker.com](https://docs.docker.com). Once you have docker installed and [working](https://docs.docker.com/get-started/#test-docker-installation), you can run ODM by placing some images (JPEGs or TIFFs) in a folder named “images” (for example `C:\Users\youruser\datasets\project\images` or `/home/youruser/datasets/project/images`) and simply run from a Command Prompt / Terminal:
```bash
# Windows
docker run -ti --rm -v c:/Users/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
```
```bash
# Mac/Linux
docker run -ti --rm -v /home/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
```
@@ -95,7 +88,7 @@ run C:\Users\youruser\datasets\project [--additional --parameters --here]
ODM has support for doing SIFT feature extraction on a GPU, which is about 2x faster than the CPU on a typical consumer laptop. To use this feature, you need to use the `opendronemap/odm:gpu` docker image instead of `opendronemap/odm` and you need to pass the `--gpus all` flag:
```
docker run -ti --rm -v c:/Users/youruser/datasets:/datasets --gpus all opendronemap/odm:gpu --project-path /datasets project --feature-type sift
docker run -ti --rm -v c:/Users/youruser/datasets:/datasets --gpus all opendronemap/odm:gpu --project-path /datasets project
```
When you run ODM, if the GPU is recognized, in the first few lines of output you should see:

View file

@@ -184,7 +184,7 @@ set(custom_libs OpenSfM
externalproject_add(mve
GIT_REPOSITORY https://github.com/OpenDroneMap/mve.git
GIT_TAG 356
GIT_TAG 290
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/mve
CMAKE_ARGS ${WIN32_CMAKE_ARGS} ${APPLE_CMAKE_ARGS}
@@ -244,7 +244,7 @@ externalproject_add(dem2points
externalproject_add(odm_orthophoto
DEPENDS opencv
GIT_REPOSITORY https://github.com/OpenDroneMap/odm_orthophoto.git
GIT_TAG 355
GIT_TAG 353
PREFIX ${SB_BINARY_DIR}/odm_orthophoto
SOURCE_DIR ${SB_SOURCE_DIR}/odm_orthophoto
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}

View file

@@ -1,7 +1,7 @@
set(_proj_name obj2tiles)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
set(OBJ2TILES_VERSION v1.0.13)
set(OBJ2TILES_VERSION v1.0.12)
set(OBJ2TILES_EXT "")
set(OBJ2TILES_ARCH "Linux64")
@@ -9,7 +9,7 @@ if (WIN32)
set(OBJ2TILES_ARCH "Win64")
set(OBJ2TILES_EXT ".exe")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
set(OBJ2TILES_ARCH "LinuxArm64")
set(OBJ2TILES_ARCH "LinuxArm")
elseif(APPLE)
set(OBJ2TILES_ARCH "Osx64")
endif()

View file

@@ -53,7 +53,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS
GIT_TAG 355
GIT_TAG 320
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------

View file

@@ -25,7 +25,7 @@ ExternalProject_Add(${_proj_name}
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
GIT_TAG 355
GIT_TAG 352
#--Update/Patch step----------
UPDATE_COMMAND git submodule update --init --recursive
#--Configure step-------------

View file

@@ -1 +1 @@
3.5.6
3.5.3

View file

@@ -58,7 +58,7 @@ ensure_prereqs() {
if [[ "$UBUNTU_VERSION" == *"20.04"* ]]; then
echo "Enabling PPA for Ubuntu GIS"
sudo $APT_GET install -y -qq --no-install-recommends software-properties-common
sudo add-apt-repository ppa:ubuntugis/ppa
sudo add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
sudo $APT_GET update
fi

View file

@@ -1,4 +1,5 @@
import bpy
import materials_utils
def loadMesh(file):

View file

@@ -1,13 +0,0 @@
# DEM Blending
Blend sets of DEMs by calculating the Euclidean distance to null values and weighting the combination of elevation models. Based on the split-merge tool within ODM. A short sketch of the distance-weighting idea follows the usage example below.
Requirements:
* Directory full of DEMs (GeoTIFFs) to blend together
* NoData should be coded as a value of -9999
## Usage
```BASH
docker run -ti --rm -v /home/youruser/folder_with_dems:/input --entrypoint /code/contrib/dem-blend/dem-blend.py opendronemap/odm /input
```
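For intuition, here is a minimal sketch of the distance-weighting idea for two co-registered DEM grids of equal shape. This is an illustration under those assumptions, not the ODM implementation; `blend_dems` and its signature are made up for this example, and it uses scipy's `distance_transform_edt`:
```python
import numpy as np
from scipy.ndimage import distance_transform_edt

NODATA = -9999  # per the requirement above

def blend_dems(dem_a: np.ndarray, dem_b: np.ndarray) -> np.ndarray:
    """Distance-weighted blend of two co-registered DEM grids of equal shape."""
    valid_a = dem_a != NODATA
    valid_b = dem_b != NODATA
    # Euclidean distance from each valid cell to the nearest NoData cell:
    # cells deep inside a DEM get a high weight, cells near its edge a low one.
    w_a = distance_transform_edt(valid_a)
    w_b = distance_transform_edt(valid_b)
    total = w_a + w_b
    out = np.full(dem_a.shape, NODATA, dtype=np.float64)
    both = valid_a & valid_b & (total > 0)
    out[both] = (dem_a[both] * w_a[both] + dem_b[both] * w_b[both]) / total[both]
    # Where only one DEM has data, take it unweighted
    out[valid_a & ~valid_b] = dem_a[valid_a & ~valid_b]
    out[valid_b & ~valid_a] = dem_b[valid_b & ~valid_a]
    return out
```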

View file

@@ -1,30 +0,0 @@
#!/usr/bin/env python3
# Authors: Piero Toffanin, Stephen Mather
# License: AGPLv3
import os
import glob
import sys
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
import argparse
from opendm.dem import merge
parser = argparse.ArgumentParser(description='Merge and blend DEMs using OpenDroneMap\'s approach.')
parser.add_argument('input_dems',
type=str,
help='Path to input dems (.tif)')
args = parser.parse_args()
if not os.path.exists(args.input_dems):
print("%s does not exist" % args.input_dems)
exit(1)
output_dem = os.path.join(args.input_dems, 'merged_blended_dem.tif')
input_dem_path = os.path.join(args.input_dems, '*.tif')
input_dems = glob.glob(input_dem_path)
merge.euclidean_merge_dems(input_dems, output_dem=output_dem)

View file

@@ -1,19 +0,0 @@
# Fix Ply
Use this script to translate a modified PLY back into a format compatible with subsequent steps in ODM. Via Jaime Chacoff, https://community.opendronemap.org/t/edited-point-cloud-with-cloudcompare-wont-rerun-from-odm-meshing/21449/6
The basic idea is to process through ODM until the point cloud is created, use a third-party tool such as CloudCompare to edit the point cloud, and then continue processing in OpenDroneMap.
This useful bit of Python converts the PLY exported from CloudCompare back into a format compatible with continued processing in OpenDroneMap (a quick field-check sketch follows the steps below).
1. Run project in WebODM and add this to your settings: `end-with: odm-filterpoints`
1. Once complete, go to your NodeODM container and copy `/var/www/data/[Task ID]/odm-filterpoints` directory
1. Open CloudCompare and from `odm-filterpoints` directory you've copied, open `point_cloud.ply`
1. In the box that pops up, add a scalar field `vertex - views`
1. To see the actual colours again - select the point cloud, then in properties change colours from "Scalar field" to "RGB"
1. Make your changes to the point cloud
1. Compute normals (Edit > Normals > Compute)
1. Save PLY file as ASCII
1. Run Python file above to fix PLY file and convert to binary
1. Copy `odm_filterpoints` directory (or just `point_cloud.ply`) back into NodeODM container
1. Restart project in WebODM "From Meshing" (don't forget to edit settings to remove `end-with: odm-filterpoints` or it's not going to do anything).
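Before running the conversion script (included below in this diff), it can help to confirm that CloudCompare actually exported the views field. A minimal check with `plyfile`; `point_cloud_ascii.ply` is the name the script's `__main__` block expects next to itself:
```python
from plyfile import PlyData

# List the properties of each element in the exported ASCII PLY.
ply = PlyData.read("point_cloud_ascii.ply")
for element in ply.elements:
    print(element.name, element.data.dtype.names)
# Expect 'scalar_views' (CloudCompare prefixes imported scalar fields) or
# 'views' among the vertex properties; the script renames it to 'views'
# and casts it to uchar before writing the binary PLY.
```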

View file

@@ -1,68 +0,0 @@
import os
import logging
from plyfile import PlyData, PlyElement
import numpy as np
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def pcd_ascii_to_binary_ply(ply_file: str, binary_ply: str) -> None:
    """Converts ASCII PLY to binary, ensuring 'views' is present and of type uchar.
    Raises ValueError if neither 'scalar_views' nor 'views' is found.
    """
    try:
        logging.info(f"Reading ASCII PLY file: {ply_file}")
        ply_data: PlyData = PlyData.read(ply_file)
    except FileNotFoundError:
        logging.error(f"File not found: {ply_file}")
        return
    except Exception as e:
        logging.error(f"Error reading PLY file: {e}")
        return
    new_elements: list[PlyElement] = []
    for element in ply_data.elements:
        names = element.data.dtype.names
        if 'scalar_views' in names:
            source_field = 'scalar_views'  # CloudCompare prefixes imported scalar fields
        elif 'views' in names:
            source_field = 'views'
        else:
            raise ValueError("Neither 'scalar_views' nor 'views' found - did you import them when opening the file in CloudCompare?")
        # Structured arrays cannot add or delete fields in place, so build a
        # new array with the source field renamed to 'views' and stored as uchar
        new_dtype = np.dtype([('views' if n == source_field else n,
                               'u1' if n == source_field else element.data.dtype[n])
                              for n in names])
        new_data = np.empty(element.data.shape, dtype=new_dtype)
        for n in names:
            target = 'views' if n == source_field else n
            new_data[target] = element.data[n].astype('u1') if n == source_field else element.data[n]
        new_elements.append(PlyElement.describe(new_data, element.name))
    new_ply_data = PlyData(new_elements, text=False)
    try:
        logging.info(f"Writing binary PLY file: {binary_ply}")
        new_ply_data.write(binary_ply)
    except Exception as e:
        logging.error(f"Error writing PLY file: {e}")
        return
    logging.info("PLY conversion complete.")

if __name__ == '__main__':
    # Parameters
    base: str = os.path.dirname(os.path.abspath(__file__))
    ply_file: str = os.path.join(base, 'point_cloud_ascii.ply')
    binary_ply_file: str = os.path.join(base, 'point_cloud.ply')
    if not os.path.exists(ply_file):
        logging.error(f"Input file not found: {ply_file}")
        exit(1)  # Exit with error code
    try:
        pcd_ascii_to_binary_ply(ply_file, binary_ply_file)
    except ValueError as e:
        logging.error(f"PLY conversion failed: {e}")
        exit(1)  # Exit with error code to indicate failure

View file

@@ -4,7 +4,7 @@ import os
import PIL
from PIL import Image
from PIL import Image, ExifTags
import shutil

View file

@@ -12,6 +12,7 @@ import numpy as np
import numpy.ma as ma
import multiprocessing
import argparse
import functools
from skimage.draw import line
from opensfm import dataset

View file

@@ -1,64 +0,0 @@
# Plugin Time-SIFT
This script does Time-SIFT processing with ODM. Time-SIFT is a method for multi-temporal analysis without the need to co-register the data.
> D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in a single SfM block allows 3-D change detection with almost exclusively image information, ISPRS Journal of Photogrammetry and Remote Sensing, Volume 146, 2018, Pages 495-506, ISSN 0924-2716, doi: 10.1016/j.isprsjprs.2018.10.016
(https://doi.org/10.1016/j.isprsjprs.2018.10.016)
## Requirements
* ODM ! :-)
* subprocess
* json
* os
* shutil
* pathlib
* sys
* argparse
* textwrap
## Usage
### Provided example
Download or clone [this repo](https://forge.inrae.fr/Denis.Feurer/timesift-odm-data-example.git) to get example data.
Then execute
```
python Timesift_odm.py datasets --end-with odm_filterpoints
```
It should run the Time-SIFT processing on the downloaded example data, stopping after the point cloud filtering step.
In the destination dir, you should obtain new directories, ```0_before``` and ```1_after```, at the same level as the ```time-sift-block``` directory. These new directories contain all the results, natively co-registered.
You can then use [CloudCompare](https://cloudcompare.org/) to compute the distance between ```datasets/0_before/odm_filterpoints/point_cloud.ply``` and ```datasets/1_after/odm_filterpoints/point_cloud.ply``` and obtain this image showing the difference between the two 3D surfaces. Here, two soil samples were excavated, as can be seen in the image below.
![](https://forge.inrae.fr/Denis.Feurer/timesift-odm-data-example/-/raw/main/Example.png?ref_type=heads)
### Your own data
In your dataset directory (usually ```datasets```, but you may have chosen another name) you have to prepare a Time-SIFT project directory (default name: ```time-sift-block```, *can be tuned via a parameter*) that contains:
* ```images/```: a subdirectory with all images of all epochs. This directory name is fixed, as it is the one expected by ODM
* ```images_epochs.txt```: a file that has the same format as the file used by ODM's split and merge function. This file name *can be tuned via a parameter*.
The ```images_epochs.txt``` file has two columns: the first contains image names and the second contains the epoch name, as follows
```
DSC_0368.JPG 0_before
DSC_0369.JPG 0_before
DSC_0370.JPG 0_before
DSC_0389.JPG 1_after
DSC_0390.JPG 1_after
DSC_0391.JPG 1_after
```
Your directory, before running the script, should look like this (a small layout-check sketch follows the tree):
```
$PWD/datasets/
└── time-sift-block/
├── images/
└── images_epochs.txt
```
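As a quick sanity check before launching, here is a small sketch that verifies this layout. `check_timesift_layout` is a hypothetical helper written for this README, not part of the script below:
```python
from pathlib import Path

def check_timesift_layout(datasets="datasets", block="time-sift-block",
                          epochs_file="images_epochs.txt"):
    """Verify the Time-SIFT input layout described above."""
    root = Path(datasets) / block
    assert (root / "images").is_dir(), "missing images/ subdirectory"
    assert (root / epochs_file).is_file(), "missing " + epochs_file
    for line in (root / epochs_file).read_text().splitlines():
        if not line.strip():
            continue  # empty lines are skipped, as in the script
        image, epoch = line.split()
        assert (root / "images" / image).is_file(), image + " not found in images/"

check_timesift_layout()
```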
At the end of the script you obtain one directory per epoch (at the same level as the Time-SIFT project directory). Each directory is processed with the images of its epoch, and all results are natively co-registered thanks to the initial SfM step done with all images.
```
$PWD/datasets/
├── 0_before/
├── 1_after/
└── time-sift-block/
```

View file

@@ -1,167 +0,0 @@
# Script for Time-SIFT multi-temporal images alignment with ODM
#
# This is python script for ODM, based on the following publication :
#
# D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in a single SfM block allows 3-D change detection
# with almost exclusively image information, ISPRS Journal of Photogrammetry and Remote Sensing, Volume 146, 2018,
# Pages 495-506, ISSN 0924-2716, https://doi.org/10.1016/j.isprsjprs.2018.10.016.
import subprocess
import json
import os
import shutil
from pathlib import Path
import sys
import argparse
import textwrap
def main(argv):
# Parsing and checking args
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\
Timesift_odm.py datasetdir [-t <timesift-dir>] [-i <imageepochs-file>] [<options passed to ODM>]
you can add options passed to ODM, for instance [--end-with odm_filterpoints] so that the final step is point clouds
these options are not checked for before the final runs of each epoch, so use it carefully
'''))
parser.add_argument('datasetdir', help='dataset directory')
parser.add_argument('-t', '--timesift-dir',
help='Time-SIFT directory ; default value : "time-sift-block" # must be in the datasetdir')
parser.add_argument('-i', '--imageepochs-file',
help='Text file describing epochs ; default value : "images_epochs.txt" # must be in the TIMESIFT_DIR ')
args, additional_options_to_rerun = parser.parse_known_args()
datasets_DIR = Path(args.datasetdir).absolute().as_posix()
if args.timesift_dir:
timesift_DIR = args.timesift_dir
else:
timesift_DIR = 'time-sift-block'
if args.imageepochs_file:
images_epochs_file = args.imageepochs_file
else:
images_epochs_file = 'images_epochs.txt'
if '-h' in sys.argv or '--help' in sys.argv:
parser.print_help()
sys.exit()
if additional_options_to_rerun: # for instance, --end-with odm_filterpoints
print(f'[Time-SIFT] Options passed to ODM for the final steps: {additional_options_to_rerun}')
print(f'[Time-SIFT] \033[93mWARNING there is no check of these options done before the last ODM call\033[0m')
def check_path_args(var: Path):
if not var.exists():
print(
f'\033[91m[Time-SIFT] ERROR: the {var.as_posix()} directory does not exist. Exiting program\033[0m')
exit()
check_path_args(Path(datasets_DIR))
check_path_args(Path(datasets_DIR, timesift_DIR))
check_path_args(Path(datasets_DIR, timesift_DIR, images_epochs_file))
def clean_reconstruction_dict(subdict, key, images):
"""
Delete subdict elements whose keys do not match any name in the images list.
Used to create the {epoch} block with only the images of this epoch.
"""
# The list of valid images is prepared by removing any extension (to be robust to the .tif added by ODM)
valid_images = {os.path.basename(image).split(os.extsep)[0] for image in images}
for item_key in list(subdict[key]):
image_name = os.path.basename(item_key).split(os.extsep)[0]
if image_name not in valid_images:
del subdict[key][item_key]
### Read images.txt file and create a dict of images/epochs
images_epochs_dict = {}
with open(Path(datasets_DIR, timesift_DIR, images_epochs_file), 'r') as f:
for line in f:
line = line.strip()
if not line:
continue # Empty lines are skipped
image, epoch = line.split()
if epoch not in images_epochs_dict:
images_epochs_dict[epoch] = []
images_epochs_dict[epoch].append(image)
### Check for existing epochs directories before computing anything (these directories must be deleted by hand)
path_exists_error = False
for epoch in images_epochs_dict:
if Path(datasets_DIR, epoch).exists():
if path_exists_error:
print(f"sudo rm -rf {Path(datasets_DIR, epoch).as_posix()}")
else:
print(f'\033[91m[Time-SIFT] ERROR: {Path(datasets_DIR, epoch).as_posix()} already exists.\033[0m')
print(f" Other epochs probably also exist.")
print(
f" The problem is \033[93mI CAN'T\033[0m delete it by myself, it requires root privileges.")
print(
f" The good news is \033[92mYOU CAN\033[0m do it with the following command (be careful).")
print(f'\033[91m => Consider doing it (at your own risks). Exiting program\033[0m')
print(f"- Commands to copy/paste (I'm kind, I prepared all the necessary commands for you).")
print(f"sudo rm -rf {Path(datasets_DIR, epoch).as_posix()}")
path_exists_error = True
if path_exists_error:
exit()
### LAUNCH global alignment (Time-SIFT multitemporal block)
try:
subprocess.run(['docker', 'run', '-i', '--rm', '-v', datasets_DIR + ':/datasets',
'opendronemap/odm', '--project-path', '/datasets', timesift_DIR, '--end-with', 'opensfm'])
except:
print(f'\033[91m[Time-SIFT] ERROR: {sys.exc_info()[0]}\033[0m')
exit()
print('\033[92m[Time-SIFT] Sfm on multi-temporal block done\033[0m')
print('[Time-SIFT] Going to dense matching on all epochs...')
### Loop on epochs for the dense matching
for epoch in images_epochs_dict:
#### We first duplicate the time-sift multitemporal block to save sfm results
shutil.copytree(Path(datasets_DIR, timesift_DIR),
Path(datasets_DIR, epoch))
#### Reads the datasets/{epoch}/opensfm/undistorted/reconstruction.json file that has to be modified
with open(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'), mode="r",
encoding="utf-8") as read_file:
reconstruction_dict = json.load(read_file)
#### Removes images in this json dict (we delete the shot and the rig_instances that do not correspond to this epoch)
images = images_epochs_dict[epoch]
clean_reconstruction_dict(reconstruction_dict[0], 'shots', images)
clean_reconstruction_dict(reconstruction_dict[0], 'rig_instances', images)
#### Makes a backup of the reconstruction.json file and writes the modified json
shutil.copy(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'),
Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json.bak'))
with open(Path(datasets_DIR, epoch, 'opensfm', 'undistorted', 'reconstruction.json'), mode="w",
encoding="utf-8") as write_file:
json.dump(reconstruction_dict, write_file)
#### Launches dense matching from the good previous step, with possible options (e.g. => to stop at the point clouds)
command_rerun = ['docker', 'run', '-i', '--rm', '-v', datasets_DIR + ':/datasets',
'opendronemap/odm',
'--project-path', '/datasets', epoch,
'--rerun-from', 'openmvs']
if additional_options_to_rerun:
print(f'[Time-SIFT] Epoch {epoch}: Rerun with additional options: {additional_options_to_rerun}')
command_rerun.extend(additional_options_to_rerun)
else:
print(f'[Time-SIFT] Epoch {epoch}: Default full rerun')
result = subprocess.run(command_rerun)
if result.returncode != 0:
print(f'\033[91m[Time-SIFT] ERROR in processing epoch {epoch}\033[0m')
print(f'{result=}')
exit(result.returncode)
print(f'\033[92m[Time-SIFT] Epoch {epoch} finished\033[0m')
print('§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§')
print('§§§ §§ §§§ §§§§§ §§§ §§§§§§§§§§ §§ §§ §§ §§§')
print('§§§§§ §§§§ §§§ §§§ §§§ §§§§§§§§§§§§§ §§§§§§ §§ §§§§§§§§ §§§§§')
print('§§§§§ §§§§ §§§ § §§§ §§§§ §§§§§ §§§§ §§ §§§§§§ §§§§§')
print('§§§§§ §§§§ §§§ §§§§§ §§§ §§§§§§§§§§§§§§§§§ §§ §§ §§§§§§§§ §§§§§')
print('§§§§§ §§§§ §§§ §§§§§ §§§ §§§§§§§§§ §§§ §§ §§§§§§§§ §§§§§')
print('§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§')
print(' \033[92mTime-SIFT with ODM finished, congrats !\033[0m Want to cite the method ?')
print('=> D. Feurer, F. Vinatier, Joining multi-epoch archival aerial images in ')
print(' a single SfM block allows 3-D change detection with almost exclusively')
print(' image information, ISPRS Journal of Photogrammetry and Remote Sensing,')
print(' 2018, https://doi.org/10.1016/j.isprsjprs.2018.10.016 ')
if __name__ == "__main__":
main(sys.argv[1:])

View file

@@ -1,8 +1,8 @@
FROM nvidia/cuda:11.8.0-devel-ubuntu20.04 AS builder
FROM nvidia/cuda:11.2.2-devel-ubuntu20.04 AS builder
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"
# Prepare directories
@@ -21,11 +21,12 @@ RUN bash configure.sh clean
### Use a second image for the final asset to reduce the number and
# size of the layers.
FROM nvidia/cuda:11.8.0-runtime-ubuntu20.04
FROM nvidia/cuda:11.2.2-runtime-ubuntu20.04
#FROM nvidia/cuda:11.2.0-devel-ubuntu20.04
# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONPATH="/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib" \
PDAL_DRIVER_PATH="/code/SuperBuild/install/bin"
@@ -37,7 +38,6 @@ COPY --from=builder /code /code
# Copy the Python libraries installed via pip from the builder
COPY --from=builder /usr/local /usr/local
#COPY --from=builder /usr/lib/x86_64-linux-gnu/libavcodec.so.58 /usr/lib/x86_64-linux-gnu/libavcodec.so.58
RUN apt-get update -y \
&& apt-get install -y ffmpeg libtbb2
# Install shared libraries that we depend on via APT, but *not*

View file

@@ -4,25 +4,6 @@ from opendm import log
import zipfile
import time
import sys
import rawpy
import cv2
def read_image(img_path):
if img_path[-4:].lower() in [".dng", ".raw", ".nef"]:
try:
with rawpy.imread(img_path) as r:
img = r.postprocess(output_bps=8, use_camera_wb=True, use_auto_wb=False)
except:
return None
else:
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if img is None:
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def get_model(namespace, url, version, name = "model.onnx"):
version = version.replace(".", "_")

View file

@@ -5,7 +5,6 @@ import cv2
import os
import onnxruntime as ort
from opendm import log
from opendm.ai import read_image
from threading import Lock
mutex = Lock()
@@ -74,7 +73,11 @@ class BgFilter():
return output
def run_img(self, img_path, dest):
img = read_image(img_path)
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if img is None:
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = self.get_mask(img)
img_name = os.path.basename(img_path)

View file

@@ -41,7 +41,6 @@ rerun_stages = {
'geo': 'dataset',
'gltf': 'mvs_texturing',
'gps_accuracy': 'dataset',
'gps_z_offset': 'dataset',
'help': None,
'ignore_gsd': 'opensfm',
'matcher_neighbors': 'opensfm',
@@ -786,12 +785,11 @@ def config(argv=None, parser=None):
action=StoreValue,
metavar='<positive integer>',
default=150,
help='Radius of the overlap between submodels in meters. '
help='Radius of the overlap between submodels. '
'After grouping images into clusters, images '
'that are closer than this radius to a cluster '
'are added to the cluster. This is done to ensure '
'that neighboring submodels overlap. All images ' \
'need GPS information. Default: %(default)s')
'that neighboring submodels overlap. Default: %(default)s')
parser.add_argument('--split-image-groups',
metavar='<path string>',
@@ -847,16 +845,6 @@
'set accordingly. You can use this option to manually set it in case the reconstruction '
'fails. Lowering this option can sometimes help control bowling-effects over large areas. Default: %(default)s')
parser.add_argument('--gps-z-offset',
type=float,
action=StoreValue,
metavar='<float>',
default=0,
help='Set a GPS offset in meters for the vertical axis (Z) '
'by adding it to the altitude value of the GPS EXIF data. This does not change the value of any GCPs. '
'This can be useful for example when adjusting from ellipsoidal to orthometric height. '
'Default: %(default)s')
parser.add_argument('--optimize-disk-space',
action=StoreTrue,
nargs=0,

View file

@@ -40,7 +40,7 @@ odm_orthophoto_path = os.path.join(superbuild_bin_path, "odm_orthophoto")
settings_path = os.path.join(root_path, 'settings.yaml')
# Define supported image extensions
supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp', '.raw', '.dng', '.nef'}
supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp'}
supported_video_extensions = {'.mp4', '.mov', '.lrv', '.ts'}
# Define the number of cores

View file

@@ -1,5 +1,4 @@
import os
import subprocess
import sys
import rasterio
import numpy
@@ -21,8 +20,6 @@ from opendm import log
from .ground_rectification.rectify import run_rectification
from . import pdal
gdal_proximity = None
try:
# GDAL >= 3.3
from osgeo_utils.gdal_proximity import main as gdal_proximity
@@ -30,13 +27,8 @@ except ModuleNotFoundError:
# GDAL <= 3.2
try:
from osgeo.utils.gdal_proximity import main as gdal_proximity
except ModuleNotFoundError:
# GDAL <= 3.0
gdal_proximity_script = shutil.which("gdal_proximity.py")
if gdal_proximity_script is not None:
def gdal_proximity(args):
subprocess.run([gdal_proximity_script] + args[1:], check=True)
except:
pass
def classify(lasFile, scalar, slope, threshold, window):
start = datetime.now()

View file

@@ -1,6 +1,6 @@
from PIL import Image
import cv2
import rawpy
from opendm import log
Image.MAX_IMAGE_PIXELS = None
@@ -9,18 +9,12 @@ def get_image_size(file_path, fallback_on_error=True):
"""
Return (width, height) for a given img file
"""
try:
if file_path[-4:].lower() in [".dng", ".raw", ".nef"]:
with rawpy.imread(file_path) as img:
s = img.sizes
width, height = s.raw_width, s.raw_height
else:
with Image.open(file_path) as img:
width, height = img.size
with Image.open(file_path) as img:
width, height = img.size
except Exception as e:
if fallback_on_error:
log.ODM_WARNING("Cannot read %s with image library, fallback to cv2: %s" % (file_path, str(e)))
log.ODM_WARNING("Cannot read %s with PIL, fallback to cv2: %s" % (file_path, str(e)))
img = cv2.imread(file_path)
width = img.shape[1]
height = img.shape[0]

View file

@@ -6,7 +6,6 @@ import numpy as np
import pygltflib
from opendm import system
from opendm import io
from opendm import log
warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)

View file

@@ -19,17 +19,14 @@ def has_popsift_and_can_handle_texsize(width, height):
log.ODM_INFO("CUDA compute platform is not supported (detected: %s.%s but we need at least 3.5)" % (compute_major, compute_minor))
return False
except Exception as e:
log.ODM_WARNING(str(e))
log.ODM_INFO("Using CPU for feature extraction: %s" % str(e))
return False
try:
from opensfm import pypopsift
if pypopsift.fits_texture(int(width * 1.02), int(height * 1.02)):
log.ODM_INFO("popsift can handle texture size %dx%d" % (width, height))
return True
else:
log.ODM_INFO("popsift cannot handle texture size %dx%d" % (width, height))
return False
return pypopsift.fits_texture(int(width * 1.02), int(height * 1.02))
except (ModuleNotFoundError, ImportError):
return False
except Exception as e:
log.ODM_WARNING(str(e))
return False
@@ -84,12 +81,11 @@ def has_gpu(args):
log.ODM_INFO("CUDA drivers detected")
return True
else:
log.ODM_INFO("No CUDA drivers detected")
log.ODM_INFO("No CUDA drivers detected, using CPU")
return False
else:
if shutil.which('nvidia-smi') is not None:
log.ODM_INFO("nvidia-smi detected")
return True
else:
log.ODM_INFO("No nvidia-smi detected")
return False

View file

@@ -5,7 +5,6 @@ import math
from repoze.lru import lru_cache
from opendm import log
from opendm.shots import get_origin
from scipy import spatial
def rounded_gsd(reconstruction_json, default_value=None, ndigits=0, ignore_gsd=False):
"""
@@ -112,11 +111,15 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
with open(reconstruction_json) as f:
data = json.load(f)
# Calculate median height from sparse reconstruction
reconstruction = data[0]
points = np.array([reconstruction['points'][pointId]['coordinates'] for pointId in reconstruction['points']])
tdpoints = points.copy()
tdpoints[:,2] = 0
tree = spatial.cKDTree(tdpoints)
point_heights = []
for pointId in reconstruction['points']:
point = reconstruction['points'][pointId]
point_heights.append(point['coordinates'][2])
ground_height = np.median(point_heights)
gsds = []
for shotImage in reconstruction['shots']:
@@ -130,16 +133,9 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json)
return None
shot_origin[2] = 0
distances, neighbors = tree.query(
shot_origin, k=9
)
if len(distances) > 0:
ground_height = np.median(points[neighbors][:,2])
gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
shot_height - ground_height,
camera['width']))
gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
shot_height - ground_height,
camera['width']))
if len(gsds) > 0:
mean = np.mean(gsds)
@@ -149,6 +145,7 @@ def opensfm_reconstruction_average_gsd(reconstruction_json, use_all_shots=False)
return None
def calculate_gsd(sensor_width, flight_height, focal_length, image_width):
"""
:param sensor_width in millimeters
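For reference, these helpers implement the usual photogrammetric ground sampling distance relation, GSD = (sensor width × flight height) / (focal length × image width). A worked example with illustrative numbers (typical small-drone camera values, not taken from this code):
```python
# All of these values are example inputs, not ODM defaults.
sensor_width = 13.2    # mm
focal_length = 8.8     # mm
flight_height = 100.0  # m above ground
image_width = 5472     # px

# Convert mm to m so the units cancel to meters per pixel.
gsd = (sensor_width / 1000.0) * flight_height / ((focal_length / 1000.0) * image_width)
print("GSD: %.2f cm/px" % (gsd * 100))  # ~2.74 cm/px
```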

View file

@@ -6,7 +6,6 @@ import datetime
import dateutil.parser
import shutil
import multiprocessing
from repoze.lru import lru_cache
from opendm.arghelpers import double_quote, args_to_dict
from vmem import virtual_memory
@@ -31,7 +30,6 @@ else:
lock = threading.Lock()
@lru_cache(maxsize=None)
def odm_version():
with open(os.path.join(os.path.dirname(__file__), "..", "VERSION")) as f:
return f.read().split("\n")[0].strip()

View file

@@ -274,9 +274,6 @@ def compute_band_maps(multi_camera, primary_band):
if filename_without_band == p.filename:
raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly." % p.filename)
if not filename_without_band in filename_map:
raise Exception("Cannot match bands by filename on %s, make sure to name your files [filename]_band[.ext] uniformly, check that your images have the appropriate CaptureUUID XMP tag and that no images are missing." % p.filename)
s2p[p.filename] = filename_map[filename_without_band]
if band['name'] != band_name:

View file

@@ -13,9 +13,7 @@ from rasterio.mask import mask
from opendm import io
from opendm.tiles.tiler import generate_orthophoto_tiles
from opendm.cogeo import convert_to_cogeo
from opendm.utils import add_raster_meta_tags
from osgeo import gdal
from osgeo import ogr
def get_orthophoto_vars(args):
@@ -45,50 +43,33 @@ def generate_png(orthophoto_file, output_file=None, outsize=None):
output_file = base + '.png'
# See if we need to select top three bands
params = []
bandparam = ""
try:
gtif = gdal.Open(orthophoto_file)
gtif = gdal.Open(orthophoto_file)
if gtif.RasterCount > 4:
bands = []
for idx in range(1, gtif.RasterCount+1):
bands.append(gtif.GetRasterBand(idx).GetColorInterpretation())
bands = dict(zip(bands, range(1, len(bands)+1)))
if gtif.RasterCount >= 3:
try:
red = bands.get(gdal.GCI_RedBand)
green = bands.get(gdal.GCI_GreenBand)
blue = bands.get(gdal.GCI_BlueBand)
if red is None or green is None or blue is None:
params.append("-b 1 -b 2 -b 3")
else:
params.append("-b %s -b %s -b %s" % (red, green, blue))
elif gtif.RasterCount <= 2:
params.append("-b 1")
raise Exception("Cannot find bands")
alpha = bands.get(gdal.GCI_AlphaBand)
if alpha is not None:
params.append("-b %s" % alpha)
else:
params.append("-a_nodata 0")
dtype = gtif.GetRasterBand(1).DataType
if dtype != gdal.GDT_Byte:
params.append("-ot Byte")
if gtif.RasterCount >= 3:
params.append("-scale_1 -scale_2 -scale_3")
elif gtif.RasterCount <= 2:
params.append("-scale_1")
gtif = None
except Exception as e:
log.ODM_WARNING("Cannot read orthophoto information for PNG generation: %s" % str(e))
bandparam = "-b %s -b %s -b %s -a_nodata 0" % (red, green, blue)
except:
bandparam = "-b 1 -b 2 -b 3 -a_nodata 0"
gtif = None
osparam = ""
if outsize is not None:
params.append("-outsize %s 0" % outsize)
osparam = "-outsize %s 0" % outsize
system.run('gdal_translate -of png "%s" "%s" %s '
'-co WORLDFILE=YES '
'--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, " ".join(params), get_max_memory()))
system.run('gdal_translate -of png "%s" "%s" %s %s '
'--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, osparam, bandparam, get_max_memory()))
def generate_kmz(orthophoto_file, output_file=None, outsize=None):
if output_file is None:
@@ -104,70 +85,7 @@ def generate_kmz(orthophoto_file, output_file=None, outsize=None):
system.run('gdal_translate -of KMLSUPEROVERLAY -co FORMAT=PNG "%s" "%s" %s '
'--config GDAL_CACHEMAX %s%% ' % (orthophoto_file, output_file, bandparam, get_max_memory()))
def generate_extent_polygon(orthophoto_file):
"""Function to return the orthophoto extent as a polygon into a gpkg file
Args:
orthophoto_file (str): the path to orthophoto file
"""
base, ext = os.path.splitext(orthophoto_file)
output_file = base + '_extent.dxf'
try:
gtif = gdal.Open(orthophoto_file)
srs = gtif.GetSpatialRef()
geoTransform = gtif.GetGeoTransform()
# calculate the coordinates
minx = geoTransform[0]
maxy = geoTransform[3]
maxx = minx + geoTransform[1] * gtif.RasterXSize
miny = maxy + geoTransform[5] * gtif.RasterYSize
# create polygon in wkt format
poly_wkt = "POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
# create vector file
# just the DXF to support AutoCAD users
# to load the geotiff raster correctly.
driver = ogr.GetDriverByName("DXF")
ds = driver.CreateDataSource(output_file)
layer = ds.CreateLayer("extent", srs, ogr.wkbPolygon)
# create the feature and set values
featureDefn = layer.GetLayerDefn()
feature = ogr.Feature(featureDefn)
feature.SetGeometry(ogr.CreateGeometryFromWkt(poly_wkt))
# add feature to layer
layer.CreateFeature(feature)
# save and close everything
feature = None
ds = None
gtif = None
log.ODM_INFO("Wrote %s" % output_file)
except Exception as e:
log.ODM_WARNING("Cannot create extent layer for %s: %s" % (orthophoto_file, str(e)))
def generate_tfw(orthophoto_file):
base, ext = os.path.splitext(orthophoto_file)
tfw_file = base + '.tfw'
try:
with rasterio.open(orthophoto_file) as ds:
t = ds.transform
with open(tfw_file, 'w') as f:
# rasterio affine value order taken from
# https://mharty3.github.io/til/GIS/raster-affine-transforms/
f.write("\n".join([str(v) for v in [t.a, t.d, t.b, t.e, t.c, t.f]]) + "\n")
log.ODM_INFO("Wrote %s" % tfw_file)
except Exception as e:
log.ODM_WARNING("Cannot create .tfw for %s: %s" % (orthophoto_file, str(e)))
def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution, reconstruction, tree, embed_gcp_meta=False):
def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_tiles_dir, resolution):
if args.crop > 0 or args.boundary:
Cropper.crop(bounds_file_path, orthophoto_file, get_orthophoto_vars(args), keep_original=not args.optimize_disk_space, warp_options=['-dstalpha'])
@@ -180,17 +98,12 @@ def post_orthophoto_steps(args, bounds_file_path, orthophoto_file, orthophoto_ti
if args.orthophoto_kmz:
generate_kmz(orthophoto_file)
add_raster_meta_tags(orthophoto_file, reconstruction, tree, embed_gcp_meta=embed_gcp_meta)
if args.tiles:
generate_orthophoto_tiles(orthophoto_file, orthophoto_tiles_dir, args.max_concurrency, resolution)
if args.cog:
convert_to_cogeo(orthophoto_file, max_workers=args.max_concurrency, compression=args.orthophoto_compression)
generate_extent_polygon(orthophoto_file)
generate_tfw(orthophoto_file)
def compute_mask_raster(input_raster, vector_mask, output_raster, blend_distance=20, only_max_coords_feature=False):
if not os.path.exists(input_raster):
log.ODM_WARNING("Cannot mask raster, %s does not exist" % input_raster)

View file

@@ -296,23 +296,19 @@ class OSFMContext:
config.append("matcher_type: %s" % osfm_matchers[matcher_type])
# GPU acceleration?
if feature_type == "SIFT":
log.ODM_INFO("Checking for GPU as using SIFT for extracting features")
if has_gpu(args) and max_dims is not None:
w, h = max_dims
if w > h:
h = int((h / w) * feature_process_size)
w = int(feature_process_size)
else:
w = int((w / h) * feature_process_size)
h = int(feature_process_size)
if has_gpu(args) and max_dims is not None:
w, h = max_dims
if w > h:
h = int((h / w) * feature_process_size)
w = int(feature_process_size)
else:
w = int((w / h) * feature_process_size)
h = int(feature_process_size)
if has_popsift_and_can_handle_texsize(w, h):
log.ODM_INFO("Using GPU for extracting SIFT features")
feature_type = "SIFT_GPU"
self.gpu_sift_feature_extraction = True
else:
log.ODM_INFO("Using CPU for extracting SIFT features as texture size is too large or GPU SIFT is not available")
if has_popsift_and_can_handle_texsize(w, h) and feature_type == "SIFT":
log.ODM_INFO("Using GPU for extracting SIFT features")
feature_type = "SIFT_GPU"
self.gpu_sift_feature_extraction = True
config.append("feature_type: %s" % feature_type)

View file

@@ -21,17 +21,6 @@ from opensfm.geo import ecef_from_lla
projections = ['perspective', 'fisheye', 'fisheye_opencv', 'brown', 'dual', 'equirectangular', 'spherical']
def find_mean_utc_time(photos):
utc_times = []
for p in photos:
if p.utc_time is not None:
utc_times.append(p.utc_time / 1000.0)
if len(utc_times) == 0:
return None
return np.mean(utc_times)
def find_largest_photo_dims(photos):
max_mp = 0
max_dims = None
@@ -786,12 +775,6 @@ class ODM_Photo:
def override_gps_dop(self, dop):
self.gps_xy_stddev = self.gps_z_stddev = dop
def adjust_z_offset(self, z_offset):
if self.altitude is not None:
self.altitude += z_offset
else:
self.altitude = z_offset
def override_camera_projection(self, camera_projection):
if camera_projection in projections:
self.camera_projection = camera_projection

View file

@@ -327,7 +327,7 @@ def post_point_cloud_steps(args, tree, rerun=False):
tree.odm_georeferencing_model_laz,
tree.odm_georeferencing_model_las))
else:
log.ODM_WARNING("Found existing LAS file %s" % tree.odm_georeferencing_model_las)
log.ODM_WARNING("Found existing LAS file %s" % tree.odm_georeferencing_xyz_file)
# EPT point cloud output
if args.pc_ept:

View file

@@ -22,12 +22,6 @@ RS_DATABASE = {
'hasselblad l2d-20c': 16.6, # DJI Mavic 3 (not enterprise version)
'dji fc3582': lambda p: 26 if p.get_capture_megapixels() < 48 else 60, # DJI Mini 3 pro (at 48MP readout is 60ms, at 12MP it's 26ms)
'dji fc8482': lambda p: (
16 if p.get_capture_megapixels() < 12 else # 12MP 16:9 mode (actual 9.1MP)
21 if p.get_capture_megapixels() < 20 else # 12MP 4:3 mode (actual 12.2MP)
43 if p.get_capture_megapixels() < 45 else # 48MP 16:9 mode (actual 36.6MP)
58 # 48MP 4:3 mode (actual 48.8MP)
), # DJI Mini 4 Pro (readout varies by resolution and aspect ratio, image heights all different)
'dji fc350': 30, # Inspire 1

View file

@@ -148,16 +148,3 @@ def merge_geojson_shots(geojson_shots_files, output_geojson_file):
with open(output_geojson_file, "w") as f:
f.write(json.dumps(result))
def merge_cameras(cameras_json_files, output_cameras_file):
result = {}
for cameras_file in cameras_json_files:
with open(cameras_file, "r") as f:
cameras = json.loads(f.read())
for cam_id in cameras:
if not cam_id in result:
result[cam_id] = cameras[cam_id]
with open(output_cameras_file, "w") as f:
f.write(json.dumps(result))

View file

@@ -6,7 +6,6 @@ import os
import onnxruntime as ort
from .guidedfilter import guided_filter
from opendm import log
from opendm.ai import read_image
from threading import Lock
mutex = Lock()
@@ -73,7 +72,11 @@ class SkyFilter():
def run_img(self, img_path, dest):
img = read_image(img_path)
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if img is None:
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.array(img / 255., dtype=np.float32)
mask = self.get_mask(img)

View file

@@ -39,7 +39,7 @@ def generate_colored_hillshade(geotiff):
system.run('gdaldem color-relief "%s" "%s" "%s" -alpha -co ALPHA=YES' % (geotiff, relief_file, colored_dem))
system.run('gdaldem hillshade "%s" "%s" -z 1.0 -s 1.0 -az 315.0 -alt 45.0' % (geotiff, hillshade_dem))
system.run('"%s" "%s" "%s" "%s" "%s"' % (sys.executable, hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))
system.run('%s "%s" "%s" "%s" "%s"' % (sys.executable, hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))
return outputs
except Exception as e:

View file

@@ -1,12 +1,8 @@
import os, shutil
import numpy as np
import json
import rasterio
from osgeo import gdal
from datetime import datetime
from opendm import log
from opendm.photo import find_largest_photo_dims, find_mean_utc_time
from opendm.photo import find_largest_photo_dims
from osgeo import gdal
from opendm.arghelpers import double_quote
@@ -118,41 +114,3 @@ def np_to_json(arr):
def np_from_json(json_dump):
return np.asarray(json.loads(json_dump))
def add_raster_meta_tags(raster, reconstruction, tree, embed_gcp_meta=True):
try:
if os.path.isfile(raster):
mean_capture_time = find_mean_utc_time(reconstruction.photos)
mean_capture_dt = None
if mean_capture_time is not None:
mean_capture_dt = datetime.fromtimestamp(mean_capture_time).strftime('%Y:%m:%d %H:%M:%S') + '+00:00'
log.ODM_INFO("Adding TIFFTAGs to {}".format(raster))
with rasterio.open(raster, 'r+') as rst:
if mean_capture_dt is not None:
rst.update_tags(TIFFTAG_DATETIME=mean_capture_dt)
rst.update_tags(TIFFTAG_SOFTWARE='ODM {}'.format(log.odm_version()))
if embed_gcp_meta:
# Embed GCP info in 2D results via
# XML metadata fields
gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
if reconstruction.has_gcp() and os.path.isfile(gcp_gml_export_file):
gcp_xml = ""
with open(gcp_gml_export_file) as f:
gcp_xml = f.read()
ds = gdal.Open(raster)
if ds is not None:
if ds.GetMetadata('xml:GROUND_CONTROL_POINTS') is None or self.rerun():
ds.SetMetadata(gcp_xml, 'xml:GROUND_CONTROL_POINTS')
ds = None
log.ODM_INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % raster)
else:
log.ODM_WARNING("Already embedded ground control point information")
else:
log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % raster)
except Exception as e:
log.ODM_WARNING("Cannot write raster meta tags to %s: %s" % (raster, str(e)))

View file

@@ -120,7 +120,6 @@ class SrtFileParser:
# <font size="36">SrtCnt : 1, DiffTime : 16ms
# 2023-01-06 18:56:48,380,821
# [iso : 3200] [shutter : 1/60.0] [fnum : 280] [ev : 0] [ct : 3925] [color_md : default] [focal_len : 240] [latitude: 0.000000] [longitude: 0.000000] [altitude: 0.000000] </font>
# </font>
# DJI Mavic Mini
# 1
@@ -165,10 +164,9 @@
end = None
for line in f:
# Remove html tags, spaces
line = re.sub('<[^<]+?>', '', line).strip()
if not line:
# Check if line is empty
if not line.strip():
if start is not None:
self.data.append({
"start": start,
@@ -195,6 +193,9 @@
continue
# Remove html tags
line = re.sub('<[^<]+?>', '', line)
# Search this "00:00:00,000 --> 00:00:00,016"
match = re.search("(\d{2}:\d{2}:\d{2},\d+) --> (\d{2}:\d{2}:\d{2},\d+)", line)
if match:

View file

@@ -23,7 +23,6 @@ rasterio==1.2.3 ; sys_platform == 'linux'
rasterio==1.3.6 ; sys_platform == 'darwin'
https://github.com/OpenDroneMap/windows-deps/raw/main/rasterio-1.2.3-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
https://github.com/OpenDroneMap/windows-deps/raw/main/GDAL-3.2.3-cp38-cp38-win_amd64.whl ; sys_platform == 'win32'
odmrawpy==0.24.1
repoze.lru==0.7
scikit-learn==1.1.1
Pywavelets==1.3.0
@@ -33,7 +32,7 @@ xmltodict==0.12.0
fpdf2==2.4.6
Shapely==1.7.1
onnxruntime==1.12.1
pygltflib==1.16.5
pygltflib==1.15.3
codem==0.24.0
trimesh==3.17.1
pandas==1.5.2

View file

@@ -187,9 +187,6 @@ class ODMLoadDatasetStage(types.ODM_Stage):
p.compute_opk()
updated += 1
log.ODM_INFO("Updated %s image positions" % updated)
# Warn if a file path is specified but it does not exist
elif tree.odm_geo_file is not None and not os.path.isfile(tree.odm_geo_file):
log.ODM_WARNING("Image geolocation file %s does not exist" % tree.odm_geo_file)
# GPSDOP override if we have GPS accuracy information (such as RTK)
if 'gps_accuracy_is_set' in args:
@@ -234,9 +231,9 @@
item['p'].set_mask(os.path.basename(mask_file))
log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
else:
log.ODM_WARNING("Cannot generate mask for %s" % item['file'])
log.ODM_WARNING("Cannot generate mask for %s" % img)
except Exception as e:
log.ODM_WARNING("Cannot generate mask for %s: %s" % (item['file'], str(e)))
log.ODM_WARNING("Cannot generate mask for %s: %s" % (img, str(e)))
parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency)
@@ -338,10 +335,3 @@
if args.rolling_shutter and not reconstruction.is_georeferenced():
log.ODM_WARNING("Reconstruction is not georeferenced, disabling rolling shutter correction")
args.rolling_shutter = False
# GPS Z offset
if 'gps_z_offset_is_set' in args:
log.ODM_INFO("Adjusting GPS Z offset by %s for all images" % args.gps_z_offset)
for p in photos:
p.adjust_z_offset(args.gps_z_offset)

View file

@@ -12,8 +12,6 @@ from opendm.cropper import Cropper
from opendm import pseudogeo
from opendm.tiles.tiler import generate_dem_tiles
from opendm.cogeo import convert_to_cogeo
from opendm.utils import add_raster_meta_tags
class ODMDEMStage(types.ODM_Stage):
def process(self, args, outputs):
@@ -89,8 +87,6 @@ class ODMDEMStage(types.ODM_Stage):
if pseudo_georeference:
pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)
add_raster_meta_tags(dem_geotiff_path, reconstruction, tree, embed_gcp_meta=not outputs['large'])
if args.tiles:
generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency, resolution)

View file

@@ -6,7 +6,6 @@ import fiona
import fiona.crs
import json
import zipfile
import math
from collections import OrderedDict
from pyproj import CRS
@@ -126,35 +125,13 @@ class ODMGeoreferencingStage(types.ODM_Stage):
stages.append("transformation")
utmoffset = reconstruction.georef.utm_offset()
# Establish appropriate las scale for export
las_scale = 0.001
filtered_point_cloud_stats = tree.path("odm_filterpoints", "point_cloud_stats.json")
# Round to the nearest power of ten,
# then take the power of ten below it,
# so our las scale is sensible
def powerr(r):
return pow(10,round(math.log10(r))) / 10
if os.path.isfile(filtered_point_cloud_stats):
try:
with open(filtered_point_cloud_stats, 'r') as stats:
las_stats = json.load(stats)
spacing = powerr(las_stats['spacing'])
log.ODM_INFO("las scale calculated as the minimum of 1/10 estimated spacing or %s, which ever is less." % las_scale)
las_scale = min(spacing, 0.001)
except Exception as e:
log.ODM_WARNING("Cannot find file point_cloud_stats.json. Using default las scale: %s" % las_scale)
else:
log.ODM_INFO("No point_cloud_stats.json found. Using default las scale: %s" % las_scale)
params += [
f'--filters.transformation.matrix="1 0 0 {utmoffset[0]} 0 1 0 {utmoffset[1]} 0 0 1 0 0 0 0 1"',
f'--writers.las.offset_x={reconstruction.georef.utm_east_offset}' ,
f'--writers.las.offset_y={reconstruction.georef.utm_north_offset}',
f'--writers.las.scale_x={las_scale}',
f'--writers.las.scale_y={las_scale}',
f'--writers.las.scale_z={las_scale}',
'--writers.las.scale_x=0.001',
'--writers.las.scale_y=0.001',
'--writers.las.scale_z=0.001',
'--writers.las.offset_z=0',
f'--writers.las.a_srs="{reconstruction.georef.proj4()}"' # HOBU this should maybe be WKT
]
@@ -280,4 +257,3 @@
os.remove(tree.filtered_point_cloud)

View file

@@ -132,8 +132,7 @@ class ODMOrthoPhotoStage(types.ODM_Stage):
else:
log.ODM_INFO("Not a submodel run, skipping mask raster generation")
orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles, resolution,
reconstruction, tree, not outputs["large"])
orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles, resolution)
# Generate feathered orthophoto also
if args.orthophoto_cutline and submodel_run:

View file

@@ -1,5 +1,6 @@
import os
from osgeo import gdal
from opendm import io
from opendm import log
from opendm import types
@@ -13,6 +14,37 @@ class ODMPostProcess(types.ODM_Stage):
log.ODM_INFO("Post Processing")
if not outputs['large']:
# TODO: support for split-merge?
# Embed GCP info in 2D results via
# XML metadata fields
gcp_gml_export_file = tree.path("odm_georeferencing", "ground_control_points.gml")
if reconstruction.has_gcp() and io.file_exists(gcp_gml_export_file):
skip_embed_gcp = False
gcp_xml = ""
with open(gcp_gml_export_file) as f:
gcp_xml = f.read()
for product in [tree.odm_orthophoto_tif,
tree.path("odm_dem", "dsm.tif"),
tree.path("odm_dem", "dtm.tif")]:
if os.path.isfile(product):
ds = gdal.Open(product)
if ds is not None:
if ds.GetMetadata('xml:GROUND_CONTROL_POINTS') is None or self.rerun():
ds.SetMetadata(gcp_xml, 'xml:GROUND_CONTROL_POINTS')
ds = None
log.ODM_INFO("Wrote xml:GROUND_CONTROL_POINTS metadata to %s" % product)
else:
skip_embed_gcp = True
log.ODM_WARNING("Already embedded ground control point information")
break
else:
log.ODM_WARNING("Cannot open %s for writing, skipping GCP embedding" % product)
if getattr(args, '3d_tiles'):
build_3dtiles(args, tree, reconstruction, self.rerun())
@@ -21,4 +53,3 @@
copy_paths([os.path.join(args.project_path, p) for p in get_processing_results_paths()], args.copy_to, self.rerun())
except Exception as e:
log.ODM_WARNING("Cannot copy to %s: %s" % (args.copy_to, str(e)))

View file

@@ -63,13 +63,11 @@ class ODMOpenMVSStage(types.ODM_Stage):
densify_ini_file = os.path.join(tree.openmvs, 'Densify.ini')
subres_levels = 2 # The number of lower resolutions to process before estimating output resolution depthmap.
filter_point_th = -20
min_resolution = 320 if args.pc_quality in ["low", "lowest"] else 640
config = [
"--resolution-level %s" % int(resolution_level),
'--dense-config-file "%s"' % densify_ini_file,
"--max-resolution %s" % int(outputs['undist_image_max_size']),
"--min-resolution %s" % min_resolution,
"--max-threads %s" % args.max_concurrency,
"--number-views-fuse %s" % number_views_fuse,
"--sub-resolution-levels %s" % subres_levels,

View file

@@ -1,19 +1,23 @@
import os
import shutil
import json
import yaml
from opendm import log
from opendm.osfm import OSFMContext, get_submodel_argv, get_submodel_paths, get_all_submodel_paths
from opendm import types
from opendm import io
from opendm import system
from opendm import orthophoto
from opendm.dem import utils
from opendm.gcp import GCPFile
from opendm.dem import pdal, utils
from opendm.dem.merge import euclidean_merge_dems
from opensfm.large import metadataset
from opendm.cropper import Cropper
from opendm.concurrency import get_max_memory
from opendm.remote import LocalRemoteExecutor
from opendm.shots import merge_geojson_shots, merge_cameras
from opendm.shots import merge_geojson_shots
from opendm import point_cloud
from opendm.utils import double_quote, add_raster_meta_tags
from opendm.utils import double_quote
from opendm.tiles.tiler import generate_dem_tiles
from opendm.cogeo import convert_to_cogeo
from opendm import multispectral
@@ -263,8 +267,7 @@ class ODMMergeStage(types.ODM_Stage):
orthophoto_vars = orthophoto.get_orthophoto_vars(args)
orthophoto.merge(all_orthos_and_ortho_cuts, tree.odm_orthophoto_tif, orthophoto_vars)
orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles, args.orthophoto_resolution,
reconstruction, tree, False)
orthophoto.post_orthophoto_steps(args, merged_bounds_file, tree.odm_orthophoto_tif, tree.orthophoto_tiles, args.orthophoto_resolution)
elif len(all_orthos_and_ortho_cuts) == 1:
# Simply copy
log.ODM_WARNING("A single orthophoto/cutline pair was found between all submodels.")
@@ -306,8 +309,6 @@
if args.tiles:
generate_dem_tiles(dem_file, tree.path("%s_tiles" % human_name.lower()), args.max_concurrency, args.dem_resolution)
add_raster_meta_tags(dem_file, reconstruction, tree, embed_gcp_meta=False)
if args.cog:
convert_to_cogeo(dem_file, max_workers=args.max_concurrency)
else:
@@ -336,15 +337,6 @@
else:
log.ODM_WARNING("Found merged shots.geojson in %s" % tree.odm_report)
# Merge cameras
cameras_json = tree.path("cameras.json")
if not io.file_exists(cameras_json) or self.rerun():
cameras_json_files = get_submodel_paths(tree.submodels_path, "cameras.json")
log.ODM_INFO("Merging %s cameras.json files" % len(cameras_json_files))
merge_cameras(cameras_json_files, cameras_json)
else:
log.ODM_WARNING("Found merged cameras.json in %s" % tree.root_path)
# Stop the pipeline short by skipping to the postprocess stage.
# Afterwards, we're done.
self.next_stage = self.last_stage()

View file

@@ -1,18 +1,14 @@
set ODMBASE=%~dp0
set VIRTUAL_ENV=%ODMBASE%venv
IF "%~1"=="" (set WRITABLE_VIRTUAL_ENV=%VIRTUAL_ENV%) ELSE (set WRITABLE_VIRTUAL_ENV=%~1)
mkdir "%WRITABLE_VIRTUAL_ENV%"
set PYENVCFG=%VIRTUAL_ENV%\pyvenv.cfg
set SBBIN=%ODMBASE%SuperBuild\install\bin
rem Hot-patching pyvenv.cfg
set PYENVCFG=%WRITABLE_VIRTUAL_ENV%\pyvenv.cfg
echo home = %VIRTUAL_ENV%\Scripts> "%PYENVCFG%"
echo home = %ODMBASE%venv\Scripts> "%PYENVCFG%"
echo include-system-site-packages = false>> "%PYENVCFG%"
rem Hot-patching cv2 extension configs
set SBBIN=%ODMBASE%SuperBuild\install\bin
set CV2=%WRITABLE_VIRTUAL_ENV%\Lib\site-packages\cv2
mkdir "%CV2%"
echo BINARIES_PATHS = [r"%SBBIN%"] + BINARIES_PATHS> "%CV2%\config.py"
echo PYTHON_EXTENSIONS_PATHS = [r'''%VIRTUAL_ENV%\lib\site-packages\cv2\python-3.8'''] + PYTHON_EXTENSIONS_PATHS> "%CV2%\config-3.8.py"
echo BINARIES_PATHS = [r"%SBBIN%"] + BINARIES_PATHS> venv\Lib\site-packages\cv2\config.py
echo PYTHON_EXTENSIONS_PATHS = [r'''%VIRTUAL_ENV%\lib\site-packages\cv2\python-3.8'''] + PYTHON_EXTENSIONS_PATHS> venv\Lib\site-packages\cv2\config-3.8.py
cls