kopia lustrzana https://github.com/OpenDroneMap/ODM
commit
6a3f7005f0
|
@ -0,0 +1,32 @@
|
|||
# Builds and publishes the GPU-enabled ODM Docker image to Docker Hub.
# Triggered on pushes to master and on version tags (v*); both publish
# the single moving tag opendronemap/odm:gpu.
name: Publish Docker GPU Images

on:
  push:
    branches:
      - master
    tags:
      - v*

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      # QEMU + Buildx enable cross-platform builds via docker/build-push-action.
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push Docker image
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          # GPU image uses a dedicated Dockerfile based on nvidia/cuda.
          file: ./gpu.Dockerfile
          platforms: linux/amd64
          push: true
          tags: opendronemap/odm:gpu
|
40
README.md
40
README.md
|
@ -99,6 +99,46 @@ snap run opendronemap
|
|||
|
||||
Snap packages will be kept up-to-date automatically, so you don't need to update ODM manually.
|
||||
|
||||
## GPU Acceleration
|
||||
|
||||
ODM has support for doing SIFT feature extraction on a GPU, which is about 2x faster than the CPU on a consumer laptop. To use this feature, you need to use the `opendronemap/odm:gpu` docker image instead of `opendronemap/odm` and you need to pass the `--gpus all` flag to docker:
|
||||
|
||||
```
|
||||
docker run -ti --rm -v c:/Users/youruser/datasets:/datasets --gpus all opendronemap/odm:gpu --project-path /datasets project
|
||||
```
|
||||
|
||||
When you run ODM, if the GPU is recognized, in the first few lines of the output log you should notice:
|
||||
|
||||
```
|
||||
[INFO] Writing exif overrides
|
||||
[INFO] Maximum photo dimensions: 4000px
|
||||
[INFO] Found GPU device: Intel(R) OpenCL HD Graphics
|
||||
[INFO] Using GPU for extracting SIFT features
|
||||
```
|
||||
|
||||
The implementation is OpenCL-based, so the acceleration should work with most graphics cards (not just NVIDIA).
|
||||
|
||||
If you have an NVIDIA card, you can test that docker is recognizing the GPU by running:
|
||||
|
||||
```
|
||||
docker run --rm --gpus all nvidia/cuda:10.0-base nvidia-smi
|
||||
```
|
||||
|
||||
If you see an output that looks like this:
|
||||
|
||||
```
|
||||
Fri Jul 24 18:51:55 2020
|
||||
+-----------------------------------------------------------------------------+
|
||||
| NVIDIA-SMI 440.82 Driver Version: 440.82 CUDA Version: 10.2 |
|
||||
|-------------------------------+----------------------+----------------------+
|
||||
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
|
||||
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|
||||
```
|
||||
|
||||
You're in good shape!
|
||||
|
||||
See https://github.com/NVIDIA/nvidia-docker and https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker for information on docker/NVIDIA setup.
|
||||
|
||||
## WSL or WSL2 Install
|
||||
|
||||
Note: This requires that you have installed WSL already by following [the instructions on Microsoft's Website](https://docs.microsoft.com/en-us/windows/wsl/install-win10).
|
||||
|
|
|
@ -9,7 +9,7 @@ ExternalProject_Add(${_proj_name}
|
|||
#--Download step--------------
|
||||
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
|
||||
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
|
||||
GIT_TAG 244
|
||||
GIT_TAG 246
|
||||
#--Update/Patch step----------
|
||||
UPDATE_COMMAND git submodule update --init --recursive
|
||||
#--Configure step-------------
|
||||
|
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
|||
2.4.5
|
||||
2.4.7
|
||||
|
|
|
@ -120,6 +120,9 @@ install() {
|
|||
|
||||
|
||||
pip install --ignore-installed -r requirements.txt
|
||||
if [ ! -z "$GPU_INSTALL" ]; then
|
||||
pip install --ignore-installed -r requirements.gpu.txt
|
||||
fi
|
||||
|
||||
if [ ! -z "$PORTABLE_INSTALL" ]; then
|
||||
echo "Replacing g++ and gcc with our scripts for portability..."
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
# Multi-stage build for the GPU-enabled ODM image.
# Stage 1 compiles the SuperBuild; stage 2 copies only the results to
# keep the published image small.
FROM nvidia/cuda:11.2.0-runtime-ubuntu20.04 AS builder

# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/src/opensfm" \
    LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

# Prepare directories
WORKDIR /code

# Copy everything
COPY . ./

# Run the build
RUN PORTABLE_INSTALL=YES GPU_INSTALL=YES bash configure.sh install

# Clean Superbuild
RUN bash configure.sh clean

### END Builder

### Use a second image for the final asset to reduce the number and
# size of the layers.
FROM nvidia/cuda:11.2.0-runtime-ubuntu20.04

# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/src/opensfm" \
    LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

WORKDIR /code

# Copy everything we built from the builder
COPY --from=builder /code /code

# Copy the Python libraries installed via pip from the builder
COPY --from=builder /usr/local /usr/local

# Install OpenCL Drivers.
# Use apt-get (stable scripted CLI, unlike `apt`) and remove the package
# lists in the same RUN so they don't get baked into this layer.
RUN apt-get update \
    && apt-get install -y nvidia-opencl-icd-340 intel-opencl-icd \
    && rm -rf /var/lib/apt/lists/*

# Install shared libraries that we depend on via APT, but *not*
# the -dev packages to save space!
RUN bash configure.sh installruntimedepsonly \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Entry point
ENTRYPOINT ["python3", "/code/run.py"]
|
|
@ -0,0 +1,19 @@
|
|||
from opendm import log
|
||||
from repoze.lru import lru_cache
|
||||
|
||||
@lru_cache(maxsize=None)
def has_gpus():
    """Return True if at least one OpenCL platform is available.

    Cached via lru_cache, so device enumeration (and its log output)
    happens only once per process.

    Returns:
        bool: True when pyopencl is importable and reports one or more
        platforms; False otherwise. Never raises.
    """
    try:
        import pyopencl
    except ImportError:
        # pyopencl is only installed in GPU builds (requirements.gpu.txt),
        # so a missing module just means this is a CPU build.
        log.ODM_INFO("PyOpenCL is missing (not a GPU build)")
        return False

    try:
        platforms = pyopencl.get_platforms()
        for p in platforms:
            # NOTE(review): an OpenCL platform is not necessarily a GPU
            # (CPU-only OpenCL runtimes exist); message kept for log
            # compatibility with callers that grep for it.
            log.ODM_INFO("Found GPU device: %s" % p.name)

        return len(platforms) > 0
    except Exception as e:
        # Surface the reason instead of failing silently so GPU setup
        # problems are diagnosable from the log.
        log.ODM_WARNING("Cannot enumerate OpenCL platforms: %s" % str(e))
        return False
|
|
@ -17,7 +17,7 @@ from opensfm.actions import undistort
|
|||
from opensfm.dataset import DataSet
|
||||
from opensfm import report
|
||||
from opendm.multispectral import get_photos_by_band
|
||||
|
||||
from opendm.gpu import has_gpus
|
||||
|
||||
class OSFMContext:
|
||||
def __init__(self, opensfm_project_path):
|
||||
|
@ -214,6 +214,12 @@ class OSFMContext:
|
|||
log.ODM_WARNING("Using BOW matching, will use HAHOG feature type, not SIFT")
|
||||
feature_type = "HAHOG"
|
||||
|
||||
# GPU acceleration?
|
||||
if has_gpus() and feature_type == "SIFT":
|
||||
log.ODM_INFO("Using GPU for extracting SIFT features")
|
||||
log.ODM_INFO("--min-num-features will be ignored")
|
||||
feature_type = "SIFT_GPU"
|
||||
|
||||
config.append("feature_type: %s" % feature_type)
|
||||
|
||||
if has_alt:
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
silx>=0.12.0
|
||||
pyopencl==2021.1.1
|
|
@ -90,6 +90,7 @@ fi
|
|||
export PORT="${PORT:=3000}"
|
||||
export QTC="${QTC:=NO}"
|
||||
export IMAGE="${IMAGE:=opendronemap/nodeodm}"
|
||||
export GPU="${GPU:=NO}"
|
||||
|
||||
if [ -z "$DATA" ]; then
|
||||
echo "Usage: DATA=/path/to/datasets [VARS] $0"
|
||||
|
@ -98,6 +99,7 @@ if [ -z "$DATA" ]; then
|
|||
echo " DATA Path to directory that contains datasets for testing. The directory will be mounted in /datasets. If you don't have any, simply set it to a folder outside the ODM repository."
|
||||
echo " PORT Port to expose for NodeODM (default: $PORT)"
|
||||
echo " IMAGE Docker image to use (default: $IMAGE)"
|
||||
echo " GPU Enable GPU support (default: $GPU)"
|
||||
echo " QTC When set to YES, installs QT Creator for C++ development (default: $QTC)"
|
||||
exit 1
|
||||
fi
|
||||
|
@ -108,6 +110,7 @@ echo "Datasets path: $DATA"
|
|||
echo "Expose port: $PORT"
|
||||
echo "QT Creator: $QTC"
|
||||
echo "Image: $IMAGE"
|
||||
echo "GPU: $GPU"
|
||||
|
||||
if [ ! -e "$HOME"/.odm-dev-home ]; then
|
||||
mkdir -p "$HOME"/.odm-dev-home
|
||||
|
@ -116,6 +119,11 @@ fi
|
|||
USER_ID=$(id -u)
|
||||
GROUP_ID=$(id -g)
|
||||
USER=$(id -un)
|
||||
GPU_FLAG=""
|
||||
if [[ "$GPU" != "NO" ]]; then
|
||||
GPU_FLAG="--gpus all"
|
||||
fi
|
||||
|
||||
xhost + || true
|
||||
docker run -ti --entrypoint bash --name odmdev -v $(pwd):/code -v "$DATA":/datasets -p $PORT:3000 --privileged -e DISPLAY -e LANG=C.UTF-8 -e LC_ALL=C.UTF-8 -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v="$HOME/.odm-dev-home:/home/$USER" $IMAGE -c "/code/start-dev-env.sh --setup $USER $USER_ID $GROUP_ID $QTC"
|
||||
docker run -ti --entrypoint bash --name odmdev -v $(pwd):/code -v "$DATA":/datasets -p $PORT:3000 $GPU_FLAG --privileged -e DISPLAY -e LANG=C.UTF-8 -e LC_ALL=C.UTF-8 -v="/tmp/.X11-unix:/tmp/.X11-unix:rw" -v="$HOME/.odm-dev-home:/home/$USER" $IMAGE -c "/code/start-dev-env.sh --setup $USER $USER_ID $GROUP_ID $QTC"
|
||||
exit 0
|
||||
|
|
Ładowanie…
Reference in New Issue