From c242008cffe462222494840cea60d23c736f0ab3 Mon Sep 17 00:00:00 2001 From: pyup-bot Date: Fri, 4 Jan 2019 16:01:31 -0500 Subject: [PATCH 01/12] Update django from 2.0.3 to 2.1.5 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 979f996f..86d5ca40 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ APScheduler==3.2.0 billiard==3.5.0.3 celery==4.1.0 coreapi==2.0.9 -Django==2.0.3 +Django==2.1.5 django-appconf==1.0.2 django-codemirror2==0.2 django-colorfield==0.1.14 From 3e5de5ad64fc0f9b2c6c25ee6d5aa40dd7eb020d Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Jan 2019 09:06:22 -0500 Subject: [PATCH 02/12] Expose ODM version, label for processing nodes --- app/api/processingnodes.py | 6 ++++- app/static/app/js/components/EditTaskForm.jsx | 2 +- app/templates/app/processing_node.html | 8 +++++++ nodeodm/admin.py | 2 +- nodeodm/migrations/0005_auto_20190115_1346.py | 23 +++++++++++++++++++ nodeodm/models.py | 9 +++++++- plugins/lightning/plugin.py | 2 +- plugins/lightning/public/Dashboard.jsx | 4 ++-- 8 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 nodeodm/migrations/0005_auto_20190115_1346.py diff --git a/app/api/processingnodes.py b/app/api/processingnodes.py index f1d4eef5..0b1ea188 100644 --- a/app/api/processingnodes.py +++ b/app/api/processingnodes.py @@ -10,10 +10,14 @@ from nodeodm.models import ProcessingNode class ProcessingNodeSerializer(serializers.ModelSerializer): online = serializers.SerializerMethodField() + label = serializers.SerializerMethodField() def get_online(self, obj): return obj.is_online() + def get_label(self, obj): + return str(obj) + class Meta: model = ProcessingNode fields = '__all__' @@ -30,7 +34,7 @@ class ProcessingNodeFilter(FilterSet): class Meta: model = ProcessingNode - fields = ['has_available_options', 'id', 'hostname', 'port', 'api_version', 'queue_count', 'max_images', ] + fields = ['has_available_options', 'id', 'hostname', 'port', 'api_version', 'queue_count', 'max_images', 'label', ] class ProcessingNodeViewSet(viewsets.ModelViewSet): """ diff --git a/app/static/app/js/components/EditTaskForm.jsx b/app/static/app/js/components/EditTaskForm.jsx index fbffefcf..9a603621 100644 --- a/app/static/app/js/components/EditTaskForm.jsx +++ b/app/static/app/js/components/EditTaskForm.jsx @@ -108,7 +108,7 @@ class EditTaskForm extends React.Component { return { id: node.id, key: node.id, - label: `${node.hostname}:${node.port} (queue: ${node.queue_count})`, + label: `${node.label} (queue: ${node.queue_count})`, options: node.available_options, queue_count: node.queue_count, enabled: node.online, diff --git a/app/templates/app/processing_node.html b/app/templates/app/processing_node.html index c76007c9..e8328c76 100644 --- a/app/templates/app/processing_node.html +++ b/app/templates/app/processing_node.html @@ -19,6 +19,10 @@ {% trans "API Version" %} {{ processing_node.api_version }} + + {% trans "ODM Version" %} + {{ processing_node.odm_version }} + {% trans "Queue Count" %} {{ processing_node.queue_count }} @@ -27,6 +31,10 @@ {% trans "Max Images Limit" %} {{ processing_node.max_images }} + + {% trans "Label" %} + {{ processing_node.label|default:"None" }} + {% trans "Last Refreshed" %} {{ processing_node.last_refreshed|timesince }} {% trans 'ago' %} ({{ processing_node.last_refreshed|localtime }}) diff --git a/nodeodm/admin.py b/nodeodm/admin.py index 4be2e9ba..ba8df199 100644 --- a/nodeodm/admin.py +++ b/nodeodm/admin.py @@ -4,6 
+4,6 @@ from guardian.admin import GuardedModelAdmin from .models import ProcessingNode class ProcessingNodeAdmin(GuardedModelAdmin): - fields = ('hostname', 'port', 'token') + fields = ('hostname', 'port', 'token', 'label', ) admin.site.register(ProcessingNode, ProcessingNodeAdmin) diff --git a/nodeodm/migrations/0005_auto_20190115_1346.py b/nodeodm/migrations/0005_auto_20190115_1346.py new file mode 100644 index 00000000..a6561cac --- /dev/null +++ b/nodeodm/migrations/0005_auto_20190115_1346.py @@ -0,0 +1,23 @@ +# Generated by Django 2.0.3 on 2019-01-15 13:46 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('nodeodm', '0004_processingnode_max_images'), + ] + + operations = [ + migrations.AddField( + model_name='processingnode', + name='label', + field=models.CharField(blank=True, default='', help_text='Optional label for this node. When set, this label will be shown instead of hostname:port.', max_length=255), + ), + migrations.AddField( + model_name='processingnode', + name='odm_version', + field=models.CharField(help_text='OpenDroneMap version used by the node.', max_length=32, null=True), + ), + ] diff --git a/nodeodm/models.py b/nodeodm/models.py index e6bef318..efd1d691 100644 --- a/nodeodm/models.py +++ b/nodeodm/models.py @@ -43,9 +43,14 @@ class ProcessingNode(models.Model): available_options = fields.JSONField(default=dict(), help_text="Description of the options that can be used for processing") token = models.CharField(max_length=1024, blank=True, default="", help_text="Token to use for authentication. If the node doesn't have authentication, you can leave this field blank.") max_images = models.PositiveIntegerField(help_text="Maximum number of images accepted by this node.", blank=True, null=True) + odm_version = models.CharField(max_length=32, null=True, help_text="OpenDroneMap version used by the node") + label = models.CharField(max_length=255, default="", blank=True, help_text="Optional label for this node. When set, this label will be shown instead of the hostname:port name.") def __str__(self): - return '{}:{}'.format(self.hostname, self.port) + if self.label != "": + return self.label + else: + return '{}:{}'.format(self.hostname, self.port) @staticmethod def find_best_available_node(): @@ -79,6 +84,8 @@ class ProcessingNode(models.Model): if 'maxImages' in info: self.max_images = info['maxImages'] + if 'odmVersion' in info: + self.odm_version = info['odmVersion'] options = api_client.options() self.available_options = options diff --git a/plugins/lightning/plugin.py b/plugins/lightning/plugin.py index a997c345..20c0ac49 100644 --- a/plugins/lightning/plugin.py +++ b/plugins/lightning/plugin.py @@ -64,7 +64,7 @@ class Plugin(PluginBase): matches = [n for n in nodes if n.hostname == hostname and n.port == port and n.token == token] if len(matches) == 0: # Add - node = ProcessingNode.objects.create(hostname=hostname, port=port, token=token) + node = ProcessingNode.objects.create(hostname=hostname, port=port, token=token, label="Lightning") assign_perm('view_processingnode', request.user, node) assign_perm('change_processingnode', request.user, node) assign_perm('delete_processingnode', request.user, node) diff --git a/plugins/lightning/public/Dashboard.jsx b/plugins/lightning/public/Dashboard.jsx index 5d9a30f6..09ea51c0 100644 --- a/plugins/lightning/public/Dashboard.jsx +++ b/plugins/lightning/public/Dashboard.jsx @@ -130,7 +130,7 @@ export default class Dashboard extends React.Component {
@@ -140,7 +140,7 @@ export default class Dashboard extends React.Component { {nodes.length > 0 ?

- You are all set! When creating a new task from the Dashboard, select {nodes[0].hostname}:{nodes[0].port} from the Processing Node drop-down instead of Auto. + You are all set! When creating a new task from the Dashboard, select {nodes[0].label} from the Processing Node drop-down instead of Auto.
: ""}
From 534fa9d565ddf70b7454d8b090f084dc4e90268a Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Jan 2019 09:26:24 -0500 Subject: [PATCH 03/12] Added unit tests, updated docs --- app/tests/test_api.py | 13 +++++++++++++ nodeodm/migrations/0005_auto_20190115_1346.py | 2 +- slate/source/includes/reference/_processingnode.md | 3 +++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/app/tests/test_api.py b/app/tests/test_api.py index 6e3158f2..af5cfd4d 100644 --- a/app/tests/test_api.py +++ b/app/tests/test_api.py @@ -355,6 +355,19 @@ class TestApi(BootTestCase): # Verify max images field self.assertTrue("max_images" in res.data) + # Verify odm version + self.assertTrue("odm_version" in res.data) + + # label should be hostname:port (since no label is set) + self.assertEqual(res.data['label'], pnode.hostname + ":" + str(pnode.port)) + + # If we update the label, the label is used instead + pnode.label = "Test" + pnode.save() + + res = client.get('/api/processingnodes/{}/'.format(pnode.id)) + self.assertEqual(res.data['label'], "Test") + # Cannot delete a processing node as normal user res = client.delete('/api/processingnodes/{}/'.format(pnode.id)) self.assertTrue(res.status_code, status.HTTP_403_FORBIDDEN) diff --git a/nodeodm/migrations/0005_auto_20190115_1346.py b/nodeodm/migrations/0005_auto_20190115_1346.py index a6561cac..2bb4d60a 100644 --- a/nodeodm/migrations/0005_auto_20190115_1346.py +++ b/nodeodm/migrations/0005_auto_20190115_1346.py @@ -18,6 +18,6 @@ class Migration(migrations.Migration): migrations.AddField( model_name='processingnode', name='odm_version', - field=models.CharField(help_text='OpenDroneMap version used by the node.', max_length=32, null=True), + field=models.CharField(help_text='ODM version used by the node.', max_length=32, null=True), ), ] diff --git a/slate/source/includes/reference/_processingnode.md b/slate/source/includes/reference/_processingnode.md index 4ffd59dc..1a0d53a6 100644 --- a/slate/source/includes/reference/_processingnode.md +++ b/slate/source/includes/reference/_processingnode.md @@ -12,6 +12,7 @@ "last_refreshed": "2017-03-01T21:14:49.918276Z", "queue_count": 0, "max_images": null, + "label": "nodeodm.masseranolabs.com:80", "available_options": [ { "help": "Oct-tree depth at which the Laplacian equation is solved in the surface reconstruction step. Increasing this value increases computation times slightly but helps reduce memory usage. Default: 9", @@ -33,9 +34,11 @@ online | bool | Whether the processing node could be reached in the last 5 minut hostname | string | Hostname/IP address port | int | Port api_version | string | Version of NodeODM currently running +odm_version | string | Version of ODM currently being used last_refreshed | string | Date and time this node was last seen online. This value is typically refreshed every 15-30 seconds and is used to decide whether a node is offline or not queue_count | int | Number of [Task](#task) items currently being processed/queued on this node. max_images | int | Optional maximum number of images this processing node can accept. null indicates no limit. +label | string | Label for the node available_options | JSON[] | JSON-encoded list of options that this node is capable of handling. 
See [Available Options](#available-options) for more information From bda31f3a00dd9128f190d8b299418c615da48d1f Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Jan 2019 12:51:04 -0500 Subject: [PATCH 04/12] Moved certbot install command --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 0faed03b..9439a5dd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ RUN printf "deb http://mirror.steadfast.net/debian/ stable main contrib n RUN printf "deb http://mirror.steadfast.net/debian/ testing main contrib non-free\ndeb-src http://mirror.steadfast.net/debian/ testing main contrib non-free" > /etc/apt/sources.list.d/testing.list # Install Node.js GDAL, nginx, letsencrypt, psql -RUN apt-get -qq update && apt-get -qq install -t testing -y binutils libproj-dev gdal-bin nginx grass-core && apt-get -qq install -y gettext-base cron certbot postgresql-client-9.6 +RUN apt-get -qq update && apt-get -qq install -t testing -y binutils libproj-dev gdal-bin nginx grass-core certbot && apt-get -qq install -y gettext-base cron postgresql-client-9.6 # Install pip reqs From 8d1bbf22d9e5e0129c0424ce2de1048014c927b9 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Jan 2019 13:40:07 -0500 Subject: [PATCH 05/12] More resiliant task assets download --- app/models/task.py | 45 +++++++++++++++++++++---------------------- nodeodm/api_client.py | 2 +- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/app/models/task.py b/app/models/task.py index 585c7f10..5e3372f4 100644 --- a/app/models/task.py +++ b/app/models/task.py @@ -10,6 +10,8 @@ from shlex import quote import piexif import re + +import requests from PIL import Image from django.contrib.gis.gdal import GDALRaster from django.contrib.gis.gdal import OGRGeometry @@ -19,7 +21,7 @@ from django.core.exceptions import ValidationError from django.db import models from django.db import transaction from django.utils import timezone -from django.template.defaultfilters import filesizeformat +from requests.packages.urllib3.exceptions import ReadTimeoutError from app import pending_actions from django.contrib.gis.db.models.fields import GeometryField @@ -508,9 +510,6 @@ class Task(models.Model): logger.info("Processing status: {} for {}".format(self.status, self)) if self.status == status_codes.COMPLETED: - # Since we're downloading/extracting results, set temporarely the status back to running - self.status = status_codes.RUNNING - assets_dir = self.assets_path("") # Remove previous assets directory @@ -523,29 +522,29 @@ class Task(models.Model): logger.info("Downloading all.zip for {}".format(self)) # Download all assets - zip_stream = self.processing_node.download_task_asset(self.uuid, "all.zip") - zip_path = os.path.join(assets_dir, "all.zip") + try: + zip_stream = self.processing_node.download_task_asset(self.uuid, "all.zip") + zip_path = os.path.join(assets_dir, "all.zip") - # Keep track of download progress (if possible) - content_length = zip_stream.headers.get('content-length') - total_length = int(content_length) if content_length is not None else None - downloaded = 0 - last_update = 0 + # Keep track of download progress (if possible) + content_length = zip_stream.headers.get('content-length') + total_length = int(content_length) if content_length is not None else None + downloaded = 0 + last_update = 0 - self.console_output += "Downloading results (%s). 
Please wait...\n" % (filesizeformat(total_length) if total_length is not None else 'unknown size') - self.save() + with open(zip_path, 'wb') as fd: + for chunk in zip_stream.iter_content(4096): + downloaded += len(chunk) - with open(zip_path, 'wb') as fd: - for chunk in zip_stream.iter_content(4096): - downloaded += len(chunk) + # Track progress if we know the content header length + # every 2 seconds + if total_length > 0 and time.time() - last_update >= 2: + Task.objects.filter(pk=self.id).update(running_progress=(self.TASK_OUTPUT_MILESTONES_LAST_VALUE + (float(downloaded) / total_length) * 0.1)) + last_update = time.time() - # Track progress if we know the content header length - # every 2 seconds - if total_length > 0 and time.time() - last_update >= 2: - Task.objects.filter(pk=self.id).update(running_progress=(self.TASK_OUTPUT_MILESTONES_LAST_VALUE + (float(downloaded) / total_length) * 0.1)) - last_update = time.time() - - fd.write(chunk) + fd.write(chunk) + except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, ReadTimeoutError) as e: + raise ProcessingTimeout(e) logger.info("Done downloading all.zip for {}".format(self)) diff --git a/nodeodm/api_client.py b/nodeodm/api_client.py index 49cdb184..173f283a 100644 --- a/nodeodm/api_client.py +++ b/nodeodm/api_client.py @@ -93,7 +93,7 @@ class ApiClient: return requests.post(self.url('/task/restart'), data=data, timeout=self.timeout).json() def task_download(self, uuid, asset): - res = requests.get(self.url('/task/{}/download/{}').format(uuid, asset), stream=True) + res = requests.get(self.url('/task/{}/download/{}').format(uuid, asset), stream=True, timeout=self.timeout) if "Content-Type" in res.headers and "application/json" in res.headers['Content-Type']: return res.json() else: From 8332624906b48d7bdb27424ab59639641bfbc8fe Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Jan 2019 14:07:35 -0500 Subject: [PATCH 06/12] More presets --- app/boot.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/app/boot.py b/app/boot.py index e9edcc77..0e7c9416 100644 --- a/app/boot.py +++ b/app/boot.py @@ -96,6 +96,24 @@ def boot(): def add_default_presets(): try: + Preset.objects.update_or_create(name='3D Model', system=True, + defaults={'options': [{'name': 'mesh-octree-depth', 'value': "11"}, + {'name': 'use-3dmesh', 'value': True}, + {'name': 'depthmap-resolution', 'value': '1000'}, + {'name': 'mesh-size', 'value': '600000'}]}) + Preset.objects.update_or_create(name='Buildings', system=True, + defaults={'options': [{'name': 'mesh-octree-depth', 'value': "10"}, + {'name': 'mesh-size', 'value': '300000'}, + {'name': 'depthmap-resolution', 'value': '1000'}, + {'name': 'texturing-nadir-weight', 'value': "28"}]}) + Preset.objects.update_or_create(name='Point of Interest', system=True, + defaults={'options': [{'name': 'matcher-neighbors', 'value': "24"}, + {'name': 'mesh-size', 'value': '600000'}, + {'name': 'use-3dmesh', 'value': True}]}) + Preset.objects.update_or_create(name='Forest', system=True, + defaults={'options': [{'name': 'min-num-features', 'value': "18000"}, + {'name': 'matcher-neighbors', 'value': "21"}, + {'name': 'texturing-data-term', 'value': "area"}]}) Preset.objects.update_or_create(name='DSM + DTM', system=True, defaults={ 'options': [{'name': 'dsm', 'value': True}, {'name': 'dtm', 'value': True}]}) @@ -103,11 +121,13 @@ def add_default_presets(): defaults={'options': [{'name': 'fast-orthophoto', 'value': True}]}) Preset.objects.update_or_create(name='High 
Resolution', system=True, defaults={'options': [{'name': 'dsm', 'value': True}, - {'name': 'dem-resolution', 'value': "2.5"}, - {'name': 'orthophoto-resolution', 'value': "2.5"}, + {'name': 'depthmap-resolution', 'value': '1000'}, + {'name': 'dem-resolution', 'value': "2.0"}, + {'name': 'orthophoto-resolution', 'value': "2.0"}, ]}) Preset.objects.update_or_create(name='Default', system=True, defaults={'options': [{'name': 'dsm', 'value': True}]}) + except MultipleObjectsReturned: # Mostly to handle a legacy code problem where # multiple system presets with the same name were From 57a7a7092504058a2604385c598e3283324af15f Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Jan 2019 16:48:59 -0500 Subject: [PATCH 07/12] Ability to download tiles directly --- app/migrations/0024_update_task_assets.py | 80 +++++++++++++++++++++ app/models/task.py | 12 ++++ app/static/app/js/classes/AssetDownloads.js | 3 + app/static/app/js/css/Map.scss | 4 ++ app/tests/test_api_preset.py | 6 +- app/tests/test_api_task.py | 6 ++ 6 files changed, 110 insertions(+), 1 deletion(-) create mode 100644 app/migrations/0024_update_task_assets.py diff --git a/app/migrations/0024_update_task_assets.py b/app/migrations/0024_update_task_assets.py new file mode 100644 index 00000000..e3cc40b4 --- /dev/null +++ b/app/migrations/0024_update_task_assets.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.1 on 2017-07-07 18:05 +from __future__ import unicode_literals + +import django.contrib.postgres.fields +import os +from django.db import migrations, models + +from webodm import settings + +ASSETS_MAP = { + 'all.zip': 'all.zip', + 'orthophoto.tif': os.path.join('odm_orthophoto', 'odm_orthophoto.tif'), + 'orthophoto.png': os.path.join('odm_orthophoto', 'odm_orthophoto.png'), + 'orthophoto.mbtiles': os.path.join('odm_orthophoto', 'odm_orthophoto.mbtiles'), + 'georeferenced_model.las': os.path.join('odm_georeferencing', 'odm_georeferenced_model.las'), + 'georeferenced_model.laz': os.path.join('odm_georeferencing', 'odm_georeferenced_model.laz'), + 'georeferenced_model.ply': os.path.join('odm_georeferencing', 'odm_georeferenced_model.ply'), + 'georeferenced_model.csv': os.path.join('odm_georeferencing', 'odm_georeferenced_model.csv'), + 'textured_model.zip': { + 'deferred_path': 'textured_model.zip', + 'deferred_compress_dir': 'odm_texturing' + }, + 'dtm.tif': os.path.join('odm_dem', 'dtm.tif'), + 'dsm.tif': os.path.join('odm_dem', 'dsm.tif'), + 'dtm_tiles.zip': { + 'deferred_path': 'dtm_tiles.zip', + 'deferred_compress_dir': 'dtm_tiles' + }, + 'dsm_tiles.zip': { + 'deferred_path': 'dsm_tiles.zip', + 'deferred_compress_dir': 'dsm_tiles' + }, + 'orthophoto_tiles.zip': { + 'deferred_path': 'orthophoto_tiles.zip', + 'deferred_compress_dir': 'orthophoto_tiles' + }, +} + +def assets_path(project_id, task_id, *args): + return os.path.join(settings.MEDIA_ROOT, + "project", + str(project_id), + "task", + str(task_id), + "assets", + *args) + +def is_asset_available_slow(t, asset): + if asset in ASSETS_MAP: + value = ASSETS_MAP[asset] + if isinstance(value, str): + return os.path.exists(assets_path(t.project.id, t.id, value)) + elif isinstance(value, dict): + if 'deferred_compress_dir' in value: + return os.path.exists(assets_path(t.project.id, t.id, value['deferred_compress_dir'])) + + return False + + +def detect_available_assets(apps, schema_editor): + Task = apps.get_model('app', 'Task') + + for t in Task.objects.all(): + print("Updating {}".format(t)) + + all_assets = list(ASSETS_MAP.keys()) + 
t.available_assets = [asset for asset in all_assets if is_asset_available_slow(t, asset)] + t.save() + + +class Migration(migrations.Migration): + + dependencies = [ + ('app', '0023_task_running_progress'), + ] + + operations = [ + migrations.RunPython(detect_available_assets), + ] diff --git a/app/models/task.py b/app/models/task.py index 5e3372f4..6b75254e 100644 --- a/app/models/task.py +++ b/app/models/task.py @@ -142,6 +142,18 @@ class Task(models.Model): }, 'dtm.tif': os.path.join('odm_dem', 'dtm.tif'), 'dsm.tif': os.path.join('odm_dem', 'dsm.tif'), + 'dtm_tiles.zip': { + 'deferred_path': 'dtm_tiles.zip', + 'deferred_compress_dir': 'dtm_tiles' + }, + 'dsm_tiles.zip': { + 'deferred_path': 'dsm_tiles.zip', + 'deferred_compress_dir': 'dsm_tiles' + }, + 'orthophoto_tiles.zip': { + 'deferred_path': 'orthophoto_tiles.zip', + 'deferred_compress_dir': 'orthophoto_tiles' + }, } STATUS_CODES = ( diff --git a/app/static/app/js/classes/AssetDownloads.js b/app/static/app/js/classes/AssetDownloads.js index 855b230c..c8cbf96a 100644 --- a/app/static/app/js/classes/AssetDownloads.js +++ b/app/static/app/js/classes/AssetDownloads.js @@ -34,8 +34,11 @@ const api = { new AssetDownload("Orthophoto (GeoTIFF)","orthophoto.tif","fa fa-map-o"), new AssetDownload("Orthophoto (PNG)","orthophoto.png","fa fa-picture-o"), new AssetDownload("Orthophoto (MBTiles)","orthophoto.mbtiles","fa fa-picture-o"), + new AssetDownload("Orthophoto (Tiles)","orthophoto_tiles.zip","fa fa-table"), new AssetDownload("Terrain Model (GeoTIFF)","dtm.tif","fa fa-area-chart"), + new AssetDownload("Terrain Model (Tiles)","dtm_tiles.zip","fa fa-table"), new AssetDownload("Surface Model (GeoTIFF)","dsm.tif","fa fa-area-chart"), + new AssetDownload("Surface Model (Tiles)","dsm_tiles.zip","fa fa-table"), new AssetDownload("Point Cloud (LAS)","georeferenced_model.las","fa fa-cube"), new AssetDownload("Point Cloud (LAZ)","georeferenced_model.laz","fa fa-cube"), new AssetDownload("Point Cloud (PLY)","georeferenced_model.ply","fa fa-cube"), diff --git a/app/static/app/js/css/Map.scss b/app/static/app/js/css/Map.scss index 3f70efdf..b17753f7 100644 --- a/app/static/app/js/css/Map.scss +++ b/app/static/app/js/css/Map.scss @@ -10,6 +10,10 @@ .asset-links{ margin-top: 8px; padding-left: 16px; + + columns: 2; + -webkit-columns: 2; + -moz-columns: 2; } .switchModeButton{ bottom: 12px; diff --git a/app/tests/test_api_preset.py b/app/tests/test_api_preset.py index 8d01ed1e..4772351f 100644 --- a/app/tests/test_api_preset.py +++ b/app/tests/test_api_preset.py @@ -24,6 +24,10 @@ class TestApiPreset(BootTestCase): self.assertTrue(Preset.objects.filter(name="Default", system=True).exists()) self.assertTrue(Preset.objects.filter(name="DSM + DTM", system=True).exists()) self.assertTrue(Preset.objects.filter(name="High Resolution", system=True).exists()) + self.assertTrue(Preset.objects.filter(name="Forest", system=True).exists()) + self.assertTrue(Preset.objects.filter(name="Buildings", system=True).exists()) + self.assertTrue(Preset.objects.filter(name="3D Model", system=True).exists()) + self.assertTrue(Preset.objects.filter(name="Point of Interest", system=True).exists()) def test_preset(self): client = APIClient() @@ -53,7 +57,7 @@ class TestApiPreset(BootTestCase): self.assertTrue(res.status_code == status.HTTP_200_OK) # Only ours and global presets are available - self.assertTrue(len(res.data) == 7) + self.assertTrue(len(res.data) == 11) self.assertTrue('My Local Preset' in [preset['name'] for preset in res.data]) self.assertTrue('High Resolution' in 
[preset['name'] for preset in res.data]) self.assertTrue('Global Preset #1' in [preset['name'] for preset in res.data]) diff --git a/app/tests/test_api_task.py b/app/tests/test_api_task.py index 873fffe9..42179590 100644 --- a/app/tests/test_api_task.py +++ b/app/tests/test_api_task.py @@ -322,6 +322,11 @@ class TestApiTask(BootTransactionTestCase): # A textured mesh archive file should exist self.assertTrue(os.path.exists(task.assets_path(task.ASSETS_MAP["textured_model.zip"]["deferred_path"]))) + # Tiles archives should have been created + self.assertTrue(os.path.exists(task.assets_path(task.ASSETS_MAP["dsm_tiles.zip"]["deferred_path"]))) + self.assertTrue(os.path.exists(task.assets_path(task.ASSETS_MAP["dtm_tiles.zip"]["deferred_path"]))) + self.assertTrue(os.path.exists(task.assets_path(task.ASSETS_MAP["orthophoto_tiles.zip"]["deferred_path"]))) + # Can download raw assets res = client.get("/api/projects/{}/tasks/{}/assets/odm_orthophoto/odm_orthophoto.tif".format(project.id, task.id)) self.assertTrue(res.status_code == status.HTTP_200_OK) @@ -550,6 +555,7 @@ class TestApiTask(BootTransactionTestCase): # but others such as textured_model.zip should be available res = client.get("/api/projects/{}/tasks/{}/".format(project.id, task.id)) self.assertFalse('orthophoto.tif' in res.data['available_assets']) + self.assertFalse('orthophoto_tiles.zip' in res.data['available_assets']) self.assertTrue('textured_model.zip' in res.data['available_assets']) image1.close() From 074a6fce6445b869259c4a8e5f672e03ea31c36a Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Tue, 15 Jan 2019 17:51:32 -0500 Subject: [PATCH 08/12] Minor refactoring, streaming downloads for large files --- app/api/tasks.py | 68 +++++++++++++++++++++++--------------- app/tests/test_api_task.py | 5 +++ 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/app/api/tasks.py b/app/api/tasks.py index 1c29eb2b..944fe7c0 100644 --- a/app/api/tasks.py +++ b/app/api/tasks.py @@ -1,9 +1,10 @@ -import mimetypes import os from wsgiref.util import FileWrapper +import mimetypes from django.core.exceptions import ObjectDoesNotExist, SuspiciousFileOperation, ValidationError from django.db import transaction +from django.http import FileResponse from django.http import HttpResponse from rest_framework import status, serializers, viewsets, filters, exceptions, permissions, parsers from rest_framework.decorators import detail_route @@ -253,33 +254,52 @@ class TaskTilesJson(TaskNestedView): return Response(json) +def download_file_response(request, filePath, content_disposition): + filename = os.path.basename(filePath) + filesize = os.stat(filePath).st_size + file = open(filePath, "rb") + + # More than 100mb, normal http response, otherwise stream + # Django docs say to avoid streaming when possible + stream = filesize > 1e8 or request.GET.get('_force_stream', False) + if stream: + response = FileResponse(file) + else: + response = HttpResponse(FileWrapper(file), + content_type=(mimetypes.guess_type(filename)[0] or "application/zip")) + + response['Content-Type'] = mimetypes.guess_type(filename)[0] or "application/zip" + response['Content-Disposition'] = "{}; filename={}".format(content_disposition, filename) + response['Content-Length'] = filesize + + # For testing + if stream: + response['_stream'] = 'yes' + + return response + + """ Task downloads are simply aliases to download the task's assets (but require a shorter path and look nicer the API user) """ class TaskDownloads(TaskNestedView): - def get(self, request, pk=None, 
project_pk=None, asset=""): - """ - Downloads a task asset (if available) - """ - task = self.get_and_check_task(request, pk) + def get(self, request, pk=None, project_pk=None, asset=""): + """ + Downloads a task asset (if available) + """ + task = self.get_and_check_task(request, pk) - # Check and download - try: - asset_path = task.get_asset_download_path(asset) - except FileNotFoundError: - raise exceptions.NotFound("Asset does not exist") + # Check and download + try: + asset_path = task.get_asset_download_path(asset) + except FileNotFoundError: + raise exceptions.NotFound("Asset does not exist") - if not os.path.exists(asset_path): - raise exceptions.NotFound("Asset does not exist") + if not os.path.exists(asset_path): + raise exceptions.NotFound("Asset does not exist") - asset_filename = os.path.basename(asset_path) - - file = open(asset_path, "rb") - response = HttpResponse(FileWrapper(file), - content_type=(mimetypes.guess_type(asset_filename)[0] or "application/zip")) - response['Content-Disposition'] = "attachment; filename={}".format(asset) - return response + return download_file_response(request, asset_path, 'attachment') """ Raw access to the task's asset folder resources @@ -301,10 +321,4 @@ class TaskAssets(TaskNestedView): if (not os.path.exists(asset_path)) or os.path.isdir(asset_path): raise exceptions.NotFound("Asset does not exist") - asset_filename = os.path.basename(asset_path) - - file = open(asset_path, "rb") - response = HttpResponse(FileWrapper(file), - content_type=(mimetypes.guess_type(asset_filename)[0] or "application/zip")) - response['Content-Disposition'] = "inline; filename={}".format(asset_filename) - return response + return download_file_response(request, asset_path, 'inline') diff --git a/app/tests/test_api_task.py b/app/tests/test_api_task.py index 42179590..b14544b4 100644 --- a/app/tests/test_api_task.py +++ b/app/tests/test_api_task.py @@ -319,6 +319,11 @@ class TestApiTask(BootTransactionTestCase): res = client.get("/api/projects/{}/tasks/{}/download/{}".format(project.id, task.id, asset)) self.assertTrue(res.status_code == status.HTTP_200_OK) + # We can stream downloads + res = client.get("/api/projects/{}/tasks/{}/download/{}?_force_stream=1".format(project.id, task.id, task.ASSETS_MAP.keys()[0])) + self.assertTrue(res.status_code == status.HTTP_200_OK) + self.assertTrue(res.has_header('_stream')) + # A textured mesh archive file should exist self.assertTrue(os.path.exists(task.assets_path(task.ASSETS_MAP["textured_model.zip"]["deferred_path"]))) From 0fca0529002fd9d8bc02999d44ad4b9aff8cfa43 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 16 Jan 2019 10:47:35 -0500 Subject: [PATCH 09/12] Upgraded Django to 2.1 --- app/migrations/0001_initial.py | 9 +- app/models/preset.py | 2 +- app/models/project.py | 5 - app/models/task.py | 9 +- app/permissions.py | 15 -- app/postgis.py | 218 ----------------------------- app/tests/test_api_task.py | 2 +- app/tests/test_gdal.py | 20 +++ app/tests/test_postgis.py | 38 ----- nodeodm/migrations/0001_initial.py | 3 - nodeodm/models.py | 8 +- requirements.txt | 2 +- webodm/settings.py | 2 +- 13 files changed, 28 insertions(+), 305 deletions(-) delete mode 100644 app/permissions.py delete mode 100644 app/postgis.py create mode 100644 app/tests/test_gdal.py delete mode 100644 app/tests/test_postgis.py diff --git a/app/migrations/0001_initial.py b/app/migrations/0001_initial.py index 9f86966d..a96fd404 100644 --- a/app/migrations/0001_initial.py +++ b/app/migrations/0001_initial.py @@ -3,7 +3,6 @@ from 
__future__ import unicode_literals import app.models -import app.postgis from django.conf import settings import django.contrib.postgres.fields.jsonb from django.db import migrations, models @@ -39,9 +38,6 @@ class Migration(migrations.Migration): ('deleting', models.BooleanField(db_index=True, default=False, help_text='Whether this project has been marked for deletion. Projects that have running tasks need to wait for tasks to be properly cleaned up before they can be deleted.')), ('owner', models.ForeignKey(help_text='The person who created the project', on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), ], - options={ - 'permissions': (('view_project', 'Can view project'),), - }, ), migrations.CreateModel( name='ProjectGroupObjectPermission', @@ -80,15 +76,12 @@ class Migration(migrations.Migration): ('options', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, help_text='Options that are being used to process this task', validators=[app.models.validate_task_options])), ('console_output', models.TextField(blank=True, default='', help_text="Console output of the OpenDroneMap's process")), ('ground_control_points', models.FileField(blank=True, help_text='Optional Ground Control Points file to use for processing', null=True, upload_to=app.models.gcp_directory_path)), - ('orthophoto', app.postgis.OffDbRasterField(blank=True, help_text='Orthophoto created by OpenDroneMap', null=True, srid=4326)), + ('orthophoto', django.contrib.gis.db.models.RasterField(blank=True, help_text='Orthophoto created by OpenDroneMap', null=True, srid=4326)), ('created_at', models.DateTimeField(default=django.utils.timezone.now, help_text='Creation date')), ('pending_action', models.IntegerField(blank=True, choices=[(1, 'CANCEL'), (2, 'REMOVE'), (3, 'RESTART')], db_index=True, help_text='A requested action to be performed on the task. 
The selected action will be performed by the scheduler at the next iteration.', null=True)), ('processing_node', models.ForeignKey(blank=True, help_text='Processing node assigned to this task (or null if this task has not been associated yet)', null=True, on_delete=django.db.models.deletion.CASCADE, to='nodeodm.ProcessingNode')), ('project', models.ForeignKey(help_text='Project that this task belongs to', on_delete=django.db.models.deletion.CASCADE, to='app.Project')), ], - options={ - 'permissions': (('view_task', 'Can view task'),), - }, ), migrations.AddField( model_name='imageupload', diff --git a/app/models/preset.py b/app/models/preset.py index 5a571e03..03d7772c 100644 --- a/app/models/preset.py +++ b/app/models/preset.py @@ -12,7 +12,7 @@ logger = logging.getLogger('app.logger') class Preset(models.Model): owner = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE, help_text="The person who owns this preset") name = models.CharField(max_length=255, blank=False, null=False, help_text="A label used to describe the preset") - options = JSONField(default=list(), blank=True, help_text="Options that define this preset (same format as in a Task's options).", + options = JSONField(default=list, blank=True, help_text="Options that define this preset (same format as in a Task's options).", validators=[validate_task_options]) created_at = models.DateTimeField(default=timezone.now, help_text="Creation date") system = models.BooleanField(db_index=True, default=False, help_text="Whether this preset is available to every user in the system or just to its owner.") diff --git a/app/models/project.py b/app/models/project.py index 800dcbdb..0b488d6f 100644 --- a/app/models/project.py +++ b/app/models/project.py @@ -51,11 +51,6 @@ class Project(models.Model): ).filter(Q(orthophoto_extent__isnull=False) | Q(dsm_extent__isnull=False) | Q(dtm_extent__isnull=False)) .only('id', 'project_id')] - class Meta: - permissions = ( - ('view_project', 'Can view project'), - ) - @receiver(signals.post_save, sender=Project, dispatch_uid="project_post_save") def project_post_save(sender, instance, created, **kwargs): diff --git a/app/models/task.py b/app/models/task.py index 6b75254e..d0cf228a 100644 --- a/app/models/task.py +++ b/app/models/task.py @@ -198,8 +198,8 @@ class Task(models.Model): auto_processing_node = models.BooleanField(default=True, help_text="A flag indicating whether this task should be automatically assigned a processing node") status = models.IntegerField(choices=STATUS_CODES, db_index=True, null=True, blank=True, help_text="Current status of the task") last_error = models.TextField(null=True, blank=True, help_text="The last processing error received") - options = fields.JSONField(default=dict(), blank=True, help_text="Options that are being used to process this task", validators=[validate_task_options]) - available_assets = fields.ArrayField(models.CharField(max_length=80), default=list(), blank=True, help_text="List of available assets to download") + options = fields.JSONField(default=dict, blank=True, help_text="Options that are being used to process this task", validators=[validate_task_options]) + available_assets = fields.ArrayField(models.CharField(max_length=80), default=list, blank=True, help_text="List of available assets to download") console_output = models.TextField(null=False, default="", blank=True, help_text="Console output of the OpenDroneMap's process") orthophoto_extent = GeometryField(null=True, blank=True, srid=4326, help_text="Extent of the orthophoto 
created by OpenDroneMap") @@ -769,8 +769,3 @@ class Task(models.Model): except subprocess.CalledProcessError as e: logger.warning("Could not resize GCP file {}: {}".format(gcp_path, str(e))) return None - - class Meta: - permissions = ( - ('view_task', 'Can view task'), - ) diff --git a/app/permissions.py b/app/permissions.py deleted file mode 100644 index 39bb0c06..00000000 --- a/app/permissions.py +++ /dev/null @@ -1,15 +0,0 @@ -from rest_framework import permissions - -class GuardianObjectPermissions(permissions.DjangoObjectPermissions): - """ - Similar to `DjangoObjectPermissions`, but adding 'view' permissions. - """ - perms_map = { - 'GET': ['%(app_label)s.view_%(model_name)s'], - 'OPTIONS': ['%(app_label)s.view_%(model_name)s'], - 'HEAD': ['%(app_label)s.view_%(model_name)s'], - 'POST': ['%(app_label)s.add_%(model_name)s'], - 'PUT': ['%(app_label)s.change_%(model_name)s'], - 'PATCH': ['%(app_label)s.change_%(model_name)s'], - 'DELETE': ['%(app_label)s.delete_%(model_name)s'], - } \ No newline at end of file diff --git a/app/postgis.py b/app/postgis.py deleted file mode 100644 index e54f718d..00000000 --- a/app/postgis.py +++ /dev/null @@ -1,218 +0,0 @@ -import binascii -import struct - -from django.contrib.gis.db.backends.postgis.const import GDAL_TO_POSTGIS -from django.contrib.gis.db.backends.postgis.pgraster import ( - GDAL_TO_STRUCT, POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL, - STRUCT_SIZE, - pack) -from django.contrib.gis.db.backends.postgis.pgraster import chunk, unpack -from django.contrib.gis.db.models.fields import RasterField, BaseSpatialField -from django.contrib.gis.gdal import GDALException -from django.contrib.gis.gdal import GDALRaster -from django.forms import ValidationError -from django.utils.translation import ugettext_lazy as _ - - -class OffDbRasterField(RasterField): - """ - Out-of-db Raster field for GeoDjango -- evaluates into GDALRaster objects. - """ - - description = _("Out-of-db Raster Field") - - def from_db_value(self, value, expression, connection, context): - return from_pgraster(value, True) - - def get_db_prep_save(self, value, connection): - """ - Prepare the value for saving in the database. - """ - if not value: - return None - else: - return to_pgraster(value, True) - - def get_db_prep_value(self, value, connection, prepared=False): - self._check_connection(connection) - # Prepare raster for writing to database. - if not prepared: - value = to_pgraster(value, True) - - # Call RasterField's base class get_db_prep_value - return BaseSpatialField.get_db_prep_value(self, value, connection, prepared) - - def get_raster_prep_value(self, value, is_candidate): - """ - Return a GDALRaster if conversion is successful, otherwise return None. - """ - if isinstance(value, GDALRaster): - return value - elif is_candidate: - try: - return GDALRaster(value) - except GDALException: - pass - elif isinstance(value, (dict, str)): - try: - return GDALRaster(value) - except GDALException: - raise ValueError("Couldn't create spatial object from lookup value '%s'." % value) - - -class POSTGIS_BANDTYPES(object): - BANDTYPE_FLAG_OFFDB = 1 << 7 - BANDTYPE_FLAG_HASNODATA = 1 << 6 - BANDTYPE_FLAG_ISNODATA = 1 << 5 - - -def from_pgraster(data, offdb = False): - """ - Convert a PostGIS HEX String into a dictionary. 
- """ - if data is None: - return - - # Split raster header from data - header, data = chunk(data, 122) - header = unpack(POSTGIS_HEADER_STRUCTURE, header) - - # Parse band data - bands = [] - pixeltypes = [] - - while data: - # Get pixel type for this band - pixeltype, data = chunk(data, 2) - pixeltype = unpack('B', pixeltype)[0] - - # Check flags - offdb = has_nodata = False - - if POSTGIS_BANDTYPES.BANDTYPE_FLAG_OFFDB & pixeltype == POSTGIS_BANDTYPES.BANDTYPE_FLAG_OFFDB: - offdb = True - pixeltype ^= POSTGIS_BANDTYPES.BANDTYPE_FLAG_OFFDB - if POSTGIS_BANDTYPES.BANDTYPE_FLAG_HASNODATA & pixeltype == POSTGIS_BANDTYPES.BANDTYPE_FLAG_HASNODATA: - has_nodata = True - pixeltype ^= POSTGIS_BANDTYPES.BANDTYPE_FLAG_HASNODATA - if POSTGIS_BANDTYPES.BANDTYPE_FLAG_ISNODATA & pixeltype == POSTGIS_BANDTYPES.BANDTYPE_FLAG_ISNODATA: - raise ValidationError("Band has pixeltype BANDTYPE_FLAG_ISNODATA flag set, but we don't know how to handle it.") - - # Convert datatype from PostGIS to GDAL & get pack type and size - pixeltype = POSTGIS_TO_GDAL[pixeltype] - pack_type = GDAL_TO_STRUCT[pixeltype] - pack_size = 2 * STRUCT_SIZE[pack_type] - - # Parse band nodata value. The nodata value is part of the - # PGRaster string even if the nodata flag is True, so it always - # has to be chunked off the data string. - nodata, data = chunk(data, pack_size) - nodata = unpack(pack_type, nodata)[0] - - if offdb: - # Extract band number - band_num, data = chunk(data, 2) - - # Find NULL byte for end of file path - file_path_length = (binascii.unhexlify(data).find(b'\x00') + 1) * 2 - - # Extract path - file_path, data = chunk(data, file_path_length) - band_result = {'path' : binascii.unhexlify(file_path).decode()[:-1]} # Remove last NULL byte - else: - # Chunk and unpack band data (pack size times nr of pixels) - band, data = chunk(data, pack_size * header[10] * header[11]) - band_result = {'data': binascii.unhexlify(band)} - - # If the nodata flag is True, set the nodata value. - if has_nodata: - band_result['nodata_value'] = nodata - if offdb: - band_result['offdb'] = True - - # Append band data to band list - bands.append(band_result) - - # Store pixeltype of this band in pixeltypes array - pixeltypes.append(pixeltype) - - # Check that all bands have the same pixeltype. - # This is required by GDAL. PostGIS rasters could have different pixeltypes - # for bands of the same raster. - if len(set(pixeltypes)) != 1: - raise ValidationError("Band pixeltypes are not all equal.") - - if offdb and len(bands) > 0: - return bands[0]['path'] - else: - return { - 'srid': int(header[9]), - 'width': header[10], 'height': header[11], - 'datatype': pixeltypes[0], - 'origin': (header[5], header[6]), - 'scale': (header[3], header[4]), - 'skew': (header[7], header[8]), - 'bands': bands, - } - - -def to_pgraster(rast, offdb = False): - """ - Convert a GDALRaster into PostGIS Raster format. - """ - # Return if the raster is null - if rast is None or rast == '': - return - - # Prepare the raster header data as a tuple. The first two numbers are - # the endianness and the PostGIS Raster Version, both are fixed by - # PostGIS at the moment. - rasterheader = ( - 1, 0, len(rast.bands), rast.scale.x, rast.scale.y, - rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y, - rast.srs.srid, rast.width, rast.height, - ) - - # Hexlify raster header - result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader) - i = 0 - - for band in rast.bands: - # The PostGIS raster band header has exactly two elements, a 8BUI byte - # and the nodata value. 
- # - # The 8BUI stores both the PostGIS pixel data type and a nodata flag. - # It is composed as the datatype integer plus optional flags for existing - # nodata values, offdb or isnodata: - # 8BUI_VALUE = PG_PIXEL_TYPE (0-11) + FLAGS - # - # For example, if the byte value is 71, then the datatype is - # 71-64 = 7 (32BSI) and the nodata value is True. - structure = 'B' + GDAL_TO_STRUCT[band.datatype()] - - # Get band pixel type in PostGIS notation - pixeltype = GDAL_TO_POSTGIS[band.datatype()] - - # Set the nodata flag - if band.nodata_value is not None: - pixeltype |= POSTGIS_BANDTYPES.BANDTYPE_FLAG_HASNODATA - if offdb: - pixeltype |= POSTGIS_BANDTYPES.BANDTYPE_FLAG_OFFDB - - # Pack band header - bandheader = pack(structure, (pixeltype, band.nodata_value or 0)) - - # Hexlify band data - if offdb: - # Band num | Path | NULL terminator - band_data_hex = binascii.hexlify(struct.Struct('b').pack(i) + rast.name.encode('utf-8') + b'\x00').upper() - else: - band_data_hex = binascii.hexlify(band.data(as_memoryview=True)).upper() - - # Add packed header and band data to result - result += bandheader + band_data_hex - - i += 1 - - # Cast raster to string before passing it to the DB - return result.decode() \ No newline at end of file diff --git a/app/tests/test_api_task.py b/app/tests/test_api_task.py index b14544b4..f492c18e 100644 --- a/app/tests/test_api_task.py +++ b/app/tests/test_api_task.py @@ -320,7 +320,7 @@ class TestApiTask(BootTransactionTestCase): self.assertTrue(res.status_code == status.HTTP_200_OK) # We can stream downloads - res = client.get("/api/projects/{}/tasks/{}/download/{}?_force_stream=1".format(project.id, task.id, task.ASSETS_MAP.keys()[0])) + res = client.get("/api/projects/{}/tasks/{}/download/{}?_force_stream=1".format(project.id, task.id, list(task.ASSETS_MAP.keys())[0])) self.assertTrue(res.status_code == status.HTTP_200_OK) self.assertTrue(res.has_header('_stream')) diff --git a/app/tests/test_gdal.py b/app/tests/test_gdal.py new file mode 100644 index 00000000..9bbfeac4 --- /dev/null +++ b/app/tests/test_gdal.py @@ -0,0 +1,20 @@ +from django.contrib.gis.gdal import GDALRaster + +from .classes import BootTestCase +import os + +class TestApi(BootTestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + def test_gdal_functions(self): + raster = GDALRaster(os.path.join("app", "fixtures", "orthophoto.tif")) + + self.assertTrue(raster.srid == 32615) + self.assertTrue(raster.width == 212) + + + diff --git a/app/tests/test_postgis.py b/app/tests/test_postgis.py deleted file mode 100644 index fae08eae..00000000 --- a/app/tests/test_postgis.py +++ /dev/null @@ -1,38 +0,0 @@ -from django.contrib.gis.gdal import GDALRaster - -from .classes import BootTestCase -from app.postgis import from_pgraster, to_pgraster -import os - -class TestApi(BootTestCase): - def setUp(self): - pass - - def tearDown(self): - pass - - def test_pgraster_functions(self): - # Make sure conversion from PostGIS <---> GDALRaster works - # for out-of-db - raster = GDALRaster(os.path.join("app", "fixtures", "orthophoto.tif")) - - self.assertTrue(raster.srid == 32615) - self.assertTrue(raster.width == 212) - - # Classic - hexwkb = to_pgraster(raster) - deserialized_raster = GDALRaster(from_pgraster(hexwkb)) - self.assertTrue(len(deserialized_raster.bands) == 4) - self.assertTrue(deserialized_raster.srid == raster.srid) - self.assertTrue(deserialized_raster.width == raster.width) - self.assertTrue(deserialized_raster.height == raster.height) - - # Off-db - hexwkb = to_pgraster(raster, True) - 
deserialized_raster = GDALRaster(from_pgraster(hexwkb, True)) - - self.assertTrue(deserialized_raster.name == raster.name) - self.assertTrue(deserialized_raster.srid == raster.srid) - self.assertTrue(deserialized_raster.width == raster.width) - self.assertTrue(deserialized_raster.height == raster.height) - diff --git a/nodeodm/migrations/0001_initial.py b/nodeodm/migrations/0001_initial.py index 3404c54a..42521dfd 100644 --- a/nodeodm/migrations/0001_initial.py +++ b/nodeodm/migrations/0001_initial.py @@ -29,9 +29,6 @@ class Migration(migrations.Migration): ('queue_count', models.PositiveIntegerField(default=0, help_text='Number of tasks currently being processed by this node (as reported by the node itself)')), ('available_options', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='Description of the options that can be used for processing')), ], - options={ - 'permissions': (('view_processingnode', 'Can view processing node'),), - }, ), migrations.CreateModel( name='ProcessingNodeGroupObjectPermission', diff --git a/nodeodm/models.py b/nodeodm/models.py index efd1d691..54a47b22 100644 --- a/nodeodm/models.py +++ b/nodeodm/models.py @@ -40,7 +40,7 @@ class ProcessingNode(models.Model): api_version = models.CharField(max_length=32, null=True, help_text="API version used by the node") last_refreshed = models.DateTimeField(null=True, help_text="When was the information about this node last retrieved?") queue_count = models.PositiveIntegerField(default=0, help_text="Number of tasks currently being processed by this node (as reported by the node itself)") - available_options = fields.JSONField(default=dict(), help_text="Description of the options that can be used for processing") + available_options = fields.JSONField(default=dict, help_text="Description of the options that can be used for processing") token = models.CharField(max_length=1024, blank=True, default="", help_text="Token to use for authentication. 
If the node doesn't have authentication, you can leave this field blank.") max_images = models.PositiveIntegerField(help_text="Maximum number of images accepted by this node.", blank=True, null=True) odm_version = models.CharField(max_length=32, null=True, help_text="OpenDroneMap version used by the node") @@ -223,12 +223,6 @@ class ProcessingNode(models.Model): plugin_signals.processing_node_removed.send_robust(sender=self.__class__, processing_node_id=pnode_id) - class Meta: - permissions = ( - ('view_processingnode', 'Can view processing node'), - ) - - # First time a processing node is created, automatically try to update @receiver(signals.post_save, sender=ProcessingNode, dispatch_uid="update_processing_node_info") def auto_update_node_info(sender, instance, created, **kwargs): diff --git a/requirements.txt b/requirements.txt index 86d5ca40..089357a0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ django-codemirror2==0.2 django-colorfield==0.1.14 django-compressor==2.2 django-cors-headers==2.2.0 -django-filter==1.1.0 +django-filter==2.0.0 django-guardian==1.4.9 django-imagekit==4.0.1 django-libsass==0.7 diff --git a/webodm/settings.py b/webodm/settings.py index 16cd665a..69d41e0c 100644 --- a/webodm/settings.py +++ b/webodm/settings.py @@ -265,7 +265,7 @@ MESSAGE_TAGS = { # Use Django's standard django.contrib.auth permissions (no anonymous usage) REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': [ - 'app.permissions.GuardianObjectPermissions', + 'rest_framework.permissions.DjangoObjectPermissions', ], 'DEFAULT_FILTER_BACKENDS': [ 'rest_framework.filters.DjangoObjectPermissionsFilter', From b32c552fddcd7d96351dad4342f6b5c4742541ea Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 16 Jan 2019 12:12:17 -0500 Subject: [PATCH 10/12] Flag to disable creation of default node --- README.md | 10 ++++++++-- webodm.sh | 13 ++++++++++++- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 8e9a1e7b..e30ad513 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ A free, user-friendly, extendable application and [API](http://docs.webodm.org) * [Getting Started](#getting-started) - * [Add More Processing Nodes](#add-more-processing-nodes) + * [Manage Processing Nodes](#manage-processing-nodes) * [Enable SSL](#enable-ssl) * [Where Are My Files Stored?](#where-are-my-files-stored) * [Common Troubleshooting](#common-troubleshooting) @@ -80,7 +80,7 @@ For Windows and macOS users an [installer](https://www.webodm.org/installer) is You can also run WebODM from a Live USB/DVD. See [LiveODM](https://www.opendronemap.org/liveodm/). -### Add More Processing Nodes +### Manage Processing Nodes WebODM can be linked to one or more processing nodes running [NodeODM](https://github.com/OpenDroneMap/NodeODM). The default configuration already includes a "node-odm-1" processing node which runs on the same machine as WebODM, just to help you get started. As you become more familiar with WebODM, you might want to install processing nodes on separate machines. @@ -88,6 +88,12 @@ Adding more processing nodes will allow you to run multiple jobs in parallel. You **will not be able to distribute a single job across multiple processing nodes**. We are actively working to bring this feature to reality, but we're not there yet. +If you don't need the default "node-odm-1" node, simply pass the `--no-default-node` flag when starting WebODM: + +`./webodm.sh restart --no-default-node`. + +Then from the web interface simply manually remove the "node-odm-1" node. 
+ ### Enable SSL WebODM has the ability to automatically request and install a SSL certificate via [Let’s Encrypt](https://letsencrypt.org/), or you can manually specify your own key/certificate pair. diff --git a/webodm.sh b/webodm.sh index 6c02d70f..535f7ee3 100755 --- a/webodm.sh +++ b/webodm.sh @@ -28,6 +28,8 @@ elif [[ $platform = "MacOS / OSX" ]] && [[ $(pwd) == /Users* ]]; then plugins_volume=true fi +load_default_node=true + # Load default values source .env DEFAULT_PORT="$WO_PORT" @@ -91,6 +93,10 @@ case $key in plugins_volume=true shift # past argument ;; + --no-default-node) + load_default_node=false + shift # past argument + ;; *) # unknown option POSITIONAL+=("$1") # save it in an array for later shift # past argument @@ -125,6 +131,7 @@ usage(){ echo " --port Set the port that WebODM should bind to (default: $DEFAULT_PORT)" echo " --hostname Set the hostname that WebODM will be accessible from (default: $DEFAULT_HOST)" echo " --media-dir Path where processing results will be stored to (default: $DEFAULT_MEDIA_DIR (docker named volume))" + echo " --no-default-node Do not create a default NodeODM node attached to WebODM on startup (default: disabled)" echo " --ssl Enable SSL and automatically request and install a certificate from letsencrypt.org. (default: $DEFAULT_SSL)" echo " --ssl-key Manually specify a path to the private key file (.pem) to use with nginx to enable SSL (default: None)" echo " --ssl-cert Manually specify a path to the certificate file (.pem) to use with nginx to enable SSL (default: None)" @@ -193,7 +200,11 @@ start(){ echo "Make sure to issue a $0 down if you decide to change the environment." echo "" - command="docker-compose -f docker-compose.yml -f docker-compose.nodeodm.yml" + command="docker-compose -f docker-compose.yml" + + if [[ $load_default_node = true ]]; then + command+=" -f docker-compose.nodeodm.yml" + fi if [ "$WO_SSL" = "YES" ]; then if [ ! -z "$WO_SSL_KEY" ] && [ ! -e "$WO_SSL_KEY" ]; then From d196e4bd13908c9ad6e47064966674398a2498c9 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 16 Jan 2019 13:04:32 -0500 Subject: [PATCH 11/12] Cancel image upload / resize on task cancel / delete --- app/models/task.py | 16 ++++++++++++++-- app/static/app/js/components/TaskListItem.jsx | 5 ++++- app/tests/test_api_task.py | 9 ++++++++- package.json | 2 +- 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/app/models/task.py b/app/models/task.py index d0cf228a..0066acef 100644 --- a/app/models/task.py +++ b/app/models/task.py @@ -39,7 +39,8 @@ import subprocess logger = logging.getLogger('app.logger') - +class TaskInterruptedException(Exception): + pass def task_directory_path(taskId, projectId): return 'project/{0}/task/{1}/'.format(projectId, taskId) @@ -395,6 +396,7 @@ class Task(models.Model): nonlocal last_update if time.time() - last_update >= 2 or (progress >= 1.0 - 1e-6 and progress <= 1.0 + 1e-6): Task.objects.filter(pk=self.id).update(upload_progress=progress) + self.check_if_canceled() last_update = time.time() # This takes a while @@ -613,7 +615,9 @@ class Task(models.Model): logger.warning("{} cannot communicate with processing node: {}".format(self, str(e))) except ProcessingTimeout as e: logger.warning("{} timed out with error: {}. 
We'll try reprocessing at the next tick.".format(self, str(e))) - + except TaskInterruptedException as e: + # Task was interrupted during image resize / upload + logger.warning("{} interrupted".format(self, str(e))) def get_tile_path(self, tile_type, z, x, y): return self.assets_path("{}_tiles".format(tile_type), z, x, "{}.png".format(y)) @@ -706,6 +710,12 @@ class Task(models.Model): return [os.path.join(directory, f) for f in os.listdir(directory) if re.match(regex, f, re.IGNORECASE)] + def check_if_canceled(self): + # Check if task has been canceled/removed + if Task.objects.only("pending_action").get(pk=self.id).pending_action in [pending_actions.CANCEL, + pending_actions.REMOVE]: + raise TaskInterruptedException() + def resize_images(self): """ Destructively resize this task's JPG images while retaining EXIF tags. @@ -729,7 +739,9 @@ class Task(models.Model): resized_images_count += 1 if time.time() - last_update >= 2: + # Update progress Task.objects.filter(pk=self.id).update(resize_progress=(float(resized_images_count) / float(total_images))) + self.check_if_canceled() last_update = time.time() with ThreadPoolExecutor(max_workers=cpu_count()) as executor: diff --git a/app/static/app/js/components/TaskListItem.jsx b/app/static/app/js/components/TaskListItem.jsx index a7958309..37c2a186 100644 --- a/app/static/app/js/components/TaskListItem.jsx +++ b/app/static/app/js/components/TaskListItem.jsx @@ -426,7 +426,10 @@ class TaskListItem extends React.Component { defaultError: "Cannot delete task." })); - const disabled = this.state.actionButtonsDisabled || !!task.pending_action; + const disabled = this.state.actionButtonsDisabled || + ([pendingActions.CANCEL, + pendingActions.REMOVE, + pendingActions.RESTART].indexOf(task.pending_action) !== -1); actionButtons = (
{task.status === statusCodes.COMPLETED ? diff --git a/app/tests/test_api_task.py b/app/tests/test_api_task.py index f492c18e..0e559893 100644 --- a/app/tests/test_api_task.py +++ b/app/tests/test_api_task.py @@ -15,7 +15,7 @@ from rest_framework.test import APIClient import worker from django.utils import timezone from app.models import Project, Task, ImageUpload -from app.models.task import task_directory_path, full_task_directory_path +from app.models.task import task_directory_path, full_task_directory_path, TaskInterruptedException from app.plugins.signals import task_completed, task_removed, task_removing from app.tests.classes import BootTransactionTestCase from nodeodm import status_codes @@ -394,9 +394,16 @@ class TestApiTask(BootTransactionTestCase): self.assertTrue(task.status in [status_codes.RUNNING, status_codes.COMPLETED]) + # Should return without issues + task.check_if_canceled() + # Cancel a task res = client.post("/api/projects/{}/tasks/{}/cancel/".format(project.id, task.id)) self.assertTrue(res.status_code == status.HTTP_200_OK) + + # Should raise TaskInterruptedException + self.assertRaises(TaskInterruptedException, task.check_if_canceled) + # task is processed right away # Should have been canceled diff --git a/package.json b/package.json index 6cb4a70b..7fd87c19 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "WebODM", - "version": "0.7.1", + "version": "0.8.0", "description": "Open Source Drone Image Processing", "main": "index.js", "scripts": { From e79de9490ddaeec3c142ca8cb5e2effc0cca7192 Mon Sep 17 00:00:00 2001 From: Piero Toffanin Date: Wed, 16 Jan 2019 14:51:57 -0500 Subject: [PATCH 12/12] Updated README, fixed unit tests --- README.md | 24 ++++++++++++++++++++++++ app/models/task.py | 14 ++++++++------ app/tests/test_api_task.py | 17 ++++++++++++++--- 3 files changed, 46 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index e30ad513..d39e9299 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ A free, user-friendly, extendable application and [API](http://docs.webodm.org) * [Backup and Restore](#backup-and-restore) * [Reset Password](#reset-password) * [Manage Plugins](#manage-plugins) + * [Update](#update) * [Customizing and Extending](#customizing-and-extending) * [API Docs](#api-docs) * [ODM, NodeODM, WebODM... what?](#odm-nodeodm-webodm-what) @@ -134,6 +135,7 @@ On Windows, docker-compose fails with `Failed to execute the script docker-compo Cannot access WebODM using Microsoft Edge on Windows 10 | Try to tweak your internet properties according to [these instructions](http://www.hanselman.com/blog/FixedMicrosoftEdgeCantSeeOrOpenVirtualBoxhostedLocalWebSites.aspx) Getting a `No space left on device` error, but hard drive has enough space left | Docker on Windows by default will allocate only 20GB of space to the default docker-machine. You need to increase that amount. See [this link](http://support.divio.com/local-development/docker/managing-disk-space-in-your-docker-vm) and [this link](https://www.howtogeek.com/124622/how-to-enlarge-a-virtual-machines-disk-in-virtualbox-or-vmware/) Cannot start WebODM via `./webodm.sh start`, error messages are different at each retry | You could be running out of memory. Make sure you have enough RAM available. 2GB should be the recommended minimum, unless you know what you are doing +While running WebODM with Docker Toolbox (VirtualBox) you cannot access WebODM from another computer in the same network. 
| As Administrator, run `cmd.exe` and then type `"C:\Program Files\Oracle\VirtualBox\VBoxManage.exe" controlvm "default" natpf1 "rule-name,tcp,,8000,,8000"` Have you had other issues? Please [report them](https://github.com/OpenDroneMap/WebODM/issues/new) so that we can include them in this document. @@ -187,6 +189,28 @@ To enable/disable a plugin type: On some platforms (eg. Windows), if you want to manage plugins, you will need to make sure that the `./plugins` directory can be mounted as a docker volume and then pass the `--mount-plugins-volume` flag to `webodm.sh`. Check the docker documentation. +### Update + +If you use docker, updating is as simple as running: + +```bash +./webodm.sh update +``` + +If you are running WebODM [natively](#run-it-natively), these commands should do it: + +```bash +cd /webodm +sudo su odm # Only in case you are running WebODM with a different user +git pull origin master +source python3-venv/bin/activate # If you are running a virtualenv +npm install +pip install -r requirements.txt +webpack --mode production +python manage.py collectstatic --noinput +python manage.py migrate +``` + ## Customizing and Extending Small customizations such as changing the application colors, name, logo, or addying custom CSS/HTML/Javascript can be performed directly from the Customize -- Brand/Theme panels within WebODM. No need to fork or change the code. diff --git a/app/models/task.py b/app/models/task.py index 0066acef..759f4a16 100644 --- a/app/models/task.py +++ b/app/models/task.py @@ -394,9 +394,14 @@ class Task(models.Model): last_update = 0 def callback(progress): nonlocal last_update - if time.time() - last_update >= 2 or (progress >= 1.0 - 1e-6 and progress <= 1.0 + 1e-6): - Task.objects.filter(pk=self.id).update(upload_progress=progress) + + time_has_elapsed = time.time() - last_update >= 2 + + if time_has_elapsed: self.check_if_canceled() + + if time_has_elapsed or (progress >= 1.0 - 1e-6 and progress <= 1.0 + 1e-6): + Task.objects.filter(pk=self.id).update(upload_progress=progress) last_update = time.time() # This takes a while @@ -744,10 +749,7 @@ class Task(models.Model): self.check_if_canceled() last_update = time.time() - with ThreadPoolExecutor(max_workers=cpu_count()) as executor: - resized_images = list(filter(lambda i: i is not None, executor.map( - partial(resize_image, resize_to=self.resize_to, done=callback), - images_path))) + resized_images = list(map(partial(resize_image, resize_to=self.resize_to, done=callback), images_path)) Task.objects.filter(pk=self.id).update(resize_progress=1.0) diff --git a/app/tests/test_api_task.py b/app/tests/test_api_task.py index 0e559893..376f89a0 100644 --- a/app/tests/test_api_task.py +++ b/app/tests/test_api_task.py @@ -14,6 +14,8 @@ from rest_framework.test import APIClient import worker from django.utils import timezone + +from app import pending_actions from app.models import Project, Task, ImageUpload from app.models.task import task_directory_path, full_task_directory_path, TaskInterruptedException from app.plugins.signals import task_completed, task_removed, task_removing @@ -401,14 +403,23 @@ class TestApiTask(BootTransactionTestCase): res = client.post("/api/projects/{}/tasks/{}/cancel/".format(project.id, task.id)) self.assertTrue(res.status_code == status.HTTP_200_OK) - # Should raise TaskInterruptedException - self.assertRaises(TaskInterruptedException, task.check_if_canceled) - # task is processed right away # Should have been canceled task.refresh_from_db() self.assertTrue(task.status == 
status_codes.CANCELED) + self.assertTrue(task.pending_action is None) + + # Manually set pending action + task.pending_action = pending_actions.CANCEL + task.save() + + # Should raise TaskInterruptedException + self.assertRaises(TaskInterruptedException, task.check_if_canceled) + + # Restore + task.pending_action = None + task.save() # Remove a task and verify that it calls the proper plugins signals with catch_signal(task_removing) as h1:
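# A minimal sketch of the cancellation pattern these commits introduce: long-running
# steps (image resize, image upload) periodically re-read the task's pending_action
# and raise TaskInterruptedException so the worker stops early instead of finishing
# doomed work. The sketch below is illustrative only — the function and parameter
# names are stand-ins, not the real WebODM models — and is kept commented out so it
# does not read as part of the patched test file:
#
#   import time
#
#   class TaskInterruptedException(Exception):
#       pass
#
#   def make_progress_callback(read_pending_action, write_progress, interval=2):
#       # Returns a throttled callback: at most every `interval` seconds it polls
#       # for a cancel/remove request, then persists the current progress value.
#       last_update = 0
#       def callback(progress):
#           nonlocal last_update
#           if time.time() - last_update >= interval:
#               if read_pending_action() in ("CANCEL", "REMOVE"):
#                   raise TaskInterruptedException()
#               write_progress(progress)
#               last_update = time.time()
#       return callback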