Merge pull request #1371 from pierotofy/quotas

External auth support, task sizes, quotas
pull/1386/head v2.1.0
Piero Toffanin 2023-09-11 14:34:47 -04:00 zatwierdzone przez GitHub
commit 53079dbd30
Nie znaleziono w bazie danych klucza dla tego podpisu
ID klucza GPG: 4AEE18F83AFDEB23
42 zmienionych plików z 1088 dodań i 120 usunięć

1
.env
Wyświetl plik

@ -10,3 +10,4 @@ WO_DEBUG=NO
WO_DEV=NO
WO_BROKER=redis://broker
WO_DEFAULT_NODES=1
WO_SETTINGS=

Wyświetl plik

@ -10,10 +10,13 @@ from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.html import format_html
from guardian.admin import GuardedModelAdmin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from app.models import PluginDatum
from app.models import Preset
from app.models import Plugin
from app.models import Profile
from app.plugins import get_plugin_by_name, enable_plugin, disable_plugin, delete_plugin, valid_plugin, \
get_plugins_persistent_path, clear_plugins_cache, init_plugins
from .models import Project, Task, Setting, Theme
@ -260,3 +263,14 @@ class PluginAdmin(admin.ModelAdmin):
admin.site.register(Plugin, PluginAdmin)
class ProfileInline(admin.StackedInline):
    # Edit the user's Profile (quota settings) inline on the User admin page.
    model = Profile
    can_delete = False  # every User must keep its Profile


class UserAdmin(BaseUserAdmin):
    # Standard Django UserAdmin, extended to show the Profile inline.
    inlines = [ProfileInline]


# Re-register UserAdmin
# (unregister the stock User admin first, then register ours with the inline)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)

Wyświetl plik

@ -1,7 +1,10 @@
from django.contrib.auth.models import User, Group
from rest_framework import serializers, viewsets, generics, status
from app.models import Profile
from rest_framework import serializers, viewsets, generics, status, exceptions
from rest_framework.decorators import action
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.hashers import make_password
from app import models
@ -20,6 +23,7 @@ class AdminUserViewSet(viewsets.ModelViewSet):
if email is not None:
queryset = queryset.filter(email=email)
return queryset
def create(self, request):
data = request.data.copy()
password = data.get('password')
@ -44,3 +48,37 @@ class AdminGroupViewSet(viewsets.ModelViewSet):
if name is not None:
queryset = queryset.filter(name=name)
return queryset
class ProfileSerializer(serializers.ModelSerializer):
    """Serializes a user's Profile; the owning user cannot be reassigned via the API."""
    class Meta:
        model = Profile
        exclude = ('id', )
        read_only_fields = ('user', )
class AdminProfileViewSet(viewsets.ModelViewSet):
    """
    Admin-only CRUD API over user profiles (quota management).

    Profiles are addressed by the owning user's id (``lookup_field = 'user'``)
    rather than by the profile's own primary key.
    """
    pagination_class = None
    serializer_class = ProfileSerializer
    permission_classes = [IsAdminUser]
    lookup_field = 'user'

    def get_queryset(self):
        return Profile.objects.all()

    @action(detail=True, methods=['post'])
    def update_quota_deadline(self, request, user=None):
        """
        Set the quota-exceeded deadline for a user.

        Expects an ``hours`` value (float, >= 0) in the request body and
        returns ``{'deadline': <timestamp>}``.

        :raises ValidationError: when ``hours`` is missing, negative or not a number
        :raises NotFound: when the user has no profile
        """
        try:
            hours = float(request.data.get('hours', ''))
            if hours < 0:
                raise ValueError("hours must be >= 0")
        except (ValueError, TypeError) as e:
            # TypeError covers non-scalar payloads (e.g. a JSON list/dict),
            # which float() rejects with TypeError rather than ValueError;
            # without this the view would return a 500 instead of a 400.
            raise exceptions.ValidationError(str(e))
        try:
            p = Profile.objects.get(user=user)
        except ObjectDoesNotExist:
            raise exceptions.NotFound()
        return Response({'deadline': p.set_quota_deadline(hours)}, status=status.HTTP_200_OK)

Wyświetl plik

@ -0,0 +1,39 @@
from django.contrib.auth.models import User
from django.contrib.auth import login
from rest_framework.views import APIView
from rest_framework import exceptions, permissions, parsers
from rest_framework.response import Response
from app.auth.backends import get_user_from_external_auth_response
import requests
from webodm import settings
class ExternalTokenAuth(APIView):
    """
    Exchange an external auth server's access token for a local session.

    Reads the ``external_access_token`` cookie, forwards it as a Bearer token
    to ``settings.EXTERNAL_AUTH_ENDPOINT`` and, if the external server
    validates it, creates/updates the matching local user and logs it in.
    All failures are reported as ``{'error': ...}`` JSON bodies (HTTP 200).
    """
    permission_classes = (permissions.AllowAny,)
    parser_classes = (parsers.JSONParser, parsers.FormParser,)

    def post(self, request):
        # This should never happen
        if settings.EXTERNAL_AUTH_ENDPOINT == '':
            return Response({'error': 'EXTERNAL_AUTH_ENDPOINT not set'})
        token = request.COOKIES.get('external_access_token', '')
        if token == '':
            return Response({'error': 'external_access_token cookie not set'})
        try:
            # Ask the external server to validate the token on our behalf
            r = requests.post(settings.EXTERNAL_AUTH_ENDPOINT, headers={
                'Authorization': "Bearer %s" % token
            })
            res = r.json()
            if res.get('user_id') is not None:
                user = get_user_from_external_auth_response(res)
                if user is not None:
                    # Bind the session to the standard ModelBackend so later
                    # requests don't depend on the external backend being active
                    login(request, user, backend='django.contrib.auth.backends.ModelBackend')
                    return Response({'redirect': '/'})
                else:
                    return Response({'error': 'Invalid credentials'})
            else:
                return Response({'error': res.get('message', 'Invalid external server response')})
        except Exception as e:
            # Network errors and non-JSON responses surface as a generic error
            return Response({'error': str(e)})

Wyświetl plik

@ -75,7 +75,7 @@ class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = models.Task
exclude = ('console_output', 'orthophoto_extent', 'dsm_extent', 'dtm_extent', )
read_only_fields = ('processing_time', 'status', 'last_error', 'created_at', 'pending_action', 'available_assets', )
read_only_fields = ('processing_time', 'status', 'last_error', 'created_at', 'pending_action', 'available_assets', 'size', )
class TaskViewSet(viewsets.ViewSet):
"""
@ -184,6 +184,7 @@ class TaskViewSet(viewsets.ViewSet):
if task.images_count < 1:
raise exceptions.ValidationError(detail=_("You need to upload at least 1 file before commit"))
task.update_size()
task.save()
worker_tasks.process_task.delay(task.id)

Wyświetl plik

@ -6,13 +6,14 @@ from .projects import ProjectViewSet
from .tasks import TaskViewSet, TaskDownloads, TaskAssets, TaskAssetsImport
from .imageuploads import Thumbnail, ImageDownload
from .processingnodes import ProcessingNodeViewSet, ProcessingNodeOptionsView
from .admin import AdminUserViewSet, AdminGroupViewSet
from .admin import AdminUserViewSet, AdminGroupViewSet, AdminProfileViewSet
from rest_framework_nested import routers
from rest_framework_jwt.views import obtain_jwt_token
from .tiler import TileJson, Bounds, Metadata, Tiles, Export
from .potree import Scene, CameraView
from .workers import CheckTask, GetTaskResult
from .users import UsersList
from .externalauth import ExternalTokenAuth
from webodm import settings
router = routers.DefaultRouter()
@ -26,6 +27,7 @@ tasks_router.register(r'tasks', TaskViewSet, basename='projects-tasks')
admin_router = routers.DefaultRouter()
admin_router.register(r'admin/users', AdminUserViewSet, basename='admin-users')
admin_router.register(r'admin/groups', AdminGroupViewSet, basename='admin-groups')
# Each route needs a unique basename: the profiles route previously reused
# 'admin-groups', which collides with the groups route's reverse URL names.
admin_router.register(r'admin/profiles', AdminProfileViewSet, basename='admin-profiles')
urlpatterns = [
url(r'processingnodes/options/$', ProcessingNodeOptionsView.as_view()),
@ -56,9 +58,12 @@ urlpatterns = [
url(r'^auth/', include('rest_framework.urls')),
url(r'^token-auth/', obtain_jwt_token),
url(r'^plugins/(?P<plugin_name>[^/.]+)/(.*)$', api_view_handler)
url(r'^plugins/(?P<plugin_name>[^/.]+)/(.*)$', api_view_handler),
]
if settings.ENABLE_USERS_API:
urlpatterns.append(url(r'users', UsersList.as_view()))
if settings.EXTERNAL_AUTH_ENDPOINT != '':
urlpatterns.append(url(r'^external-token-auth/', ExternalTokenAuth.as_view()))

Wyświetl plik

@ -0,0 +1,88 @@
import requests
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from nodeodm.models import ProcessingNode
from webodm import settings
from guardian.shortcuts import assign_perm
import logging
logger = logging.getLogger('app.logger')
def get_user_from_external_auth_response(res):
    """
    Create or update a local ``User`` from the JSON response of the external
    auth server and return it, or ``None`` when the response indicates failure.

    Also syncs the user's quota and (when node info with a token is present)
    creates/updates a ProcessingNode the user is granted view access to.
    """
    # An explicit message/error field means authentication failed upstream
    if 'message' in res or 'error' in res:
        return None
    if 'user_id' in res and 'username' in res:
        try:
            user = User.objects.get(pk=res['user_id'])
        except User.DoesNotExist:
            # Mirror the external user locally, reusing the same primary key
            user = User(pk=res['user_id'], username=res['username'])
            user.save()

        # Update user info
        if user.username != res['username']:
            user.username = res['username']
            user.save()

        # Quota defaults to -1 (unlimited); a node-level limit wins over
        # the account-level one
        maxQuota = -1
        if 'maxQuota' in res:
            maxQuota = res['maxQuota']
        if 'node' in res and 'limits' in res['node'] and 'maxQuota' in res['node']['limits']:
            maxQuota = res['node']['limits']['maxQuota']

        # Update quotas
        if user.profile.quota != maxQuota:
            user.profile.quota = maxQuota
            # NOTE(review): this saves the User, not the Profile — presumably
            # a post_save signal on User persists the profile; confirm
            user.save()

        # Setup/update processing node
        if 'node' in res and 'hostname' in res['node'] and 'port' in res['node']:
            hostname = res['node']['hostname']
            port = res['node']['port']
            token = res['node'].get('token', '')

            # Only add/update if a token is provided, since we use
            # tokens as unique identifiers for hostname/port updates
            if token != "":
                try:
                    node = ProcessingNode.objects.get(token=token)
                    if node.hostname != hostname or node.port != port:
                        node.hostname = hostname
                        node.port = port
                        node.save()
                except ProcessingNode.DoesNotExist:
                    node = ProcessingNode(hostname=hostname, port=port, token=token)
                    node.save()

                if not user.has_perm('view_processingnode', node):
                    assign_perm('view_processingnode', user, node)

        return user
    else:
        return None
class ExternalBackend(ModelBackend):
    """
    Django authentication backend that delegates credential checks to an
    external server (``settings.EXTERNAL_AUTH_ENDPOINT``).

    Inactive (returns ``None`` from every method) when the endpoint is not
    configured, so it can be left in AUTHENTICATION_BACKENDS unconditionally.
    """

    def authenticate(self, request, username=None, password=None):
        """Return the matching local User, or None on any failure."""
        if settings.EXTERNAL_AUTH_ENDPOINT == "":
            return None

        try:
            r = requests.post(settings.EXTERNAL_AUTH_ENDPOINT, {
                'username': username,
                'password': password
            }, headers={'Accept': 'application/json'})
            res = r.json()
            return get_user_from_external_auth_response(res)
        except Exception:
            # Network failures or a non-JSON body simply mean we cannot
            # authenticate; a bare "except:" here would also have swallowed
            # SystemExit/KeyboardInterrupt, so catch Exception instead.
            return None

    def get_user(self, user_id):
        """Session-restore hook; resolves a stored user id to a User."""
        if settings.EXTERNAL_AUTH_ENDPOINT == "":
            return None

        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None

Wyświetl plik

@ -0,0 +1,50 @@
# Generated by Django 2.2.27 on 2023-08-21 14:50
import os
from django.db import migrations, models
from webodm import settings
def task_path(project_id, task_id, *args):
    """Absolute path of a task's folder under MEDIA_ROOT, plus optional subpaths."""
    parts = ["project", str(project_id), "task", str(task_id)] + list(args)
    return os.path.join(settings.MEDIA_ROOT, *parts)
def update_size(task):
    """Recompute and persist a task's on-disk size in megabytes (best-effort)."""
    try:
        folder = task_path(task.project.id, task.id)
        # Sum regular files only; symlinks are skipped to avoid double counting
        total_bytes = sum(
            os.path.getsize(os.path.join(dirpath, name))
            for dirpath, _, filenames in os.walk(folder)
            for name in filenames
            if not os.path.islink(os.path.join(dirpath, name))
        )
        task.size = (total_bytes / 1024 / 1024)
        task.save()
        print("Updated {} with size {}".format(task, task.size))
    except Exception as e:
        # Never abort the migration because a single task folder is broken
        print("Cannot update size for task {}: {}".format(task, str(e)))
def update_task_sizes(apps, schema_editor):
    # Backfill the new Task.size field for every existing task.
    # Uses the historical model via apps.get_model, as migrations must.
    Task = apps.get_model('app', 'Task')
    for t in Task.objects.all():
        update_size(t)
class Migration(migrations.Migration):
    """Adds Task.size and backfills it from each task's folder on disk."""

    dependencies = [
        ('app', '0035_task_orthophoto_bands'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='size',
            field=models.FloatField(blank=True, default=0.0, help_text='Size of the task on disk in megabytes', verbose_name='Size'),
        ),
        # A no-op reverse keeps the migration reversible; unapplying simply
        # drops the column via AddField's own reverse, so nothing to undo here.
        migrations.RunPython(update_task_sizes, migrations.RunPython.noop),
    ]

Wyświetl plik

@ -0,0 +1,35 @@
# Generated by Django 2.2.27 on 2023-08-24 16:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def create_profiles(apps, schema_editor):
    """Backfill a Profile row for every existing user (historical models)."""
    User = apps.get_model('auth', 'User')
    Profile = apps.get_model('app', 'Profile')
    for u in User.objects.all():
        # objects.create() already persists the row; the original extra
        # p.save() issued a redundant second UPDATE per user.
        Profile.objects.create(user=u)
        print("Created user profile for %s" % u.username)
class Migration(migrations.Migration):
    """Creates the Profile model and backfills one profile per existing user."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0036_task_size'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quota', models.FloatField(blank=True, default=-1, help_text='Maximum disk quota in megabytes', verbose_name='Quota')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # No-op reverse keeps the migration reversible; CreateModel's reverse
        # drops the table, which removes the backfilled rows anyway.
        migrations.RunPython(create_profiles, migrations.RunPython.noop),
    ]

Wyświetl plik

@ -5,6 +5,7 @@ from .theme import Theme
from .setting import Setting
from .plugin_datum import PluginDatum
from .plugin import Plugin
from .profile import Profile
# deprecated
def image_directory_path(image_upload, filename):

Wyświetl plik

@ -0,0 +1,74 @@
import time
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from app.models import Task
from django.db.models import Sum
from django.core.cache import cache
from webodm import settings
class Profile(models.Model):
    """
    Per-user settings. Currently tracks the disk quota (in megabytes);
    a quota of -1 means quotas are disabled for the user.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    quota = models.FloatField(default=-1, blank=True, help_text=_("Maximum disk quota in megabytes"), verbose_name=_("Quota"))

    def _used_quota_key(self):
        # Cache key holding this user's computed disk usage
        return 'used_quota_%s' % self.user.id

    def _quota_deadline_key(self):
        # Cache key holding this user's quota-exceeded deadline timestamp
        return 'quota_deadline_%s' % self.user.id

    def has_quota(self):
        """True when a quota is enforced for this user (-1 disables it)."""
        return self.quota != -1

    def used_quota(self):
        """Total size in MB of all tasks owned by this user (0 if none)."""
        total = Task.objects.filter(project__owner=self.user).aggregate(total=Sum('size'))['total']
        return 0 if total is None else total

    def has_exceeded_quota(self):
        """True when quotas are enabled and current usage exceeds the quota."""
        return self.has_quota() and self.used_quota() > self.quota

    def used_quota_cached(self):
        """Like used_quota(), but cached for 30 minutes to avoid DB aggregates."""
        key = self._used_quota_key()
        value = cache.get(key)
        if value is None:
            value = self.used_quota()
            cache.set(key, value, 1800)  # 30 minutes
        return value

    def has_exceeded_quota_cached(self):
        """Like has_exceeded_quota(), but uses the cached usage figure."""
        return self.has_quota() and self.used_quota_cached() > self.quota

    def clear_used_quota_cache(self):
        """Invalidate the cached usage figure (call after tasks change size)."""
        cache.delete(self._used_quota_key())

    def get_quota_deadline(self):
        """Return the stored deadline timestamp, or None when none is set."""
        return cache.get(self._quota_deadline_key())

    def set_quota_deadline(self, hours):
        """
        Store a deadline ``hours`` from now and return its timestamp.

        The cache entry outlives the deadline itself (10x its span, but at
        least the configured grace period) so it can still be read afterwards.
        """
        seconds = hours * 60 * 60
        deadline = time.time() + seconds
        grace = settings.QUOTA_EXCEEDED_GRACE_PERIOD * 60 * 60
        cache.set(self._quota_deadline_key(), deadline, int(max(seconds * 10, grace)))
        return deadline

    def clear_quota_deadline(self):
        """Remove any stored quota deadline for this user."""
        cache.delete(self._quota_deadline_key())
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    # Give every newly created User a Profile automatically
    if created:
        Profile.objects.create(user=instance)


@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Persist profile changes whenever the owning User is saved
    # (lets callers mutate user.profile and just call user.save())
    instance.profile.save()

Wyświetl plik

@ -279,6 +279,7 @@ class Task(models.Model):
epsg = models.IntegerField(null=True, default=None, blank=True, help_text=_("EPSG code of the dataset (if georeferenced)"), verbose_name="EPSG")
tags = models.TextField(db_index=True, default="", blank=True, help_text=_("Task tags"), verbose_name=_("Tags"))
orthophoto_bands = fields.JSONField(default=list, blank=True, help_text=_("List of orthophoto bands"), verbose_name=_("Orthophoto Bands"))
size = models.FloatField(default=0.0, blank=True, help_text=_("Size of the task on disk in megabytes"), verbose_name=_("Size"))
class Meta:
verbose_name = _("Task")
@ -432,6 +433,8 @@ class Task(models.Model):
shutil.copytree(self.task_path(), task.task_path())
else:
logger.warning("Task {} doesn't have folder, will skip copying".format(self))
self.project.owner.profile.clear_used_quota_cache()
return task
except Exception as e:
logger.warning("Cannot duplicate task: {}".format(str(e)))
@ -885,6 +888,7 @@ class Task(models.Model):
self.update_available_assets_field()
self.update_epsg_field()
self.update_orthophoto_bands_field()
self.update_size()
self.potree_scene = {}
self.running_progress = 1.0
self.console_output += gettext("Done!") + "\n"
@ -1034,6 +1038,8 @@ class Task(models.Model):
except FileNotFoundError as e:
logger.warning(e)
self.project.owner.profile.clear_used_quota_cache()
plugin_signals.task_removed.send_robust(sender=self.__class__, task_id=task_id)
def set_failure(self, error_message):
@ -1161,3 +1167,18 @@ class Task(models.Model):
else:
with open(file.temporary_file_path(), 'rb') as f:
shutil.copyfileobj(f, fd)
def update_size(self, commit=False):
    """
    Recompute this task's size on disk (in megabytes) by walking its folder,
    skipping symlinks. Best-effort: failures are logged, never raised.

    :param commit: when True, also persist the new size to the database.
    """
    try:
        total_bytes = 0
        for dirpath, _, filenames in os.walk(self.task_path()):
            for f in filenames:
                fp = os.path.join(dirpath, f)
                if not os.path.islink(fp):
                    total_bytes += os.path.getsize(fp)
        self.size = (total_bytes / 1024 / 1024)
        if commit: self.save()

        # Usage changed, so the owner's cached quota figure is stale
        self.project.owner.profile.clear_used_quota_cache()
    except Exception as e:
        # logger.warn is a deprecated alias; use warning()
        logger.warning("Cannot update size for task {}: {}".format(self, str(e)))

Wyświetl plik

@ -50,11 +50,26 @@ body {
margin-right: 0;
}
.navbar-top-links .dropdown-menu li a {
.navbar-top-links .dropdown-menu li a{
padding: 3px 20px;
min-height: 0;
}
.navbar-top-links .dropdown-menu li div.info-item{
padding: 3px 8px;
min-height: 0;
}
.navbar-top-links .dropdown-menu li div.info-item.quotas{
min-width: 232px;
}
.navbar-top-links .dropdown-menu li .progress{
margin-bottom: 0;
margin-top: 6px;
}
.navbar-top-links .dropdown-menu li a div {
white-space: normal;
}

Wyświetl plik

@ -298,7 +298,7 @@ class ModelView extends React.Component {
window.viewer = new Potree.Viewer(container);
viewer.setEDLEnabled(true);
viewer.setFOV(60);
viewer.setPointBudget(1*1000*1000);
viewer.setPointBudget(10*1000*1000);
viewer.setEDLEnabled(true);
viewer.loadSettingsFromURL();
@ -644,9 +644,10 @@ class ModelView extends React.Component {
return;
}
const offset = {
x: gltf.scene.CESIUM_RTC.center[0],
y: gltf.scene.CESIUM_RTC.center[1]
const offset = {x: 0, y: 0};
if (gltf.scene.CESIUM_RTC && gltf.scene.CESIUM_RTC.center){
offset.x = gltf.scene.CESIUM_RTC.center[0];
offset.y = gltf.scene.CESIUM_RTC.center[1];
}
addObject(gltf.scene, offset);

Wyświetl plik

@ -93,6 +93,16 @@ export default {
saveAs: function(text, filename){
var blob = new Blob([text], {type: "text/plain;charset=utf-8"});
FileSaver.saveAs(blob, filename);
},
// http://stackoverflow.com/questions/15900485/correct-way-to-convert-size-in-bytes-to-kb-mb-gb-in-javascript
bytesToSize: function(bytes, decimals = 2){
if(bytes == 0) return '0 byte';
var k = 1000; // or 1024 for binary
var dm = decimals || 3;
var sizes = ['bytes', 'Kb', 'Mb', 'Gb', 'Tb', 'Pb', 'Eb', 'Zb', 'Yb'];
var i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
}
};

Wyświetl plik

@ -14,6 +14,7 @@ import PipelineSteps from '../classes/PipelineSteps';
import Css from '../classes/Css';
import Tags from '../classes/Tags';
import Trans from './Trans';
import Utils from '../classes/Utils';
import { _, interpolate } from '../classes/gettext';
class TaskListItem extends React.Component {
@ -572,6 +573,11 @@ class TaskListItem extends React.Component {
<td><strong>{_("Reconstructed Points:")}</strong></td>
<td>{stats.pointcloud.points.toLocaleString()}</td>
</tr>}
{task.size > 0 &&
<tr>
<td><strong>{_("Disk Usage:")}</strong></td>
<td>{Utils.bytesToSize(task.size * 1024 * 1024)}</td>
</tr>}
<tr>
<td><strong>{_("Task Output:")}</strong></td>
<td><div className="btn-group btn-toggle">

Wyświetl plik

@ -2,6 +2,7 @@ import '../css/UploadProgressBar.scss';
import React from 'react';
import PropTypes from 'prop-types';
import { _, interpolate } from '../classes/gettext';
import Utils from '../classes/Utils';
class UploadProgressBar extends React.Component {
static propTypes = {
@ -11,22 +12,12 @@ class UploadProgressBar extends React.Component {
totalCount: PropTypes.number // number of files
}
// http://stackoverflow.com/questions/15900485/correct-way-to-convert-size-in-bytes-to-kb-mb-gb-in-javascript
bytesToSize(bytes, decimals = 2){
if(bytes == 0) return '0 byte';
var k = 1000; // or 1024 for binary
var dm = decimals || 3;
var sizes = ['bytes', 'Kb', 'Mb', 'Gb', 'Tb', 'Pb', 'Eb', 'Zb', 'Yb'];
var i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
}
render() {
let percentage = (this.props.progress !== undefined ?
this.props.progress :
0).toFixed(2);
let bytes = this.props.totalBytesSent !== undefined && this.props.totalBytes !== undefined ?
' ' + interpolate(_("remaining to upload: %(bytes)s"), { bytes: this.bytesToSize(this.props.totalBytes - this.props.totalBytesSent)}) :
' ' + interpolate(_("remaining to upload: %(bytes)s"), { bytes: Utils.bytesToSize(this.props.totalBytes - this.props.totalBytesSent)}) :
"";
let active = percentage < 100 ? "active" : "";

Wyświetl plik

@ -1,94 +1,94 @@
// Auto-generated with extract_odm_strings.py, do not edit!
_("Set a value in meters for the GPS Dilution of Precision (DOP) information for all images. If your images are tagged with high precision GPS information (RTK), this value will be automatically set accordingly. You can use this option to manually set it in case the reconstruction fails. Lowering this option can sometimes help control bowling-effects over large areas. Default: %(default)s");
_("GeoJSON polygon limiting the area of the reconstruction. Can be specified either as path to a GeoJSON file or as a JSON string representing the contents of a GeoJSON file. Default: %(default)s");
_("Maximum number of frames to extract from video files for processing. Set to 0 for no limit. Default: %(default)s");
_("Export the georeferenced point cloud in LAS format. Default: %(default)s");
_("Skip normalization of colors across all images. Useful when processing radiometric data. Default: %(default)s");
_("Path to the file containing the ground control points used for georeferencing. The file needs to use the following format: EPSG:<code> or <+proj definition>geo_x geo_y geo_z im_x im_y image_name [gcp_name] [extra1] [extra2]Default: %(default)s");
_("Use this tag if you have a GCP File but want to use the EXIF information for georeferencing instead. Default: %(default)s");
_("Path to the image geolocation file containing the camera center coordinates used for georeferencing. If you don't have values for yaw/pitch/roll you can set them to 0. The file needs to use the following format: EPSG:<code> or <+proj definition>image_name geo_x geo_y geo_z [yaw (degrees)] [pitch (degrees)] [roll (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]Default: %(default)s");
_("Automatically compute image masks using AI to remove the background. Experimental. Default: %(default)s");
_("Build orthophoto overviews for faster display in programs such as QGIS. Default: %(default)s");
_("Classify the point cloud outputs. You can control the behavior of this option by tweaking the --dem-* parameters. Default: %(default)s");
_("Use the camera parameters computed from another dataset instead of calculating them. Can be specified either as path to a cameras.json file or as a JSON string representing the contents of a cameras.json file. Default: %(default)s");
_("Run local bundle adjustment for every image added to the reconstruction and a global adjustment every 100 images. Speeds up reconstruction for very large datasets. Default: %(default)s");
_("Save the georeferenced point cloud in Cloud Optimized Point Cloud (COPC) format. Default: %(default)s");
_("The maximum output resolution of extracted video frames in pixels. Default: %(default)s");
_("Use this tag to build a DTM (Digital Terrain Model, ground only) using a simple morphological filter. Check the --dem* and --smrf* parameters for finer tuning. Default: %(default)s");
_("Delete heavy intermediate files to optimize disk space usage. This affects the ability to restart the pipeline from an intermediate stage, but allows datasets to be processed on machines that don't have sufficient disk space available. Default: %(default)s");
_("Automatically crop image outputs by creating a smooth buffer around the dataset boundaries, shrunk by N meters. Use 0 to disable cropping. Default: %(default)s");
_("Geometric estimates improve the accuracy of the point cloud by computing geometrically consistent depthmaps but may not be usable in larger datasets. This flag disables geometric estimates. Default: %(default)s");
_("Automatically compute image masks using AI to remove the sky. Experimental. Default: %(default)s");
_("Set the compression to use for orthophotos. Can be one of: %(choices)s. Default: %(default)s");
_("Skip generation of a full 3D model. This can save time if you only need 2D results such as orthophotos and DEMs. Default: %(default)s");
_("Skips dense reconstruction and 3D model generation. It generates an orthophoto directly from the sparse reconstruction. If you just need an orthophoto and do not need a full 3D model, turn on this option. Default: %(default)s");
_("show this help message and exit");
_("Turn off camera parameter optimization during bundle adjustment. This can be sometimes useful for improving results that exhibit doming/bowling or when images are taken with a rolling shutter camera. Default: %(default)s");
_("Keep faces in the mesh that are not seen in any camera. Default: %(default)s");
_("Perform ground rectification on the point cloud. This means that wrongly classified ground points will be re-classified and gaps will be filled. Useful for generating DTMs. Default: %(default)s");
_("Computes an euclidean raster map for each DEM. The map reports the distance from each cell to the nearest NODATA value (before any hole filling takes place). This can be useful to isolate the areas that have been filled. Default: %(default)s");
_("When processing multispectral datasets, you can specify the name of the primary band that will be used for reconstruction. It's recommended to choose a band which has sharp details and is in focus. Default: %(default)s");
_("Do not attempt to merge partial reconstructions. This can happen when images do not have sufficient overlap or are isolated. Default: %(default)s");
_("Set point cloud quality. Higher quality generates better, denser point clouds, but requires more memory and takes longer. Each step up in quality increases processing time roughly by a factor of 4x.Can be one of: %(choices)s. Default: %(default)s");
_("Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. Starting with a radius equal to the output resolution, N different DEMs are generated with progressively bigger radius using the inverse distance weighted (IDW) algorithm and merged together. Remaining gaps are then merged using nearest neighbor interpolation. Default: %(default)s");
_("Export the georeferenced point cloud in CSV format. Default: %(default)s");
_("URL to a ClusterODM instance for distributing a split-merge workflow on multiple nodes in parallel. Default: %(default)s");
_("Path to a GeoTIFF DEM or a LAS/LAZ point cloud that the reconstruction outputs should be automatically aligned to. Experimental. Default: %(default)s");
_("Create Cloud-Optimized GeoTIFFs instead of normal GeoTIFFs. Default: %(default)s");
_("Simple Morphological Filter slope parameter (rise over run). Default: %(default)s");
_("Export the georeferenced point cloud in Entwine Point Tile (EPT) format. Default: %(default)s");
_("Turn on rolling shutter correction. If the camera has a rolling shutter and the images were taken in motion, you can turn on this option to improve the accuracy of the results. See also --rolling-shutter-readout. Default: %(default)s");
_("Choose the structure from motion algorithm. For aerial datasets, if camera GPS positions and angles are available, triangulation can generate better results. For planar scenes captured at fixed altitude with nadir-only images, planar can be much faster. Can be one of: %(choices)s. Default: %(default)s");
_("Choose what to merge in the merge step in a split dataset. By default all available outputs are merged. Options: %(choices)s. Default: %(default)s");
_("Perform image matching with the nearest images based on GPS exif data. Set to 0 to match by triangulation. Default: %(default)s");
_("Minimum number of features to extract per image. More features can be useful for finding more matches between images, potentially allowing the reconstruction of areas with little overlap or insufficient features. More features also slow down processing. Default: %(default)s");
_("Simple Morphological Filter elevation threshold parameter (meters). Default: %(default)s");
_("Override the rolling shutter readout time for your camera sensor (in milliseconds), instead of using the rolling shutter readout database. Note that not all cameras are present in the database. Set to 0 to use the database value. Default: %(default)s");
_("Simple Morphological Filter elevation scalar parameter. Default: %(default)s");
_("Permanently delete all previous results and rerun the processing pipeline.");
_("Generates a polygon around the cropping area that cuts the orthophoto around the edges of features. This polygon can be useful for stitching seamless mosaics with multiple overlapping orthophotos. Default: %(default)s");
_("Set feature extraction quality. Higher quality generates better features, but requires more memory and takes longer. Can be one of: %(choices)s. Default: %(default)s");
_("DSM/DTM resolution in cm / pixel. Note that this value is capped to 2x the ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also. Default: %(default)s");
_("Displays version number and exits. ");
_("Set this parameter if you want to generate a PNG rendering of the orthophoto. Default: %(default)s");
_("The maximum number of processes to use in various processes. Peak memory requirement is ~1GB per thread and 2 megapixel image resolution. Default: %(default)s");
_("Skip the blending of colors near seams. Default: %(default)s");
_("Radius of the overlap between submodels. After grouping images into clusters, images that are closer than this radius to a cluster are added to the cluster. This is done to ensure that neighboring submodels overlap. Default: %(default)s");
_("Do not use GPU acceleration, even if it's available. Default: %(default)s");
_("When processing multispectral datasets, ODM will automatically align the images for each band. If the images have been postprocessed and are already aligned, use this option. Default: %(default)s");
_("Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering. Default: %(default)s");
_("Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also. Default: %(default)s");
_("Automatically set a boundary using camera shot locations to limit the area of the reconstruction. This can help remove far away background artifacts (sky, background landscapes, etc.). See also --boundary. Default: %(default)s");
_("Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. Default: %(default)s");
_("Skip alignment of submodels in split-merge. Useful if GPS is good enough on very large datasets. Default: %(default)s");
_("Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. Default: %(default)s");
_("Path to the image groups file that controls how images should be split into groups. The file needs to use the following format: image_name group_nameDefault: %(default)s");
_("Choose the algorithm for extracting keypoints and computing descriptors. Can be one of: %(choices)s. Default: %(default)s");
_("Set this parameter if you want a striped GeoTIFF. Default: %(default)s");
_("Specify the distance between camera shot locations and the outer edge of the boundary when computing the boundary with --auto-boundary. Set to 0 to automatically choose a value. Default: %(default)s");
_("Average number of images per submodel. When splitting a large dataset into smaller submodels, images are grouped into clusters. This value regulates the number of images that each cluster should have on average. Default: %(default)s");
_("Decimate the points before generating the DEM. 1 is no decimation (full quality). 100 decimates ~99%% of the points. Useful for speeding up generation of DEM results in very large datasets. Default: %(default)s");
_("Copy output results to this folder after processing.");
_("Generate static tiles for orthophotos and DEMs that are suitable for viewers like Leaflet or OpenLayers. Default: %(default)s");
_("Generate OBJs that have a single material and a single texture file instead of multiple ones. Default: %(default)s");
_("Rerun this stage only and stop. Can be one of: %(choices)s. Default: %(default)s");
_("Set a camera projection type. Manually setting a value can help improve geometric undistortion. By default the application tries to determine a lens type from the images metadata. Can be one of: %(choices)s. Default: %(default)s");
_("End processing at this stage. Can be one of: %(choices)s. Default: %(default)s");
_("Use images' GPS exif data for reconstruction, even if there are GCPs present.This flag is useful if you have high precision GPS measurements. If there are no GCPs, this flag does nothing. Default: %(default)s");
_("Path to the project folder. Your project folder should contain subfolders for each dataset. Each dataset should have an \"images\" folder.");
_("Octree depth used in the mesh reconstruction, increase to get more vertices, recommended values are 8-12. Default: %(default)s");
_("Generate OGC 3D Tiles outputs. Default: %(default)s");
_("Use a full 3D mesh to compute the orthophoto instead of a 2.5D mesh. This option is a bit faster and provides similar results in planar areas. Default: %(default)s");
_("Matcher algorithm, Fast Library for Approximate Nearest Neighbors or Bag of Words. FLANN is slower, but more stable. BOW is faster, but can sometimes miss valid matches. BRUTEFORCE is very slow but robust.Can be one of: %(choices)s. Default: %(default)s");
_("Generate single file Binary glTF (GLB) textured models. Default: %(default)s");
_("Set the radiometric calibration to perform on images. When processing multispectral and thermal images you should set this option to obtain reflectance/temperature values (otherwise you will get digital number values). [camera] applies black level, vignetting, row gradient gain/exposure compensation (if appropriate EXIF tags are found) and computes absolute temperature values. [camera+sun] is experimental, applies all the corrections of [camera], plus compensates for spectral radiance registered via a downwelling light sensor (DLS) taking in consideration the angle of the sun. Can be one of: %(choices)s. Default: %(default)s");
_("Skip generation of PDF report. This can save time if you don't need a report. Default: %(default)s");
_("Ignore Ground Sampling Distance (GSD). GSD caps the maximum resolution of image outputs and resizes images when necessary, resulting in faster processing and lower memory usage. Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. Default: %(default)s");
_("Perform image matching with the nearest N images based on image filename order. Can speed up processing of sequential images, such as those extracted from video. Set to 0 to disable. Default: %(default)s");
_("Choose what to merge in the merge step in a split dataset. By default all available outputs are merged. Options: %(choices)s. Default: %(default)s");
_("End processing at this stage. Can be one of: %(choices)s. Default: %(default)s");
_("Choose the algorithm for extracting keypoints and computing descriptors. Can be one of: %(choices)s. Default: %(default)s");
_("Choose the structure from motion algorithm. For aerial datasets, if camera GPS positions and angles are available, triangulation can generate better results. For planar scenes captured at fixed altitude with nadir-only images, planar can be much faster. Can be one of: %(choices)s. Default: %(default)s");
_("Generate static tiles for orthophotos and DEMs that are suitable for viewers like Leaflet or OpenLayers. Default: %(default)s");
_("Perform ground rectification on the point cloud. This means that wrongly classified ground points will be re-classified and gaps will be filled. Useful for generating DTMs. Default: %(default)s");
_("Octree depth used in the mesh reconstruction, increase to get more vertices, recommended values are 8-12. Default: %(default)s");
_("Use this tag if you have a GCP File but want to use the EXIF information for georeferencing instead. Default: %(default)s");
_("Export the georeferenced point cloud in Entwine Point Tile (EPT) format. Default: %(default)s");
_("Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. Default: %(default)s");
_("Override the rolling shutter readout time for your camera sensor (in milliseconds), instead of using the rolling shutter readout database. Note that not all cameras are present in the database. Set to 0 to use the database value. Default: %(default)s");
_("DSM/DTM resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate. Default: %(default)s");
_("Path to a GeoTIFF DEM or a LAS/LAZ point cloud that the reconstruction outputs should be automatically aligned to. Experimental. Default: %(default)s");
_("Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. Default: %(default)s");
_("Simple Morphological Filter slope parameter (rise over run). Default: %(default)s");
_("Automatically set a boundary using camera shot locations to limit the area of the reconstruction. This can help remove far away background artifacts (sky, background landscapes, etc.). See also --boundary. Default: %(default)s");
_("Name of dataset (i.e subfolder name within project folder). Default: %(default)s");
_("The maximum vertex count of the output mesh. Default: %(default)s");
_("Automatically compute image masks using AI to remove the background. Experimental. Default: %(default)s");
_("Generate OGC 3D Tiles outputs. Default: %(default)s");
_("Skip generation of the orthophoto. This can save time if you only need 3D results or DEMs. Default: %(default)s");
_("Ignore Ground Sampling Distance (GSD).A memory and processor hungry change relative to the default behavior if set to true. Ordinarily, GSD estimates are used to cap the maximum resolution of image outputs and resizes images when necessary, resulting in faster processing and lower memory usage. Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. Never set --ignore-gsd to true unless you are positive you need it, and even then: do not use it. Default: %(default)s");
_("Generate single file Binary glTF (GLB) textured models. Default: %(default)s");
_("When processing multispectral datasets, ODM will automatically align the images for each band. If the images have been postprocessed and are already aligned, use this option. Default: %(default)s");
_("Classify the point cloud outputs. You can control the behavior of this option by tweaking the --dem-* parameters. Default: %(default)s");
_("Matcher algorithm, Fast Library for Approximate Nearest Neighbors or Bag of Words. FLANN is slower, but more stable. BOW is faster, but can sometimes miss valid matches. BRUTEFORCE is very slow but robust.Can be one of: %(choices)s. Default: %(default)s");
_("Skips dense reconstruction and 3D model generation. It generates an orthophoto directly from the sparse reconstruction. If you just need an orthophoto and do not need a full 3D model, turn on this option. Default: %(default)s");
_("Generate OBJs that have a single material and a single texture file instead of multiple ones. Default: %(default)s");
_("Create Cloud-Optimized GeoTIFFs instead of normal GeoTIFFs. Default: %(default)s");
_("Set feature extraction quality. Higher quality generates better features, but requires more memory and takes longer. Can be one of: %(choices)s. Default: %(default)s");
_("Computes an euclidean raster map for each DEM. The map reports the distance from each cell to the nearest NODATA value (before any hole filling takes place). This can be useful to isolate the areas that have been filled. Default: %(default)s");
_("Minimum number of features to extract per image. More features can be useful for finding more matches between images, potentially allowing the reconstruction of areas with little overlap or insufficient features. More features also slow down processing. Default: %(default)s");
_("Automatically crop image outputs by creating a smooth buffer around the dataset boundaries, shrunk by N meters. Use 0 to disable cropping. Default: %(default)s");
_("Set a camera projection type. Manually setting a value can help improve geometric undistortion. By default the application tries to determine a lens type from the images metadata. Can be one of: %(choices)s. Default: %(default)s");
_("Turn on rolling shutter correction. If the camera has a rolling shutter and the images were taken in motion, you can turn on this option to improve the accuracy of the results. See also --rolling-shutter-readout. Default: %(default)s");
_("Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. Starting with a radius equal to the output resolution, N different DEMs are generated with progressively bigger radius using the inverse distance weighted (IDW) algorithm and merged together. Remaining gaps are then merged using nearest neighbor interpolation. Default: %(default)s");
_("Keep faces in the mesh that are not seen in any camera. Default: %(default)s");
_("Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering. Default: %(default)s");
_("The maximum vertex count of the output mesh. Default: %(default)s");
_("Delete heavy intermediate files to optimize disk space usage. This affects the ability to restart the pipeline from an intermediate stage, but allows datasets to be processed on machines that don't have sufficient disk space available. Default: %(default)s");
_("Save the georeferenced point cloud in Cloud Optimized Point Cloud (COPC) format. Default: %(default)s");
_("Path to the image geolocation file containing the camera center coordinates used for georeferencing. If you don't have values for yaw/pitch/roll you can set them to 0. The file needs to use the following format: EPSG:<code> or <+proj definition>image_name geo_x geo_y geo_z [yaw (degrees)] [pitch (degrees)] [roll (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]Default: %(default)s");
_("Average number of images per submodel. When splitting a large dataset into smaller submodels, images are grouped into clusters. This value regulates the number of images that each cluster should have on average. Default: %(default)s");
_("The maximum number of processes to use in various processes. Peak memory requirement is ~1GB per thread and 2 megapixel image resolution. Default: %(default)s");
_("Build orthophoto overviews for faster display in programs such as QGIS. Default: %(default)s");
_("Decimate the points before generating the DEM. 1 is no decimation (full quality). 100 decimates ~99%% of the points. Useful for speeding up generation of DEM results in very large datasets. Default: %(default)s");
_("Set this parameter if you want a striped GeoTIFF. Default: %(default)s");
_("Use this tag to build a DTM (Digital Terrain Model, ground only) using a simple morphological filter. Check the --dem* and --smrf* parameters for finer tuning. Default: %(default)s");
_("Use this tag to build a DSM (Digital Surface Model, ground + objects) using a progressive morphological filter. Check the --dem* parameters for finer tuning. Default: %(default)s");
_("Turn off camera parameter optimization during bundle adjustment. This can be sometimes useful for improving results that exhibit doming/bowling or when images are taken with a rolling shutter camera. Default: %(default)s");
_("show this help message and exit");
_("Rerun this stage only and stop. Can be one of: %(choices)s. Default: %(default)s");
_("Automatically compute image masks using AI to remove the sky. Experimental. Default: %(default)s");
_("Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate.Default: %(default)s");
_("When processing multispectral datasets, you can specify the name of the primary band that will be used for reconstruction. It's recommended to choose a band which has sharp details and is in focus. Default: %(default)s");
_("Use images' GPS exif data for reconstruction, even if there are GCPs present.This flag is useful if you have high precision GPS measurements. If there are no GCPs, this flag does nothing. Default: %(default)s");
_("Simple Morphological Filter window radius parameter (meters). Default: %(default)s");
_("Do not use GPU acceleration, even if it's available. Default: %(default)s");
_("Export the georeferenced point cloud in LAS format. Default: %(default)s");
_("Use the camera parameters computed from another dataset instead of calculating them. Can be specified either as path to a cameras.json file or as a JSON string representing the contents of a cameras.json file. Default: %(default)s");
_("Maximum number of frames to extract from video files for processing. Set to 0 for no limit. Default: %(default)s");
_("URL to a ClusterODM instance for distributing a split-merge workflow on multiple nodes in parallel. Default: %(default)s");
_("Set a value in meters for the GPS Dilution of Precision (DOP) information for all images. If your images are tagged with high precision GPS information (RTK), this value will be automatically set accordingly. You can use this option to manually set it in case the reconstruction fails. Lowering this option can sometimes help control bowling-effects over large areas. Default: %(default)s");
_("Do not attempt to merge partial reconstructions. This can happen when images do not have sufficient overlap or are isolated. Default: %(default)s");
_("Set the compression to use for orthophotos. Can be one of: %(choices)s. Default: %(default)s");
_("Path to the project folder. Your project folder should contain subfolders for each dataset. Each dataset should have an \"images\" folder.");
_("Generates a polygon around the cropping area that cuts the orthophoto around the edges of features. This polygon can be useful for stitching seamless mosaics with multiple overlapping orthophotos. Default: %(default)s");
_("Perform image matching with the nearest images based on GPS exif data. Set to 0 to match by triangulation. Default: %(default)s");
_("Skip generation of a full 3D model. This can save time if you only need 2D results such as orthophotos and DEMs. Default: %(default)s");
_("The maximum output resolution of extracted video frames in pixels. Default: %(default)s");
_("Set the radiometric calibration to perform on images. When processing multispectral and thermal images you should set this option to obtain reflectance/temperature values (otherwise you will get digital number values). [camera] applies black level, vignetting, row gradient gain/exposure compensation (if appropriate EXIF tags are found) and computes absolute temperature values. [camera+sun] is experimental, applies all the corrections of [camera], plus compensates for spectral radiance registered via a downwelling light sensor (DLS) taking in consideration the angle of the sun. Can be one of: %(choices)s. Default: %(default)s");
_("Simple Morphological Filter elevation threshold parameter (meters). Default: %(default)s");
_("Geometric estimates improve the accuracy of the point cloud by computing geometrically consistent depthmaps but may not be usable in larger datasets. This flag disables geometric estimates. Default: %(default)s");
_("Skip normalization of colors across all images. Useful when processing radiometric data. Default: %(default)s");
_("GeoJSON polygon limiting the area of the reconstruction. Can be specified either as path to a GeoJSON file or as a JSON string representing the contents of a GeoJSON file. Default: %(default)s");
_("Set point cloud quality. Higher quality generates better, denser point clouds, but requires more memory and takes longer. Each step up in quality increases processing time roughly by a factor of 4x.Can be one of: %(choices)s. Default: %(default)s");
_("Skip alignment of submodels in split-merge. Useful if GPS is good enough on very large datasets. Default: %(default)s");
_("Use a full 3D mesh to compute the orthophoto instead of a 2.5D mesh. This option is a bit faster and provides similar results in planar areas. Default: %(default)s");
_("Radius of the overlap between submodels. After grouping images into clusters, images that are closer than this radius to a cluster are added to the cluster. This is done to ensure that neighboring submodels overlap. Default: %(default)s");
_("Set this parameter if you want to generate a PNG rendering of the orthophoto. Default: %(default)s");
_("Displays version number and exits. ");
_("Path to the image groups file that controls how images should be split into groups. The file needs to use the following format: image_name group_nameDefault: %(default)s");
_("Rerun processing from this stage. Can be one of: %(choices)s. Default: %(default)s");
_("Path to the file containing the ground control points used for georeferencing. The file needs to use the following format: EPSG:<code> or <+proj definition>geo_x geo_y geo_z im_x im_y image_name [gcp_name] [extra1] [extra2]Default: %(default)s");
_("Permanently delete all previous results and rerun the processing pipeline.");
_("Simple Morphological Filter elevation scalar parameter. Default: %(default)s");
_("Perform image matching with the nearest N images based on image filename order. Can speed up processing of sequential images, such as those extracted from video. It is applied only on non-georeferenced datasets. Set to 0 to disable. Default: %(default)s");
_("Copy output results to this folder after processing.");
_("Run local bundle adjustment for every image added to the reconstruction and a global adjustment every 100 images. Speeds up reconstruction for very large datasets. Default: %(default)s");
_("Export the georeferenced point cloud in CSV format. Default: %(default)s");
_("Skip the blending of colors near seams. Default: %(default)s");

Wyświetl plik

@ -1,5 +1,6 @@
{% extends "app/logged_in_base.html" %}
{% load i18n %}
{% load settings %}
{% block content %}
{% load render_bundle from webpack_loader %}
@ -12,6 +13,8 @@
<![endif]-->
{% if no_processingnodes %}
{% include "quota.html" %}
<h3>{% trans 'Welcome!' %} ☺</h3>
{% trans 'Add a Processing Node' as add_processing_node %}
{% with nodeodm_link='<a href="https://github.com/OpenDroneMap/NodeODM" target="_blank">NodeODM</a>' api_link='<a href="https://github.com/OpenDroneMap/NodeODM/blob/master/docs/index.adoc" target="_blank">API</a>' %}
@ -38,6 +41,8 @@
</ul>
</p>
{% endif %}
{% include "quota.html" %}
<div id="dashboard-app" data-dashboard></div>

Wyświetl plik

@ -12,9 +12,33 @@
</a>
<ul class="dropdown-menu dropdown-user">
<li class="user-profile">
{% blocktrans with user=user.username %}Hello, {{ user }}!{% endblocktrans %}<br/>
<span class="email">{{ user.email }}</span>
<div class="info-item">
{% blocktrans with user=user.username %}Hello, {{ user }}!{% endblocktrans %}<br/>
<span class="email">{{ user.email }}</span>
</div>
</li>
{% if user.profile.has_quota %}
<li class="divider"></li>
{% with tot_quota=user.profile.quota used_quota=user.profile.used_quota_cached %}
{% percentage used_quota tot_quota as perc_quota %}
{% percentage used_quota tot_quota 100 as bar_width %}
<li>
<div class="info-item quotas">
{% with usage=perc_quota|floatformat:0 used=used_quota|disk_size total=tot_quota|disk_size %}
<i class="fa fa-database fa-fw"></i> {% blocktrans %}{{used}} of {{total}} used{% endblocktrans %}
<div class="progress">
<div class="progress-bar progress-bar-{% if perc_quota <= 100 %}success{% else %}warning{% endif %} active" style="width: {{ bar_width }}%">
{{usage}}%
</div>
</div>
{% endwith %}
</div>
</li>
{% endwith %}
{% endif %}
<li class="divider"></li>
<li><a href="/logout/"><i class="fa fa-sign-out-alt fa-fw"></i> {% trans 'Logout' %}</a>
</li>

Wyświetl plik

@ -0,0 +1,11 @@
{% load i18n %}
{% load settings %}
{% if user.profile.has_exceeded_quota_cached %}
{% with total=user.profile.quota|disk_size used=user.profile.used_quota_cached|disk_size %}
{% quota_exceeded_grace_period as when %}
<div class="alert alert-warning alert-dismissible">
<i class="fas fa-info-circle"></i> {% blocktrans %}The disk quota is being exceeded ({{ used }} of {{ total }} used). The most recent tasks will be automatically deleted {{ when }}, until usage falls below {{ total }}.{% endblocktrans %}
</div>
{% endwith %}
{% endif %}

Wyświetl plik

@ -10,11 +10,13 @@
{% endif %}
{% is_single_user_mode as autologin %}
{% has_external_auth as ext_auth %}
{% reset_password_link as reset_pwd_link %}
{% if autologin %}
<script>location.href='/';</script>
{% else %}
<form action="{% url 'login' %}" method="post" class="form-horizontal" role="form">{% csrf_token %}
<form id="loginForm" {% if ext_auth %} style="display: none" {% endif %} action="{% url 'login' %}" method="post" class="form-horizontal" role="form">{% csrf_token %}
{% for field in form %}
{% include 'registration/form_field.html' %}
{% endfor %}
@ -23,16 +25,66 @@
<button type="submit" class="btn btn-default">{% trans 'Log in' %}</button>
</div>
<div class="top-buffer col-sm-offset-2 col-sm-10">
<!--<p><a href="{% url 'password_reset' %}">{% trans "Reset My Password" %}</a></p>-->
<p><a href="javascript:toggleForgotPasswordHint();">Forgot your password?</a></p>
{% if reset_pwd_link != '' %}
<p><a href="{{ reset_pwd_link }}">{% trans "Forgot your password?" %}</a></p>
{% else %}
<p><a href="javascript:toggleForgotPasswordHint();">{% trans "Forgot your password?" %}</a></p>
<script>function toggleForgotPasswordHint(){ $("#forgotPasswordHint").toggle(); }</script>
<div id="forgotPasswordHint" style="display: none; font-size: 90%; padding: 4px;" class="theme-secondary">
You can reset the administrator password by running the following command:
<span class="theme-background-highlight" style="padding: 4px; margin: 8px 0; display: inline-block;">./webodm.sh resetadminpassword yournewpass</span><br/>
If you used WebODM Manager to launch WebODM, find the "Reset Password" button within the maintenance panel or within one of WebODM Manager menus.
</div>
{% endif %}
</div>
</div>
</form>
{% if ext_auth %}
<div class="text-center" id="authLoading">
<i class="fa fa-spin fa-circle-notch fa-spin fa-fw fa-2x"></i>
</div>
<script>
function getAutoLoginCookie() {
var value = "; " + document.cookie;
var parts = value.split("; autologin=");
if (parts.length === 2) return parts.pop().split(';').shift();
}
function delAutoLoginCookie() {
var domain = getAutoLoginCookie();
document.cookie = 'autologin=; Path=/; Expires=Thu, 01 Jan 1970 00:00:01 GMT; Domain=' + domain + ';';
}
function showLoginForm(){
$("#authLoading").hide();
$("#loginForm").show();
}
$(function(){
if (getAutoLoginCookie() !== undefined){
$.ajax({
url: "/api/external-token-auth/",
type: "POST",
xhrFields: {
withCredentials: true
},
}).done(function(res){
delAutoLoginCookie();
if (res.redirect){
location.href = res.redirect;
}else{
if (res.error) console.error(res.error);
showLoginForm();
}
}).fail(function(){
delAutoLoginCookie();
showLoginForm();
console.error("Auto login failed");
});
}else{
showLoginForm();
}
});
</script>
{% endif %}
{% endif %}
{% endblock %}

Wyświetl plik

@ -1,12 +1,60 @@
import datetime
import math
import logging
import time
from django import template
from webodm import settings
from django.utils.translation import gettext as _
register = template.Library()
logger = logging.getLogger('app.logger')
@register.simple_tag
def reset_password_link():
    """Return the configured external password-reset URL ('' when unset)."""
    link = settings.RESET_PASSWORD_LINK
    return link
@register.simple_tag
def has_external_auth():
    """Return True when an external authentication endpoint is configured."""
    endpoint = settings.EXTERNAL_AUTH_ENDPOINT
    return endpoint != ""
@register.filter
def disk_size(megabytes):
    """Format a size given in megabytes as a human-readable string.

    Uses decimal (base-1000) units: values up to 10^6 MB render as GB,
    up to 10^9 MB as TB, anything larger as PB, rounded to 2 decimals.
    """
    k = 1000
    # (upper bound in MB, divisor, unit label) — checked in order
    scales = ((k ** 2, k, ' GB'), (k ** 3, k ** 2, ' TB'))
    for limit, divisor, suffix in scales:
        if megabytes <= limit:
            return str(round(megabytes / divisor, 2)) + suffix
    return str(round(megabytes / k ** 3, 2)) + ' PB'
@register.simple_tag
def percentage(num, den, maximum=None):
    """Return num/den as a percentage, floored at 0.

    Returns 0 when den is 0 (avoids division by zero). When *maximum*
    is given, the result is additionally capped at that value.
    """
    if den == 0:
        return 0
    perc = num / den * 100
    if perc < 0:
        perc = 0
    if maximum is not None and perc > maximum:
        perc = maximum
    return perc
@register.simple_tag(takes_context=True)
def quota_exceeded_grace_period(context):
    """Return a localized, human-friendly countdown to the quota deadline.

    Reads the quota deadline from the request user's profile; when none is
    set yet, assumes the full QUOTA_EXCEEDED_GRACE_PERIOD (hours) from now.
    """
    hour = 60 * 60
    day = hour * 24

    deadline = context.request.user.profile.get_quota_deadline()
    now = time.time()
    if deadline is None:
        # No deadline recorded yet: show the full grace period
        deadline = now + settings.QUOTA_EXCEEDED_GRACE_PERIOD * hour

    remaining = deadline - now
    if remaining < 0:
        remaining = 0

    if remaining >= 2 * day:
        return _("in %(num)s days") % {"num": math.floor(remaining / day)}
    if remaining >= 2 * hour:
        return _("in %(num)s hours") % {"num": math.floor(remaining / hour)}
    if remaining > 1:
        return _("in %(num)s minutes") % {"num": math.floor(remaining / 60)}
    return _("very soon")
@register.simple_tag
def is_single_user_mode():
    """Return the SINGLE_USER_MODE setting (templates auto-login when set)."""
    mode = settings.SINGLE_USER_MODE
    return mode

Wyświetl plik

@ -0,0 +1,97 @@
import http.server
from http.server import SimpleHTTPRequestHandler
import socketserver
import sys
import threading
from time import sleep
import json
class MyHandler(SimpleHTTPRequestHandler):
    """Request handler for a minimal auth server used by the test suite.

    GET requests return a plain health-check message; POST /auth accepts
    form-encoded credentials and replies with a JSON user descriptor on
    success or a JSON error otherwise.
    """

    def do_GET(self):
        # Health check: any GET returns a plain text banner
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        self.wfile.write(bytes("Simple auth server is running", encoding="utf-8"))

    def send_error(self, code, error):
        # Override the default HTML error page with a JSON error body
        self.send_json(code, {"error": error})

    def send_json(self, code, data):
        """Serialize *data* as JSON and send it with the given status code."""
        response = bytes(json.dumps(data), encoding="utf-8")
        # Bug fix: honor the requested status code. Previously this was
        # hard-coded to 200, so send_error(401/403/404, ...) replied 200 OK.
        self.send_response(code)
        self.send_header('Content-type','application/json')
        self.send_header('Content-length', len(response))
        self.end_headers()
        self.wfile.write(response)

    def do_POST(self):
        if self.path == '/auth':
            if not 'Content-Length' in self.headers:
                self.send_error(403, "Missing form data")
                return

            content_length = int(self.headers['Content-Length'])
            post_data_str = self.rfile.read(content_length).decode("utf-8")

            # Naive x-www-form-urlencoded parsing (no URL-decoding);
            # sufficient for the fixed test credentials used here
            post_data = {}
            for item in post_data_str.split('&'):
                k,v = item.split('=')
                post_data[k] = v

            username = post_data.get("username")
            password = post_data.get("password")

            print("Login request for " + username)

            if username == "extuser1" and password == "test1234":
                print("Granted")
                # Mirrors the payload the external auth backend expects:
                # user id, quota and an associated processing node
                self.send_json(200, {
                    'user_id': 100,
                    'username': 'extuser1',
                    'maxQuota': 500,
                    'node': {
                        'hostname': 'localhost',
                        'port': 4444,
                        'token': 'test'
                    }
                })
            else:
                print("Unauthorized")
                return self.send_error(401, "unauthorized")
        else:
            self.send_error(404, "not found")
class WebServer(threading.Thread):
    """Runs the simple auth TCP server on a background thread."""

    def __init__(self):
        super().__init__()
        self.host = "0.0.0.0"
        # Port comes from the first CLI argument when given, else 8080
        if len(sys.argv) >= 2:
            self.port = int(sys.argv[1])
        else:
            self.port = 8080
        self.ws = socketserver.TCPServer((self.host, self.port), MyHandler)

    def run(self):
        print("WebServer started at Port:", self.port)
        self.ws.serve_forever()

    def shutdown(self):
        """Stop the serve_forever loop, close the socket and join the thread."""
        print('Shutting down server.')
        self.ws.shutdown()
        print('Closing server.')
        self.ws.server_close()
        self.join()
if __name__ == '__main__':
    # Start the server thread, then idle until Ctrl+C triggers shutdown
    server = WebServer()
    server.start()
    try:
        while True:
            sleep(0.5)
    except KeyboardInterrupt:
        print('Keyboard Interrupt sent.')
        server.shutdown()
        exit(0)

Wyświetl plik

@ -1,3 +1,4 @@
import time
from django.contrib.auth.models import User, Group
from rest_framework import status
from rest_framework.test import APIClient
@ -202,3 +203,59 @@ class TestApi(BootTestCase):
res = client.delete('/api/admin/groups/{}/'.format(group.id))
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
def test_profile(self):
    """Profile admin API: non-admins are rejected, admins can read
    profiles and manage quota deadlines."""
    client = APIClient()
    client.login(username="testuser", password="test1234")
    user = User.objects.get(username="testuser")

    # Cannot list profiles (not admin)
    res = client.get('/api/admin/profiles/')
    self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
    res = client.get('/api/admin/profiles/%s/' % user.id)
    self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)

    # Cannot update quota deadlines
    res = client.post('/api/admin/profiles/%s/update_quota_deadline/' % user.id, data={'hours': 1})
    self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)

    # Admin can
    client.login(username="testsuperuser", password="test1234")
    res = client.get('/api/admin/profiles/')
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    self.assertTrue(len(res.data) > 0)
    res = client.get('/api/admin/profiles/%s/' % user.id)
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    self.assertTrue('quota' in res.data)
    self.assertTrue('user' in res.data)

    # User is the primary key (not profile id)
    self.assertEqual(res.data['user'], user.id)

    # There should be no quota by default (-1 means unlimited)
    self.assertEqual(res.data['quota'], -1)

    # Try updating
    user.profile.quota = 10
    # NOTE(review): saving the user appears to persist the related
    # profile as well (presumably via a post_save signal) — confirm
    user.save()
    res = client.get('/api/admin/profiles/%s/' % user.id)
    self.assertEqual(res.data['quota'], 10)

    # Update quota deadlines
    self.assertTrue(user.profile.get_quota_deadline() is None)

    # Miss parameters (the 'hours' field is required)
    res = client.post('/api/admin/profiles/%s/update_quota_deadline/' % user.id)
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    res = client.post('/api/admin/profiles/%s/update_quota_deadline/' % user.id, data={'hours': 48})
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    # Deadline should land ~48 hours in the future
    self.assertTrue('deadline' in res.data and res.data['deadline'] > time.time() + 47*60*60)

    # hours=0 sets the deadline to (approximately) now
    res = client.post('/api/admin/profiles/%s/update_quota_deadline/' % user.id, data={'hours': 0})
    self.assertEqual(res.status_code, status.HTTP_200_OK)
    self.assertTrue(abs(user.profile.get_quota_deadline() - time.time()) < 10)

Wyświetl plik

@ -249,6 +249,9 @@ class TestApiTask(BootTransactionTestCase):
# Orthophoto bands field should be an empty list
self.assertEqual(len(task.orthophoto_bands), 0)
# Size should be zero
self.assertEqual(task.size, 0)
# tiles.json, bounds, metadata should not be accessible at this point
tile_types = ['orthophoto', 'dsm', 'dtm']
endpoints = ['tiles.json', 'bounds', 'metadata']
@ -384,6 +387,12 @@ class TestApiTask(BootTransactionTestCase):
# Orthophoto bands field should be populated
self.assertEqual(len(task.orthophoto_bands), 4)
# Size should be updated
self.assertTrue(task.size > 0)
# The owner's used quota should have increased
self.assertTrue(task.project.owner.profile.used_quota_cached() > 0)
# Can export orthophoto (when formula and bands are specified)
res = client.post("/api/projects/{}/tasks/{}/orthophoto/export".format(project.id, task.id), {
'formula': 'NDVI'
@ -946,6 +955,7 @@ class TestApiTask(BootTransactionTestCase):
self.assertTrue(res.data['success'])
new_task_id = res.data['task']['id']
self.assertNotEqual(res.data['task']['id'], task.id)
self.assertEqual(res.data['task']['size'], task.size)
new_task = Task.objects.get(pk=new_task_id)

Wyświetl plik

@ -0,0 +1,48 @@
from django.contrib.auth.models import User, Group
from nodeodm.models import ProcessingNode
from rest_framework import status
from rest_framework.test import APIClient
from .classes import BootTestCase
from .utils import start_simple_auth_server
from webodm import settings
class TestAuth(BootTestCase):
    """Tests the external authentication flow (EXTERNAL_AUTH_ENDPOINT)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_ext_auth(self):
        client = APIClient()

        # Disable external auth
        settings.EXTERNAL_AUTH_ENDPOINT = ''

        # Try to log-in; this user only exists on the external auth
        # server, so login must fail while external auth is disabled
        ok = client.login(username='extuser1', password='test1234')
        self.assertFalse(ok)

        # Enable external auth
        settings.EXTERNAL_AUTH_ENDPOINT = 'http://0.0.0.0:5555/auth'

        with start_simple_auth_server(["5555"]):
            # Wrong password: no local user should be created
            ok = client.login(username='extuser1', password='invalid')
            self.assertFalse(ok)
            self.assertFalse(User.objects.filter(username="extuser1").exists())

            # Correct credentials: login succeeds and the backend mirrors
            # the external server's response (user id, quota, node)
            ok = client.login(username='extuser1', password='test1234')
            self.assertTrue(ok)
            user = User.objects.get(username="extuser1")
            self.assertEqual(user.id, 100)
            self.assertEqual(user.profile.quota, 500)

            pnode = ProcessingNode.objects.get(token='test')
            self.assertEqual(pnode.hostname, 'localhost')
            self.assertEqual(pnode.port, 4444)

            # The node is assigned with view-only permissions
            self.assertTrue(user.has_perm('view_processingnode', pnode))
            self.assertFalse(user.has_perm('delete_processingnode', pnode))
            self.assertFalse(user.has_perm('change_processingnode', pnode))

            # Re-test login (the user now exists locally)
            ok = client.login(username='extuser1', password='test1234')
            self.assertTrue(ok)

Wyświetl plik

@ -0,0 +1,34 @@
import os
from django.test import Client
from webodm import settings
from .classes import BootTestCase
class TestLogin(BootTestCase):
    """Tests rendering of the password reset link on the login page."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_reset_password_render(self):
        c = Client()
        c.login(username="testuser", password="test1234")

        # With no RESET_PASSWORD_LINK configured, the page falls back
        # to manual reset instructions
        settings.RESET_PASSWORD_LINK = ''
        res = c.get('/login/', follow=True)
        body = res.content.decode("utf-8")
        # The reset password link should show instructions
        self.assertTrue("You can reset the administrator password" in body)

        settings.RESET_PASSWORD_LINK = 'http://0.0.0.0/reset_test'
        res = c.get('/login/', follow=True)
        body = res.content.decode("utf-8")
        # The reset password link is a link
        self.assertTrue('<a href="http://0.0.0.0/reset_test' in body)

Wyświetl plik

@ -0,0 +1,88 @@
from django.contrib.auth.models import User, Group
from rest_framework import status
from rest_framework.test import APIClient
from app.models import Task, Project
from nodeodm.models import ProcessingNode
from worker.tasks import check_quotas
from .classes import BootTestCase
class TestQuota(BootTestCase):
    """Tests disk quota display, cached usage and quota enforcement."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_quota(self):
        c = APIClient()
        c.login(username="testuser", password="test1234")
        user = User.objects.get(username="testuser")

        # Default quota is -1 (unlimited)
        self.assertEqual(user.profile.quota, -1)

        res = c.get('/dashboard/', follow=True)
        body = res.content.decode("utf-8")
        # There should be no quota panel
        self.assertFalse('<div class="info-item quotas">' in body)

        user.profile.quota = 2000
        # NOTE(review): saving the user appears to persist the related
        # profile as well (presumably via a post_save signal) — confirm
        user.save()

        res = c.get('/dashboard/', follow=True)
        body = res.content.decode("utf-8")
        # There should be a quota panel
        self.assertTrue('<div class="info-item quotas">' in body)
        # There should be no warning
        self.assertFalse("disk quota is being exceeded" in body)

        self.assertEqual(user.profile.used_quota(), 0)
        self.assertEqual(user.profile.used_quota_cached(), 0)

        # Create a task with size (1005 + 1010 > 2000 --> over quota)
        p = Project.objects.create(owner=user, name='Test')
        p.save()
        t = Task.objects.create(project=p, name='Test', size=1005)
        t.save()
        t = Task.objects.create(project=p, name='Test2', size=1010)
        t.save()

        # Simulate call to task.update_size which calls clear_used_quota_cache
        user.profile.clear_used_quota_cache()

        self.assertTrue(user.profile.has_exceeded_quota())
        self.assertTrue(user.profile.has_exceeded_quota_cached())

        res = c.get('/dashboard/', follow=True)
        body = res.content.decode("utf-8")
        self.assertTrue("disk quota is being exceeded" in body)
        # Default grace period (QUOTA_EXCEEDED_GRACE_PERIOD) is 8 hours
        self.assertTrue("in 8 hours" in body)

        # Running the workers check_quota function will not remove tasks
        # (the grace deadline has not passed yet)
        check_quotas()
        self.assertEqual(len(Task.objects.filter(project__owner=user)), 2)

        # Update grace period and verify the rendered countdown text
        def check_quota_warning(hours, text):
            user.profile.set_quota_deadline(hours)
            res = c.get('/dashboard/', follow=True)
            body = res.content.decode("utf-8")
            self.assertTrue(text in body)

        check_quota_warning(73, "in 3 days")
        check_quota_warning(71, "in 2 days")
        check_quota_warning(47.9, "in 47 hours")
        check_quota_warning(3.1, "in 3 hours")
        check_quota_warning(1.51, "in 90 minutes")
        check_quota_warning(0.99, "in 59 minutes")
        check_quota_warning(0, "very soon")

        # Running the check_quotas function should remove the last task only
        # (removing 'Test2' brings usage to 1005, below the 2000 quota)
        check_quotas()
        tasks = Task.objects.filter(project__owner=user)
        self.assertEqual(len(tasks), 1)
        self.assertEqual(tasks[0].name, "Test")

Wyświetl plik

@ -25,6 +25,16 @@ def start_processing_node(args = []):
node_odm.terminate()
time.sleep(1) # Wait for the server to stop
@contextmanager
def start_simple_auth_server(args = []):
    """Context manager that runs the simple auth test server as a subprocess.

    Launches app/tests/scripts/simple_auth_server.py with the given extra
    CLI args (e.g. a port number), yields the Popen handle, and guarantees
    the process is terminated even if the managed block raises (e.g. on a
    failing test assertion) — the original version leaked the subprocess
    in that case.
    """
    current_dir = os.path.dirname(os.path.realpath(__file__))
    s = subprocess.Popen(['python', 'simple_auth_server.py'] + args, shell=False,
                            cwd=os.path.join(current_dir, "scripts"))
    time.sleep(2) # Wait for the server to launch
    try:
        yield s
    finally:
        s.terminate()
        time.sleep(1) # Wait for the server to stop
# We need to clear previous media_root content
# This points to the test directory, but just in case
# we double check that the directory is indeed a test directory

Wyświetl plik

@ -4,6 +4,9 @@ from django.utils.translation import gettext as _
_("Detect changes between two different tasks in the same project.")
_("Import images from external sources directly")
_("Compute, preview and export contours from DEMs")
_("Display program version, memory and disk space usage statistics")
_("Integrate WebODM with DroneDB: import images and share results")
_("Create editable short links when sharing task URLs")
_("Calculate and draw an elevation map based on a task's DEMs")
_("Add a fullscreen button to the 2D map view")
_("Sync accounts from webodm.net")
@ -11,9 +14,7 @@ _("Compute volume, area and length measurements on Leaflet")
_("A plugin to upload orthophotos to OpenAerialMap")
_("A plugin to add a button for quickly opening OpenStreetMap's iD editor and setup a TMS basemap.")
_("A plugin to create GCP files from images")
_("A plugin to create GCP files from images")
_("A plugin to show charts of projects and tasks")
_("Create short links when sharing task URLs")
_("Create editable short links when sharing task URLs")
_("Integrate WebODM with DroneDB: import images and share results")
_("Get notified when a task has finished processing, has been removed or has failed")
_("Display program version, memory and disk space usage statistics")
_("A plugin to create GCP files from images")

Wyświetl plik

@ -38,9 +38,10 @@ def dashboard(request):
return redirect(settings.PROCESSING_NODES_ONBOARDING)
no_tasks = Task.objects.filter(project__owner=request.user).count() == 0
no_projects = Project.objects.filter(owner=request.user).count() == 0
# Create first project automatically
if Project.objects.count() == 0:
if no_projects and request.user.has_perm('app.add_project'):
Project.objects.create(owner=request.user, name=_("First Project"))
return render(request, 'app/dashboard.html', {'title': _('Dashboard'),

Wyświetl plik

@ -0,0 +1,8 @@
version: '2.1'
services:
webapp:
volumes:
- ${WO_SETTINGS}:/webodm/webodm/settings_override.py
worker:
volumes:
- ${WO_SETTINGS}:/webodm/webodm/settings_override.py

2
locale

@ -1 +1 @@
Subproject commit 31a7b8fc6d955e8bd6c13d2de84501bc43895190
Subproject commit 6469d33dccdc2b7cc4c3596e9f11dfc907736e28

Wyświetl plik

@ -15,7 +15,9 @@ from pyodm import Node
from pyodm import exceptions
from django.db.models import signals
from datetime import timedelta
import logging
logger = logging.getLogger('app.logger')
class ProcessingNode(models.Model):
hostname = models.CharField(verbose_name=_("Hostname"), max_length=255, help_text=_("Hostname or IP address where the node is located (can be an internal hostname as well). If you are using Docker, this is never 127.0.0.1 or localhost. Find the IP address of your host machine by running ifconfig on Linux or by checking your network settings."))
@ -197,6 +199,8 @@ def auto_update_node_info(sender, instance, created, **kwargs):
instance.update_node_info()
except exceptions.OdmError:
pass
except Exception as e:
logger.warning("auto_update_node_info: " + str(e))
class ProcessingNodeUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(ProcessingNode, on_delete=models.CASCADE)

Wyświetl plik

@ -1,6 +1,6 @@
{
"name": "WebODM",
"version": "2.0.3",
"version": "2.1.0",
"description": "User-friendly, extendable application and API for processing aerial imagery.",
"main": "index.js",
"scripts": {

Wyświetl plik

@ -14,6 +14,7 @@ django-filter==2.4.0
django-guardian==1.4.9
django-imagekit==4.0.1
django-libsass==0.7
django-redis==4.12.1
django-webpack-loader==0.6.0
djangorestframework==3.13.1
djangorestframework-jwt==1.9.0

Wyświetl plik

@ -130,6 +130,12 @@ case $key in
shift # past argument
shift # past value
;;
--settings)
WO_SETTINGS=$(realpath "$2")
export WO_SETTINGS
shift # past argument
shift # past value
;;
*) # unknown option
POSITIONAL+=("$1") # save it in an array for later
shift # past argument
@ -170,6 +176,7 @@ usage(){
echo " --broker Set the URL used to connect to the celery broker (default: $DEFAULT_BROKER)"
echo " --detached Run WebODM in detached mode. This means WebODM will run in the background, without blocking the terminal (default: disabled)"
echo " --gpu Use GPU NodeODM nodes (Linux only) (default: disabled)"
echo " --settings Path to a settings.py file to enable modifications of system settings (default: None)"
exit
}
@ -339,6 +346,7 @@ start(){
echo "SSL insecure port redirect: $WO_SSL_INSECURE_PORT_REDIRECT"
echo "Celery Broker: $WO_BROKER"
echo "Default Nodes: $WO_DEFAULT_NODES"
echo "Settings: $WO_SETTINGS"
echo "================================"
echo "Make sure to issue a $0 down if you decide to change the environment."
echo ""
@ -401,6 +409,14 @@ start(){
echo "Will enable SSL ($method)"
fi
if [ ! -z "$WO_SETTINGS" ]; then
if [ ! -e "$WO_SETTINGS" ]; then
echo -e "\033[91mSettings file does not exist: $WO_SETTINGS\033[39m"
exit 1
fi
command+=" -f docker-compose.settings.yml"
fi
command="$command up"
if [[ $detached = true ]]; then

Wyświetl plik

@ -169,6 +169,7 @@ AUTH_PASSWORD_VALIDATORS = [
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # this is default
'guardian.backends.ObjectPermissionBackend',
'app.auth.backends.ExternalBackend',
)
# Internationalization
@ -376,14 +377,38 @@ CELERY_INCLUDE=['worker.tasks', 'app.plugins.worker']
CELERY_WORKER_REDIRECT_STDOUTS = False
CELERY_WORKER_HIJACK_ROOT_LOGGER = False
# Use Redis as the Django cache backend (shared between the webapp and
# workers), pointing at the same instance used as the celery broker
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": os.environ.get('WO_BROKER', 'redis://localhost'),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}

# Number of minutes a processing node hasn't been seen
# before it should be considered offline
NODE_OFFLINE_MINUTES = 5

# URL of an external authentication service; when non-empty, the
# ExternalBackend auth backend POSTs login credentials to this endpoint
EXTERNAL_AUTH_ENDPOINT = ''

# URL shown on the login page for resetting passwords; when empty,
# manual reset instructions are displayed instead
RESET_PASSWORD_LINK = ''

# Number of hours before tasks are automatically deleted
# from an account that is exceeding a disk quota
QUOTA_EXCEEDED_GRACE_PERIOD = 8

if TESTING or FLUSHING:
    CELERY_TASK_ALWAYS_EAGER = True
    # Tests run a simple auth server on port 5555 (see app/tests/scripts)
    EXTERNAL_AUTH_ENDPOINT = 'http://0.0.0.0:5555/auth'

try:
    from .local_settings import *
except ImportError:
    pass

# Optional system-wide override, bind-mounted via docker-compose.settings.yml
try:
    from .settings_override import *
except ImportError:
    pass

Wyświetl plik

@ -0,0 +1,2 @@
# Do not touch. This file can be bind-mount replaced
# by docker-compose for customized settings

Wyświetl plik

@ -44,6 +44,14 @@ app.conf.beat_schedule = {
'retry': False
}
},
'check-quotas': {
'task': 'worker.tasks.check_quotas',
'schedule': 3600,
'options': {
'expires': 1799,
'retry': False
}
},
}
# Mock class for handling async results during testing

Wyświetl plik

@ -11,6 +11,7 @@ from celery.utils.log import get_task_logger
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Count
from django.db.models import Q
from app.models import Profile
from app.models import Project
from app.models import Task
@ -202,4 +203,31 @@ def export_pointcloud(self, input, **opts):
return result
except Exception as e:
logger.error(str(e))
return {'error': str(e)}
return {'error': str(e)}
@app.task
def check_quotas():
    """Periodic task that enforces per-user disk quotas.

    For every profile with a quota set (quota > -1) that is over its
    quota, a grace-period deadline is started (or honored if already
    set). Once the deadline passes, the user's most recent tasks are
    deleted until usage falls back under the quota. Profiles back under
    quota get any pending deadline cleared.
    """
    profiles = Profile.objects.filter(quota__gt=-1)
    for p in profiles:
        if p.has_exceeded_quota():
            deadline = p.get_quota_deadline()
            if deadline is None:
                # First time over quota: start the grace period
                deadline = p.set_quota_deadline(settings.QUOTA_EXCEEDED_GRACE_PERIOD)
            now = time.time()
            if now > deadline:
                # deadline passed, delete tasks until quota is met
                logger.info("Quota deadline expired for %s, deleting tasks" % str(p.user.username))
                last_task = None  # keep bound for the except handler below
                while p.has_exceeded_quota():
                    try:
                        # Newest task first, so the oldest work survives
                        last_task = Task.objects.filter(project__owner=p.user).order_by("-created_at").first()
                        if last_task is None:
                            break
                        logger.info("Deleting %s" % last_task)
                        last_task.delete()
                    except Exception as e:
                        # logger.warn is deprecated; use warning
                        logger.warning("Cannot delete %s for %s: %s" % (str(last_task), str(p.user.username), str(e)))
                        break
        else:
            p.clear_quota_deadline()