Faster timeout on update_node_info(), OOM preferences

pull/441/head
Piero Toffanin 2018-04-22 14:49:20 -04:00
parent 9a162841f1
commit 8317283b87
7 changed files with 21 additions and 15 deletions

View file

@@ -14,3 +14,4 @@ services:
     ports:
       - "3000"
     restart: on-failure:10
+    oom_score_adj: 500

View file

@@ -13,6 +13,7 @@ services:
     volumes:
       - dbdata:/var/lib/postgresql/data
     restart: on-failure:10
+    oom_score_adj: -100
   webapp:
     image: opendronemap/webodm_webapp
     container_name: webapp
@@ -31,10 +32,14 @@ services:
       - WO_DEBUG
       - WO_BROKER
     restart: on-failure:10
+    oom_score_adj: 0
   broker:
     image: redis
     container_name: broker
     restart: on-failure:10
+    mem_limit: 256m
+    oom_kill_disable: true
+    oom_score_adj: -500
   worker:
     image: opendronemap/webodm_webapp
     container_name: worker
@@ -48,3 +53,4 @@ services:
       - WO_BROKER
       - WO_DEBUG
     restart: on-failure:10
+    oom_score_adj: 250
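
Taken together, the compose changes rank the services by expendability under memory pressure: a higher oom_score_adj makes the kernel OOM killer pick that process sooner, so processing nodes (500) are sacrificed first, then workers (250) and the webapp (0), while the database (-100) and the Redis broker (-500, additionally capped at 256m and exempt from OOM kills) are protected. A minimal, Linux-only sketch to check the bias actually applied inside a container:

from pathlib import Path

def read_oom_score_adj(pid=1):
    # Range is -1000 (never kill) to 1000 (kill first); inside a
    # container, PID 1 is the service's main process.
    return int(Path("/proc/{}/oom_score_adj".format(pid)).read_text())

print(read_oom_score_adj())  # e.g. 500 inside the node-odm container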

View file

@@ -9,12 +9,11 @@ import os
 from urllib.parse import urlunparse
 from app.testwatch import TestWatch

-TIMEOUT = 30
-
 class ApiClient:
-    def __init__(self, host, port):
+    def __init__(self, host, port, timeout=30):
         self.host = host
         self.port = port
+        self.timeout = timeout

     def url(self, url):
         netloc = self.host if self.port == 80 else "{}:{}".format(self.host, self.port)
@@ -23,28 +22,28 @@ class ApiClient:
         return urlunparse(('http', netloc, url, '', '', ''))

     def info(self):
-        return requests.get(self.url('/info'), timeout=TIMEOUT).json()
+        return requests.get(self.url('/info'), timeout=self.timeout).json()

     def options(self):
-        return requests.get(self.url('/options'), timeout=TIMEOUT).json()
+        return requests.get(self.url('/options'), timeout=self.timeout).json()

     def task_info(self, uuid):
-        return requests.get(self.url('/task/{}/info').format(uuid), timeout=TIMEOUT).json()
+        return requests.get(self.url('/task/{}/info').format(uuid), timeout=self.timeout).json()

     @TestWatch.watch()
     def task_output(self, uuid, line = 0):
-        return requests.get(self.url('/task/{}/output?line={}').format(uuid, line), timeout=TIMEOUT).json()
+        return requests.get(self.url('/task/{}/output?line={}').format(uuid, line), timeout=self.timeout).json()

     def task_cancel(self, uuid):
-        return requests.post(self.url('/task/cancel'), data={'uuid': uuid}, timeout=TIMEOUT).json()
+        return requests.post(self.url('/task/cancel'), data={'uuid': uuid}, timeout=self.timeout).json()

     def task_remove(self, uuid):
-        return requests.post(self.url('/task/remove'), data={'uuid': uuid}, timeout=TIMEOUT).json()
+        return requests.post(self.url('/task/remove'), data={'uuid': uuid}, timeout=self.timeout).json()

     def task_restart(self, uuid, options = None):
         data = {'uuid': uuid}
         if options is not None: data['options'] = json.dumps(options)
-        return requests.post(self.url('/task/restart'), data=data, timeout=TIMEOUT).json()
+        return requests.post(self.url('/task/restart'), data=data, timeout=self.timeout).json()

     def task_download(self, uuid, asset):
         res = requests.get(self.url('/task/{}/download/{}').format(uuid, asset), stream=True)
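
The module-level TIMEOUT constant becomes a per-instance default, so each call site can choose how long it is willing to wait. A rough usage sketch of the class above (host and port are placeholders):

import requests

# A health probe wants to fail fast; task operations keep the 30 s default.
probe = ApiClient("localhost", 3000, timeout=5)
try:
    print(probe.info())
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
    print("node did not answer within 5 seconds")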

View file

@@ -66,7 +66,7 @@ class ProcessingNode(models.Model):

         :returns: True if information could be updated, False otherwise
         """
-        api_client = self.api_client()
+        api_client = self.api_client(timeout=5)
         try:
             info = api_client.info()
             self.api_version = info['version']
@@ -80,8 +80,8 @@ class ProcessingNode(models.Model):
         except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, json.decoder.JSONDecodeError, simplejson.JSONDecodeError):
             return False

-    def api_client(self):
-        return ApiClient(self.hostname, self.port)
+    def api_client(self, timeout=30):
+        return ApiClient(self.hostname, self.port, timeout)

     def get_available_options_json(self, pretty=False):
         """

View file

@@ -326,6 +326,7 @@ CELERY_ACCEPT_CONTENT = ['json']
 CELERY_INCLUDE=['worker.tasks']
 CELERY_WORKER_REDIRECT_STDOUTS = False
 CELERY_WORKER_HIJACK_ROOT_LOGGER = False
+CELERY_TASK_RESULT_EXPIRES = 60

 if TESTING:
     CELERY_TASK_ALWAYS_EAGER = True
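
CELERY_TASK_RESULT_EXPIRES = 60 caps how long finished task results live in the result backend. With Redis serving as both broker and backend, stale result keys would otherwise accumulate inside the broker container that is now limited to 256m. A standalone sketch using the modern setting name (the broker URL is a placeholder; WebODM takes its own from WO_BROKER):

from celery import Celery

app = Celery("worker", broker="redis://broker", backend="redis://broker")
app.conf.result_expires = 60  # same effect as CELERY_TASK_RESULT_EXPIRES = 60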

View file

@@ -52,7 +52,7 @@ start(){
     action=$1

     echo "Starting worker using broker at $WO_BROKER"
-    celery -A worker worker --concurrency 1 --max-tasks-per-child 1000 --loglevel=warn > /dev/null
+    celery -A worker worker --autoscale $(grep -c '^processor' /proc/cpuinfo),2 --max-tasks-per-child 1000 --loglevel=warn > /dev/null
 }

 start_scheduler(){
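
--autoscale max,min replaces the fixed --concurrency 1: the first number is the pool's upper bound (one process per CPU, counted by the grep over /proc/cpuinfo) and the second is the floor it shrinks back to when idle. A Python equivalent of the upper bound:

import multiprocessing

# Same count the shell one-liner derives from /proc/cpuinfo.
print("--autoscale {},2".format(multiprocessing.cpu_count()))  # e.g. --autoscale 8,2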

View file

@@ -79,7 +79,6 @@ def get_pending_tasks():
 @app.task
 def process_pending_tasks():
     tasks = get_pending_tasks()
-
     for task in tasks:
         process_task.delay(task.id)
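
process_pending_tasks() only enqueues work: .delay() returns an AsyncResult immediately, so with the autoscaled pool above, several process_task messages can execute in parallel, and the 60-second result expiry keeps their stored results from piling up in Redis. A small sketch of what a call site observes (the task id 42 is made up):

result = process_task.delay(42)  # shorthand for apply_async(); returns at once
print(result.id, result.state)   # PENDING until a pool process picks it up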