Add rudimentary benchmarking code and a couple of benchmarks

This commit implements a '--bench' argument for runtests.py. When
specified, this runs a couple of special "benchmark" tests that are
created specifically for testing the performance of the explorer page.

It's very rudimentary at the moment, as I needed something quickly to
implement the performance improvement in this PR. It gets the job done,
though.
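
The benches can be run with 'python runtests.py --bench' from the root
of a Wagtail checkout; each one prints its timing and memory figures to
the console.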
pull/4600/head
Karl Hobley 2017-02-09 15:06:50 +00:00, committed by Matt Westcott
parent c24fabe1d5
commit e5a4208774
3 changed files with 123 additions and 1 deletion

View file: runtests.py

@@ -18,6 +18,7 @@ def make_parser():
     parser.add_argument('--elasticsearch2', action='store_true')
     parser.add_argument('--elasticsearch5', action='store_true')
     parser.add_argument('--elasticsearch6', action='store_true')
+    parser.add_argument('--bench', action='store_true')

     return parser
@@ -61,7 +62,15 @@ def runtests():
         # forcibly delete the ELASTICSEARCH_URL setting to skip those tests
         del os.environ['ELASTICSEARCH_URL']

-    argv = [sys.argv[0], 'test'] + rest
+    if args.bench:
+        benchmarks = [
+            'wagtail.admin.tests.benches',
+        ]
+
+        argv = [sys.argv[0], 'test', '-v2'] + benchmarks + rest
+    else:
+        argv = [sys.argv[0], 'test'] + rest

     try:
         execute_from_command_line(argv)
     finally:
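
With '--bench' given, the branch above is equivalent to invoking the
Django test runner directly on the benchmark module ('-v2' makes the
runner name each test as it runs, so the printed figures can be matched
to their bench). A rough illustration, assuming no extra arguments in
'rest':

    argv = ['runtests.py', 'test', '-v2', 'wagtail.admin.tests.benches']
    execute_from_command_line(argv)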

View file: wagtail/admin/tests/benches.py

@@ -0,0 +1,86 @@
from __future__ import absolute_import, unicode_literals

from django.test import TestCase
from django.urls import reverse
from django.utils import timezone

from wagtail.core.models import Page, Site
from wagtail.tests.benchmark import Benchmark
from wagtail.tests.testapp.models import SingleEventPage, StreamPage
from wagtail.tests.utils import WagtailTestUtils


class BenchPageExplorerWith50LargePages(Benchmark, WagtailTestUtils, TestCase):
    """
    Creates 50 pages with large body content and benches the explorer.

    This will be slow if the body content is being loaded from the database.
    """
    def setUp(self):
        self.root_page = Page.objects.get(id=1)

        # Add a site so the URLs render correctly
        Site.objects.create(is_default_site=True, root_page=self.root_page)

        # Create a large piece of body text: 100 stream blocks of 6,000
        # characters each, so roughly 600kB of body per page
        body = '[' + ','.join(['{"type": "text", "value": "%s"}' % ('foo' * 2000)] * 100) + ']'

        # Create 50 simple pages with long content fields
        for i in range(50):
            self.root_page.add_child(instance=StreamPage(
                title="Page {}".format(i + 1),
                slug=str(i + 1),
                body=body,
            ))

        self.login()

    def bench(self):
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check the response was good
        self.assertEqual(response.status_code, 200)

        # Check every single page was rendered
        self.assertContains(response, "Page 1")
        self.assertContains(response, "Page 49")


class BenchPageExplorerWithCustomURLPages(Benchmark, WagtailTestUtils, TestCase):
    """
    Creates 50 pages of a class with a customised .url property.

    This will check how long it takes to generate URLs for all of these
    pages.
    """
    def setUp(self):
        self.root_page = Page.objects.get(id=1)

        # Add a site so the URLs render correctly
        Site.objects.create(is_default_site=True, root_page=self.root_page)

        # Create 50 event pages
        for i in range(50):
            self.root_page.add_child(instance=SingleEventPage(
                title="Event {}".format(i + 1),
                slug=str(i + 1),
                date_from=timezone.now(),
                audience="public",
                location="reykjavik",
                cost="cost",
            ))

        self.login()

    def bench(self):
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))

        # Check the response was good
        self.assertEqual(response.status_code, 200)

        # Check every single page was rendered
        self.assertContains(response, "Event 1")
        self.assertContains(response, "Event 49")

        # Check the URLs were rendered correctly (the test Site above has no
        # hostname, hence the empty host in the expected URL)
        self.assertContains(response, 'a href="http:///49/pointless-suffix/"')
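
For context on the final assertion: 'pointless-suffix' comes from the
test app, where SingleEventPage overrides Wagtail's URL generation to
append a fixed suffix, so building .url for each listed page runs custom
code. A sketch of that kind of override (not the exact test-app code):

    class SingleEventPage(EventPage):
        def get_url_parts(self, *args, **kwargs):
            # Append a fixed suffix to the page's path, so every .url access
            # in the explorer listing does extra work
            url_parts = super(SingleEventPage, self).get_url_parts(*args, **kwargs)
            if url_parts is None:
                return None
            site_id, root_url, page_path = url_parts
            return site_id, root_url, page_path + 'pointless-suffix/'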

View file: wagtail/tests/benchmark.py

@@ -0,0 +1,27 @@
from __future__ import absolute_import, unicode_literals

import time
import tracemalloc


class Benchmark():
    repeat = 10

    def test(self):
        timings = []
        memory_usage = []

        tracemalloc.start()

        for i in range(self.repeat):
            before_memory = tracemalloc.take_snapshot()
            start_time = time.time()

            self.bench()

            end_time = time.time()
            after_memory = tracemalloc.take_snapshot()

            timings.append(end_time - start_time)
            # Sum of per-file block sizes in the "after" snapshot, i.e. the
            # total traced memory in use once bench() has run
            memory_usage.append(sum([t.size for t in after_memory.compare_to(before_memory, 'filename')]))

        print("time min:", min(timings), "max:", max(timings), "avg:", sum(timings) / len(timings))
        print("memory min:", min(memory_usage), "max:", max(memory_usage), "avg:", sum(memory_usage) / len(memory_usage))