Mirror of https://github.com/jupyterhub/repo2docker
Make memory limit checking tests more solid
- Use a more precise way of triggering OOM conditions
- Make sure that the image is rebuilt each time with a cachebust
- Don't have two copies of the OOM triggering script

pull/159/head
parent 43603225de
commit 43e12631da
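On the cachebust bullet: Docker reuses cached layers as long as the files a COPY step pulls in are unchanged, so the new test writes a fresh timestamp into a cachebust file inside the build directory before every run; the changed content invalidates the cache from that layer onward and guarantees postBuild really executes. A minimal sketch of the idea, mirroring what the test in the first hunk below does (the helper name is illustrative, not part of the commit):

    import os
    import time

    def bust_cache(builddir):
        # Any change to a file copied into the image invalidates Docker's
        # layer cache from that COPY step on, forcing postBuild to re-run.
        with open(os.path.join(builddir, 'cachebust'), 'w') as f:
            f.write(str(time.time()))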
@@ -1,36 +1,77 @@
 """
 Test that build time memory limits are actually enforced.
+
+We give the container image at least 128M of RAM (so base things like
+apt and pip can run), and then try to allocate & use 256MB in postBuild.
+This should fail!
 """
+import os
 import subprocess
+import time
 
 
-def test_memlimit_nondockerfile():
+def does_build(builddir, mem_limit, mem_allocate_mb):
+    mem_allocate_mb_file = os.path.join(builddir, 'mem_allocate_mb')
+
+    # Cache bust so we actually do a rebuild each time this is run!
+    with open(os.path.join(builddir, 'cachebust'), 'w') as cachebust:
+        cachebust.write(str(time.time()))
+    try:
+        with open(mem_allocate_mb_file, 'w') as f:
+            f.write(str(mem_allocate_mb))
+        try:
+            output = subprocess.check_output(
+                [
+                    'repo2docker',
+                    '--no-run',
+                    '--build-memory-limit', '{}M'.format(mem_limit),
+                    builddir
+                ],
+                stderr=subprocess.STDOUT,
+            ).decode()
+            print(output)
+            return True
+        except subprocess.CalledProcessError as e:
+            output = e.output.decode()
+            print(output)
+            if "The command '/bin/sh -c ./postBuild' returned a non-zero code: 137" in output:
+                return False
+            else:
+                raise
+    finally:
+        os.remove(mem_allocate_mb_file)
+
+
+def test_memlimit_nondockerfile_fail():
     """
     Test if memory limited builds are working for non dockerfile builds
     """
-    try:
-        subprocess.check_call([
-            'repo2docker',
-            '--no-run',
-            '--build-memory-limit', '4M',
-            'tests/memlimit/non-dockerfile'
-        ])
-        # If this doesn't throw an exception, then memory limit was
-        # not enforced!
-        assert False
-    except subprocess.CalledProcessError as e:
-        assert True
+    basedir = os.path.dirname(__file__)
+    assert not does_build(
+        os.path.join(basedir, 'memlimit/non-dockerfile'),
+        128,
+        256
+    )
+
+    assert does_build(
+        os.path.join(basedir, 'memlimit/non-dockerfile'),
+        512,
+        256
+    )
 
 
-def test_memlimit_dockerfile():
+def test_memlimit_dockerfile_fail():
     """
-    Test if memory limited builds are working for non dockerfile builds
+    Test if memory limited builds are working for dockerfile builds
     """
-    try:
-        subprocess.check_call([
-            'repo2docker',
-            '--no-run',
-            '--build-memory-limit', '4M',
-            'tests/memlimit/dockerfile'
-        ])
-        # If this doesn't throw an exception, then memory limit was
-        # not enforced!
-        assert False
-    except subprocess.CalledProcessError as e:
-        assert True
+    basedir = os.path.dirname(__file__)
+    assert not does_build(
+        os.path.join(basedir, 'memlimit/dockerfile'),
+        128,
+        256
+    )
+
+    assert does_build(
+        os.path.join(basedir, 'memlimit/dockerfile'),
+        512,
+        256
+    )
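Note on the exit-code check above: when a command in a docker build is killed by a signal, Docker reports an exit status of 128 plus the signal number. The kernel OOM killer sends SIGKILL (signal 9), so 128 + 9 = 137 is the expected signature of a build step that ran out of memory. A minimal Linux-oriented sketch of that convention (the helper name is illustrative, not part of the commit):

    import signal

    def killed_by_oom(exit_code):
        # Docker reports 128 + signal number for signal-terminated commands;
        # the OOM killer uses SIGKILL (9), so 137 marks an out-of-memory kill.
        return exit_code == 128 + signal.SIGKILL

    print(killed_by_oom(137))  # True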
@@ -0,0 +1 @@
+cachebust
@@ -1,4 +1,6 @@
-FROM python:3.6
+FROM ubuntu:zesty
 
-COPY postBuild /usr/local/bin/postBuild
+RUN apt-get update && apt-get install --yes python3
+
+COPY . .
 RUN ./postBuild
@@ -1,5 +1,22 @@
 #!/usr/bin/env python3
+"""
+Simplest program that tries to allocate a large amount of RAM.
+
+malloc lies on Linux by default, so we use memset to force the
+kernel to actually give us real memory.
+"""
+from ctypes import cdll, c_void_p, memset
+import os
 
-array = []
-for i in range(1024 * 1024 * 64):
-    array.append(i)
+libc = cdll.LoadLibrary("libc.so.6")
+libc.malloc.restype = c_void_p
+
+with open('mem_allocate_mb') as f:
+    mem_allocate_mb = int(f.read().strip())
+
+size = 1024 * 1024 * mem_allocate_mb
+print("trying to allocate {}MB".format(mem_allocate_mb))
+
+ret = libc.malloc(size)
+
+memset(ret, 0, size)
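To see why the memset matters (an illustration under default Linux overcommit settings, not part of the commit): malloc alone mostly reserves address space, and the memory cgroup is only charged once pages are actually written. The following Linux-only sketch prints the resident set size before and after each step; only the memset makes it jump.

    from ctypes import cdll, c_void_p, memset

    def resident_kb():
        # Read VmRSS from /proc/self/status (Linux-only, for demonstration).
        with open('/proc/self/status') as f:
            for line in f:
                if line.startswith('VmRSS:'):
                    return int(line.split()[1])

    libc = cdll.LoadLibrary("libc.so.6")
    libc.malloc.restype = c_void_p

    size = 256 * 1024 * 1024
    print('before malloc:', resident_kb(), 'kB')
    ptr = libc.malloc(size)   # reserves address space, little resident memory
    print('after malloc: ', resident_kb(), 'kB')
    memset(ptr, 0, size)      # forces the kernel to supply real pages
    print('after memset: ', resident_kb(), 'kB')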
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-array = []
-for i in range(1024 * 1024 * 64):
-    array.append(i)
@@ -0,0 +1 @@
+../dockerfile/postBuild
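Assuming the test module above lives under the repository's tests/ directory and that pytest plus a working Docker daemon are available (both assumptions, not stated in the diff), the new tests can be selected by name, for example:

    # Hypothetical local invocation; '-k memlimit' picks the tests added here.
    import pytest
    pytest.main(['-k', 'memlimit'])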