Make the memory limit test simpler

Instead of checking that a build which allocates too much memory does
fail, we now only check that we pass the correct arguments to the Docker
API client. It seems reasonable to rely on the docker client working and
doing the right thing. This solves a problem where our CI is flaky
because the kernel of the VM the tests run on doesn't support this.
pull/912/head
Tim Head 2020-06-12 19:03:48 +02:00
rodzic 8d85a51cd2
commit df77a99ce1
1 zmienionych plików z 32 dodań i 49 usunięć

Wyświetl plik

@ -1,14 +1,8 @@
""" """
Test that build time memory limits are actually enforced. Test that build time memory limits are enforced
We give the container image at least 128M of RAM (so base things like
apt and pip can run), and then try to allocate & use 256MB in postBuild.
This should fail!
""" """
import os import os
import shutil
import time
from unittest.mock import MagicMock from unittest.mock import MagicMock
@ -16,61 +10,50 @@ import docker
import pytest import pytest
from repo2docker.app import Repo2Docker
from repo2docker.buildpacks import BaseImage, DockerBuildPack from repo2docker.buildpacks import BaseImage, DockerBuildPack
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def test_memory_limit_enforced(tmpdir):
    """
    Check that the memory limit is passed on to the Docker API client.

    We do not run a real build: we hand the buildpack a mocked
    ``docker.APIClient`` and then inspect the keyword arguments of the
    recorded ``build`` call. Relying on the Docker client to actually
    enforce the limit keeps this test fast and independent of the
    kernel features of the machine running CI.
    """
    fake_cache_from = ["image-1:latest"]
    fake_log_value = {"stream": "fake"}
    fake_client = MagicMock(spec=docker.APIClient)
    # build() is consumed as an iterator of log entries, so the mock
    # must return an iterable, not a plain value
    fake_client.build.return_value = iter([fake_log_value])
    fake_extra_build_kwargs = {"somekey": "somevalue"}

    # some memory limit value, the important bit is that this value is
    # later passed to the `build` method of the Docker API client
    memory_limit = 128 * 1024

    # Test that the buildpack passes the right arguments to the docker
    # client in order to enforce the memory limit
    tmpdir.chdir()

    for line in BaseImage().build(
        fake_client,
        "image-2",
        memory_limit,
        {},
        fake_cache_from,
        fake_extra_build_kwargs,
    ):
        pass

    # check that we pass arguments asking for memory limiting
    # to the Docker API client
    args, kwargs = fake_client.build.call_args
    assert "container_limits" in kwargs
    # memswap == memory disables swap, so the limit is a hard cap
    assert kwargs["container_limits"] == {
        "memory": memory_limit,
        "memswap": memory_limit,
    }
def test_memlimit_same_postbuild(): def test_memlimit_same_postbuild():
""" """
Validate that the postBuild files for dockerfile & nondockerfile are same Validate that the postBuild files for the dockerfile and non-dockerfile
tests are the same
Until https://github.com/jupyter/repo2docker/issues/160 gets fixed. Until https://github.com/jupyter/repo2docker/issues/160 gets fixed.
""" """