diff --git a/tests/unit/test_memlimit.py b/tests/unit/test_memlimit.py
index 80396242..3cb539d6 100644
--- a/tests/unit/test_memlimit.py
+++ b/tests/unit/test_memlimit.py
@@ -1,14 +1,8 @@
 """
-Test that build time memory limits are actually enforced.
-
-We give the container image at least 128M of RAM (so base things like
-apt and pip can run), and then try to allocate & use 256MB in postBuild.
-This should fail!
+Test that build time memory limits are enforced
 """
 import os
-import shutil
-import time
 
 from unittest.mock import MagicMock
 
@@ -16,61 +10,50 @@ import docker
 import pytest
 
-from repo2docker.app import Repo2Docker
 from repo2docker.buildpacks import BaseImage, DockerBuildPack
 
 basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
 
-def does_build(tmpdir, build_src_dir, mem_limit, mem_allocate_mb):
-    builddir = tmpdir.join("build")
-    shutil.copytree(build_src_dir, builddir)
-    builddir.chdir()
-    print(os.getcwd(), os.listdir("."))
-    mem_allocate_mb_file = os.path.join(builddir, "mem_allocate_mb")
+def test_memory_limit_enforced(tmpdir):
+    fake_cache_from = ["image-1:latest"]
+    fake_log_value = {"stream": "fake"}
+    fake_client = MagicMock(spec=docker.APIClient)
+    fake_client.build.return_value = iter([fake_log_value])
+    fake_extra_build_kwargs = {"somekey": "somevalue"}
 
-    # Cache bust so we actually do a rebuild each time this is run!
-    with builddir.join("cachebust").open("w") as cachebust:
-        cachebust.write(str(time.time()))
+    # some memory limit value, the important bit is that this value is
+    # later passed to the `build` method of the Docker API client
+    memory_limit = 128 * 1024
 
-    # we don't have an easy way to pass env vars or whatever to
-    # postBuild from here, so we write a file into the repo that is
-    # read by the postBuild script!
-    with open(mem_allocate_mb_file, "w") as f:
-        f.write(str(mem_allocate_mb))
-    r2d = Repo2Docker(build_memory_limit=str(mem_limit) + "M")
-    r2d.initialize()
-    try:
-        r2d.build()
-    except Exception:
-        return False
-    else:
-        return True
+    # Test that the buildpack passes the right arguments to the docker
+    # client in order to enforce the memory limit
+    tmpdir.chdir()
+    for line in BaseImage().build(
+        fake_client,
+        "image-2",
+        memory_limit,
+        {},
+        fake_cache_from,
+        fake_extra_build_kwargs,
+    ):
+        pass
 
-
-@pytest.mark.parametrize(
-    "test, mem_limit, mem_allocate_mb, expected",
-    [
-        ("dockerfile", 128, 256, False),
-        ("dockerfile", 512, 256, True),
-        ("non-dockerfile", 128, 256, False),
-        ("non-dockerfile", 512, 256, True),
-    ],
-)
-def test_memlimit_nondockerfile(tmpdir, test, mem_limit, mem_allocate_mb, expected):
-    """
-    Test if memory limited builds are working for non dockerfile builds
-    """
-    success = does_build(
-        tmpdir, os.path.join(basedir, "memlimit", test), mem_limit, mem_allocate_mb
-    )
-    assert success == expected
+    # check that we pass arguments asking for memory limiting
+    # to the Docker API client
+    args, kwargs = fake_client.build.call_args
+    assert "container_limits" in kwargs
+    assert kwargs["container_limits"] == {
+        "memory": memory_limit,
+        "memswap": memory_limit,
+    }
 
 
 def test_memlimit_same_postbuild():
     """
-    Validate that the postBuild files for dockerfile & nondockerfile are same
+    Validate that the postBuild files for the dockerfile and non-dockerfile
+    tests are the same
 
     Until https://github.com/jupyter/repo2docker/issues/160 gets fixed.
     """
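
For reference, a minimal sketch (not part of this diff) of how the `container_limits` dict asserted by the new test is consumed when calling a real `docker.APIClient.build()`; the build path and image tag below are placeholder values:

    import docker

    # connect to the local Docker daemon via the low-level API client
    client = docker.APIClient(version="auto")

    # same convention as the test above: one limit value passed for both
    # "memory" and "memswap"
    memory_limit = 128 * 1024

    build_logs = client.build(
        path=".",                    # placeholder build context
        tag="example/image:latest",  # placeholder tag
        container_limits={
            "memory": memory_limit,
            "memswap": memory_limit,
        },
        decode=True,  # yield parsed dicts instead of raw JSON lines
    )
    for line in build_logs:
        pass

Setting "memswap" equal to "memory" means the build containers get no additional swap on top of the memory limit, which is exactly the shape of the arguments the mocked test checks for.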