Set limits for how much memory docker build can use

This prevents builds from gobbling all the RAM on a node!
pull/159/head
yuvipanda 2017-11-30 17:14:42 -08:00
rodzic bd3bf07ebe
commit 86c24c1a18
5 zmienionych plików z 88 dodań i 9 usunięć

Wyświetl plik

@ -29,8 +29,7 @@ from .buildpacks import (
PythonBuildPack, DockerBuildPack, LegacyBinderDockerBuildPack,
CondaBuildPack, JuliaBuildPack, Python2BuildPack, BaseImage
)
from .utils import execute_cmd
from .utils import maybe_cleanup
from .utils import execute_cmd, ByteSpecification, maybe_cleanup
from . import __version__
@ -92,6 +91,16 @@ class Repo2Docker(Application):
"""
)
build_memory_limit = ByteSpecification(
0,
help="""
Total memory that can be used by the docker image building process.
Set to 0 for no limits.
""",
config=True
)
def fetch(self, url, ref, checkout_path):
try:
for line in execute_cmd(['git', 'clone', url, checkout_path],
@ -153,6 +162,11 @@ class Repo2Docker(Application):
help="Do not actually build the image. Useful in conjunction with --debug."
)
argparser.add_argument(
'--build-memory-limit',
help='Total Memory that can be used by the docker build process'
)
argparser.add_argument(
'cmd',
nargs=argparse.REMAINDER,
@ -250,6 +264,9 @@ class Repo2Docker(Application):
self.run_cmd = args.cmd
if args.build_memory_limit:
self.build_memory_limit = args.build_memory_limit
def push_image(self):
client = docker.APIClient(version='auto', **kwargs_from_env())
# Build a progress setup for each layer, and only emit per-layer info every 1.5s
@ -347,7 +364,7 @@ class Repo2Docker(Application):
if self.build:
self.log.info('Using %s builder\n', bp.name,
extra=dict(phase='building'))
for l in picked_buildpack.build(self.output_image_spec):
for l in picked_buildpack.build(self.output_image_spec, self.build_memory_limit):
if 'stream' in l:
self.log.info(l['stream'],
extra=dict(phase='building'))

Wyświetl plik

@ -356,7 +356,7 @@ class BuildPack(LoggingConfigurable):
post_build_scripts=self.post_build_scripts,
)
def build(self, image_spec):
def build(self, image_spec, memory_limit):
tarf = io.BytesIO()
tar = tarfile.open(fileobj=tarf, mode='w')
dockerfile_tarinfo = tarfile.TarInfo("Dockerfile")
@ -389,6 +389,13 @@ class BuildPack(LoggingConfigurable):
tar.close()
tarf.seek(0)
limits = {
# Always disable memory swap for building, since mostly
# nothing good can come of that.
'memoryswap': -1
}
if memory_limit:
limits['memory'] = memory_limit
client = docker.APIClient(version='auto', **docker.utils.kwargs_from_env())
for line in client.build(
fileobj=tarf,
@ -397,7 +404,8 @@ class BuildPack(LoggingConfigurable):
buildargs={},
decode=True,
forcerm=True,
rm=True
rm=True,
container_limits=limits
):
yield line

Wyświetl plik

@ -18,7 +18,14 @@ class DockerBuildPack(BuildPack):
with open(Dockerfile) as f:
return f.read()
def build(self, image_spec):
def build(self, image_spec, memory_limit):
limits = {
# Always disable memory swap for building, since mostly
# nothing good can come of that.
'memoryswap': -1
}
if memory_limit:
limits['memory'] = memory_limit
client = docker.APIClient(version='auto', **docker.utils.kwargs_from_env())
for line in client.build(
path=os.getcwd(),
@ -27,6 +34,7 @@ class DockerBuildPack(BuildPack):
buildargs={},
decode=True,
forcerm=True,
rm=True
rm=True,
container_limits=limits
):
yield line

Wyświetl plik

@ -31,10 +31,10 @@ class LegacyBinderDockerBuildPack(DockerBuildPack):
with open('Dockerfile') as f:
return f.read() + self.dockerfile_appendix
def build(self, image_spec):
def build(self, image_spec, memory_limit):
    """
    Write the rendered legacy Dockerfile to disk, then hand the actual
    image build (including the memory limit) off to the parent build pack.
    """
    rendered_dockerfile = self.render()
    with open(self.dockerfile, 'w') as dockerfile:
        dockerfile.write(rendered_dockerfile)
    return super().build(image_spec, memory_limit)
def detect(self):
try:

Wyświetl plik

@ -3,6 +3,7 @@ from functools import partial
import shutil
import subprocess

from traitlets import Integer, TraitError
def execute_cmd(cmd, capture=False, **kwargs):
"""
@ -51,3 +52,48 @@ def maybe_cleanup(path, cleanup=False):
yield
if cleanup:
shutil.rmtree(path, ignore_errors=True)
class ByteSpecification(Integer):
    """
    Allow easily specifying bytes in units of 1024 with suffixes.

    Suffixes allowed are:
      - K -> Kilobyte
      - M -> Megabyte
      - G -> Gigabyte
      - T -> Terabyte

    Stolen from JupyterHub
    """

    # Multipliers for each accepted unit suffix (powers of 1024, not 1000).
    UNIT_SUFFIXES = {
        'K': 1024,
        'M': 1024 * 1024,
        'G': 1024 * 1024 * 1024,
        'T': 1024 * 1024 * 1024 * 1024,
    }

    # Default to allowing None as a value
    allow_none = True

    def validate(self, obj, value):
        """
        Validate that the passed in value is a valid memory specification.

        A pure int (or float) is taken as a byte count directly. A string
        must be a number followed by one of the suffixes in UNIT_SUFFIXES
        and is converted into the equivalent pure byte value.

        Raises TraitError for anything that is neither.
        """
        # Plain numbers are already byte counts; normalize to int.
        if isinstance(value, (int, float)):
            return int(value)

        error_msg = (
            '{val} is not a valid memory specification. '
            'Must be an int or a string with suffix K, M, G, T'.format(val=value)
        )
        try:
            num = float(value[:-1])
        # TypeError covers non-sliceable values (e.g. a list); without it
        # they would escape as a raw TypeError instead of a TraitError.
        except (ValueError, TypeError):
            raise TraitError(error_msg)

        suffix = value[-1]
        if suffix not in self.UNIT_SUFFIXES:
            raise TraitError(error_msg)
        # num is already a float; no need to convert again before scaling.
        return int(num * self.UNIT_SUFFIXES[suffix])