"""repo2docker: convert git repositories into jupyter-suitable docker images

Images produced by repo2docker can be used with Jupyter notebooks standalone
or with BinderHub.

Usage:

    python -m repo2docker https://github.com/you/your-repo
"""
|
2018-02-05 17:25:12 +00:00
|
|
|
import argparse
|
2017-05-09 08:37:19 +00:00
|
|
|
import json
|
2018-02-05 17:25:12 +00:00
|
|
|
import sys
|
2017-05-16 01:54:51 +00:00
|
|
|
import logging
|
2018-02-05 17:25:12 +00:00
|
|
|
import os
|
2017-12-21 22:30:10 +00:00
|
|
|
import pwd
|
2018-02-05 17:25:12 +00:00
|
|
|
import subprocess
|
|
|
|
import tempfile
|
|
|
|
import time
|
2017-05-09 08:37:19 +00:00
|
|
|
|
|
|
|
import docker
|
2018-07-20 09:45:01 +00:00
|
|
|
from urllib.parse import urlparse
|
2017-05-22 17:29:48 +00:00
|
|
|
from docker.utils import kwargs_from_env
|
2018-01-10 02:39:07 +00:00
|
|
|
from docker.errors import DockerException
|
2018-02-05 17:25:12 +00:00
|
|
|
import escapism
|
|
|
|
from pythonjsonlogger import jsonlogger
|
2017-05-09 08:37:19 +00:00
|
|
|
|
2018-02-05 23:15:49 +00:00
|
|
|
from traitlets import Any, Dict, Int, List, Unicode, default
|
2018-02-05 17:25:12 +00:00
|
|
|
from traitlets.config import Application
|
2017-05-09 08:37:19 +00:00
|
|
|
|
2018-02-05 17:25:12 +00:00
|
|
|
from . import __version__
|
2017-11-30 07:20:24 +00:00
|
|
|
from .buildpacks import (
|
2017-11-01 20:15:27 +00:00
|
|
|
PythonBuildPack, DockerBuildPack, LegacyBinderDockerBuildPack,
|
2018-04-10 13:44:03 +00:00
|
|
|
CondaBuildPack, JuliaBuildPack, BaseImage,
|
2018-02-02 06:08:08 +00:00
|
|
|
RBuildPack
|
2017-05-25 22:15:00 +00:00
|
|
|
)
|
2018-02-05 17:25:12 +00:00
|
|
|
from .utils import (
|
|
|
|
execute_cmd, ByteSpecification, maybe_cleanup, is_valid_docker_image_name,
|
|
|
|
validate_and_generate_port_mapping
|
|
|
|
)
|
2017-07-04 17:28:23 +00:00
|
|
|
|
|
|
|
|
2017-05-22 23:22:36 +00:00
|
|
|
class Repo2Docker(Application):
    """An application for converting git repositories to docker images"""
    # Application name used by traitlets for config-file / env-var prefixes
    name = 'jupyter-repo2docker'
    # Version reported by --version; sourced from the package itself
    version = __version__
    # Reuse the module docstring as the CLI description
    description = __doc__
|
2017-05-24 00:56:03 +00:00
|
|
|
|
2017-05-24 21:11:37 +00:00
|
|
|
@default('log_level')
|
|
|
|
def _default_log_level(self):
|
2018-02-07 01:25:44 +00:00
|
|
|
"""The application's default log level"""
|
2017-05-24 21:11:37 +00:00
|
|
|
return logging.INFO
|
2017-05-09 08:37:19 +00:00
|
|
|
|
|
|
|
    # Where remote repositories get cloned before building.
    git_workdir = Unicode(
        None,
        config=True,
        allow_none=True,  # None means "use a fresh tempfile.mkdtemp dir"
        help="""
        Working directory to use for check out of git repositories.

        The default is to use the system's temporary directory. Should be
        somewhere ephemeral, such as /tmp.
        """
    )
|
|
|
|
|
|
|
|
    # Order matters: the first buildpack whose detect() returns True is used,
    # so more specific packs (Dockerfile-based) come before generic ones.
    buildpacks = List(
        [
            LegacyBinderDockerBuildPack,
            DockerBuildPack,
            JuliaBuildPack,
            RBuildPack,
            CondaBuildPack,
            PythonBuildPack,
        ],
        config=True,
        help="""
        Ordered list of BuildPacks to try when building a git repository.
        """
    )
|
|
|
|
|
2018-02-01 11:19:06 +00:00
|
|
|
    # Fallback used in start() when no buildpack's detect() matches the repo.
    default_buildpack = Any(
        PythonBuildPack,
        config=True,
        help="""
        The default build pack to use when no other buildpacks are found.
        """
    )
|
|
|
|
|
2017-12-01 01:14:42 +00:00
|
|
|
    # Memory cap passed through to the docker build; ByteSpecification
    # accepts human-readable suffixes (e.g. "1G") per the utils helper.
    build_memory_limit = ByteSpecification(
        0,
        help="""
        Total memory that can be used by the docker image building process.

        Set to 0 for no limits.
        """,
        config=True
    )
|
|
|
|
|
2017-12-19 19:02:27 +00:00
|
|
|
    # Host-path -> container-path mapping; populated from repeated -v flags
    # in initialize() and consumed by start_container().
    volumes = Dict(
        {},
        help="""
        Volumes to mount when running the container.

        Only used when running, not during build process!

        Use a key-value pair, with the key being the volume source &
        value being the destination volume.

        Both source and destination can be relative. Source is resolved
        relative to the current working directory on the host, and
        destination is resolved relative to the working directory of the
        image - ($HOME by default)
        """,
        config=True
    )
|
|
|
|
|
2017-12-19 20:46:22 +00:00
|
|
|
    # Passed to the buildpack as the NB_UID build arg in start().
    user_id = Int(
        help="""
        UID of the user to create inside the built image.

        Should be a uid that is not currently used by anything in the image.
        Defaults to uid of currently running user, since that is the most
        common case when running r2d manually.

        Might not affect Dockerfile builds.
        """,
        config=True
    )
|
|
|
|
|
2017-12-21 22:01:22 +00:00
|
|
|
@default('user_id')
|
|
|
|
def _user_id_default(self):
|
|
|
|
"""
|
|
|
|
Default user_id to current running user.
|
|
|
|
"""
|
|
|
|
return os.geteuid()
|
|
|
|
|
2017-12-19 20:46:22 +00:00
|
|
|
    # Passed to the buildpack as the NB_USER build arg in start().
    user_name = Unicode(
        'jovyan',
        help="""
        Username of the user to create inside the built image.

        Should be a username that is not currently used by anything in the
        image, and should conform to the restrictions on user names for Linux.

        Defaults to username of currently running user, since that is the most
        common case when running repo2docker manually.
        """,
        config=True
    )
|
|
|
|
|
2017-12-21 22:01:22 +00:00
|
|
|
@default('user_name')
|
|
|
|
def _user_name_default(self):
|
|
|
|
"""
|
|
|
|
Default user_name to current running user.
|
|
|
|
"""
|
|
|
|
return pwd.getpwuid(os.getuid()).pw_name
|
|
|
|
|
2018-02-09 10:54:55 +00:00
|
|
|
    # Extra Dockerfile snippet appended to the generated Dockerfile; set via
    # --appendix or config, then copied onto the picked buildpack in start().
    appendix = Unicode(
        config=True,
        help="""
        Appendix of Dockerfile commands to run at the end of the build.

        Can be used to customize the resulting image after all
        standard build steps finish.
        """
    )
|
|
|
|
|
2017-05-23 03:26:27 +00:00
|
|
|
    def fetch(self, url, ref, checkout_path):
        """Check out a repo using url and ref to the checkout_path location

        Clones recursively (submodules included), then hard-resets to ``ref``
        if one was given. Exits the process with status 1 on any git failure.
        """
        try:
            # execute_cmd streams git output line by line; capture=True when
            # emitting JSON logs so output goes through the logger
            for line in execute_cmd(['git', 'clone', '--recursive', url, checkout_path],
                                    capture=self.json_logs):
                self.log.info(line, extra=dict(phase='fetching'))
        except subprocess.CalledProcessError:
            self.log.error('Failed to clone repository!',
                           extra=dict(phase='failed'))
            sys.exit(1)

        if ref:
            try:
                for line in execute_cmd(['git', 'reset', '--hard', ref],
                                        cwd=checkout_path,
                                        capture=self.json_logs):
                    self.log.info(line, extra=dict(phase='fetching'))
            except subprocess.CalledProcessError:
                self.log.error('Failed to check out ref %s', ref,
                               extra=dict(phase='failed'))
                sys.exit(1)
|
2017-12-21 03:41:20 +00:00
|
|
|
def validate_image_name(self, image_name):
|
|
|
|
"""
|
2018-02-05 23:15:49 +00:00
|
|
|
Validate image_name read by argparse
|
|
|
|
|
2018-03-15 19:28:11 +00:00
|
|
|
Note: Container names must start with an alphanumeric character and
|
|
|
|
can then use _ . or - in addition to alphanumeric.
|
|
|
|
[a-zA-Z0-9][a-zA-Z0-9_.-]+
|
2017-12-22 11:38:58 +00:00
|
|
|
|
|
|
|
Args:
|
2017-12-23 03:45:16 +00:00
|
|
|
image_name (string): argument read by the argument parser
|
2017-12-22 11:38:58 +00:00
|
|
|
|
|
|
|
Returns:
|
|
|
|
unmodified image_name
|
|
|
|
|
|
|
|
Raises:
|
2018-03-15 19:28:11 +00:00
|
|
|
ArgumentTypeError: if image_name contains characters that do not
|
|
|
|
meet the logic that container names must start
|
|
|
|
with an alphanumeric character and can then
|
|
|
|
use _ . or - in addition to alphanumeric.
|
|
|
|
[a-zA-Z0-9][a-zA-Z0-9_.-]+
|
2017-12-21 03:41:20 +00:00
|
|
|
"""
|
2017-12-23 03:45:16 +00:00
|
|
|
if not is_valid_docker_image_name(image_name):
|
2018-03-15 19:28:11 +00:00
|
|
|
msg = ("%r is not a valid docker image name. Image name"
|
|
|
|
"must start with an alphanumeric character and"
|
|
|
|
"can then use _ . or - in addition to alphanumeric." % image_name)
|
2017-12-21 03:41:20 +00:00
|
|
|
raise argparse.ArgumentTypeError(msg)
|
|
|
|
return image_name
|
|
|
|
|
2017-07-29 06:46:04 +00:00
|
|
|
    def get_argparser(self):
        """Get arguments that may be used by repo2docker

        Returns a fully populated argparse.ArgumentParser; the parsed
        namespace is consumed by initialize().
        """
        argparser = argparse.ArgumentParser()

        argparser.add_argument(
            '--config',
            default='repo2docker_config.py',
            help="Path to config file for repo2docker"
        )

        argparser.add_argument(
            '--json-logs',
            default=False,
            action='store_true',
            help='Emit JSON logs instead of human readable logs'
        )

        # Positional: the repository to build (local path or git URL)
        argparser.add_argument(
            'repo',
            help=('Path to repository that should be built. Could be '
                  'local path or a git URL.')
        )

        argparser.add_argument(
            '--image-name',
            help=('Name of image to be built. If unspecified will be '
                  'autogenerated'),
            # argparse calls this validator for every supplied value
            type=self.validate_image_name
        )

        argparser.add_argument(
            '--ref',
            help='If building a git url, which ref to check out'
        )

        argparser.add_argument(
            '--debug',
            help="Turn on debug logging",
            action='store_true',
        )

        argparser.add_argument(
            '--no-build',
            dest='build',
            action='store_false',
            help=('Do not actually build the image. Useful in conjunction '
                  'with --debug.')
        )

        argparser.add_argument(
            '--build-memory-limit',
            help='Total Memory that can be used by the docker build process'
        )

        # REMAINDER collects everything after the repo as the run command
        argparser.add_argument(
            'cmd',
            nargs=argparse.REMAINDER,
            help='Custom command to run after building container'
        )

        argparser.add_argument(
            '--no-run',
            dest='run',
            action='store_false',
            help='Do not run container after it has been built'
        )

        argparser.add_argument(
            '--publish', '-p',
            dest='ports',
            action='append',
            help=('Specify port mappings for the image. Needs a command to '
                  'run in the container.')
        )

        argparser.add_argument(
            '--publish-all', '-P',
            dest='all_ports',
            action='store_true',
            help='Publish all exposed ports to random host ports.'
        )

        argparser.add_argument(
            '--no-clean',
            dest='clean',
            action='store_false',
            help="Don't clean up remote checkouts after we are done"
        )

        argparser.add_argument(
            '--push',
            dest='push',
            action='store_true',
            help='Push docker image to repository'
        )

        argparser.add_argument(
            '--volume', '-v',
            dest='volumes',
            action='append',
            help='Volumes to mount inside the container, in form src:dest',
            default=[]
        )

        argparser.add_argument(
            '--user-id',
            help='User ID of the primary user in the image',
            type=int
        )

        argparser.add_argument(
            '--user-name',
            help='Username of the primary user in the image',
        )

        argparser.add_argument(
            '--env', '-e',
            dest='environment',
            action='append',
            help='Environment variables to define at container run time',
            default=[]
        )

        argparser.add_argument(
            '--appendix',
            type=str,
            # Keep CLI help in sync with the trait's help text
            help=self.traits()['appendix'].help,
        )

        return argparser
|
|
|
|
|
2017-10-17 12:04:16 +00:00
|
|
|
def json_excepthook(self, etype, evalue, traceback):
|
|
|
|
"""Called on an uncaught exception when using json logging
|
|
|
|
|
|
|
|
Avoids non-JSON output on errors when using --json-logs
|
|
|
|
"""
|
|
|
|
self.log.error("Error during build: %s", evalue,
|
2017-11-13 07:26:58 +00:00
|
|
|
exc_info=(etype, evalue, traceback),
|
|
|
|
extra=dict(phase='failed'))
|
2017-10-17 12:04:16 +00:00
|
|
|
|
2018-02-09 12:46:35 +00:00
|
|
|
    def initialize(self, argv=None):
        """Init repo2docker configuration before start

        Parses CLI arguments (defaulting to sys.argv[1:]), loads the config
        file, configures logging, and validates mutually dependent options,
        exiting with status 1 on invalid combinations.
        """
        if argv is None:
            argv = sys.argv[1:]
        args = self.get_argparser().parse_args(argv)

        if args.debug:
            self.log_level = logging.DEBUG

        self.load_config_file(args.config)
        if args.appendix:
            # CLI value wins over anything set in the config file
            self.appendix = args.appendix

        if os.path.exists(args.repo):
            # Let's treat this as a local directory we are building
            self.repo_type = 'local'
            self.repo = args.repo
            self.ref = None
            # never delete a user's local checkout
            self.cleanup_checkout = False
        else:
            self.repo_type = 'remote'
            self.repo = args.repo
            self.ref = args.ref
            self.cleanup_checkout = args.clean

        if args.json_logs:
            # register JSON excepthook to avoid non-JSON output on errors
            sys.excepthook = self.json_excepthook
            # Need to reset existing handlers, or we repeat messages
            logHandler = logging.StreamHandler()
            formatter = jsonlogger.JsonFormatter()
            logHandler.setFormatter(formatter)
            self.log = logging.getLogger("repo2docker")
            self.log.handlers = []
            self.log.addHandler(logHandler)
            self.log.setLevel(logging.INFO)
        else:
            # due to json logger stuff above,
            # our log messages include carriage returns, newlines, etc.
            # remove the additional newline from the stream handler
            self.log.handlers[0].terminator = ''
            # We don't want a [Repo2Docker] on all messages
            self.log.handlers[0].formatter = logging.Formatter(
                fmt='%(message)s'
            )

        if args.image_name:
            self.output_image_spec = args.image_name
        else:
            # Attempt to set a sane default!
            # HACK: Provide something more descriptive?
            self.output_image_spec = (
                'r2d' +
                escapism.escape(self.repo, escape_char='-').lower() +
                str(int(time.time()))
            )

        self.push = args.push
        self.run = args.run
        self.json_logs = args.json_logs

        self.build = args.build
        if not self.build:
            # Can't push nor run if we aren't building
            self.run = False
            self.push = False

        # check against self.run and not args.run as self.run is false on
        # --no-build
        if args.volumes and not self.run:
            # Can't mount if we aren't running
            print('To Mount volumes with -v, you also need to run the '
                  'container')
            sys.exit(1)

        for v in args.volumes:
            # each -v value is "src:dest"
            src, dest = v.split(':')
            self.volumes[src] = dest

        self.run_cmd = args.cmd

        if args.all_ports and not self.run:
            print('To publish user defined port mappings, the container must '
                  'also be run')
            sys.exit(1)

        if args.ports and not self.run:
            print('To publish user defined port mappings, the container must '
                  'also be run')
            sys.exit(1)

        if args.ports and not self.run_cmd:
            print('To publish user defined port mapping, user must specify '
                  'the command to run in the container')
            sys.exit(1)

        self.ports = validate_and_generate_port_mapping(args.ports)
        self.all_ports = args.all_ports

        if args.user_id:
            self.user_id = args.user_id
        if args.user_name:
            self.user_name = args.user_name

        if args.build_memory_limit:
            self.build_memory_limit = args.build_memory_limit

        if args.environment and not self.run:
            print('To specify environment variables, you also need to run '
                  'the container')
            sys.exit(1)

        self.environment = args.environment
|
|
|
|
|
2017-05-23 05:55:17 +00:00
|
|
|
    def push_image(self):
        """Push docker image to registry

        Streams the push progress through the logger and exits with status 1
        if the registry reports an error.
        """
        client = docker.APIClient(version='auto', **kwargs_from_env())
        # Build a progress setup for each layer, and only emit per-layer
        # info every 1.5s
        layers = {}
        last_emit_time = time.time()
        for line in client.push(self.output_image_spec, stream=True):
            progress = json.loads(line.decode('utf-8'))
            if 'error' in progress:
                self.log.error(progress['error'], extra=dict(phase='failed'))
                sys.exit(1)
            if 'id' not in progress:
                # global status line, not tied to a layer
                continue
            if 'progressDetail' in progress and progress['progressDetail']:
                layers[progress['id']] = progress['progressDetail']
            else:
                layers[progress['id']] = progress['status']
            if time.time() - last_emit_time > 1.5:
                self.log.info('Pushing image\n',
                              extra=dict(progress=layers, phase='pushing'))
                last_emit_time = time.time()
|
|
|
|
|
|
|
|
def run_image(self):
|
2018-06-15 11:08:20 +00:00
|
|
|
"""Run docker container from built image
|
|
|
|
|
|
|
|
and wait for it to finish.
|
|
|
|
"""
|
|
|
|
container = self.start_container()
|
|
|
|
self.wait_for_container(container)
|
|
|
|
|
|
|
|
    def start_container(self):
        """Start docker container from built image

        Returns running container
        """
        client = docker.from_env(version='auto')

        # Derive the advertised hostname from DOCKER_HOST when the daemon
        # is remote; otherwise assume localhost
        docker_host = os.environ.get('DOCKER_HOST')
        if docker_host:
            host_name = urlparse(docker_host).hostname
        else:
            host_name = '127.0.0.1'
        self.hostname = host_name

        if not self.run_cmd:
            # No user command: launch a notebook server on a free port
            port = str(self._get_free_port())
            self.port = port
            # To use the option --NotebookApp.custom_display_url
            # make sure the base-notebook image is updated:
            # docker pull jupyter/base-notebook
            run_cmd = [
                'jupyter', 'notebook',
                '--ip', '0.0.0.0',
                '--port', port,
                "--NotebookApp.custom_display_url=http://{}:{}".format(host_name, port),
            ]
            ports = {'%s/tcp' % port: port}
        else:
            # run_cmd given by user, if port is also given then pass it on
            run_cmd = self.run_cmd
            if self.ports:
                ports = self.ports
            else:
                ports = {}
        # store ports on self so they can be retrieved in tests
        self.ports = ports

        container_volumes = {}
        if self.volumes:
            # Need the low-level API to inspect the image's working dir so
            # relative mount destinations can be resolved against it
            api_client = docker.APIClient(
                version='auto',
                **docker.utils.kwargs_from_env()
            )
            image = api_client.inspect_image(self.output_image_spec)
            image_workdir = image['ContainerConfig']['WorkingDir']

            for k, v in self.volumes.items():
                container_volumes[os.path.abspath(k)] = {
                    'bind': v if v.startswith('/') else os.path.join(image_workdir, v),
                    'mode': 'rw'
                }

        container = client.containers.run(
            self.output_image_spec,
            publish_all_ports=self.all_ports,
            ports=ports,
            detach=True,
            command=run_cmd,
            volumes=container_volumes,
            environment=self.environment
        )
        # Poll until docker reports the container actually started
        while container.status == 'created':
            time.sleep(0.5)
            container.reload()

        return container
|
|
|
|
|
|
|
|
    def wait_for_container(self, container):
        """Wait for a container to finish

        Displaying logs while it's running

        Always removes the container; propagates a non-zero container exit
        code via sys.exit.
        """
        try:
            for line in container.logs(stream=True):
                self.log.info(line.decode('utf-8'),
                              extra=dict(phase='running'))
        finally:
            # Runs even if the log stream is interrupted (e.g. Ctrl-C)
            container.reload()
            if container.status == 'running':
                self.log.info('Stopping container...\n',
                              extra=dict(phase='running'))
                container.kill()
            exit_code = container.attrs['State']['ExitCode']
            container.remove()
            if exit_code:
                sys.exit(exit_code)
|
2017-05-23 05:55:17 +00:00
|
|
|
|
2017-05-24 01:08:53 +00:00
|
|
|
def _get_free_port(self):
|
|
|
|
"""
|
|
|
|
Hacky method to get a free random port on local host
|
|
|
|
"""
|
|
|
|
import socket
|
|
|
|
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
2017-11-13 07:26:58 +00:00
|
|
|
s.bind(("", 0))
|
2017-05-24 01:08:53 +00:00
|
|
|
port = s.getsockname()[1]
|
|
|
|
s.close()
|
|
|
|
return port
|
|
|
|
|
2017-05-24 00:56:03 +00:00
|
|
|
def start(self):
|
2018-02-05 23:15:49 +00:00
|
|
|
"""Start execution of repo2docker"""
|
2018-01-10 02:39:07 +00:00
|
|
|
# Check if r2d can connect to docker daemon
|
|
|
|
if self.build:
|
|
|
|
try:
|
|
|
|
client = docker.APIClient(version='auto',
|
|
|
|
**kwargs_from_env())
|
|
|
|
del client
|
|
|
|
except DockerException as e:
|
2018-02-05 23:15:49 +00:00
|
|
|
print("Docker client initialization error. Check if docker is"
|
|
|
|
" running on the host.")
|
2018-01-10 02:39:07 +00:00
|
|
|
print(e)
|
|
|
|
if self.log_level == logging.DEBUG:
|
|
|
|
raise e
|
|
|
|
sys.exit(1)
|
|
|
|
|
2017-07-29 21:17:32 +00:00
|
|
|
if self.repo_type == 'local':
|
|
|
|
checkout_path = self.repo
|
|
|
|
else:
|
2017-11-01 20:15:27 +00:00
|
|
|
if self.git_workdir is None:
|
|
|
|
checkout_path = tempfile.mkdtemp(prefix='repo2docker')
|
|
|
|
else:
|
|
|
|
checkout_path = self.git_workdir
|
|
|
|
|
|
|
|
# keep as much as possible in the context manager to make sure we
|
|
|
|
# cleanup if things go wrong
|
|
|
|
with maybe_cleanup(checkout_path, self.cleanup_checkout):
|
|
|
|
if self.repo_type == 'remote':
|
2018-02-05 23:15:49 +00:00
|
|
|
self.fetch(self.repo, self.ref, checkout_path)
|
2017-11-01 20:15:27 +00:00
|
|
|
|
|
|
|
os.chdir(checkout_path)
|
|
|
|
|
2018-02-09 12:14:34 +00:00
|
|
|
for BP in self.buildpacks:
|
|
|
|
bp = BP()
|
2017-11-01 20:15:27 +00:00
|
|
|
if bp.detect():
|
|
|
|
picked_buildpack = bp
|
|
|
|
break
|
2018-02-09 12:14:34 +00:00
|
|
|
else:
|
|
|
|
picked_buildpack = self.default_buildpack()
|
2017-11-01 20:15:27 +00:00
|
|
|
|
2018-02-09 10:54:55 +00:00
|
|
|
picked_buildpack.appendix = self.appendix
|
|
|
|
|
2017-11-01 20:15:27 +00:00
|
|
|
self.log.debug(picked_buildpack.render(),
|
|
|
|
extra=dict(phase='building'))
|
|
|
|
|
|
|
|
if self.build:
|
2017-12-19 20:46:22 +00:00
|
|
|
build_args = {
|
|
|
|
'NB_USER': self.user_name,
|
|
|
|
'NB_UID': str(self.user_id)
|
|
|
|
}
|
2018-02-01 11:19:06 +00:00
|
|
|
self.log.info('Using %s builder\n', bp.__class__.__name__,
|
2017-11-01 20:15:27 +00:00
|
|
|
extra=dict(phase='building'))
|
2018-02-05 23:15:49 +00:00
|
|
|
|
|
|
|
for l in picked_buildpack.build(self.output_image_spec,
|
|
|
|
self.build_memory_limit, build_args):
|
2017-11-01 20:15:27 +00:00
|
|
|
if 'stream' in l:
|
|
|
|
self.log.info(l['stream'],
|
|
|
|
extra=dict(phase='building'))
|
|
|
|
elif 'error' in l:
|
|
|
|
self.log.info(l['error'], extra=dict(phase='failure'))
|
|
|
|
sys.exit(1)
|
|
|
|
elif 'status' in l:
|
|
|
|
self.log.info('Fetching base image...\r',
|
|
|
|
extra=dict(phase='building'))
|
|
|
|
else:
|
|
|
|
self.log.info(json.dumps(l),
|
|
|
|
extra=dict(phase='building'))
|
2017-05-23 05:55:17 +00:00
|
|
|
|
2017-05-23 05:17:02 +00:00
|
|
|
if self.push:
|
2017-05-23 05:55:17 +00:00
|
|
|
self.push_image()
|
2017-05-23 05:17:02 +00:00
|
|
|
|
2017-05-23 05:55:17 +00:00
|
|
|
if self.run:
|
|
|
|
self.run_image()
|