Apply auto formatting

Apologies to anyone finding this commit via git blame or log

Run the autoformatting with:

    pre-commit run --all-files
pull/699/head
Tim Head 2019-05-31 11:10:17 +02:00
parent 91fed12e87
commit 8b004e06dc
55 changed files with 1599 additions and 1447 deletions
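The quote normalization and call re-wrapping in the hunks below are characteristic of the black formatter; as a sketch (assuming black is the hook behind the pre-commit config, which is not itself shown in this diff), the same change can be reproduced from Python:

    # Reproduce the reformatting applied by this commit (assumes `pip install black`;
    # black.format_str and black.FileMode are part of black's public API).
    import black

    src = "__version__ = get_versions()['version']\n"
    print(black.format_str(src, mode=black.FileMode()), end="")
    # -> __version__ = get_versions()["version"]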

View file

@ -1,5 +1,6 @@
from ._version import get_versions
__version__ = get_versions()['version']
__version__ = get_versions()["version"]
del get_versions
from .app import Repo2Docker

View file

@ -30,177 +30,177 @@ def validate_image_name(image_name):
[a-zA-Z0-9][a-zA-Z0-9_.-]+
"""
if not is_valid_docker_image_name(image_name):
msg = ("%r is not a valid docker image name. Image name"
"must start with an alphanumeric character and"
"can then use _ . or - in addition to alphanumeric." % image_name)
msg = (
"%r is not a valid docker image name. Image name"
"must start with an alphanumeric character and"
"can then use _ . or - in addition to alphanumeric." % image_name
)
raise argparse.ArgumentTypeError(msg)
return image_name
def get_argparser():
"""Get arguments that may be used by repo2docker"""
argparser = argparse.ArgumentParser(
description='Fetch a repository and build a container image',
description="Fetch a repository and build a container image"
)
argparser.add_argument(
'--config',
default='repo2docker_config.py',
help="Path to config file for repo2docker"
"--config",
default="repo2docker_config.py",
help="Path to config file for repo2docker",
)
argparser.add_argument(
'--json-logs',
"--json-logs",
default=False,
action='store_true',
help='Emit JSON logs instead of human readable logs'
action="store_true",
help="Emit JSON logs instead of human readable logs",
)
argparser.add_argument(
'repo',
help=('Path to repository that should be built. Could be '
'local path or a git URL.')
"repo",
help=(
"Path to repository that should be built. Could be "
"local path or a git URL."
),
)
argparser.add_argument(
'--image-name',
help=('Name of image to be built. If unspecified will be '
'autogenerated'),
type=validate_image_name
"--image-name",
help=("Name of image to be built. If unspecified will be " "autogenerated"),
type=validate_image_name,
)
argparser.add_argument(
'--ref',
help=('If building a git url, which reference to check out. '
'E.g., `master`.')
"--ref",
help=(
"If building a git url, which reference to check out. " "E.g., `master`."
),
)
argparser.add_argument("--debug", help="Turn on debug logging", action="store_true")
argparser.add_argument(
"--no-build",
dest="build",
action="store_false",
help=(
"Do not actually build the image. Useful in conjunction " "with --debug."
),
)
argparser.add_argument(
'--debug',
help="Turn on debug logging",
action='store_true',
"--build-memory-limit",
help="Total Memory that can be used by the docker build process",
)
argparser.add_argument(
'--no-build',
dest='build',
action='store_false',
help=('Do not actually build the image. Useful in conjunction '
'with --debug.')
)
argparser.add_argument(
'--build-memory-limit',
help='Total Memory that can be used by the docker build process'
)
argparser.add_argument(
'cmd',
"cmd",
nargs=argparse.REMAINDER,
help='Custom command to run after building container'
help="Custom command to run after building container",
)
argparser.add_argument(
'--no-run',
dest='run',
action='store_false',
help='Do not run container after it has been built'
"--no-run",
dest="run",
action="store_false",
help="Do not run container after it has been built",
)
argparser.add_argument(
'--publish', '-p',
dest='ports',
action='append',
help=('Specify port mappings for the image. Needs a command to '
'run in the container.')
"--publish",
"-p",
dest="ports",
action="append",
help=(
"Specify port mappings for the image. Needs a command to "
"run in the container."
),
)
argparser.add_argument(
'--publish-all', '-P',
dest='all_ports',
action='store_true',
help='Publish all exposed ports to random host ports.'
"--publish-all",
"-P",
dest="all_ports",
action="store_true",
help="Publish all exposed ports to random host ports.",
)
argparser.add_argument(
'--no-clean',
dest='clean',
action='store_false',
help="Don't clean up remote checkouts after we are done"
"--no-clean",
dest="clean",
action="store_false",
help="Don't clean up remote checkouts after we are done",
)
argparser.add_argument(
'--push',
dest='push',
action='store_true',
help='Push docker image to repository'
"--push",
dest="push",
action="store_true",
help="Push docker image to repository",
)
argparser.add_argument(
'--volume', '-v',
dest='volumes',
action='append',
help='Volumes to mount inside the container, in form src:dest',
default=[]
)
argparser.add_argument(
'--user-id',
help='User ID of the primary user in the image',
type=int
)
argparser.add_argument(
'--user-name',
help='Username of the primary user in the image',
)
argparser.add_argument(
'--env', '-e',
dest='environment',
action='append',
help='Environment variables to define at container run time',
default=[]
)
argparser.add_argument(
'--editable', '-E',
dest='editable',
action='store_true',
help='Use the local repository in edit mode',
)
argparser.add_argument(
'--target-repo-dir',
help=Repo2Docker.target_repo_dir.help
)
argparser.add_argument(
'--appendix',
type=str,
#help=self.traits()['appendix'].help,
)
argparser.add_argument(
'--subdir',
type=str,
#help=self.traits()['subdir'].help,
)
argparser.add_argument(
'--version',
dest='version',
action='store_true',
help='Print the repo2docker version and exit.'
)
argparser.add_argument(
'--cache-from',
action='append',
"--volume",
"-v",
dest="volumes",
action="append",
help="Volumes to mount inside the container, in form src:dest",
default=[],
)
argparser.add_argument(
"--user-id", help="User ID of the primary user in the image", type=int
)
argparser.add_argument(
"--user-name", help="Username of the primary user in the image"
)
argparser.add_argument(
"--env",
"-e",
dest="environment",
action="append",
help="Environment variables to define at container run time",
default=[],
)
argparser.add_argument(
"--editable",
"-E",
dest="editable",
action="store_true",
help="Use the local repository in edit mode",
)
argparser.add_argument("--target-repo-dir", help=Repo2Docker.target_repo_dir.help)
argparser.add_argument(
"--appendix",
type=str,
# help=self.traits()['appendix'].help,
)
argparser.add_argument(
"--subdir",
type=str,
# help=self.traits()['subdir'].help,
)
argparser.add_argument(
"--version",
dest="version",
action="store_true",
help="Print the repo2docker version and exit.",
)
argparser.add_argument("--cache-from", action="append", default=[])
return argparser
argparser = get_argparser()
@ -210,7 +210,7 @@ def make_r2d(argv=None):
# version must be checked before parse, as repo/cmd are required and
# will spit out an error if allowed to be parsed first.
if '--version' in argv:
if "--version" in argv:
print(__version__)
sys.exit(0)
@ -236,11 +236,13 @@ def make_r2d(argv=None):
# provide content from a local `something.zip` file, which we
# couldn't mount in editable mode
if os.path.isdir(args.repo):
r2d.volumes[os.path.abspath(args.repo)] = '.'
r2d.volumes[os.path.abspath(args.repo)] = "."
else:
r2d.log.error('Cannot mount "{}" in editable mode '
'as it is not a directory'.format(args.repo),
extra=dict(phase='failed'))
r2d.log.error(
'Cannot mount "{}" in editable mode '
"as it is not a directory".format(args.repo),
extra=dict(phase="failed"),
)
sys.exit(1)
if args.image_name:
@ -266,29 +268,32 @@ def make_r2d(argv=None):
# modified r2d.volumes
if r2d.volumes and not r2d.run:
# Can't mount if we aren't running
print('To mount volumes with -v, you also need to run the '
'container')
print("To mount volumes with -v, you also need to run the " "container")
sys.exit(1)
for v in args.volumes:
src, dest = v.split(':')
src, dest = v.split(":")
r2d.volumes[src] = dest
r2d.run_cmd = args.cmd
if args.all_ports and not r2d.run:
print('To publish user defined port mappings, the container must '
'also be run')
print(
"To publish user defined port mappings, the container must " "also be run"
)
sys.exit(1)
if args.ports and not r2d.run:
print('To publish user defined port mappings, the container must '
'also be run')
print(
"To publish user defined port mappings, the container must " "also be run"
)
sys.exit(1)
if args.ports and not r2d.run_cmd:
print('To publish user defined port mapping, user must specify '
'the command to run in the container')
print(
"To publish user defined port mapping, user must specify "
"the command to run in the container"
)
sys.exit(1)
r2d.ports = validate_and_generate_port_mapping(args.ports)
@ -308,8 +313,7 @@ def make_r2d(argv=None):
r2d.build_memory_limit = args.build_memory_limit
if args.environment and not r2d.run:
print('To specify environment variables, you also need to run '
'the container')
print("To specify environment variables, you also need to run " "the container")
sys.exit(1)
if args.subdir:
@ -350,5 +354,5 @@ def main():
sys.exit(1)
if __name__ == '__main__':
if __name__ == "__main__":
main()
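As a usage note for the argparser hunks above, here is a minimal sketch of exercising the parser (argument values are hypothetical; `get_argparser` is the function shown in this file):

    from repo2docker.__main__ import get_argparser

    # "repo" is the only required positional; --no-run stores False into dest="run".
    args = get_argparser().parse_args(
        ["--no-run", "--image-name", "example-image", "https://github.com/org/example"]
    )
    print(args.image_name)  # example-image
    print(args.run)         # False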

View file

@ -1,4 +1,3 @@
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
@ -58,17 +57,18 @@ HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
@ -76,10 +76,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
@ -116,16 +119,22 @@ def versions_from_parentdir(parentdir_prefix, root, verbose):
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@ -181,7 +190,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
@ -190,7 +199,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
@ -198,19 +207,26 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
@ -225,8 +241,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
@ -234,10 +249,19 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
@ -260,17 +284,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
@ -279,10 +302,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
@ -293,13 +318,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
@ -330,8 +355,7 @@ def render_pep440(pieces):
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
@ -445,11 +469,13 @@ def render_git_describe_long(pieces):
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
@ -469,9 +495,13 @@ def render(pieces, style):
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
@ -485,8 +515,7 @@ def get_versions():
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
@ -495,13 +524,16 @@ def get_versions():
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
@ -515,6 +547,10 @@ def get_versions():
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
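To make the reflowed dicts above concrete, this is roughly what `render()` consumes and returns for the default "pep440" style (all values illustrative):

    # Example `pieces` in the shape produced by git_pieces_from_vcs():
    pieces = {
        "error": None,
        "closest-tag": "0.9.0",  # nearest version tag
        "distance": 3,           # commits since that tag
        "short": "8b004e0",      # abbreviated HEAD sha
        "long": "8b004e06dc" + "0" * 30,
        "dirty": False,
        "date": "2019-05-31T11:10:17+0200",
    }
    # render(pieces, "pep440") then returns something like:
    # {"version": "0.9.0+3.g8b004e0", "full-revisionid": pieces["long"],
    #  "dirty": False, "error": None, "date": "2019-05-31T11:10:17+0200"}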

View file

@ -24,13 +24,19 @@ from docker.errors import DockerException
import escapism
from pythonjsonlogger import jsonlogger
from traitlets import Any, Dict, Int, List, Unicode, Bool, default
from traitlets import Any, Dict, Int, List, Unicode, Bool, default
from traitlets.config import Application
from . import __version__
from .buildpacks import (
PythonBuildPack, DockerBuildPack, LegacyBinderDockerBuildPack,
CondaBuildPack, JuliaProjectTomlBuildPack, JuliaRequireBuildPack, RBuildPack, NixBuildPack
PythonBuildPack,
DockerBuildPack,
LegacyBinderDockerBuildPack,
CondaBuildPack,
JuliaProjectTomlBuildPack,
JuliaRequireBuildPack,
RBuildPack,
NixBuildPack,
)
from . import contentproviders
from .utils import ByteSpecification, chdir
@ -38,11 +44,12 @@ from .utils import ByteSpecification, chdir
class Repo2Docker(Application):
"""An application for converting git repositories to docker images"""
name = 'jupyter-repo2docker'
name = "jupyter-repo2docker"
version = __version__
description = __doc__
@default('log_level')
@default("log_level")
def _default_log_level(self):
"""The application's default log level"""
return logging.INFO
@ -56,17 +63,17 @@ class Repo2Docker(Application):
The default is to use the system's temporary directory. Should be
somewhere ephemeral, such as /tmp.
"""
""",
)
subdir = Unicode(
'',
"",
config=True,
help="""
Subdirectory of the git repository to examine.
Defaults to ''.
"""
""",
)
cache_from = List(
@ -78,7 +85,7 @@ class Repo2Docker(Application):
Docker only tries to re-use image layers from images built locally,
not pulled from a registry. We can ask it to explicitly re-use layers
from non-locally built images by through the 'cache_from' parameter.
"""
""",
)
buildpacks = List(
@ -95,7 +102,7 @@ class Repo2Docker(Application):
config=True,
help="""
Ordered list of BuildPacks to try when building a git repository.
"""
""",
)
extra_build_kwargs = Dict(
@ -106,7 +113,7 @@ class Repo2Docker(Application):
to configure the amount of access to CPU resources your container has.
Reference https://docs.docker.com/config/containers/resource_constraints/#cpu
""",
config=True
config=True,
)
extra_run_kwargs = Dict(
@ -117,7 +124,7 @@ class Repo2Docker(Application):
to configure the amount of access to CPU resources your container has.
Reference https://docs.docker.com/config/containers/resource_constraints/#cpu
""",
config=True
config=True,
)
default_buildpack = Any(
@ -125,7 +132,7 @@ class Repo2Docker(Application):
config=True,
help="""
The default build pack to use when no other buildpacks are found.
"""
""",
)
# Git is our content provider of last resort. This is to maintain the
@ -134,16 +141,12 @@ class Repo2Docker(Application):
# detecting if something will successfully `git clone` is very hard if all
# you can do is look at the path/URL to it.
content_providers = List(
[
contentproviders.Local,
contentproviders.Zenodo,
contentproviders.Git,
],
[contentproviders.Local, contentproviders.Zenodo, contentproviders.Git],
config=True,
help="""
Ordered list by priority of ContentProviders to try in turn to fetch
the contents specified by the user.
"""
""",
)
build_memory_limit = ByteSpecification(
@ -153,7 +156,7 @@ class Repo2Docker(Application):
Set to 0 for no limits.
""",
config=True
config=True,
)
volumes = Dict(
@ -171,7 +174,7 @@ class Repo2Docker(Application):
destination is resolved relative to the working directory of the
image - ($HOME by default)
""",
config=True
config=True,
)
user_id = Int(
@ -184,10 +187,10 @@ class Repo2Docker(Application):
Might not affect Dockerfile builds.
""",
config=True
config=True,
)
@default('user_id')
@default("user_id")
def _user_id_default(self):
"""
Default user_id to current running user.
@ -195,7 +198,7 @@ class Repo2Docker(Application):
return os.geteuid()
user_name = Unicode(
'jovyan',
"jovyan",
help="""
Username of the user to create inside the built image.
@ -205,10 +208,10 @@ class Repo2Docker(Application):
Defaults to username of currently running user, since that is the most
common case when running repo2docker manually.
""",
config=True
config=True,
)
@default('user_name')
@default("user_name")
def _user_name_default(self):
"""
Default user_name to current running user.
@ -222,7 +225,7 @@ class Repo2Docker(Application):
Can be used to customize the resulting image after all
standard build steps finish.
"""
""",
)
json_logs = Bool(
@ -232,7 +235,7 @@ class Repo2Docker(Application):
Useful when stdout is consumed by other tools
""",
config=True
config=True,
)
repo = Unicode(
@ -242,7 +245,7 @@ class Repo2Docker(Application):
Could be local path or git URL.
""",
config=True
config=True,
)
ref = Unicode(
@ -254,7 +257,7 @@ class Repo2Docker(Application):
in a local clone before repository is built.
""",
config=True,
allow_none=True
allow_none=True,
)
cleanup_checkout = Bool(
@ -264,7 +267,7 @@ class Repo2Docker(Application):
Useful when repo2docker is doing the git cloning
""",
config=True
config=True,
)
output_image_spec = Unicode(
@ -274,7 +277,7 @@ class Repo2Docker(Application):
Required parameter.
""",
config=True
config=True,
)
push = Bool(
@ -282,7 +285,7 @@ class Repo2Docker(Application):
help="""
Set to true to push docker image after building
""",
config=True
config=True,
)
run = Bool(
@ -290,7 +293,7 @@ class Repo2Docker(Application):
help="""
Run docker image after building
""",
config=True
config=True,
)
# FIXME: Refactor class to be able to do --no-build without needing
@ -300,7 +303,7 @@ class Repo2Docker(Application):
help="""
Do not actually build the docker image, just simulate it.
""",
config=True
config=True,
)
# FIXME: Refactor classes to separate build & run steps
@ -311,7 +314,7 @@ class Repo2Docker(Application):
When left empty, a jupyter notebook is run.
""",
config=True
config=True,
)
all_ports = Bool(
@ -321,7 +324,7 @@ class Repo2Docker(Application):
Equivalent to -P option to docker run
""",
config=True
config=True,
)
ports = Dict(
@ -333,7 +336,7 @@ class Repo2Docker(Application):
{key} refers to port inside container, and {value}
refers to port / host:port in the host
""",
config=True
config=True,
)
environment = List(
@ -343,17 +346,17 @@ class Repo2Docker(Application):
Each item must be a string formatted as KEY=VALUE
""",
config=True
config=True,
)
target_repo_dir = Unicode(
'',
"",
help="""
Path inside the image where contents of the repositories are copied to.
Defaults to ${HOME} if not set
""",
config=True
config=True,
)
def fetch(self, url, ref, checkout_path):
@ -371,29 +374,33 @@ class Repo2Docker(Application):
spec = cp.detect(url, ref=ref)
if spec is not None:
picked_content_provider = cp
self.log.info("Picked {cp} content "
"provider.\n".format(cp=cp.__class__.__name__))
self.log.info(
"Picked {cp} content "
"provider.\n".format(cp=cp.__class__.__name__)
)
break
if picked_content_provider is None:
self.log.error("No matching content provider found for "
"{url}.".format(url=url))
self.log.error(
"No matching content provider found for " "{url}.".format(url=url)
)
for log_line in picked_content_provider.fetch(
spec, checkout_path, yield_output=self.json_logs):
self.log.info(log_line, extra=dict(phase='fetching'))
spec, checkout_path, yield_output=self.json_logs
):
self.log.info(log_line, extra=dict(phase="fetching"))
if not self.output_image_spec:
self.output_image_spec = (
'r2d' + escapism.escape(self.repo, escape_char='-').lower()
)
"r2d" + escapism.escape(self.repo, escape_char="-").lower()
)
# if we are building from a subdirectory include that in the
# image name so we can tell builds from different sub-directories
# apart.
if self.subdir:
self.output_image_spec += (
escapism.escape(self.subdir, escape_char='-').lower()
)
self.output_image_spec += escapism.escape(
self.subdir, escape_char="-"
).lower()
if picked_content_provider.content_id is not None:
self.output_image_spec += picked_content_provider.content_id
else:
@ -404,9 +411,12 @@ class Repo2Docker(Application):
Avoids non-JSON output on errors when using --json-logs
"""
self.log.error("Error during build: %s", evalue,
exc_info=(etype, evalue, traceback),
extra=dict(phase='failed'))
self.log.error(
"Error during build: %s",
evalue,
exc_info=(etype, evalue, traceback),
extra=dict(phase="failed"),
)
def initialize(self):
"""Init repo2docker configuration before start"""
@ -426,11 +436,9 @@ class Repo2Docker(Application):
# due to json logger stuff above,
# our log messages include carriage returns, newlines, etc.
# remove the additional newline from the stream handler
self.log.handlers[0].terminator = ''
self.log.handlers[0].terminator = ""
# We don't want a [Repo2Docker] on all messages
self.log.handlers[0].formatter = logging.Formatter(
fmt='%(message)s'
)
self.log.handlers[0].formatter = logging.Formatter(fmt="%(message)s")
if self.dry_run and (self.run or self.push):
raise ValueError("Cannot push or run image if we are not building it")
@ -440,28 +448,31 @@ class Repo2Docker(Application):
def push_image(self):
"""Push docker image to registry"""
client = docker.APIClient(version='auto', **kwargs_from_env())
client = docker.APIClient(version="auto", **kwargs_from_env())
# Build a progress setup for each layer, and only emit per-layer
# info every 1.5s
layers = {}
last_emit_time = time.time()
for line in client.push(self.output_image_spec, stream=True):
progress = json.loads(line.decode('utf-8'))
if 'error' in progress:
self.log.error(progress['error'], extra=dict(phase='failed'))
raise docker.errors.ImageLoadError(progress['error'])
if 'id' not in progress:
progress = json.loads(line.decode("utf-8"))
if "error" in progress:
self.log.error(progress["error"], extra=dict(phase="failed"))
raise docker.errors.ImageLoadError(progress["error"])
if "id" not in progress:
continue
if 'progressDetail' in progress and progress['progressDetail']:
layers[progress['id']] = progress['progressDetail']
if "progressDetail" in progress and progress["progressDetail"]:
layers[progress["id"]] = progress["progressDetail"]
else:
layers[progress['id']] = progress['status']
layers[progress["id"]] = progress["status"]
if time.time() - last_emit_time > 1.5:
self.log.info('Pushing image\n',
extra=dict(progress=layers, phase='pushing'))
self.log.info(
"Pushing image\n", extra=dict(progress=layers, phase="pushing")
)
last_emit_time = time.time()
self.log.info('Successfully pushed {}'.format(self.output_image_spec),
extra=dict(phase='pushing'))
self.log.info(
"Successfully pushed {}".format(self.output_image_spec),
extra=dict(phase="pushing"),
)
def run_image(self):
"""Run docker container from built image
@ -476,13 +487,13 @@ class Repo2Docker(Application):
Returns running container
"""
client = docker.from_env(version='auto')
client = docker.from_env(version="auto")
docker_host = os.environ.get('DOCKER_HOST')
docker_host = os.environ.get("DOCKER_HOST")
if docker_host:
host_name = urlparse(docker_host).hostname
else:
host_name = '127.0.0.1'
host_name = "127.0.0.1"
self.hostname = host_name
if not self.run_cmd:
@ -492,12 +503,15 @@ class Repo2Docker(Application):
# make sure the base-notebook image is updated:
# docker pull jupyter/base-notebook
run_cmd = [
'jupyter', 'notebook',
'--ip', '0.0.0.0',
'--port', port,
"jupyter",
"notebook",
"--ip",
"0.0.0.0",
"--port",
port,
"--NotebookApp.custom_display_url=http://{}:{}".format(host_name, port),
]
ports = {'%s/tcp' % port: port}
ports = {"%s/tcp" % port: port}
else:
# run_cmd given by user, if port is also given then pass it on
run_cmd = self.run_cmd
@ -511,16 +525,15 @@ class Repo2Docker(Application):
container_volumes = {}
if self.volumes:
api_client = docker.APIClient(
version='auto',
**docker.utils.kwargs_from_env()
version="auto", **docker.utils.kwargs_from_env()
)
image = api_client.inspect_image(self.output_image_spec)
image_workdir = image['ContainerConfig']['WorkingDir']
image_workdir = image["ContainerConfig"]["WorkingDir"]
for k, v in self.volumes.items():
container_volumes[os.path.abspath(k)] = {
'bind': v if v.startswith('/') else os.path.join(image_workdir, v),
'mode': 'rw'
"bind": v if v.startswith("/") else os.path.join(image_workdir, v),
"mode": "rw",
}
run_kwargs = dict(
@ -529,14 +542,14 @@ class Repo2Docker(Application):
detach=True,
command=run_cmd,
volumes=container_volumes,
environment=self.environment
environment=self.environment,
)
run_kwargs.update(self.extra_run_kwargs)
container = client.containers.run(self.output_image_spec, **run_kwargs)
while container.status == 'created':
while container.status == "created":
time.sleep(0.5)
container.reload()
@ -550,15 +563,13 @@ class Repo2Docker(Application):
try:
for line in container.logs(stream=True):
self.log.info(line.decode('utf-8'),
extra=dict(phase='running'))
self.log.info(line.decode("utf-8"), extra=dict(phase="running"))
finally:
container.reload()
if container.status == 'running':
self.log.info('Stopping container...\n',
extra=dict(phase='running'))
if container.status == "running":
self.log.info("Stopping container...\n", extra=dict(phase="running"))
container.kill()
exit_code = container.attrs['State']['ExitCode']
exit_code = container.attrs["State"]["ExitCode"]
container.remove()
if exit_code:
sys.exit(exit_code)
@ -568,6 +579,7 @@ class Repo2Docker(Application):
Hacky method to get a free random port on local host
"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
@ -580,10 +592,10 @@ class Repo2Docker(Application):
if self.dry_run:
return False
# check if we already have an image for this content
client = docker.APIClient(version='auto', **kwargs_from_env())
client = docker.APIClient(version="auto", **kwargs_from_env())
for image in client.images():
if image['RepoTags'] is not None:
for tag in image['RepoTags']:
if image["RepoTags"] is not None:
for tag in image["RepoTags"]:
if tag == self.output_image_spec + ":latest":
return True
return False
@ -595,8 +607,7 @@ class Repo2Docker(Application):
# Check if r2d can connect to docker daemon
if not self.dry_run:
try:
docker_client = docker.APIClient(version='auto',
**kwargs_from_env())
docker_client = docker.APIClient(version="auto", **kwargs_from_env())
except DockerException as e:
self.log.exception(e)
raise
@ -609,7 +620,7 @@ class Repo2Docker(Application):
checkout_path = self.repo
else:
if self.git_workdir is None:
checkout_path = tempfile.mkdtemp(prefix='repo2docker')
checkout_path = tempfile.mkdtemp(prefix="repo2docker")
else:
checkout_path = self.git_workdir
@ -617,8 +628,10 @@ class Repo2Docker(Application):
self.fetch(self.repo, self.ref, checkout_path)
if self.find_image():
self.log.info("Reusing existing image ({}), not "
"building.".format(self.output_image_spec))
self.log.info(
"Reusing existing image ({}), not "
"building.".format(self.output_image_spec)
)
# no need to build, so skip to the end by `return`ing here
# this will still execute the finally clause and lets us
# avoid having to indent the build code by an extra level
@ -627,9 +640,12 @@ class Repo2Docker(Application):
if self.subdir:
checkout_path = os.path.join(checkout_path, self.subdir)
if not os.path.isdir(checkout_path):
self.log.error('Subdirectory %s does not exist',
self.subdir, extra=dict(phase='failure'))
raise FileNotFoundError('Could not find {}'.format(checkout_path))
self.log.error(
"Subdirectory %s does not exist",
self.subdir,
extra=dict(phase="failure"),
)
raise FileNotFoundError("Could not find {}".format(checkout_path))
with chdir(checkout_path):
for BP in self.buildpacks:
@ -642,18 +658,17 @@ class Repo2Docker(Application):
picked_buildpack.appendix = self.appendix
# Add metadata labels
picked_buildpack.labels['repo2docker.version'] = self.version
repo_label = 'local' if os.path.isdir(self.repo) else self.repo
picked_buildpack.labels['repo2docker.repo'] = repo_label
picked_buildpack.labels['repo2docker.ref'] = self.ref
picked_buildpack.labels["repo2docker.version"] = self.version
repo_label = "local" if os.path.isdir(self.repo) else self.repo
picked_buildpack.labels["repo2docker.repo"] = repo_label
picked_buildpack.labels["repo2docker.ref"] = self.ref
self.log.debug(picked_buildpack.render(),
extra=dict(phase='building'))
self.log.debug(picked_buildpack.render(), extra=dict(phase="building"))
if not self.dry_run:
if self.user_id == 0:
self.log.error(
'Root as the primary user in the image is not permitted.\n'
"Root as the primary user in the image is not permitted.\n"
)
self.log.info(
"The uid and the username of the user invoking repo2docker "
@ -665,32 +680,36 @@ class Repo2Docker(Application):
sys.exit(errno.EPERM)
build_args = {
'NB_USER': self.user_name,
'NB_UID': str(self.user_id),
"NB_USER": self.user_name,
"NB_UID": str(self.user_id),
}
if self.target_repo_dir:
build_args['REPO_DIR'] = self.target_repo_dir
self.log.info('Using %s builder\n', bp.__class__.__name__,
extra=dict(phase='building'))
build_args["REPO_DIR"] = self.target_repo_dir
self.log.info(
"Using %s builder\n",
bp.__class__.__name__,
extra=dict(phase="building"),
)
for l in picked_buildpack.build(docker_client,
self.output_image_spec,
self.build_memory_limit,
build_args,
self.cache_from,
self.extra_build_kwargs):
if 'stream' in l:
self.log.info(l['stream'],
extra=dict(phase='building'))
elif 'error' in l:
self.log.info(l['error'], extra=dict(phase='failure'))
raise docker.errors.BuildError(l['error'], build_log='')
elif 'status' in l:
self.log.info('Fetching base image...\r',
extra=dict(phase='building'))
for l in picked_buildpack.build(
docker_client,
self.output_image_spec,
self.build_memory_limit,
build_args,
self.cache_from,
self.extra_build_kwargs,
):
if "stream" in l:
self.log.info(l["stream"], extra=dict(phase="building"))
elif "error" in l:
self.log.info(l["error"], extra=dict(phase="failure"))
raise docker.errors.BuildError(l["error"], build_log="")
elif "status" in l:
self.log.info(
"Fetching base image...\r", extra=dict(phase="building")
)
else:
self.log.info(json.dumps(l),
extra=dict(phase='building'))
self.log.info(json.dumps(l), extra=dict(phase="building"))
finally:
# Cleanup checkout if necessary
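Most of the hunks above only re-wrap `self.log.info(..., extra=dict(phase=...))` calls; for context, a small sketch of how such phase-tagged records surface under `--json-logs`, using the `pythonjsonlogger` package this file already imports (output shape is illustrative):

    import logging
    from pythonjsonlogger import jsonlogger

    log = logging.getLogger("repo2docker")
    handler = logging.StreamHandler()
    handler.setFormatter(jsonlogger.JsonFormatter())
    log.addHandler(handler)
    log.warning("Pushing image", extra=dict(progress={}, phase="pushing"))
    # -> {"message": "Pushing image", "progress": {}, "phase": "pushing"}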

View file

@ -172,8 +172,7 @@ CMD ["jupyter", "notebook", "--ip", "0.0.0.0"]
"""
ENTRYPOINT_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"repo2docker-entrypoint",
os.path.dirname(os.path.abspath(__file__)), "repo2docker-entrypoint"
)
@ -195,12 +194,14 @@ class BuildPack:
"""
def __init__(self):
self.log = logging.getLogger('repo2docker')
self.appendix = ''
self.log = logging.getLogger("repo2docker")
self.appendix = ""
self.labels = {}
if sys.platform.startswith('win'):
self.log.warning("Windows environment detected. Note that Windows "
"support is experimental in repo2docker.")
if sys.platform.startswith("win"):
self.log.warning(
"Windows environment detected. Note that Windows "
"support is experimental in repo2docker."
)
def get_packages(self):
"""
@ -288,7 +289,7 @@ class BuildPack:
@property
def stencila_manifest_dir(self):
"""Find the stencila manifest dir if it exists"""
if hasattr(self, '_stencila_manifest_dir'):
if hasattr(self, "_stencila_manifest_dir"):
return self._stencila_manifest_dir
# look for a manifest.xml that suggests stencila could be used
@ -304,8 +305,7 @@ class BuildPack:
self.log.debug("Found a manifest.xml at %s", root)
self._stencila_manifest_dir = root.split(os.path.sep, 1)[1]
self.log.info(
"Using stencila manifest.xml in %s",
self._stencila_manifest_dir,
"Using stencila manifest.xml in %s", self._stencila_manifest_dir
)
break
return self._stencila_manifest_dir
@ -313,7 +313,7 @@ class BuildPack:
@property
def stencila_contexts(self):
"""Find the stencila manifest contexts from file path in manifest"""
if hasattr(self, '_stencila_contexts'):
if hasattr(self, "_stencila_contexts"):
return self._stencila_contexts
# look at the content of the documents in the manifest
@ -323,11 +323,14 @@ class BuildPack:
# get paths to the article files from manifest
files = []
if self.stencila_manifest_dir:
manifest = ET.parse(os.path.join(self.stencila_manifest_dir,
'manifest.xml'))
documents = manifest.findall('./documents/document')
files = [os.path.join(self.stencila_manifest_dir, x.get('path'))
for x in documents]
manifest = ET.parse(
os.path.join(self.stencila_manifest_dir, "manifest.xml")
)
documents = manifest.findall("./documents/document")
files = [
os.path.join(self.stencila_manifest_dir, x.get("path"))
for x in documents
]
else:
return self._stencila_contexts
@ -338,12 +341,11 @@ class BuildPack:
# extract code languages from file
document = ET.parse(filename)
code_chunks = document.findall('.//code[@specific-use="source"]')
languages = [x.get('language') for x in code_chunks]
languages = [x.get("language") for x in code_chunks]
self._stencila_contexts.update(languages)
self.log.info(
"Added executions contexts, now have %s",
self._stencila_contexts,
"Added executions contexts, now have %s", self._stencila_contexts
)
break
@ -431,7 +433,8 @@ class BuildPack:
if has_binder and has_dotbinder:
raise RuntimeError(
"The repository contains both a 'binder' and a '.binder' "
"directory. However they are exclusive.")
"directory. However they are exclusive."
)
if has_dotbinder:
return ".binder"
@ -454,24 +457,24 @@ class BuildPack:
t = jinja2.Template(TEMPLATE)
build_script_directives = []
last_user = 'root'
last_user = "root"
for user, script in self.get_build_scripts():
if last_user != user:
build_script_directives.append("USER {}".format(user))
last_user = user
build_script_directives.append("RUN {}".format(
textwrap.dedent(script.strip('\n'))
))
build_script_directives.append(
"RUN {}".format(textwrap.dedent(script.strip("\n")))
)
assemble_script_directives = []
last_user = 'root'
last_user = "root"
for user, script in self.get_assemble_scripts():
if last_user != user:
assemble_script_directives.append("USER {}".format(user))
last_user = user
assemble_script_directives.append("RUN {}".format(
textwrap.dedent(script.strip('\n'))
))
assemble_script_directives.append(
"RUN {}".format(textwrap.dedent(script.strip("\n")))
)
return t.render(
packages=sorted(self.get_packages()),
@ -488,37 +491,42 @@ class BuildPack:
appendix=self.appendix,
)
def build(self, client, image_spec, memory_limit, build_args, cache_from, extra_build_kwargs):
def build(
self,
client,
image_spec,
memory_limit,
build_args,
cache_from,
extra_build_kwargs,
):
tarf = io.BytesIO()
tar = tarfile.open(fileobj=tarf, mode='w')
tar = tarfile.open(fileobj=tarf, mode="w")
dockerfile_tarinfo = tarfile.TarInfo("Dockerfile")
dockerfile = self.render().encode('utf-8')
dockerfile = self.render().encode("utf-8")
dockerfile_tarinfo.size = len(dockerfile)
tar.addfile(
dockerfile_tarinfo,
io.BytesIO(dockerfile)
)
tar.addfile(dockerfile_tarinfo, io.BytesIO(dockerfile))
def _filter_tar(tar):
# We need to unset these for build_script_files we copy into tar
# Otherwise they seem to vary each time, preventing effective use
# of the cache!
# https://github.com/docker/docker-py/pull/1582 is related
tar.uname = ''
tar.gname = ''
tar.uid = int(build_args.get('NB_UID', 1000))
tar.gid = int(build_args.get('NB_UID', 1000))
tar.uname = ""
tar.gname = ""
tar.uid = int(build_args.get("NB_UID", 1000))
tar.gid = int(build_args.get("NB_UID", 1000))
return tar
for src in sorted(self.get_build_script_files()):
src_parts = src.split('/')
src_parts = src.split("/")
src_path = os.path.join(os.path.dirname(__file__), *src_parts)
tar.add(src_path, src, filter=_filter_tar)
tar.add(ENTRYPOINT_FILE, "repo2docker-entrypoint", filter=_filter_tar)
tar.add('.', 'src/', filter=_filter_tar)
tar.add(".", "src/", filter=_filter_tar)
tar.close()
tarf.seek(0)
@ -526,18 +534,17 @@ class BuildPack:
# If you work on this bit of code check the corresponding code in
# buildpacks/docker.py where it is duplicated
if not isinstance(memory_limit, int):
raise ValueError("The memory limit has to be specified as an"
"integer but is '{}'".format(type(memory_limit)))
raise ValueError(
"The memory limit has to be specified as an"
"integer but is '{}'".format(type(memory_limit))
)
limits = {}
if memory_limit:
# We want to always disable swap. Docker expects `memswap` to
# be total allowable memory, *including* swap - while `memory`
# points to non-swap memory. We set both values to the same so
# we use no swap.
limits = {
'memory': memory_limit,
'memswap': memory_limit
}
limits = {"memory": memory_limit, "memswap": memory_limit}
build_kwargs = dict(
fileobj=tarf,
@ -562,14 +569,12 @@ class BaseImage(BuildPack):
"""Return env directives required for build"""
return [
("APP_BASE", "/srv"),
('NPM_DIR', '${APP_BASE}/npm'),
('NPM_CONFIG_GLOBALCONFIG','${NPM_DIR}/npmrc')
("NPM_DIR", "${APP_BASE}/npm"),
("NPM_CONFIG_GLOBALCONFIG", "${NPM_DIR}/npmrc"),
]
def get_path(self):
return super().get_path() + [
'${NPM_DIR}/bin'
]
return super().get_path() + ["${NPM_DIR}/bin"]
def get_build_scripts(self):
scripts = [
@ -578,14 +583,14 @@ class BaseImage(BuildPack):
r"""
mkdir -p ${NPM_DIR} && \
chown -R ${NB_USER}:${NB_USER} ${NPM_DIR}
"""
""",
),
(
"${NB_USER}",
r"""
npm config --global set prefix ${NPM_DIR}
"""
),
""",
),
]
return super().get_build_scripts() + scripts
@ -602,10 +607,12 @@ class BaseImage(BuildPack):
# exists.
archive_dir, archive = os.path.split(self.stencila_manifest_dir)
env.extend([
("STENCILA_ARCHIVE_DIR", "${REPO_DIR}/" + archive_dir),
("STENCILA_ARCHIVE", archive),
])
env.extend(
[
("STENCILA_ARCHIVE_DIR", "${REPO_DIR}/" + archive_dir),
("STENCILA_ARCHIVE", archive),
]
)
return env
def detect(self):
@ -614,34 +621,40 @@ class BaseImage(BuildPack):
def get_assemble_scripts(self):
assemble_scripts = []
try:
with open(self.binder_path('apt.txt')) as f:
with open(self.binder_path("apt.txt")) as f:
extra_apt_packages = []
for l in f:
package = l.partition('#')[0].strip()
package = l.partition("#")[0].strip()
if not package:
continue
# Validate that this is, indeed, just a list of packages
# We're doing shell injection around here, gotta be careful.
# FIXME: Add support for specifying version numbers
if not re.match(r"^[a-z0-9.+-]+", package):
raise ValueError("Found invalid package name {} in "
"apt.txt".format(package))
raise ValueError(
"Found invalid package name {} in "
"apt.txt".format(package)
)
extra_apt_packages.append(package)
assemble_scripts.append((
'root',
# This apt-get install is *not* quiet, since users explicitly asked for this
r"""
assemble_scripts.append(
(
"root",
# This apt-get install is *not* quiet, since users explicitly asked for this
r"""
apt-get -qq update && \
apt-get install --yes --no-install-recommends {} && \
apt-get -qq purge && \
apt-get -qq clean && \
rm -rf /var/lib/apt/lists/*
""".format(' '.join(extra_apt_packages))
))
""".format(
" ".join(extra_apt_packages)
),
)
)
except FileNotFoundError:
pass
if 'py' in self.stencila_contexts:
if "py" in self.stencila_contexts:
assemble_scripts.extend(
[
(
@ -670,18 +683,18 @@ class BaseImage(BuildPack):
return assemble_scripts
def get_post_build_scripts(self):
post_build = self.binder_path('postBuild')
post_build = self.binder_path("postBuild")
if os.path.exists(post_build):
return [post_build]
return []
def get_start_script(self):
start = self.binder_path('start')
start = self.binder_path("start")
if os.path.exists(start):
# Return an absolute path to start
# This is important when built container images start with
# a working directory that is different from ${REPO_DIR}
# This isn't a problem with anything else, since start is
# the only path evaluated at container start time rather than build time
return os.path.join('${REPO_DIR}', start)
return os.path.join("${REPO_DIR}", start)
return None
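The `build_script_directives` loop re-indented above turns `(user, script)` tuples into Dockerfile directives, emitting a USER line only when the user changes; a standalone sketch with example tuples:

    import textwrap

    # Example (user, script) pairs in the shape get_build_scripts() returns:
    scripts = [
        ("root", "\nmkdir -p ${NPM_DIR}\n"),
        ("${NB_USER}", "\nnpm config --global set prefix ${NPM_DIR}\n"),
    ]
    directives = []
    last_user = "root"
    for user, script in scripts:
        if last_user != user:
            directives.append("USER {}".format(user))
            last_user = user
        directives.append("RUN {}".format(textwrap.dedent(script.strip("\n"))))
    print("\n".join(directives))
    # RUN mkdir -p ${NPM_DIR}
    # USER ${NB_USER}
    # RUN npm config --global set prefix ${NPM_DIR}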

View file

@ -8,7 +8,7 @@ from ruamel.yaml import YAML
from ..base import BaseImage
# pattern for parsing conda dependency line
PYTHON_REGEX = re.compile(r'python\s*=+\s*([\d\.]*)')
PYTHON_REGEX = re.compile(r"python\s*=+\s*([\d\.]*)")
# current directory
HERE = os.path.dirname(os.path.abspath(__file__))
@ -19,6 +19,7 @@ class CondaBuildPack(BaseImage):
Uses miniconda since it is more lightweight than Anaconda.
"""
def get_build_env(self):
"""Return environment variables to be set.
@ -27,20 +28,18 @@ class CondaBuildPack(BaseImage):
"""
env = super().get_build_env() + [
('CONDA_DIR', '${APP_BASE}/conda'),
('NB_PYTHON_PREFIX', '${CONDA_DIR}/envs/notebook'),
("CONDA_DIR", "${APP_BASE}/conda"),
("NB_PYTHON_PREFIX", "${CONDA_DIR}/envs/notebook"),
]
if self.py2:
env.append(('KERNEL_PYTHON_PREFIX', '${CONDA_DIR}/envs/kernel'))
env.append(("KERNEL_PYTHON_PREFIX", "${CONDA_DIR}/envs/kernel"))
else:
env.append(('KERNEL_PYTHON_PREFIX', '${NB_PYTHON_PREFIX}'))
env.append(("KERNEL_PYTHON_PREFIX", "${NB_PYTHON_PREFIX}"))
return env
def get_env(self):
"""Make kernel env the default for `conda install`"""
env = super().get_env() + [
('CONDA_DEFAULT_ENV', '${KERNEL_PYTHON_PREFIX}'),
]
env = super().get_env() + [("CONDA_DEFAULT_ENV", "${KERNEL_PYTHON_PREFIX}")]
return env
def get_path(self):
@ -49,10 +48,10 @@ class CondaBuildPack(BaseImage):
"""
path = super().get_path()
path.insert(0, '${CONDA_DIR}/bin')
path.insert(0, "${CONDA_DIR}/bin")
if self.py2:
path.insert(0, '${KERNEL_PYTHON_PREFIX}/bin')
path.insert(0, '${NB_PYTHON_PREFIX}/bin')
path.insert(0, "${KERNEL_PYTHON_PREFIX}/bin")
path.insert(0, "${NB_PYTHON_PREFIX}/bin")
return path
def get_build_scripts(self):
@ -79,14 +78,11 @@ class CondaBuildPack(BaseImage):
r"""
bash /tmp/install-miniconda.bash && \
rm /tmp/install-miniconda.bash /tmp/environment.yml
"""
""",
)
]
major_pythons = {
'2': '2.7',
'3': '3.7',
}
major_pythons = {"2": "2.7", "3": "3.7"}
def get_build_script_files(self):
"""
@ -104,8 +100,8 @@ class CondaBuildPack(BaseImage):
"""
files = {
'conda/install-miniconda.bash': '/tmp/install-miniconda.bash',
'conda/activate-conda.sh': '/etc/profile.d/activate-conda.sh',
"conda/install-miniconda.bash": "/tmp/install-miniconda.bash",
"conda/activate-conda.sh": "/etc/profile.d/activate-conda.sh",
}
py_version = self.python_version
self.log.info("Building conda environment for python=%s" % py_version)
@ -114,19 +110,20 @@ class CondaBuildPack(BaseImage):
# major Python versions during upgrade.
# If no version is specified or no matching X.Y version is found,
# the default base environment is used.
frozen_name = 'environment.frozen.yml'
frozen_name = "environment.frozen.yml"
if py_version:
if self.py2:
# python 2 goes in a different env
files['conda/environment.py-2.7.frozen.yml'] = '/tmp/kernel-environment.yml'
files[
"conda/environment.py-2.7.frozen.yml"
] = "/tmp/kernel-environment.yml"
else:
py_frozen_name = \
'environment.py-{py}.frozen.yml'.format(py=py_version)
py_frozen_name = "environment.py-{py}.frozen.yml".format(py=py_version)
if os.path.exists(os.path.join(HERE, py_frozen_name)):
frozen_name = py_frozen_name
else:
self.log.warning("No frozen env: %s", py_frozen_name)
files['conda/' + frozen_name] = '/tmp/environment.yml'
files["conda/" + frozen_name] = "/tmp/environment.yml"
files.update(super().get_build_script_files())
return files
@ -138,11 +135,11 @@ class CondaBuildPack(BaseImage):
or a Falsy empty string '' if not found.
"""
environment_yml = self.binder_path('environment.yml')
environment_yml = self.binder_path("environment.yml")
if not os.path.exists(environment_yml):
return ''
return ""
if not hasattr(self, '_python_version'):
if not hasattr(self, "_python_version"):
py_version = None
with open(environment_yml) as f:
env = YAML().load(f)
@ -151,8 +148,11 @@ class CondaBuildPack(BaseImage):
env = {}
# check if the env file provided a dict-like thing, not a list or other data structure.
if not isinstance(env, Mapping):
raise TypeError("environment.yml should contain a dictionary. Got %r" % type(env))
for dep in env.get('dependencies', []):
raise TypeError(
"environment.yml should contain a dictionary. Got %r"
% type(env)
)
for dep in env.get("dependencies", []):
if not isinstance(dep, str):
continue
match = PYTHON_REGEX.match(dep)
@ -167,35 +167,39 @@ class CondaBuildPack(BaseImage):
self._python_version = self.major_pythons.get(py_version[0])
else:
# return major.minor
self._python_version = '.'.join(py_version.split('.')[:2])
self._python_version = ".".join(py_version.split(".")[:2])
else:
self._python_version = ''
self._python_version = ""
return self._python_version
@property
def py2(self):
"""Am I building a Python 2 kernel environment?"""
return self.python_version and self.python_version.split('.')[0] == '2'
return self.python_version and self.python_version.split(".")[0] == "2"
def get_assemble_scripts(self):
"""Return series of build-steps specific to this source repository.
"""
assembly_scripts = []
environment_yml = self.binder_path('environment.yml')
environment_yml = self.binder_path("environment.yml")
env_prefix = "${KERNEL_PYTHON_PREFIX}" if self.py2 else "${NB_PYTHON_PREFIX}"
if os.path.exists(environment_yml):
assembly_scripts.append((
'${NB_USER}',
r"""
assembly_scripts.append(
(
"${NB_USER}",
r"""
conda env update -p {0} -f "{1}" && \
conda clean --all -f -y && \
conda list -p {0}
""".format(env_prefix, environment_yml)
))
""".format(
env_prefix, environment_yml
),
)
)
return super().get_assemble_scripts() + assembly_scripts
def detect(self):
"""Check if current repo should be built with the Conda BuildPack.
"""
return os.path.exists(self.binder_path('environment.yml')) and super().detect()
return os.path.exists(self.binder_path("environment.yml")) and super().detect()
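The `PYTHON_REGEX` reformatted at the top of this file is what `python_version` uses to pull a version out of a conda dependency line; a quick check of what it matches:

    import re

    PYTHON_REGEX = re.compile(r"python\s*=+\s*([\d\.]*)")  # as defined above

    print(PYTHON_REGEX.match("python=3.7").group(1))       # 3.7
    print(PYTHON_REGEX.match("python = 2.7.15").group(1))  # 2.7.15
    print(PYTHON_REGEX.match("numpy=1.16"))                # None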

View file

@ -21,18 +21,18 @@ from ruamel.yaml import YAML
# Docker image version can be different than conda version,
# since miniconda3 docker images seem to lag conda releases.
MINICONDA_DOCKER_VERSION = '4.5.12'
CONDA_VERSION = '4.6.14'
MINICONDA_DOCKER_VERSION = "4.5.12"
CONDA_VERSION = "4.6.14"
HERE = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
ENV_FILE = HERE / 'environment.yml'
FROZEN_FILE = os.path.splitext(ENV_FILE)[0] + '.frozen.yml'
ENV_FILE = HERE / "environment.yml"
FROZEN_FILE = os.path.splitext(ENV_FILE)[0] + ".frozen.yml"
ENV_FILE_T = HERE / 'environment.py-{py}.yml'
FROZEN_FILE_T = os.path.splitext(ENV_FILE_T)[0] + '.frozen.yml'
ENV_FILE_T = HERE / "environment.py-{py}.yml"
FROZEN_FILE_T = os.path.splitext(ENV_FILE_T)[0] + ".frozen.yml"
yaml = YAML(typ='rt')
yaml = YAML(typ="rt")
def freeze(env_file, frozen_file):
@ -48,37 +48,46 @@ def freeze(env_file, frozen_file):
frozen_dest = HERE / frozen_file
if frozen_dest.exists():
with frozen_dest.open('r') as f:
with frozen_dest.open("r") as f:
line = f.readline()
if 'GENERATED' not in line:
print(f"{frozen_file.relative_to(HERE)} not autogenerated, not refreezing")
if "GENERATED" not in line:
print(
f"{frozen_file.relative_to(HERE)} not autogenerated, not refreezing"
)
return
print(f"Freezing {env_file} -> {frozen_file}")
with frozen_dest.open('w') as f:
f.write(f"# AUTO GENERATED FROM {env_file.relative_to(HERE)}, DO NOT MANUALLY MODIFY\n")
with frozen_dest.open("w") as f:
f.write(
f"# AUTO GENERATED FROM {env_file.relative_to(HERE)}, DO NOT MANUALLY MODIFY\n"
)
f.write(f"# Frozen on {datetime.utcnow():%Y-%m-%d %H:%M:%S UTC}\n")
check_call([
'docker',
'run',
'--rm',
'-v' f"{HERE}:/r2d",
'-it',
f"continuumio/miniconda3:{MINICONDA_DOCKER_VERSION}",
"sh", "-c",
'; '.join([
'set -ex',
f"conda install -yq conda={CONDA_VERSION}",
'conda config --add channels conda-forge',
'conda config --system --set auto_update_conda false',
f"conda env create -v -f /r2d/{env_file.relative_to(HERE)} -n r2d",
# add conda-forge broken channel as lowest priority in case
# any of our frozen packages are marked as broken after freezing
'conda config --append channels conda-forge/label/broken',
f"conda env export -n r2d >> /r2d/{frozen_file.relative_to(HERE)}",
])
])
check_call(
[
"docker",
"run",
"--rm",
"-v" f"{HERE}:/r2d",
"-it",
f"continuumio/miniconda3:{MINICONDA_DOCKER_VERSION}",
"sh",
"-c",
"; ".join(
[
"set -ex",
f"conda install -yq conda={CONDA_VERSION}",
"conda config --add channels conda-forge",
"conda config --system --set auto_update_conda false",
f"conda env create -v -f /r2d/{env_file.relative_to(HERE)} -n r2d",
# add conda-forge broken channel as lowest priority in case
# any of our frozen packages are marked as broken after freezing
"conda config --append channels conda-forge/label/broken",
f"conda env export -n r2d >> /r2d/{frozen_file.relative_to(HERE)}",
]
),
]
)
def set_python(py_env_file, py):
@ -87,32 +96,34 @@ def set_python(py_env_file, py):
# only clobber auto-generated files
with open(py_env_file) as f:
text = f.readline()
if 'GENERATED' not in text:
if "GENERATED" not in text:
return
print(f"Regenerating {py_env_file} from {ENV_FILE}")
with open(ENV_FILE) as f:
env = yaml.load(f)
for idx, dep in enumerate(env['dependencies']):
if dep.split('=')[0] == 'python':
env['dependencies'][idx] = f'python={py}.*'
for idx, dep in enumerate(env["dependencies"]):
if dep.split("=")[0] == "python":
env["dependencies"][idx] = f"python={py}.*"
break
else:
raise ValueError(f"python dependency not found in {env['dependencies']}")
# update python dependency
with open(py_env_file, 'w') as f:
f.write(f"# AUTO GENERATED FROM {ENV_FILE.relative_to(HERE)}, DO NOT MANUALLY MODIFY\n")
with open(py_env_file, "w") as f:
f.write(
f"# AUTO GENERATED FROM {ENV_FILE.relative_to(HERE)}, DO NOT MANUALLY MODIFY\n"
)
f.write(f"# Generated on {datetime.utcnow():%Y-%m-%d %H:%M:%S UTC}\n")
yaml.dump(env, f)
if __name__ == '__main__':
if __name__ == "__main__":
# allow specifying which Pythons to update on argv
pys = sys.argv[1:] or ('2.7', '3.5', '3.6', '3.7')
pys = sys.argv[1:] or ("2.7", "3.5", "3.6", "3.7")
for py in pys:
env_file = pathlib.Path(str(ENV_FILE_T).format(py=py))
set_python(env_file, py)
frozen_file = pathlib.Path(os.path.splitext(env_file)[0] + '.frozen.yml')
frozen_file = pathlib.Path(os.path.splitext(env_file)[0] + ".frozen.yml")
freeze(env_file, frozen_file)
# use last version as default

View file

@ -7,35 +7,43 @@ from .base import BuildPack
class DockerBuildPack(BuildPack):
"""Docker BuildPack"""
dockerfile = "Dockerfile"
def detect(self):
"""Check if current repo should be built with the Docker BuildPack"""
return os.path.exists(self.binder_path('Dockerfile'))
return os.path.exists(self.binder_path("Dockerfile"))
def render(self):
"""Render the Dockerfile using by reading it from the source repo"""
Dockerfile = self.binder_path('Dockerfile')
Dockerfile = self.binder_path("Dockerfile")
with open(Dockerfile) as f:
return f.read()
def build(self, client, image_spec, memory_limit, build_args, cache_from, extra_build_kwargs):
def build(
self,
client,
image_spec,
memory_limit,
build_args,
cache_from,
extra_build_kwargs,
):
"""Build a Docker image based on the Dockerfile in the source repo."""
# If you work on this bit of code check the corresponding code in
# buildpacks/base.py where it is duplicated
if not isinstance(memory_limit, int):
raise ValueError("The memory limit has to be specified as an"
"integer but is '{}'".format(type(memory_limit)))
raise ValueError(
"The memory limit has to be specified as an"
"integer but is '{}'".format(type(memory_limit))
)
limits = {}
if memory_limit:
# We want to always disable swap. Docker expects `memswap` to
# be total allowable memory, *including* swap - while `memory`
# points to non-swap memory. We set both values to the same so
# we use no swap.
limits = {
'memory': memory_limit,
'memswap': memory_limit,
}
limits = {"memory": memory_limit, "memswap": memory_limit}
build_kwargs = dict(
path=os.getcwd(),
@ -46,7 +54,7 @@ class DockerBuildPack(BuildPack):
forcerm=True,
rm=True,
container_limits=limits,
cache_from=cache_from
cache_from=cache_from,
)
build_kwargs.update(extra_build_kwargs)


@ -4,6 +4,7 @@ import toml
from ..python import PythonBuildPack
from .semver import find_semver_match
class JuliaProjectTomlBuildPack(PythonBuildPack):
"""
Julia build pack which uses conda.
@ -12,24 +13,20 @@ class JuliaProjectTomlBuildPack(PythonBuildPack):
# ALL EXISTING JULIA VERSIONS
# Note that these must remain ordered, in order for the find_semver_match()
# function to behave correctly.
all_julias = [
"0.7.0",
"1.0.0", "1.0.1", "1.0.2", "1.0.3",
"1.1.0",
]
all_julias = ["0.7.0", "1.0.0", "1.0.1", "1.0.2", "1.0.3", "1.1.0"]
@property
def julia_version(self):
default_julia_version = self.all_julias[-1]
if os.path.exists(self.binder_path('JuliaProject.toml')):
project_toml = toml.load(self.binder_path('JuliaProject.toml'))
if os.path.exists(self.binder_path("JuliaProject.toml")):
project_toml = toml.load(self.binder_path("JuliaProject.toml"))
else:
project_toml = toml.load(self.binder_path('Project.toml'))
project_toml = toml.load(self.binder_path("Project.toml"))
if 'compat' in project_toml:
if 'julia' in project_toml['compat']:
julia_version_str = project_toml['compat']['julia']
if "compat" in project_toml:
if "julia" in project_toml["compat"]:
julia_version_str = project_toml["compat"]["julia"]
# For Project.toml files, install the latest julia version that
# satisfies the given semver.
@ -58,18 +55,15 @@ class JuliaProjectTomlBuildPack(PythonBuildPack):
"""
return super().get_build_env() + [
('JULIA_PATH', '${APP_BASE}/julia'),
('JULIA_DEPOT_PATH', '${JULIA_PATH}/pkg'),
('JULIA_VERSION', self.julia_version),
('JUPYTER', '${NB_PYTHON_PREFIX}/bin/jupyter'),
('JUPYTER_DATA_DIR', '${NB_PYTHON_PREFIX}/share/jupyter'),
("JULIA_PATH", "${APP_BASE}/julia"),
("JULIA_DEPOT_PATH", "${JULIA_PATH}/pkg"),
("JULIA_VERSION", self.julia_version),
("JUPYTER", "${NB_PYTHON_PREFIX}/bin/jupyter"),
("JUPYTER_DATA_DIR", "${NB_PYTHON_PREFIX}/share/jupyter"),
]
def get_env(self):
return super().get_env() + [
('JULIA_PROJECT', '${REPO_DIR}')
]
return super().get_env() + [("JULIA_PROJECT", "${REPO_DIR}")]
def get_path(self):
"""Adds path to Julia binaries to user's PATH.
@ -79,7 +73,7 @@ class JuliaProjectTomlBuildPack(PythonBuildPack):
executable is added to the list.
"""
return super().get_path() + ['${JULIA_PATH}/bin']
return super().get_path() + ["${JULIA_PATH}/bin"]
def get_build_scripts(self):
"""
@ -98,14 +92,14 @@ class JuliaProjectTomlBuildPack(PythonBuildPack):
r"""
mkdir -p ${JULIA_PATH} && \
curl -sSL "https://julialang-s3.julialang.org/bin/linux/x64/${JULIA_VERSION%[.-]*}/julia-${JULIA_VERSION}-linux-x86_64.tar.gz" | tar -xz -C ${JULIA_PATH} --strip-components 1
"""
""",
),
(
"root",
r"""
mkdir -p ${JULIA_DEPOT_PATH} && \
chown ${NB_USER}:${NB_USER} ${JULIA_DEPOT_PATH}
"""
""",
),
]
@ -129,7 +123,7 @@ class JuliaProjectTomlBuildPack(PythonBuildPack):
r"""
JULIA_PROJECT="" julia -e "using Pkg; Pkg.add(\"IJulia\"); using IJulia; installkernel(\"Julia\", \"--project=${REPO_DIR}\");" && \
julia --project=${REPO_DIR} -e 'using Pkg; Pkg.instantiate(); pkg"precompile"'
"""
""",
)
]
@ -145,4 +139,6 @@ class JuliaProjectTomlBuildPack(PythonBuildPack):
`JuliaProject.toml` exists.
"""
return os.path.exists(self.binder_path('Project.toml')) or os.path.exists(self.binder_path('JuliaProject.toml'))
return os.path.exists(self.binder_path("Project.toml")) or os.path.exists(
self.binder_path("JuliaProject.toml")
)


@ -8,41 +8,36 @@ class JuliaRequireBuildPack(PythonBuildPack):
Julia build pack which uses conda and REQUIRE.
"""
minor_julias = {
'0.6': '0.6.4',
'0.7': '0.7.0',
'1.0': '1.0.3',
'1.1': '1.1.0',
}
major_julias = {
'1': '1.1.0',
}
minor_julias = {"0.6": "0.6.4", "0.7": "0.7.0", "1.0": "1.0.3", "1.1": "1.1.0"}
major_julias = {"1": "1.1.0"}
@property
def julia_version(self):
require = self.binder_path('REQUIRE')
require = self.binder_path("REQUIRE")
try:
with open(require) as f:
julia_version_line = f.readline().strip() # First line is optionally a julia version
julia_version_line = (
f.readline().strip()
) # First line is optionally a julia version
except FileNotFoundError:
julia_version_line = ''
julia_version_line = ""
if not julia_version_line.startswith('julia '):
if not julia_version_line.startswith("julia "):
# not a Julia version line.
# use the default Julia.
self._julia_version = self.minor_julias['0.6']
self._julia_version = self.minor_julias["0.6"]
return self._julia_version
julia_version_info = julia_version_line.split(' ', 1)[1].split('.')
julia_version = ''
julia_version_info = julia_version_line.split(" ", 1)[1].split(".")
julia_version = ""
if len(julia_version_info) == 1:
julia_version = self.major_julias[julia_version_info[0]]
elif len(julia_version_info) == 2:
# get major.minor
julia_version = self.minor_julias['.'.join(julia_version_info)]
julia_version = self.minor_julias[".".join(julia_version_info)]
else:
# use supplied julia version
julia_version = '.'.join(julia_version_info)
julia_version = ".".join(julia_version_info)
self._julia_version = julia_version
return self._julia_version
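# Worked examples (editorial) of the REQUIRE parsing above:
#   first line "julia 0.7"   -> minor_julias["0.7"] -> "0.7.0"
#   first line "julia 1"     -> major_julias["1"]   -> "1.1.0"
#   first line "julia 1.2.3" -> "1.2.3", used verbatim
#   no leading "julia " line -> default minor_julias["0.6"] -> "0.6.4"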
@ -68,13 +63,13 @@ class JuliaRequireBuildPack(PythonBuildPack):
"""
return super().get_build_env() + [
('JULIA_PATH', '${APP_BASE}/julia'),
('JULIA_HOME', '${JULIA_PATH}/bin'), # julia <= 0.6
('JULIA_BINDIR', '${JULIA_HOME}'), # julia >= 0.7
('JULIA_PKGDIR', '${JULIA_PATH}/pkg'),
('JULIA_DEPOT_PATH', '${JULIA_PKGDIR}'), # julia >= 0.7
('JULIA_VERSION', self.julia_version),
('JUPYTER', '${NB_PYTHON_PREFIX}/bin/jupyter')
("JULIA_PATH", "${APP_BASE}/julia"),
("JULIA_HOME", "${JULIA_PATH}/bin"), # julia <= 0.6
("JULIA_BINDIR", "${JULIA_HOME}"), # julia >= 0.7
("JULIA_PKGDIR", "${JULIA_PATH}/pkg"),
("JULIA_DEPOT_PATH", "${JULIA_PKGDIR}"), # julia >= 0.7
("JULIA_VERSION", self.julia_version),
("JUPYTER", "${NB_PYTHON_PREFIX}/bin/jupyter"),
]
def get_path(self):
@ -85,7 +80,7 @@ class JuliaRequireBuildPack(PythonBuildPack):
executable is added to the list.
"""
return super().get_path() + ['${JULIA_HOME}']
return super().get_path() + ["${JULIA_HOME}"]
def get_build_scripts(self):
"""
@ -104,14 +99,14 @@ class JuliaRequireBuildPack(PythonBuildPack):
r"""
mkdir -p ${JULIA_PATH} && \
curl -sSL "https://julialang-s3.julialang.org/bin/linux/x64/${JULIA_VERSION%[.-]*}/julia-${JULIA_VERSION}-linux-x86_64.tar.gz" | tar -xz -C ${JULIA_PATH} --strip-components 1
"""
""",
),
(
"root",
r"""
mkdir -p ${JULIA_PKGDIR} && \
chown ${NB_USER}:${NB_USER} ${JULIA_PKGDIR}
"""
""",
),
(
"${NB_USER}",
@ -120,8 +115,8 @@ class JuliaRequireBuildPack(PythonBuildPack):
r"""
julia -e 'if (VERSION > v"0.7-") using Pkg; else Pkg.init(); end; Pkg.add("IJulia"); using IJulia;' && \
mv ${HOME}/.local/share/jupyter/kernels/julia-${JULIA_VERSION%[.-]*} ${NB_PYTHON_PREFIX}/share/jupyter/kernels/julia-${JULIA_VERSION%[.-]*}
"""
)
""",
),
]
def get_assemble_scripts(self):
@ -133,24 +128,27 @@ class JuliaRequireBuildPack(PythonBuildPack):
any needed Python packages found in environment.yml.
"""
require = self.binder_path('REQUIRE')
return super().get_assemble_scripts() + [(
"${NB_USER}",
# Install and pre-compile all libraries if they've opted into it.
# In v0.6, Pkg.resolve() installs all the packages, but in v0.7+, we
# have to manually Pkg.add() each of them (since the REQUIRE file
# format is deprecated).
# The precompilation is done via `using {libraryname}`.
r"""
require = self.binder_path("REQUIRE")
return super().get_assemble_scripts() + [
(
"${NB_USER}",
# Install and pre-compile all libraries if they've opted into it.
# In v0.6, Pkg.resolve() installs all the packages, but in v0.7+, we
# have to manually Pkg.add() each of them (since the REQUIRE file
# format is deprecated).
# The precompilation is done via `using {libraryname}`.
r"""
julia /tmp/install-repo-dependencies.jl "%(require)s"
""" % {"require": require}
# TODO: For some reason, `rm`ing the file fails with permission denied.
# && rm /tmp/install-repo-dependencies.jl
)]
"""
% {"require": require}
# TODO: For some reason, `rm`ing the file fails with permission denied.
# && rm /tmp/install-repo-dependencies.jl
)
]
def get_build_script_files(self):
files = {
'julia/install-repo-dependencies.jl': '/tmp/install-repo-dependencies.jl',
"julia/install-repo-dependencies.jl": "/tmp/install-repo-dependencies.jl"
}
files.update(super().get_build_script_files())
return files
@ -167,4 +165,7 @@ class JuliaRequireBuildPack(PythonBuildPack):
no julia 1.0 style environment
"""
return os.path.exists(self.binder_path('REQUIRE')) and not(os.path.exists(self.binder_path('Project.toml')) or os.path.exists(self.binder_path('JuliaProject.toml')))
return os.path.exists(self.binder_path("REQUIRE")) and not (
os.path.exists(self.binder_path("Project.toml"))
or os.path.exists(self.binder_path("JuliaProject.toml"))
)


@ -71,16 +71,12 @@ def create_semver_matcher(constraint_str):
break
return VersionRange(constraint, upper, True)
else:
return VersionRange(
constraint, (major(constraint) + 1,), True
)
return VersionRange(constraint, (major(constraint) + 1,), True)
# '~' matching (only allowed to bump the last present number by one)
if comparison_symbol == "~":
return VersionRange(
constraint,
constraint[:-1] + (constraint[-1] + 1,),
exclusive=False
constraint, constraint[:-1] + (constraint[-1] + 1,), exclusive=False
)
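# Editorial examples of the two branches above:
#   caret fallback: "^1.2.3" -> VersionRange((1, 2, 3), (2,), True), an
#   upper bound at the next major version
#   tilde: "~1.2" -> VersionRange((1, 2), (1, 3), exclusive=False), bumping
#   only the last number present, per the comment above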
# Use semver package's comparisons for everything else:


@ -14,9 +14,11 @@ from ..docker import DockerBuildPack
class LegacyBinderDockerBuildPack(DockerBuildPack):
"""Legacy build pack for compatibility to first version of Binder."""
dockerfile = '._binder.Dockerfile'
legacy_prependix = dedent(r"""
dockerfile = "._binder.Dockerfile"
legacy_prependix = dedent(
r"""
USER root
# update the source list now that jessie is archived
COPY apt-sources.list /etc/apt/sources.list
@ -34,9 +36,11 @@ class LegacyBinderDockerBuildPack(DockerBuildPack):
/home/main/anaconda2/envs/python3/bin/ipython kernel install --sys-prefix && \
/home/main/anaconda2/bin/ipython kernel install --prefix=/home/main/anaconda2/envs/python3 && \
/home/main/anaconda2/bin/ipython kernel install --sys-prefix
""")
"""
)
legacy_appendix = dedent(r"""
legacy_appendix = dedent(
r"""
USER root
COPY . /home/main/notebooks
RUN chown -R main:main /home/main/notebooks && \
@ -48,7 +52,8 @@ class LegacyBinderDockerBuildPack(DockerBuildPack):
ENV PATH /home/main/anaconda2/envs/python3/bin:$PATH
ENV JUPYTER_PATH /home/main/anaconda2/share/jupyter:$JUPYTER_PATH
CMD jupyter notebook --ip 0.0.0.0
""")
"""
)
def render(self):
"""Render buildpack into a Dockerfile.
@ -59,16 +64,16 @@ class LegacyBinderDockerBuildPack(DockerBuildPack):
"""
segments = [
'FROM andrewosh/binder-base@sha256:eabde24f4c55174832ed8795faa40cea62fc9e2a4a9f1ee1444f8a2e4f9710ee',
"FROM andrewosh/binder-base@sha256:eabde24f4c55174832ed8795faa40cea62fc9e2a4a9f1ee1444f8a2e4f9710ee",
self.legacy_prependix,
]
with open('Dockerfile') as f:
with open("Dockerfile") as f:
for line in f:
if line.strip().startswith('FROM'):
if line.strip().startswith("FROM"):
break
segments.append(f.read())
segments.append(self.legacy_appendix)
return '\n'.join(segments)
return "\n".join(segments)
def get_build_script_files(self):
"""
@ -86,38 +91,43 @@ class LegacyBinderDockerBuildPack(DockerBuildPack):
"""
return {
'legacy/root.frozen.yml': '/tmp/root.frozen.yml',
'legacy/python3.frozen.yml': '/tmp/python3.frozen.yml',
'legacy/apt-sources.list': '/tmp/apt-sources.list',
"legacy/root.frozen.yml": "/tmp/root.frozen.yml",
"legacy/python3.frozen.yml": "/tmp/python3.frozen.yml",
"legacy/apt-sources.list": "/tmp/apt-sources.list",
}
def build(self, client, image_spec, memory_limit, build_args, cache_from,
extra_build_kwargs):
def build(
self,
client,
image_spec,
memory_limit,
build_args,
cache_from,
extra_build_kwargs,
):
"""Build a legacy Docker image."""
with open(self.dockerfile, 'w') as f:
with open(self.dockerfile, "w") as f:
f.write(self.render())
for env in ('root', 'python3'):
env_file = env + '.frozen.yml'
src_path = os.path.join(
os.path.dirname(__file__),
env_file,
)
for env in ("root", "python3"):
env_file = env + ".frozen.yml"
src_path = os.path.join(os.path.dirname(__file__), env_file)
shutil.copy(src_path, env_file)
src_path = os.path.join(os.path.dirname(__file__), 'apt-sources.list')
shutil.copy(src_path, 'apt-sources.list')
src_path = os.path.join(os.path.dirname(__file__), "apt-sources.list")
shutil.copy(src_path, "apt-sources.list")
return super().build(client, image_spec, memory_limit, build_args,
cache_from, extra_build_kwargs)
return super().build(
client, image_spec, memory_limit, build_args, cache_from, extra_build_kwargs
)
def detect(self):
"""Check if current repo should be built with the Legacy BuildPack.
"""
try:
with open('Dockerfile', 'r') as f:
with open("Dockerfile", "r") as f:
for line in f:
if line.startswith('FROM'):
if 'andrewosh/binder-base' in line.split('#')[0].lower():
if line.startswith("FROM"):
if "andrewosh/binder-base" in line.split("#")[0].lower():
return True
else:
return False


@ -17,7 +17,7 @@ from subprocess import check_call
import sys
# need conda ≥ 4.4 to avoid bug adding spurious pip dependencies
CONDA_VERSION = '4.4.11'
CONDA_VERSION = "4.4.11"
HERE = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
@ -34,33 +34,38 @@ def freeze(env_name, env_file, frozen_file):
"""
print(f"Freezing {env_file} -> {frozen_file}")
with open(HERE / frozen_file, 'w') as f:
with open(HERE / frozen_file, "w") as f:
f.write(f"# AUTO GENERATED FROM {env_file}, DO NOT MANUALLY MODIFY\n")
f.write(f"# Frozen on {datetime.utcnow():%Y-%m-%d %H:%M:%S UTC}\n")
check_call([
'docker',
'run',
'--rm',
'-v' f"{HERE}:/r2d",
'-it',
f"andrewosh/binder-base",
"sh", "-c",
'; '.join([
"conda update -yq conda",
f"conda install -yq conda={CONDA_VERSION}",
'conda config --system --set auto_update_conda false',
f"conda env update -f /r2d/{env_file} -n {env_name}",
# exclude conda packages because we don't want to pin them
f"conda env export -n {env_name} | grep -v conda >> /r2d/{frozen_file}",
])
])
check_call(
[
"docker",
"run",
"--rm",
"-v" f"{HERE}:/r2d",
"-it",
f"andrewosh/binder-base",
"sh",
"-c",
"; ".join(
[
"conda update -yq conda",
f"conda install -yq conda={CONDA_VERSION}",
"conda config --system --set auto_update_conda false",
f"conda env update -f /r2d/{env_file} -n {env_name}",
# exclude conda packages because we don't want to pin them
f"conda env export -n {env_name} | grep -v conda >> /r2d/{frozen_file}",
]
),
]
)
if __name__ == '__main__':
if __name__ == "__main__":
# allow specifying which env(s) to update on argv
env_names = sys.argv[1:] or ('root', 'python3')
env_names = sys.argv[1:] or ("root", "python3")
for env_name in env_names:
env_file = env_name + ".yml"
frozen_file = os.path.splitext(env_file)[0] + '.frozen.yml'
frozen_file = os.path.splitext(env_file)[0] + ".frozen.yml"
freeze(env_name, env_file, frozen_file)


@ -10,16 +10,14 @@ class NixBuildPack(BaseImage):
def get_path(self):
"""Return paths to be added to PATH environemnt variable
"""
return super().get_path() + [
'/home/${NB_USER}/.nix-profile/bin'
]
return super().get_path() + ["/home/${NB_USER}/.nix-profile/bin"]
def get_env(self):
"""Ordered list of environment variables to be set for this image"""
return super().get_env() + [
('NIX_PATH', "nixpkgs=/home/${NB_USER}/.nix-defexpr/channels/nixpkgs"),
('NIX_SSL_CERT_FILE', '/etc/ssl/certs/ca-certificates.crt'),
('GIT_SSL_CAINFO', '/etc/ssl/certs/ca-certificates.crt')
("NIX_PATH", "nixpkgs=/home/${NB_USER}/.nix-defexpr/channels/nixpkgs"),
("NIX_SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt"),
("GIT_SSL_CAINFO", "/etc/ssl/certs/ca-certificates.crt"),
]
def get_build_scripts(self):
@ -32,14 +30,20 @@ class NixBuildPack(BaseImage):
- install nix package manager for user
"""
return super().get_build_scripts() + [
("root", """
(
"root",
"""
mkdir -m 0755 /nix && \
chown -R ${NB_USER}:${NB_USER} /nix /usr/local/bin/nix-shell-wrapper /home/${NB_USER}
"""),
("${NB_USER}", """
""",
),
(
"${NB_USER}",
"""
bash /home/${NB_USER}/.local/bin/install-nix.bash && \
rm /home/${NB_USER}/.local/bin/install-nix.bash
""")
""",
),
]
def get_build_script_files(self):
@ -47,18 +51,23 @@ class NixBuildPack(BaseImage):
"""
return {
"nix/install-nix.bash": "/home/${NB_USER}/.local/bin/install-nix.bash",
"nix/nix-shell-wrapper": "/usr/local/bin/nix-shell-wrapper"
"nix/nix-shell-wrapper": "/usr/local/bin/nix-shell-wrapper",
}
def get_assemble_scripts(self):
"""Return series of build-steps specific to this source repository.
"""
return super().get_assemble_scripts() + [
('${NB_USER}', """
(
"${NB_USER}",
"""
nix-channel --add https://nixos.org/channels/nixpkgs-unstable nixpkgs && \
nix-channel --update && \
nix-shell {}
""".format(self.binder_path('default.nix')))
""".format(
self.binder_path("default.nix")
),
)
]
def get_start_script(self):
@ -70,4 +79,4 @@ class NixBuildPack(BaseImage):
def detect(self):
"""Check if current repo should be built with the nix BuildPack"""
return os.path.exists(self.binder_path('default.nix'))
return os.path.exists(self.binder_path("default.nix"))


@ -9,28 +9,28 @@ class PythonBuildPack(CondaBuildPack):
@property
def python_version(self):
if hasattr(self, '_python_version'):
if hasattr(self, "_python_version"):
return self._python_version
try:
with open(self.binder_path('runtime.txt')) as f:
with open(self.binder_path("runtime.txt")) as f:
runtime = f.read().strip()
except FileNotFoundError:
runtime = ''
runtime = ""
if not runtime.startswith('python-'):
if not runtime.startswith("python-"):
# not a Python runtime (e.g. R, which subclasses this)
# use the default Python
self._python_version = self.major_pythons['3']
self._python_version = self.major_pythons["3"]
return self._python_version
py_version_info = runtime.split('-', 1)[1].split('.')
py_version = ''
py_version_info = runtime.split("-", 1)[1].split(".")
py_version = ""
if len(py_version_info) == 1:
py_version = self.major_pythons[py_version_info[0]]
else:
# get major.minor
py_version = '.'.join(py_version_info[:2])
py_version = ".".join(py_version_info[:2])
self._python_version = py_version
return self._python_version
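# Worked examples (editorial) for the runtime.txt parsing above:
#   "python-2.7.15" -> "2.7" (only major.minor is kept)
#   "python-3"      -> major_pythons["3"] (the default minor for that major)
#   anything else   -> major_pythons["3"], the default Python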
@ -42,44 +42,49 @@ class PythonBuildPack(CondaBuildPack):
# and requirements3.txt (if it exists)
# will be installed in the python 3 notebook server env.
assemble_scripts = super().get_assemble_scripts()
setup_py = 'setup.py'
setup_py = "setup.py"
# KERNEL_PYTHON_PREFIX is the env with the kernel,
# whether it's distinct from the notebook or the same.
pip = '${KERNEL_PYTHON_PREFIX}/bin/pip'
pip = "${KERNEL_PYTHON_PREFIX}/bin/pip"
if self.py2:
# using python 2 kernel,
# requirements3.txt allows installation in the notebook server env
nb_requirements_file = self.binder_path('requirements3.txt')
nb_requirements_file = self.binder_path("requirements3.txt")
if os.path.exists(nb_requirements_file):
assemble_scripts.append((
'${NB_USER}',
# want the $NB_PYTHON_PREFIX environment variable, not for
# Python's string formatting to try and replace this
'${{NB_PYTHON_PREFIX}}/bin/pip install --no-cache-dir -r "{}"'.format(nb_requirements_file)
))
assemble_scripts.append(
(
"${NB_USER}",
# want the $NB_PYTHON_PREFIX environment variable, not for
# Python's string formatting to try and replace this
'${{NB_PYTHON_PREFIX}}/bin/pip install --no-cache-dir -r "{}"'.format(
nb_requirements_file
),
)
)
# install requirements.txt in the kernel env
requirements_file = self.binder_path('requirements.txt')
requirements_file = self.binder_path("requirements.txt")
if os.path.exists(requirements_file):
assemble_scripts.append((
'${NB_USER}',
'{} install --no-cache-dir -r "{}"'.format(pip, requirements_file)
))
assemble_scripts.append(
(
"${NB_USER}",
'{} install --no-cache-dir -r "{}"'.format(pip, requirements_file),
)
)
# setup.py exists *and* binder dir is not used
if not self.binder_dir and os.path.exists(setup_py):
assemble_scripts.append((
'${NB_USER}',
'{} install --no-cache-dir .'.format(pip)
))
assemble_scripts.append(
("${NB_USER}", "{} install --no-cache-dir .".format(pip))
)
return assemble_scripts
def detect(self):
"""Check if current repo should be built with the Python buildpack.
"""
requirements_txt = self.binder_path('requirements.txt')
runtime_txt = self.binder_path('runtime.txt')
setup_py = 'setup.py'
requirements_txt = self.binder_path("requirements.txt")
runtime_txt = self.binder_path("runtime.txt")
setup_py = "setup.py"
if os.path.exists(runtime_txt):
with open(runtime_txt) as f:


@ -39,18 +39,19 @@ class RBuildPack(PythonBuildPack):
The `r-base` package from Ubuntu apt repositories is used to install
R itself, rather than any of the methods from https://cran.r-project.org/.
"""
@property
def runtime(self):
"""
Return contents of runtime.txt if it exists, '' otherwise
"""
if not hasattr(self, '_runtime'):
runtime_path = self.binder_path('runtime.txt')
if not hasattr(self, "_runtime"):
runtime_path = self.binder_path("runtime.txt")
try:
with open(runtime_path) as f:
self._runtime = f.read().strip()
except FileNotFoundError:
self._runtime = ''
self._runtime = ""
return self._runtime
@ -61,8 +62,8 @@ class RBuildPack(PythonBuildPack):
Returns '' if no date is specified
"""
if not hasattr(self, '_checkpoint_date'):
match = re.match(r'r-(\d\d\d\d)-(\d\d)-(\d\d)', self.runtime)
if not hasattr(self, "_checkpoint_date"):
match = re.match(r"r-(\d\d\d\d)-(\d\d)-(\d\d)", self.runtime)
if not match:
self._checkpoint_date = False
else:
@ -83,14 +84,17 @@ class RBuildPack(PythonBuildPack):
if self.checkpoint_date:
return True
description_R = 'DESCRIPTION'
if ((not self.binder_dir and os.path.exists(description_R))
or 'r' in self.stencila_contexts):
description_R = "DESCRIPTION"
if (
not self.binder_dir and os.path.exists(description_R)
) or "r" in self.stencila_contexts:
if not self.checkpoint_date:
# no R snapshot date set through runtime.txt
# set the R runtime to the latest date that is guaranteed to
# be on MRAN across timezones
self._checkpoint_date = datetime.date.today() - datetime.timedelta(days=2)
self._checkpoint_date = datetime.date.today() - datetime.timedelta(
days=2
)
self._runtime = "r-{}".format(str(self._checkpoint_date))
return True
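# Worked example (editorial): a runtime.txt of "r-2018-02-05" matches
# r"r-(\d\d\d\d)-(\d\d)-(\d\d)" and pins the MRAN snapshot to that date;
# with no date given, detect() falls back to today minus two days.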
@ -101,9 +105,7 @@ class RBuildPack(PythonBuildPack):
The RStudio package installs its binaries in a non-standard path,
so we explicitly add that path to PATH.
"""
return super().get_path() + [
'/usr/lib/rstudio-server/bin/'
]
return super().get_path() + ["/usr/lib/rstudio-server/bin/"]
def get_build_env(self):
"""
@ -115,7 +117,7 @@ class RBuildPack(PythonBuildPack):
"""
return super().get_build_env() + [
# This is the path where user libraries are installed
('R_LIBS_USER', '${APP_BASE}/rlibs')
("R_LIBS_USER", "${APP_BASE}/rlibs")
]
def get_packages(self):
@ -125,14 +127,20 @@ class RBuildPack(PythonBuildPack):
We install a base version of R, and packages required for RStudio to
be installed.
"""
return super().get_packages().union([
'r-base',
# For rstudio
'psmisc',
'libapparmor1',
'sudo',
'lsb-release'
])
return (
super()
.get_packages()
.union(
[
"r-base",
# For rstudio
"psmisc",
"libapparmor1",
"sudo",
"lsb-release",
]
)
)
def get_build_scripts(self):
"""
@ -151,19 +159,19 @@ class RBuildPack(PythonBuildPack):
- nbrsessionproxy (to access RStudio via Jupyter Notebook)
- stencila R package (if Stencila document with R code chunks detected)
"""
rstudio_url = 'https://download2.rstudio.org/rstudio-server-1.1.419-amd64.deb'
rstudio_url = "https://download2.rstudio.org/rstudio-server-1.1.419-amd64.deb"
# This is MD5, because that is what RStudio download page provides!
rstudio_checksum = '24cd11f0405d8372b4168fc9956e0386'
rstudio_checksum = "24cd11f0405d8372b4168fc9956e0386"
# Via https://www.rstudio.com/products/shiny/download-server/
shiny_url = 'https://download3.rstudio.org/ubuntu-14.04/x86_64/shiny-server-1.5.7.907-amd64.deb'
shiny_checksum = '78371a8361ba0e7fec44edd2b8e425ac'
shiny_url = "https://download3.rstudio.org/ubuntu-14.04/x86_64/shiny-server-1.5.7.907-amd64.deb"
shiny_checksum = "78371a8361ba0e7fec44edd2b8e425ac"
# Version of MRAN to pull devtools from.
devtools_version = '2018-02-01'
devtools_version = "2018-02-01"
# IRKernel version - specified as a tag in the IRKernel repository
irkernel_version = '0.8.11'
irkernel_version = "0.8.11"
scripts = [
(
@ -171,7 +179,7 @@ class RBuildPack(PythonBuildPack):
r"""
mkdir -p ${R_LIBS_USER} && \
chown -R ${NB_USER}:${NB_USER} ${R_LIBS_USER}
"""
""",
),
(
"root",
@ -182,9 +190,8 @@ class RBuildPack(PythonBuildPack):
dpkg -i /tmp/rstudio.deb && \
rm /tmp/rstudio.deb
""".format(
rstudio_url=rstudio_url,
rstudio_checksum=rstudio_checksum
)
rstudio_url=rstudio_url, rstudio_checksum=rstudio_checksum
),
),
(
"root",
@ -195,10 +202,8 @@ class RBuildPack(PythonBuildPack):
dpkg -i {deb} && \
rm {deb}
""".format(
url=shiny_url,
checksum=shiny_checksum,
deb='/tmp/shiny.deb'
)
url=shiny_url, checksum=shiny_checksum, deb="/tmp/shiny.deb"
),
),
(
"root",
@ -208,7 +213,7 @@ class RBuildPack(PythonBuildPack):
r"""
sed -i -e '/^R_LIBS_USER=/s/^/#/' /etc/R/Renviron && \
echo "R_LIBS_USER=${R_LIBS_USER}" >> /etc/R/Renviron
"""
""",
),
(
"${NB_USER}",
@ -219,7 +224,7 @@ class RBuildPack(PythonBuildPack):
jupyter serverextension enable jupyter_server_proxy --sys-prefix && \
jupyter nbextension install --py jupyter_server_proxy --sys-prefix && \
jupyter nbextension enable --py jupyter_server_proxy --sys-prefix
"""
""",
),
(
"${NB_USER}",
@ -229,9 +234,8 @@ class RBuildPack(PythonBuildPack):
R --quiet -e "devtools::install_github('IRkernel/IRkernel', ref='{irkernel_version}')" && \
R --quiet -e "IRkernel::installspec(prefix='$NB_PYTHON_PREFIX')"
""".format(
devtools_version=devtools_version,
irkernel_version=irkernel_version
)
devtools_version=devtools_version, irkernel_version=irkernel_version
),
),
(
"${NB_USER}",
@ -240,22 +244,22 @@ class RBuildPack(PythonBuildPack):
R --quiet -e "install.packages('shiny', repos='https://mran.microsoft.com/snapshot/{}', method='libcurl')"
""".format(
self.checkpoint_date.isoformat()
)
),
),
]
if "r" in self.stencila_contexts:
scripts += [
(
"${NB_USER}",
# Install and register stencila library
r"""
(
"${NB_USER}",
# Install and register stencila library
r"""
R --quiet -e "source('https://bioconductor.org/biocLite.R'); biocLite('graph')" && \
R --quiet -e "devtools::install_github('stencila/r', ref = '361bbf560f3f0561a8612349bca66cd8978f4f24')" && \
R --quiet -e "stencila::register()"
"""
),
]
""",
)
]
return super().get_build_scripts() + scripts
@ -266,7 +270,7 @@ class RBuildPack(PythonBuildPack):
We set the snapshot date used to install R libraries from based on the
contents of runtime.txt, and run the `install.R` script if it exists.
"""
mran_url = 'https://mran.microsoft.com/snapshot/{}'.format(
mran_url = "https://mran.microsoft.com/snapshot/{}".format(
self.checkpoint_date.isoformat()
)
assemble_scripts = super().get_assemble_scripts() + [
@ -276,7 +280,9 @@ class RBuildPack(PythonBuildPack):
# We set download method to be curl so we get HTTPS support
r"""
echo "options(repos = c(CRAN='{mran_url}'), download.file.method = 'libcurl')" > /etc/R/Rprofile.site
""".format(mran_url=mran_url)
""".format(
mran_url=mran_url
),
),
(
# Not all of these locations are configurable; log_dir is
@ -286,26 +292,18 @@ class RBuildPack(PythonBuildPack):
install -o ${NB_USER} -g ${NB_USER} -d /var/lib/shiny-server && \
install -o ${NB_USER} -g ${NB_USER} /dev/null /var/log/shiny-server.log && \
install -o ${NB_USER} -g ${NB_USER} /dev/null /var/run/shiny-server.pid
"""
""",
),
]
installR_path = self.binder_path('install.R')
installR_path = self.binder_path("install.R")
if os.path.exists(installR_path):
assemble_scripts += [
(
"${NB_USER}",
"Rscript %s" % installR_path
)
]
assemble_scripts += [("${NB_USER}", "Rscript %s" % installR_path)]
description_R = 'DESCRIPTION'
description_R = "DESCRIPTION"
if not self.binder_dir and os.path.exists(description_R):
assemble_scripts += [
(
"${NB_USER}",
'R --quiet -e "devtools::install_local(getwd())"'
)
("${NB_USER}", 'R --quiet -e "devtools::install_local(getwd())"')
]
return assemble_scripts


@ -10,6 +10,7 @@ import os
class ContentProviderException(Exception):
"""Exception raised when a ContentProvider can not provide content."""
pass
@ -63,11 +64,10 @@ class ContentProvider:
class Local(ContentProvider):
def detect(self, source, ref=None, extra_args=None):
if os.path.isdir(source):
return {'path': source}
return {"path": source}
def fetch(self, spec, output_dir, yield_output=False):
# nothing to be done if your content is already in the output directory
msg = "Local content provider assumes {} == {}".format(spec['path'],
output_dir)
assert output_dir == spec['path'], msg
yield "Using local repo {}.\n".format(spec['path'])
msg = "Local content provider assumes {} == {}".format(spec["path"], output_dir)
assert output_dir == spec["path"], msg
yield "Using local repo {}.\n".format(spec["path"])


@ -13,17 +13,17 @@ class Git(ContentProvider):
# old behaviour when git and local directories were the only supported
# content providers. This means that this content provider will always
# match. The downside is that the call to `fetch()` later on might fail
return {'repo': source, 'ref': ref}
return {"repo": source, "ref": ref}
def fetch(self, spec, output_dir, yield_output=False):
repo = spec['repo']
ref = spec.get('ref', None)
repo = spec["repo"]
ref = spec.get("ref", None)
# make a, possibly shallow, clone of the remote repository
try:
cmd = ['git', 'clone', '--recursive']
cmd = ["git", "clone", "--recursive"]
if ref is None:
cmd.extend(['--depth', '1'])
cmd.extend(["--depth", "1"])
cmd.extend([repo, output_dir])
for line in execute_cmd(cmd, capture=yield_output):
yield line
@ -36,22 +36,25 @@ class Git(ContentProvider):
if ref is not None:
hash = check_ref(ref, output_dir)
if hash is None:
self.log.error('Failed to check out ref %s', ref,
extra=dict(phase='failed'))
raise ValueError('Failed to check out ref {}'.format(ref))
self.log.error(
"Failed to check out ref %s", ref, extra=dict(phase="failed")
)
raise ValueError("Failed to check out ref {}".format(ref))
# If the hash is resolved above, we should be able to reset to it
for line in execute_cmd(['git', 'reset', '--hard', hash],
cwd=output_dir,
capture=yield_output):
for line in execute_cmd(
["git", "reset", "--hard", hash], cwd=output_dir, capture=yield_output
):
yield line
# ensure that git submodules are initialised and updated
for line in execute_cmd(['git', 'submodule', 'update', '--init', '--recursive'],
cwd=output_dir,
capture=yield_output):
for line in execute_cmd(
["git", "submodule", "update", "--init", "--recursive"],
cwd=output_dir,
capture=yield_output,
):
yield line
cmd = ['git', 'rev-parse', 'HEAD']
cmd = ["git", "rev-parse", "HEAD"]
sha1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=output_dir)
self._sha1 = sha1.stdout.read().decode().strip()


@ -16,8 +16,8 @@ def execute_cmd(cmd, capture=False, **kwargs):
Must be yielded from.
"""
if capture:
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
proc = subprocess.Popen(cmd, **kwargs)
@ -36,17 +36,17 @@ def execute_cmd(cmd, capture=False, **kwargs):
def flush():
"""Flush next line of the buffer"""
line = b''.join(buf).decode('utf8', 'replace')
line = b"".join(buf).decode("utf8", "replace")
buf[:] = []
return line
c_last = ''
c_last = ""
try:
for c in iter(partial(proc.stdout.read, 1), b''):
if c_last == b'\r' and buf and c != b'\n':
for c in iter(partial(proc.stdout.read, 1), b""):
if c_last == b"\r" and buf and c != b"\n":
yield flush()
buf.append(c)
if c == b'\n':
if c == b"\n":
yield flush()
c_last = c
finally:
@ -91,38 +91,44 @@ def validate_and_generate_port_mapping(port_mappings):
single container_port to multiple host_ports
(docker-py supports this but repo2docker does not)
"""
def check_port(port):
try:
p = int(port)
except ValueError as e:
raise ValueError('Port specification "{}" has '
'an invalid port.'.format(mapping))
raise ValueError(
'Port specification "{}" has ' "an invalid port.".format(mapping)
)
if p > 65535:
raise ValueError('Port specification "{}" specifies '
'a port above 65535.'.format(mapping))
raise ValueError(
'Port specification "{}" specifies '
"a port above 65535.".format(mapping)
)
return port
def check_port_string(p):
parts = p.split('/')
parts = p.split("/")
if len(parts) == 2: # 134/tcp
port, protocol = parts
if protocol not in ('tcp', 'udp'):
raise ValueError('Port specification "{}" has '
'an invalid protocol.'.format(mapping))
if protocol not in ("tcp", "udp"):
raise ValueError(
'Port specification "{}" has '
"an invalid protocol.".format(mapping)
)
elif len(parts) == 1:
port = parts[0]
protocol = 'tcp'
protocol = "tcp"
check_port(port)
return '/'.join((port, protocol))
return "/".join((port, protocol))
ports = {}
if port_mappings is None:
return ports
for mapping in port_mappings:
parts = mapping.split(':')
parts = mapping.split(":")
*host, container_port = parts
# just a port
@ -164,7 +170,8 @@ def is_valid_docker_image_name(image_name):
This pattern will not allow cases like `TEST.com/name:latest` though
docker considers it a valid tag.
"""
reference_regex = re.compile(r"""
reference_regex = re.compile(
r"""
^ # Anchored at start and end of string
( # Start capturing name
@ -213,7 +220,9 @@ def is_valid_docker_image_name(image_name):
# optionally capture <digest-pattern>='@<digest>'
(?:@[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][A-Fa-f0-9]{32,})?
$
""", re.VERBOSE)
""",
re.VERBOSE,
)
return reference_regex.match(image_name) is not None
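# Editorial examples, consistent with the docstring above:
#   is_valid_docker_image_name("jupyter/repo2docker")  -> True
#   is_valid_docker_image_name("TEST.com/name:latest") -> False, because the
#   uppercase hostname is rejected even though docker itself accepts it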
@ -232,10 +241,10 @@ class ByteSpecification(Integer):
"""
UNIT_SUFFIXES = {
'K': 1024,
'M': 1024 * 1024,
'G': 1024 * 1024 * 1024,
'T': 1024 * 1024 * 1024 * 1024,
"K": 1024,
"M": 1024 * 1024,
"G": 1024 * 1024 * 1024,
"T": 1024 * 1024 * 1024 * 1024,
}
# Default to allowing None as a value
@ -256,16 +265,14 @@ class ByteSpecification(Integer):
num = float(value[:-1])
except ValueError:
raise TraitError(
'{val} is not a valid memory specification. '
'Must be an int or a string with suffix K, M, G, T'
.format(val=value)
"{val} is not a valid memory specification. "
"Must be an int or a string with suffix K, M, G, T".format(val=value)
)
suffix = value[-1]
if suffix not in self.UNIT_SUFFIXES:
raise TraitError(
'{val} is not a valid memory specification. '
'Must be an int or a string with suffix K, M, G, T'
.format(val=value)
"{val} is not a valid memory specification. "
"Must be an int or a string with suffix K, M, G, T".format(val=value)
)
else:
return int(float(num) * self.UNIT_SUFFIXES[suffix])
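# Worked examples (editorial), matching the CLI tests later in this commit:
#   1024  -> 1024 (plain integers pass through unchanged)
#   "3K"  -> 3 * 1024 == 3072
#   "2G"  -> 2 * 1024 ** 3
#   "3Q"  -> TraitError, since Q is not a recognised suffix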
@ -274,9 +281,11 @@ class ByteSpecification(Integer):
def check_ref(ref, cwd=None):
"""Prepare a ref and ensure it works with git reset --hard."""
# Try original ref, then trying a remote ref, then removing remote
refs = [ref, # Original ref
'/'.join(["origin", ref]), # In case its a remote branch
ref.split('/')[-1]] # In case partial commit w/ remote
refs = [
ref, # Original ref
"/".join(["origin", ref]), # In case its a remote branch
ref.split("/")[-1],
] # In case partial commit w/ remote
hash = None
for i_ref in refs:
@ -297,8 +306,14 @@ class Error(OSError):
# a copy of shutil.copytree() that is ok with the target directory
# already existing
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
def copytree(
src,
dst,
symlinks=False,
ignore=None,
copy_function=copy2,
ignore_dangling_symlinks=False,
):
"""Recursively copy a directory tree.
Unlike shutil.copytree(), the destination directory may already exist.
If exception(s) occur, an Error is raised with a list of reasons.
@ -353,8 +368,7 @@ def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
continue
# otherwise let the copy occur; copy2 will raise an error
if os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore,
copy_function)
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
@ -372,7 +386,7 @@ def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
copystat(src, dst)
except OSError as why:
# Copying file access times may fail on Windows
if getattr(why, 'winerror', None) is None:
if getattr(why, "winerror", None) is None:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)


@ -5,54 +5,54 @@ import versioneer
if sys.version_info[0] < 3:
readme = None
else:
with open('README.md', encoding="utf8") as f:
with open("README.md", encoding="utf8") as f:
readme = f.read()
setup(
name='jupyter-repo2docker',
name="jupyter-repo2docker",
version=versioneer.get_version(),
install_requires=[
'docker',
'traitlets',
'python-json-logger',
'escapism',
'jinja2',
'ruamel.yaml>=0.15',
'toml',
'semver',
"docker",
"traitlets",
"python-json-logger",
"escapism",
"jinja2",
"ruamel.yaml>=0.15",
"toml",
"semver",
],
python_requires='>=3.5',
author='Project Jupyter Contributors',
author_email='jupyter@googlegroups.com',
url='https://repo2docker.readthedocs.io/en/latest/',
python_requires=">=3.5",
author="Project Jupyter Contributors",
author_email="jupyter@googlegroups.com",
url="https://repo2docker.readthedocs.io/en/latest/",
project_urls={
'Documentation': 'https://repo2docker.readthedocs.io',
'Funding': 'https://jupyter.org/about',
'Source': 'https://github.com/jupyter/repo2docker/',
'Tracker': 'https://github.com/jupyter/repo2docker/issues',
"Documentation": "https://repo2docker.readthedocs.io",
"Funding": "https://jupyter.org/about",
"Source": "https://github.com/jupyter/repo2docker/",
"Tracker": "https://github.com/jupyter/repo2docker/issues",
},
# this should be a whitespace separated string of keywords, not a list
keywords="reproducible science environments docker",
description="Repo2docker: Turn code repositories into Jupyter enabled Docker Images",
long_description=readme,
long_description_content_type='text/markdown',
license='BSD',
long_description_content_type="text/markdown",
license="BSD",
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
],
packages=find_packages(),
include_package_data=True,
cmdclass=versioneer.get_cmdclass(),
entry_points={
'console_scripts': [
'jupyter-repo2docker = repo2docker.__main__:main',
'repo2docker = repo2docker.__main__:main',
"console_scripts": [
"jupyter-repo2docker = repo2docker.__main__:main",
"repo2docker = repo2docker.__main__:main",
]
},
)


@ -4,7 +4,7 @@ from subprocess import check_output
assert sys.version_info[:2] == (3, 5), sys.version
out = check_output(['conda', '--version']).decode('utf8').strip()
assert out == 'conda 4.6.14', out
out = check_output(["conda", "--version"]).decode("utf8").strip()
assert out == "conda 4.6.14", out
import numpy


@ -4,12 +4,14 @@ import os
# conda should still be in /srv/conda
# and Python should still be in $NB_PYTHON_PREFIX
assert sys.executable == os.path.join(os.environ['NB_PYTHON_PREFIX'], 'bin', 'python'), sys.executable
assert sys.executable == os.path.join(
os.environ["NB_PYTHON_PREFIX"], "bin", "python"
), sys.executable
assert sys.executable.startswith("/srv/conda/"), sys.executable
# Repo should be in /srv/repo
assert os.path.exists('/srv/repo/verify')
assert os.path.abspath(__file__) == '/srv/repo/verify'
assert os.path.exists("/srv/repo/verify")
assert os.path.abspath(__file__) == "/srv/repo/verify"
# We should be able to import the package in environment.yml
import numpy


@ -4,6 +4,7 @@ import sys
assert sys.version_info[:2] == (3, 7), sys.version
import numpy
try:
import there
except ImportError:


@ -5,23 +5,29 @@ assert sys.version_info[:2] == (3, 7), sys.version
# verify that we have Python 2 and Python 3 kernelspecs
from jupyter_client.kernelspec import KernelSpecManager
ksm = KernelSpecManager()
specs = ksm.get_all_specs()
assert sorted(specs) == ['python2', 'python3'], specs.keys()
assert sorted(specs) == ["python2", "python3"], specs.keys()
# verify that we created the kernel env
import json
from subprocess import check_output
envs = json.loads(check_output(['conda', 'env', 'list', '--json']).decode('utf8'))
assert envs == {'envs': ['/srv/conda', '/srv/conda/envs/kernel', '/srv/conda/envs/notebook']}, envs
pkgs = json.loads(check_output(['conda', 'list', '-n', 'kernel', '--json']).decode('utf8'))
pkg_names = [pkg['name'] for pkg in pkgs]
assert 'ipykernel' in pkg_names, pkg_names
assert 'numpy' in pkg_names
envs = json.loads(check_output(["conda", "env", "list", "--json"]).decode("utf8"))
assert envs == {
"envs": ["/srv/conda", "/srv/conda/envs/kernel", "/srv/conda/envs/notebook"]
}, envs
pkgs = json.loads(
check_output(["conda", "list", "-n", "kernel", "--json"]).decode("utf8")
)
pkg_names = [pkg["name"] for pkg in pkgs]
assert "ipykernel" in pkg_names, pkg_names
assert "numpy" in pkg_names
for pkg in pkgs:
if pkg['name'] == 'python':
assert pkg['version'].startswith('2.7.')
if pkg["name"] == "python":
assert pkg["version"].startswith("2.7.")
break
else:
assert False, "python not found in %s" % pkg_names


@ -2,11 +2,16 @@
import os
import sys
def test_sys_version():
assert sys.version_info[:2] == (3, 7)
def test_numpy():
import numpy
def test_conda_activated():
assert os.environ.get("CONDA_PREFIX") == os.environ["NB_PYTHON_PREFIX"], dict(os.environ)
assert os.environ.get("CONDA_PREFIX") == os.environ["NB_PYTHON_PREFIX"], dict(
os.environ
)


@ -25,14 +25,15 @@ from repo2docker.__main__ import make_r2d
def pytest_collect_file(parent, path):
if path.basename == 'verify':
if path.basename == "verify":
return LocalRepo(path, parent)
elif path.basename.endswith('.repos.yaml'):
elif path.basename.endswith(".repos.yaml"):
return RemoteRepoList(path, parent)
def make_test_func(args):
"""Generate a test function that runs repo2docker"""
def test():
app = make_r2d(args)
app.initialize()
@ -46,7 +47,7 @@ def make_test_func(args):
container = app.start_container()
port = app.port
# wait a bit for the container to be ready
container_url = 'http://localhost:%s/api' % port
container_url = "http://localhost:%s/api" % port
# give the container a chance to start
time.sleep(1)
try:
@ -54,7 +55,7 @@ def make_test_func(args):
success = False
for i in range(1, 4):
container.reload()
assert container.status == 'running'
assert container.status == "running"
try:
info = requests.get(container_url).json()
except Exception as e:
@ -69,6 +70,7 @@ def make_test_func(args):
# stop the container
container.stop()
app.wait_for_container(container)
return test
@ -77,24 +79,25 @@ def make_test_func(args):
def run_repo2docker():
def run_test(args):
return make_test_func(args)()
return run_test
def _add_content_to_git(repo_dir):
"""Add content to file 'test' in git repository and commit."""
# use append mode so this can be called multiple times
with open(os.path.join(repo_dir, 'test'), 'a') as f:
with open(os.path.join(repo_dir, "test"), "a") as f:
f.write("Hello")
subprocess.check_call(['git', 'add', 'test'], cwd=repo_dir)
subprocess.check_call(['git', 'commit', '-m', 'Test commit'],
cwd=repo_dir)
subprocess.check_call(["git", "add", "test"], cwd=repo_dir)
subprocess.check_call(["git", "commit", "-m", "Test commit"], cwd=repo_dir)
def _get_sha1(repo_dir):
"""Get repository's current commit SHA1."""
sha1 = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
stdout=subprocess.PIPE, cwd=repo_dir)
sha1 = subprocess.Popen(
["git", "rev-parse", "HEAD"], stdout=subprocess.PIPE, cwd=repo_dir
)
return sha1.stdout.read().decode().strip()
@ -106,7 +109,7 @@ def git_repo():
Should be used as a context manager; it deletes the directory when done
"""
with TemporaryDirectory() as gitdir:
subprocess.check_call(['git', 'init'], cwd=gitdir)
subprocess.check_call(["git", "init"], cwd=gitdir)
yield gitdir
@ -134,26 +137,29 @@ def repo_with_submodule():
"""
with TemporaryDirectory() as git_a_dir, TemporaryDirectory() as git_b_dir:
# create "parent" repository
subprocess.check_call(['git', 'init'], cwd=git_a_dir)
subprocess.check_call(["git", "init"], cwd=git_a_dir)
_add_content_to_git(git_a_dir)
# create repository with 2 commits that will be the submodule
subprocess.check_call(['git', 'init'], cwd=git_b_dir)
subprocess.check_call(["git", "init"], cwd=git_b_dir)
_add_content_to_git(git_b_dir)
submod_sha1_b = _get_sha1(git_b_dir)
_add_content_to_git(git_b_dir)
# create a new branch in the parent to add the submodule
subprocess.check_call(['git', 'checkout', '-b', 'branch-with-submod'],
cwd=git_a_dir)
subprocess.check_call(['git', 'submodule', 'add', git_b_dir, 'submod'],
cwd=git_a_dir)
subprocess.check_call(
["git", "checkout", "-b", "branch-with-submod"], cwd=git_a_dir
)
subprocess.check_call(
["git", "submodule", "add", git_b_dir, "submod"], cwd=git_a_dir
)
# checkout the first commit for the submod, not the latest
subprocess.check_call(['git', 'checkout', submod_sha1_b],
cwd=os.path.join(git_a_dir, 'submod'))
subprocess.check_call(['git', 'add', git_a_dir, ".gitmodules"],
cwd=git_a_dir)
subprocess.check_call(['git', 'commit', '-m', 'Add B repos submod'],
cwd=git_a_dir)
subprocess.check_call(
["git", "checkout", submod_sha1_b], cwd=os.path.join(git_a_dir, "submod")
)
subprocess.check_call(["git", "add", git_a_dir, ".gitmodules"], cwd=git_a_dir)
subprocess.check_call(
["git", "commit", "-m", "Add B repos submod"], cwd=git_a_dir
)
sha1_a = _get_sha1(git_a_dir)
yield git_a_dir, sha1_a, submod_sha1_b
@ -161,6 +167,7 @@ def repo_with_submodule():
class Repo2DockerTest(pytest.Function):
"""A pytest.Item for running repo2docker"""
def __init__(self, name, parent, args):
self.args = args
self.save_cwd = os.getcwd()
@ -173,7 +180,7 @@ class Repo2DockerTest(pytest.Function):
def repr_failure(self, excinfo):
err = excinfo.value
if isinstance(err, SystemExit):
cmd = "jupyter-repo2docker %s" % ' '.join(map(pipes.quote, self.args))
cmd = "jupyter-repo2docker %s" % " ".join(map(pipes.quote, self.args))
return "%s | exited with status=%s" % (cmd, err.code)
else:
return super().repr_failure(excinfo)
@ -185,12 +192,10 @@ class Repo2DockerTest(pytest.Function):
class LocalRepo(pytest.File):
def collect(self):
args = [
'--appendix', 'RUN echo "appendix" > /tmp/appendix',
]
args = ["--appendix", 'RUN echo "appendix" > /tmp/appendix']
# If there's an extra-args.yaml file in a test dir, assume it contains
# a yaml list with extra arguments to be passed to repo2docker
extra_args_path = os.path.join(self.fspath.dirname, 'extra-args.yaml')
extra_args_path = os.path.join(self.fspath.dirname, "extra-args.yaml")
if os.path.exists(extra_args_path):
with open(extra_args_path) as f:
extra_args = yaml.safe_load(f)
@ -198,14 +203,8 @@ class LocalRepo(pytest.File):
args.append(self.fspath.dirname)
yield Repo2DockerTest(
'build', self,
args=args
)
yield Repo2DockerTest(
self.fspath.basename, self,
args=args + ['./verify']
)
yield Repo2DockerTest("build", self, args=args)
yield Repo2DockerTest(self.fspath.basename, self, args=args + ["./verify"])
class RemoteRepoList(pytest.File):
@ -215,11 +214,6 @@ class RemoteRepoList(pytest.File):
for repo in repos:
args = []
if "ref" in repo:
args += ['--ref', repo['ref']]
args += [repo['url'],
'--',
] + shlex.split(repo['verify'])
yield Repo2DockerTest(
repo['name'], self,
args=args,
)
args += ["--ref", repo["ref"]]
args += [repo["url"], "--"] + shlex.split(repo["verify"])
yield Repo2DockerTest(repo["name"], self, args=args)


@ -16,7 +16,7 @@ import os
libc = cdll.LoadLibrary("libc.so.6")
libc.malloc.restype = c_void_p
with open('mem_allocate_mb') as f:
with open("mem_allocate_mb") as f:
mem_allocate_mb = int(f.read().strip())
size = 1024 * 1024 * mem_allocate_mb


@ -16,7 +16,7 @@ import os
libc = cdll.LoadLibrary("libc.so.6")
libc.malloc.restype = c_void_p
with open('mem_allocate_mb') as f:
with open("mem_allocate_mb") as f:
mem_allocate_mb = int(f.read().strip())
size = 1024 * 1024 * mem_allocate_mb


@ -10,11 +10,11 @@ def test_clone(repo_with_content):
upstream, sha1 = repo_with_content
with TemporaryDirectory() as clone_dir:
spec = {'repo': upstream}
spec = {"repo": upstream}
git_content = Git()
for _ in git_content.fetch(spec, clone_dir):
pass
assert os.path.exists(os.path.join(clone_dir, 'test'))
assert os.path.exists(os.path.join(clone_dir, "test"))
assert git_content.content_id == sha1[:7]
@ -24,16 +24,16 @@ def test_submodule_clone(repo_with_submodule):
upstream, expected_sha1_upstream, expected_sha1_submod = repo_with_submodule
with TemporaryDirectory() as clone_dir:
submod_dir = os.path.join(clone_dir, 'submod') # set by fixture
spec = {'repo': upstream}
submod_dir = os.path.join(clone_dir, "submod") # set by fixture
spec = {"repo": upstream}
git_content = Git()
for _ in git_content.fetch(spec, clone_dir):
pass
assert os.path.exists(os.path.join(clone_dir, 'test'))
assert os.path.exists(os.path.join(submod_dir, 'test'))
assert os.path.exists(os.path.join(clone_dir, "test"))
assert os.path.exists(os.path.join(submod_dir, "test"))
# get current sha1 of submodule
cmd = ['git', 'rev-parse', 'HEAD']
cmd = ["git", "rev-parse", "HEAD"]
sha1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=submod_dir)
submod_sha1 = sha1.stdout.read().decode().strip()
@ -47,7 +47,7 @@ def test_bad_ref(repo_with_content):
"""
upstream, sha1 = repo_with_content
with TemporaryDirectory() as clone_dir:
spec = {'repo': upstream, 'ref': 'does-not-exist'}
spec = {"repo": upstream, "ref": "does-not-exist"}
with pytest.raises(ValueError):
for _ in Git().fetch(spec, clone_dir):
pass
@ -55,9 +55,9 @@ def test_bad_ref(repo_with_content):
def test_always_accept():
# The git content provider should always accept a spec
assert Git().detect('/tmp/doesnt-exist', ref='1234')
assert Git().detect('/tmp/doesnt-exist')
assert Git().detect("/tmp/doesnt-exist", ref="1234")
assert Git().detect("/tmp/doesnt-exist")
# a path that exists
assert Git().detect('/etc', ref='1234')
assert Git().detect("/etc", ref="1234")
# a remote URL
assert Git().detect('https://example.com/path/here')
assert Git().detect("https://example.com/path/here")


@ -11,8 +11,8 @@ def test_detect_local_dir():
# should accept a local directory
assert spec is not None, spec
assert 'path' in spec, spec
assert spec['path'] == d
assert "path" in spec, spec
assert spec["path"] == d
def test_not_detect_local_file():
@ -35,14 +35,14 @@ def test_content_available():
# create a directory with files, check they are available in the output
# directory
with TemporaryDirectory() as d:
with open(os.path.join(d, 'test'), 'w') as f:
with open(os.path.join(d, "test"), "w") as f:
f.write("Hello")
local = Local()
spec = {'path': d}
spec = {"path": d}
for _ in local.fetch(spec, d):
pass
assert os.path.exists(os.path.join(d, 'test'))
assert os.path.exists(os.path.join(d, "test"))
# content_id property should always be None for local content provider
# as we rely on the caching done by docker
assert local.content_id is None


@ -25,8 +25,12 @@ def test_detect():
fake_urlopen.return_value.url = "https://zenodo.org/record/3232985"
# valid Zenodo DOIs trigger this content provider
assert Zenodo().detect("10.5281/zenodo.3232985") == {"record": "3232985"}
assert Zenodo().detect("https://doi.org/10.5281/zenodo.3232985") == {"record": "3232985"}
assert Zenodo().detect("https://zenodo.org/record/3232985") == {"record": "3232985"}
assert Zenodo().detect("https://doi.org/10.5281/zenodo.3232985") == {
"record": "3232985"
}
assert Zenodo().detect("https://zenodo.org/record/3232985") == {
"record": "3232985"
}
# only two of the three calls above have to resolve a DOI
assert fake_urlopen.call_count == 2
@ -77,7 +81,7 @@ def test_fetch_software_from_github_archive():
else:
return urlopen(req)
with patch.object(Zenodo, '_urlopen', new=mock_urlopen):
with patch.object(Zenodo, "_urlopen", new=mock_urlopen):
zen = Zenodo()
with TemporaryDirectory() as d:
@ -116,7 +120,7 @@ def test_fetch_software():
else:
return urlopen(req)
with patch.object(Zenodo, '_urlopen', new=mock_urlopen):
with patch.object(Zenodo, "_urlopen", new=mock_urlopen):
with TemporaryDirectory() as d:
zen = Zenodo()
@ -144,7 +148,7 @@ def test_fetch_data():
{
"filename": "bfake.zip",
"links": {"download": "file://{}".format(b_zen_path)},
}
},
],
"metadata": {"upload_type": "data"},
}
@ -157,7 +161,7 @@ def test_fetch_data():
else:
return urlopen(req)
with patch.object(Zenodo, '_urlopen', new=mock_urlopen):
with patch.object(Zenodo, "_urlopen", new=mock_urlopen):
with TemporaryDirectory() as d:
zen = Zenodo()
@ -167,5 +171,5 @@ def test_fetch_data():
unpacked_files = set(os.listdir(d))
# ZIP files shouldn't have been unpacked
expected = {'bfake.zip', 'afake.zip'}
expected = {"bfake.zip", "afake.zip"}
assert expected == unpacked_files


@ -11,28 +11,28 @@ from repo2docker.__main__ import make_r2d
def test_find_image():
images = [{'RepoTags': ['some-org/some-repo:latest']}]
images = [{"RepoTags": ["some-org/some-repo:latest"]}]
with patch('repo2docker.app.docker.APIClient') as FakeDockerClient:
with patch("repo2docker.app.docker.APIClient") as FakeDockerClient:
instance = FakeDockerClient.return_value
instance.images.return_value = images
r2d = Repo2Docker()
r2d.output_image_spec = 'some-org/some-repo'
r2d.output_image_spec = "some-org/some-repo"
assert r2d.find_image()
instance.images.assert_called_with()
def test_dont_find_image():
images = [{'RepoTags': ['some-org/some-image-name:latest']}]
images = [{"RepoTags": ["some-org/some-image-name:latest"]}]
with patch('repo2docker.app.docker.APIClient') as FakeDockerClient:
with patch("repo2docker.app.docker.APIClient") as FakeDockerClient:
instance = FakeDockerClient.return_value
instance.images.return_value = images
r2d = Repo2Docker()
r2d.output_image_spec = 'some-org/some-other-image-name'
r2d.output_image_spec = "some-org/some-other-image-name"
assert not r2d.find_image()
instance.images.assert_called_with()
@ -42,12 +42,12 @@ def test_image_name_remains_unchanged():
# if we specify an image name, it should remain unmodified
with TemporaryDirectory() as src:
app = Repo2Docker()
argv = ['--image-name', 'a-special-name', '--no-build', src]
argv = ["--image-name", "a-special-name", "--no-build", src]
app = make_r2d(argv)
app.start()
assert app.output_image_spec == 'a-special-name'
assert app.output_image_spec == "a-special-name"
def test_image_name_contains_sha1(repo_with_content):
@ -56,7 +56,7 @@ def test_image_name_contains_sha1(repo_with_content):
# force selection of the git content provider by prefixing path with
# file://. This is important as the Local content provider does not
# store the SHA1 in the repo spec
argv = ['--no-build', 'file://' + upstream]
argv = ["--no-build", "file://" + upstream]
app = make_r2d(argv)
app.start()
@ -67,13 +67,13 @@ def test_image_name_contains_sha1(repo_with_content):
def test_local_dir_image_name(repo_with_content):
upstream, sha1 = repo_with_content
app = Repo2Docker()
argv = ['--no-build', upstream]
argv = ["--no-build", upstream]
app = make_r2d(argv)
app.start()
assert app.output_image_spec.startswith(
'r2d' + escapism.escape(upstream, escape_char='-').lower()
"r2d" + escapism.escape(upstream, escape_char="-").lower()
)
@ -81,33 +81,33 @@ def test_build_kwargs(repo_with_content):
upstream, sha1 = repo_with_content
argv = [upstream]
app = make_r2d(argv)
app.extra_build_kwargs = {'somekey': "somevalue"}
app.extra_build_kwargs = {"somekey": "somevalue"}
with patch.object(docker.APIClient, 'build') as builds:
with patch.object(docker.APIClient, "build") as builds:
builds.return_value = []
app.build()
builds.assert_called_once()
args, kwargs = builds.call_args
assert 'somekey' in kwargs
assert kwargs['somekey'] == "somevalue"
assert "somekey" in kwargs
assert kwargs["somekey"] == "somevalue"
def test_run_kwargs(repo_with_content):
upstream, sha1 = repo_with_content
argv = [upstream]
app = make_r2d(argv)
app.extra_run_kwargs = {'somekey': "somevalue"}
app.extra_run_kwargs = {"somekey": "somevalue"}
with patch.object(docker.DockerClient, 'containers') as containers:
with patch.object(docker.DockerClient, "containers") as containers:
app.start_container()
containers.run.assert_called_once()
args, kwargs = containers.run.call_args
assert 'somekey' in kwargs
assert kwargs['somekey'] == "somevalue"
assert "somekey" in kwargs
assert kwargs["somekey"] == "somevalue"
def test_root_not_allowed():
with TemporaryDirectory() as src, patch('os.geteuid') as geteuid:
with TemporaryDirectory() as src, patch("os.geteuid") as geteuid:
geteuid.return_value = 0
argv = [src]
app = make_r2d(argv)
@ -115,14 +115,9 @@ def test_root_not_allowed():
app.build()
assert exc.code == errno.EPERM
app = Repo2Docker(
repo=src,
user_id=1000,
user_name='jovyan',
run=False,
)
app = Repo2Docker(repo=src, user_id=1000, user_name="jovyan", run=False)
app.initialize()
with patch.object(docker.APIClient, 'build') as builds:
with patch.object(docker.APIClient, "build") as builds:
builds.return_value = []
app.build()
builds.assert_called_once()
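
These tests share one mocking idiom: patch("repo2docker.app.docker.APIClient") swaps the class for a MagicMock, and its return_value is the instance the code under test will construct. A standalone sketch, runnable as a script; APIClient below is a toy stand-in for the real docker.APIClient:

    from unittest.mock import patch

    class APIClient:  # toy stand-in, not the real docker.APIClient
        def images(self):
            raise RuntimeError("would talk to the docker daemon")

    def find_image(name):
        # same shape as the find_image() checks above: list images, scan RepoTags
        return any(name in img.get("RepoTags", []) for img in APIClient().images())

    with patch(f"{__name__}.APIClient") as FakeDockerClient:
        FakeDockerClient.return_value.images.return_value = [
            {"RepoTags": ["some-org/some-repo:latest"]}
        ]
        assert find_image("some-org/some-repo:latest")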

View file

@ -13,29 +13,32 @@ def test_version(capsys):
Test passing '--version' to repo2docker
"""
with pytest.raises(SystemExit):
make_r2d(['--version'])
make_r2d(["--version"])
assert capsys.readouterr().out == "{}\n".format(__version__)
def test_simple():
"""
Test simplest possible invocation to r2d
"""
r2d = make_r2d(['.'])
assert r2d.repo == '.'
r2d = make_r2d(["."])
assert r2d.repo == "."
def test_editable():
"""
Test --editable behavior
"""
r2d = make_r2d(['--editable', '.'])
assert r2d.repo == '.'
assert r2d.volumes[os.getcwd()] == '.'
r2d = make_r2d(["--editable", "."])
assert r2d.repo == "."
assert r2d.volumes[os.getcwd()] == "."
def test_dry_run():
"""
Test that passing --no-build implies --no-run and no --push
"""
r2d = make_r2d(['--no-build', '.'])
r2d = make_r2d(["--no-build", "."])
assert r2d.dry_run
assert not r2d.run
assert not r2d.push
@ -45,31 +48,33 @@ def test_mem_limit():
"""
Test various ways of passing --build-memory-limit
"""
r2d = make_r2d(['--build-memory-limit', '1024', '.'])
r2d = make_r2d(["--build-memory-limit", "1024", "."])
assert int(r2d.build_memory_limit) == 1024
r2d = make_r2d(['--build-memory-limit', '3K', '.'])
r2d = make_r2d(["--build-memory-limit", "3K", "."])
assert int(r2d.build_memory_limit) == 1024 * 3
def test_run_required():
"""
Test all the things that should fail if we pass in --no-run
"""
# Can't use volumes without running
with pytest.raises(SystemExit):
make_r2d(['--no-run', '--editable', '.'])
make_r2d(["--no-run", "--editable", "."])
# Can't publish all ports without running
with pytest.raises(SystemExit):
make_r2d(['--no-run', '-P', '.'])
make_r2d(["--no-run", "-P", "."])
# Can't publish any ports without running
with pytest.raises(SystemExit):
make_r2d(['--no-run', '-p', '8000:8000', '.'])
make_r2d(["--no-run", "-p", "8000:8000", "."])
# Can't publish any ports while running if we don't specify a command explicitly
with pytest.raises(SystemExit):
make_r2d(['-p', '8000:8000', '.'])
make_r2d(["-p", "8000:8000", "."])
def test_clean():
"""
@ -77,14 +82,14 @@ def test_clean():
"""
# Don't clean when repo isn't local and we explicitly ask it to not clean
assert not make_r2d(['--no-clean', 'https://github.com/blah.git']).cleanup_checkout
assert not make_r2d(["--no-clean", "https://github.com/blah.git"]).cleanup_checkout
# Do clean repo when repo isn't local
assert make_r2d(['https://github.com/blah.git']).cleanup_checkout
assert make_r2d(["https://github.com/blah.git"]).cleanup_checkout
# Don't clean by default when repo exists locally
assert not make_r2d(['.']).cleanup_checkout
assert not make_r2d(["."]).cleanup_checkout
# Don't clean when repo exists locally and we explicitly ask it to not clean
assert not make_r2d(['--no-clean', '.']).cleanup_checkout
assert not make_r2d(["--no-clean", "."]).cleanup_checkout
def test_invalid_image_name():
@ -92,4 +97,4 @@ def test_invalid_image_name():
Test validating image names
"""
with pytest.raises(SystemExit):
make_r2d(['--image-name', '_invalid', '.'])
make_r2d(["--image-name", "_invalid", "."])

View file

@ -10,10 +10,10 @@ import pytest
here = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.dirname(here)
docker_simple = os.path.join(test_dir, 'dockerfile', 'simple')
docker_simple = os.path.join(test_dir, "dockerfile", "simple")
# default to building in the cwd (a temporary directory)
builddir = '.'
builddir = "."
@pytest.fixture
@ -21,15 +21,15 @@ def temp_cwd(tmpdir):
tmpdir.chdir()
def validate_arguments(builddir, args_list='.', expected=None, disable_dockerd=False):
def validate_arguments(builddir, args_list=".", expected=None, disable_dockerd=False):
try:
cmd = ['repo2docker']
cmd = ["repo2docker"]
for k in args_list:
cmd.append(k)
cmd.append(builddir)
env = os.environ.copy()
if disable_dockerd:
env['DOCKER_HOST'] = "INCORRECT"
env["DOCKER_HOST"] = "INCORRECT"
subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError as e:
@ -48,8 +48,8 @@ def test_image_name_fail(temp_cwd):
uppercase characters and _ characters in incorrect positions.
"""
image_name = 'Test/Invalid_name:1.0.0'
args_list = ['--no-run', '--no-build', '--image-name', image_name]
image_name = "Test/Invalid_name:1.0.0"
args_list = ["--no-run", "--no-build", "--image-name", image_name]
expected = (
"%r is not a valid docker image name. Image name"
"must start with an alphanumeric character and"
@ -63,8 +63,8 @@ def test_image_name_underscore_fail(temp_cwd):
Test to check if repo2docker throws an image_name validation error when the --image-name argument starts with _.
"""
image_name = '_test/invalid_name:1.0.0'
args_list = ['--no-run', '--no-build', '--image-name', image_name]
image_name = "_test/invalid_name:1.0.0"
args_list = ["--no-run", "--no-build", "--image-name", image_name]
expected = (
"%r is not a valid docker image name. Image name"
"must start with an alphanumeric character and"
@ -78,8 +78,8 @@ def test_image_name_double_dot_fail(temp_cwd):
Test to check if repo2docker throws an image_name validation error when the --image-name argument contains consecutive dots.
"""
image_name = 'test..com/invalid_name:1.0.0'
args_list = ['--no-run', '--no-build', '--image-name', image_name]
image_name = "test..com/invalid_name:1.0.0"
args_list = ["--no-run", "--no-build", "--image-name", image_name]
expected = (
"%r is not a valid docker image name. Image name"
"must start with an alphanumeric character and"
@ -94,8 +94,8 @@ def test_image_name_valid_restircted_registry_domain_name_fail(temp_cwd):
per the regex definitions, the first part of the registry domain cannot contain uppercase characters
"""
image_name = 'Test.com/valid_name:1.0.0'
args_list = ['--no-run', '--no-build', '--image-name', image_name]
image_name = "Test.com/valid_name:1.0.0"
args_list = ["--no-run", "--no-build", "--image-name", image_name]
expected = (
"%r is not a valid docker image name. Image name"
"must start with an alphanumeric character and"
@ -111,8 +111,8 @@ def test_image_name_valid_registry_domain_name_success(temp_cwd):
"""
builddir = docker_simple
image_name = 'test.COM/valid_name:1.0.0'
args_list = ['--no-run', '--no-build', '--image-name', image_name]
image_name = "test.COM/valid_name:1.0.0"
args_list = ["--no-run", "--no-build", "--image-name", image_name]
assert validate_arguments(builddir, args_list, None)
@ -123,8 +123,8 @@ def test_image_name_valid_name_success(temp_cwd):
"""
builddir = docker_simple
image_name = 'test.com/valid_name:1.0.0'
args_list = ['--no-run', '--no-build', '--image-name', image_name]
image_name = "test.com/valid_name:1.0.0"
args_list = ["--no-run", "--no-build", "--image-name", image_name]
assert validate_arguments(builddir, args_list, None)
@ -133,12 +133,10 @@ def test_volume_no_build_fail(temp_cwd):
"""
Test to check if repo2docker fails when both --no-build and -v arguments are given
"""
args_list = ['--no-build', '-v', '/data:/data']
args_list = ["--no-build", "-v", "/data:/data"]
assert not validate_arguments(
builddir,
args_list,
'Cannot mount volumes if container is not run',
builddir, args_list, "Cannot mount volumes if container is not run"
)
@ -146,12 +144,10 @@ def test_volume_no_run_fail(temp_cwd):
"""
Test to check if repo2docker fails when both --no-run and -v arguments are given
"""
args_list = ['--no-run', '-v', '/data:/data']
args_list = ["--no-run", "-v", "/data:/data"]
assert not validate_arguments(
builddir,
args_list,
'Cannot mount volumes if container is not run',
builddir, args_list, "Cannot mount volumes if container is not run"
)
@ -159,27 +155,39 @@ def test_env_no_run_fail(temp_cwd):
"""
Test to check if repo2docker fails when both --no-run and -e arguments are given
"""
args_list = ['--no-run', '-e', 'FOO=bar', '--']
args_list = ["--no-run", "-e", "FOO=bar", "--"]
assert not validate_arguments(builddir, args_list, 'To specify environment variables, you also need to run the container')
assert not validate_arguments(
builddir,
args_list,
"To specify environment variables, you also need to run the container",
)
def test_port_mapping_no_run_fail(temp_cwd):
"""
Test to check if repo2docker fails when both --no-run and --publish arguments are specified.
"""
args_list = ['--no-run', '--publish', '8000:8000']
args_list = ["--no-run", "--publish", "8000:8000"]
assert not validate_arguments(builddir, args_list, 'To publish user defined port mappings, the container must also be run')
assert not validate_arguments(
builddir,
args_list,
"To publish user defined port mappings, the container must also be run",
)
def test_all_ports_mapping_no_run_fail(temp_cwd):
"""
Test to check if repo2docker fails when both --no-run and -P arguments are specified.
"""
args_list = ['--no-run', '-P']
args_list = ["--no-run", "-P"]
assert not validate_arguments(builddir, args_list, 'To publish user defined port mappings, the container must also be run')
assert not validate_arguments(
builddir,
args_list,
"To publish user defined port mappings, the container must also be run",
)
def test_invalid_port_mapping_fail(temp_cwd):
@ -188,9 +196,9 @@ def test_invalid_port_mapping_fail(temp_cwd):
"""
# Specify builddir itself here to simulate passing in a run command;
# the builddir passed to the function then becomes an argument of that command
args_list = ['-p', '75000:80', builddir, 'ls']
args_list = ["-p", "75000:80", builddir, "ls"]
assert not validate_arguments(builddir, args_list, 'Port specification')
assert not validate_arguments(builddir, args_list, "Port specification")
def test_invalid_protocol_port_mapping_fail(temp_cwd):
@ -199,9 +207,9 @@ def test_invalid_protocol_port_mapping_fail(temp_cwd):
"""
# Specify builddir itself here to simulate passing in a run command;
# the builddir passed to the function then becomes an argument of that command
args_list = ['-p', '80/tpc:8000', builddir, 'ls']
args_list = ["-p", "80/tpc:8000", builddir, "ls"]
assert not validate_arguments(builddir, args_list, 'Port specification')
assert not validate_arguments(builddir, args_list, "Port specification")
def test_invalid_container_port_protocol_mapping_fail(temp_cwd):
@ -210,9 +218,9 @@ def test_invalid_container_port_protocol_mapping_fail(temp_cwd):
"""
# Specify builddir itself here to simulate passing in a run command;
# the builddir passed to the function then becomes an argument of that command
args_list = ['-p', '80:8000/upd', builddir, 'ls']
args_list = ["-p", "80:8000/upd", builddir, "ls"]
assert not validate_arguments(builddir, args_list, 'Port specification')
assert not validate_arguments(builddir, args_list, "Port specification")
@pytest.mark.xfail(reason="Regression in new arg parsing")
@ -234,13 +242,10 @@ def test_docker_handle_debug_fail(temp_cwd):
"""
Test to check that r2d fails with a stack trace when it cannot connect to the docker daemon and debug is enabled
"""
args_list = ['--debug']
args_list = ["--debug"]
assert not validate_arguments(
builddir,
args_list,
"docker.errors.DockerException",
disable_dockerd=True,
builddir, args_list, "docker.errors.DockerException", disable_dockerd=True
)
@ -248,6 +253,6 @@ def test_docker_no_build_success(temp_cwd):
"""
Test to check that r2d succeeds with the --no-build argument when it cannot connect to the docker daemon
"""
args_list = ['--no-build', '--no-run']
args_list = ["--no-build", "--no-run"]
assert validate_arguments(builddir, args_list, disable_dockerd=True)
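
validate_arguments above drives the real CLI through subprocess and greps the captured output for an expected message; pointing DOCKER_HOST at a bogus value is how it simulates an unreachable daemon. A trimmed-down sketch of that helper:

    import os
    import subprocess

    def cli_fails_with(args, expected):
        cmd = ["repo2docker"] + list(args) + ["."]
        env = dict(os.environ, DOCKER_HOST="INCORRECT")  # daemon unreachable
        try:
            subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
            return False  # the command unexpectedly succeeded
        except subprocess.CalledProcessError as e:
            return expected in e.output.decode()

    # e.g. cli_fails_with(["--no-build", "-v", "/data:/data"],
    #                     "Cannot mount volumes if container is not run")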

View file

@ -5,26 +5,26 @@ import pytest
from repo2docker import buildpacks
@pytest.mark.parametrize("binder_dir", ['.binder', 'binder'])
@pytest.mark.parametrize("binder_dir", [".binder", "binder"])
def test_binder_dir_property(tmpdir, binder_dir):
tmpdir.chdir()
os.mkdir(binder_dir)
bp = buildpacks.BuildPack()
assert binder_dir in bp.binder_dir
assert bp.binder_path('foo.yaml') == os.path.join(binder_dir, 'foo.yaml')
assert bp.binder_path("foo.yaml") == os.path.join(binder_dir, "foo.yaml")
def test_root_binder_dir(tmpdir):
tmpdir.chdir()
bp = buildpacks.BuildPack()
assert bp.binder_dir == ''
assert bp.binder_dir == ""
def test_exclusive_binder_dir(tmpdir):
tmpdir.chdir()
os.mkdir('./binder')
os.mkdir('./.binder')
os.mkdir("./binder")
os.mkdir("./.binder")
bp = buildpacks.BuildPack()
with pytest.raises(RuntimeError):

View file

@ -6,63 +6,67 @@ from unittest.mock import MagicMock
import docker
from repo2docker.buildpacks import BaseImage, DockerBuildPack, LegacyBinderDockerBuildPack
from repo2docker.buildpacks import (
BaseImage,
DockerBuildPack,
LegacyBinderDockerBuildPack,
)
def test_cache_from_base(tmpdir):
cache_from = [
'image-1:latest'
]
fake_log_value = {'stream': 'fake'}
cache_from = ["image-1:latest"]
fake_log_value = {"stream": "fake"}
fake_client = MagicMock(spec=docker.APIClient)
fake_client.build.return_value = iter([fake_log_value])
extra_build_kwargs = {'somekey': 'somevalue'}
extra_build_kwargs = {"somekey": "somevalue"}
# Test base image build pack
tmpdir.chdir()
for line in BaseImage().build(fake_client, 'image-2', 100, {}, cache_from, extra_build_kwargs):
for line in BaseImage().build(
fake_client, "image-2", 100, {}, cache_from, extra_build_kwargs
):
assert line == fake_log_value
called_args, called_kwargs = fake_client.build.call_args
assert 'cache_from' in called_kwargs
assert called_kwargs['cache_from'] == cache_from
assert "cache_from" in called_kwargs
assert called_kwargs["cache_from"] == cache_from
def test_cache_from_docker(tmpdir):
cache_from = [
'image-1:latest'
]
fake_log_value = {'stream': 'fake'}
cache_from = ["image-1:latest"]
fake_log_value = {"stream": "fake"}
fake_client = MagicMock(spec=docker.APIClient)
fake_client.build.return_value = iter([fake_log_value])
extra_build_kwargs = {'somekey': 'somevalue'}
extra_build_kwargs = {"somekey": "somevalue"}
tmpdir.chdir()
# test dockerfile
with tmpdir.join("Dockerfile").open('w') as f:
f.write('FROM scratch\n')
with tmpdir.join("Dockerfile").open("w") as f:
f.write("FROM scratch\n")
for line in DockerBuildPack().build(fake_client, 'image-2', 100, {}, cache_from, extra_build_kwargs):
for line in DockerBuildPack().build(
fake_client, "image-2", 100, {}, cache_from, extra_build_kwargs
):
assert line == fake_log_value
called_args, called_kwargs = fake_client.build.call_args
assert 'cache_from' in called_kwargs
assert called_kwargs['cache_from'] == cache_from
assert "cache_from" in called_kwargs
assert called_kwargs["cache_from"] == cache_from
def test_cache_from_legacy(tmpdir):
cache_from = [
'image-1:latest'
]
fake_log_value = {'stream': 'fake'}
cache_from = ["image-1:latest"]
fake_log_value = {"stream": "fake"}
fake_client = MagicMock(spec=docker.APIClient)
fake_client.build.return_value = iter([fake_log_value])
extra_build_kwargs = {'somekey': 'somevalue'}
extra_build_kwargs = {"somekey": "somevalue"}
# Test legacy docker image
with tmpdir.join("Dockerfile").open('w') as f:
f.write('FROM andrewosh/binder-base\n')
with tmpdir.join("Dockerfile").open("w") as f:
f.write("FROM andrewosh/binder-base\n")
for line in LegacyBinderDockerBuildPack().build(fake_client, 'image-2', 100, {}, cache_from, extra_build_kwargs):
for line in LegacyBinderDockerBuildPack().build(
fake_client, "image-2", 100, {}, cache_from, extra_build_kwargs
):
assert line == fake_log_value
called_args, called_kwargs = fake_client.build.call_args
assert 'cache_from' in called_kwargs
assert called_kwargs['cache_from'] == cache_from
assert "cache_from" in called_kwargs
assert called_kwargs["cache_from"] == cache_from

View file

@ -33,14 +33,14 @@ def test_clone_depth():
app.initialize()
app.start()
cmd = ['git', 'rev-parse', 'HEAD']
cmd = ["git", "rev-parse", "HEAD"]
p = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=d)
assert p.stdout.strip() == b'703322e9c6635ba1835d3b92eafbabeca0042c3e'
cmd = ['git', 'rev-list', '--count', 'HEAD']
assert p.stdout.strip() == b"703322e9c6635ba1835d3b92eafbabeca0042c3e"
cmd = ["git", "rev-list", "--count", "HEAD"]
p = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=d)
assert p.stdout.strip() == b'1'
with open(os.path.join(d, 'COMMIT')) as fp:
assert fp.read() == '100\n'
assert p.stdout.strip() == b"1"
with open(os.path.join(d, "COMMIT")) as fp:
assert fp.read() == "100\n"
def test_clone_depth_full():
@ -49,7 +49,7 @@ def test_clone_depth_full():
with TemporaryDirectory() as d:
app = Repo2Docker(
repo=URL,
ref='master',
ref="master",
dry_run=True,
run=False,
# turn off automatic clean up of the checkout so we can inspect it
@ -61,14 +61,14 @@ def test_clone_depth_full():
app.start()
# Building the image has already put us in the cloned repository directory
cmd = ['git', 'rev-parse', 'HEAD']
cmd = ["git", "rev-parse", "HEAD"]
p = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=d)
assert p.stdout.strip() == b'703322e9c6635ba1835d3b92eafbabeca0042c3e'
cmd = ['git', 'rev-list', '--count', 'HEAD']
assert p.stdout.strip() == b"703322e9c6635ba1835d3b92eafbabeca0042c3e"
cmd = ["git", "rev-list", "--count", "HEAD"]
p = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=d)
assert p.stdout.strip() == b'100'
with open(os.path.join(d, 'COMMIT')) as fp:
assert fp.read() == '100\n'
assert p.stdout.strip() == b"100"
with open(os.path.join(d, "COMMIT")) as fp:
assert fp.read() == "100\n"
def test_clone_depth_full2():
@ -77,7 +77,7 @@ def test_clone_depth_full2():
with TemporaryDirectory() as d:
app = Repo2Docker(
repo=URL,
ref='703322e',
ref="703322e",
dry_run=True,
run=False,
# turn off automatic clean up of the checkout so we can inspect it
@ -89,14 +89,14 @@ def test_clone_depth_full2():
app.start()
# Building the image has already put us in the cloned repository directory
cmd = ['git', 'rev-parse', 'HEAD']
cmd = ["git", "rev-parse", "HEAD"]
p = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=d)
assert p.stdout.strip() == b'703322e9c6635ba1835d3b92eafbabeca0042c3e'
cmd = ['git', 'rev-list', '--count', 'HEAD']
assert p.stdout.strip() == b"703322e9c6635ba1835d3b92eafbabeca0042c3e"
cmd = ["git", "rev-list", "--count", "HEAD"]
p = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=d)
assert p.stdout.strip() == b'100'
with open(os.path.join(d, 'COMMIT')) as fp:
assert fp.read() == '100\n'
assert p.stdout.strip() == b"100"
with open(os.path.join(d, "COMMIT")) as fp:
assert fp.read() == "100\n"
def test_clone_depth_mid():
@ -105,7 +105,7 @@ def test_clone_depth_mid():
with TemporaryDirectory() as d:
app = Repo2Docker(
repo=URL,
ref='8bc4f21',
ref="8bc4f21",
dry_run=True,
run=False,
# turn off automatic clean up of the checkout so we can inspect it
@ -117,11 +117,11 @@ def test_clone_depth_mid():
app.start()
# Building the image has already put us in the cloned repository directory
cmd = ['git', 'rev-parse', 'HEAD']
cmd = ["git", "rev-parse", "HEAD"]
p = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=d)
assert p.stdout.strip() == b'8bc4f216856f86f6fc25a788b744b93b87e9ba48'
cmd = ['git', 'rev-list', '--count', 'HEAD']
assert p.stdout.strip() == b"8bc4f216856f86f6fc25a788b744b93b87e9ba48"
cmd = ["git", "rev-list", "--count", "HEAD"]
p = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=d)
assert p.stdout.strip() == b'50'
with open(os.path.join(d, 'COMMIT')) as fp:
assert fp.read() == '50\n'
assert p.stdout.strip() == b"50"
with open(os.path.join(d, "COMMIT")) as fp:
assert fp.read() == "50\n"

View file

@ -40,8 +40,8 @@ def test_connect_url(tmpdir):
app.start()
container = app.start_container()
container_url = 'http://{}:{}/api'.format(app.hostname, app.port)
expected_url = 'http://{}:{}'.format(app.hostname, app.port)
container_url = "http://{}:{}/api".format(app.hostname, app.port)
expected_url = "http://{}:{}".format(app.hostname, app.port)
# wait a bit for the container to be ready
# give the container a chance to start
@ -52,7 +52,7 @@ def test_connect_url(tmpdir):
success = False
for i in range(1, 4):
container.reload()
assert container.status == 'running'
assert container.status == "running"
if expected_url not in container.logs().decode("utf8"):
time.sleep(i * 3)
continue

View file

@ -13,7 +13,7 @@ def test_git_credential_env():
out = (
check_output(
os.path.join(repo_root, "docker", "git-credential-env"),
env={'GIT_CREDENTIAL_ENV': credential_env},
env={"GIT_CREDENTIAL_ENV": credential_env},
)
.decode()
.strip()

View file

@ -6,20 +6,20 @@ import time
from repo2docker.__main__ import make_r2d
DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'dockerfile', 'editable')
DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "dockerfile", "editable")
def test_editable(run_repo2docker):
"""Run a local repository in edit mode. Verify a new file has been
created afterwards"""
newfile = os.path.join(DIR, 'newfile')
newfile = os.path.join(DIR, "newfile")
try:
# If the file didn't get properly cleaned up last time, we
# need to do that now
os.remove(newfile)
except FileNotFoundError:
pass
argv = ['--editable', DIR, '/usr/local/bin/change.sh']
argv = ["--editable", DIR, "/usr/local/bin/change.sh"]
run_repo2docker(argv)
try:
with open(newfile) as fp:
@ -33,30 +33,30 @@ def test_editable_by_host():
"""Test whether a new file created by the host environment, is
detected in the container"""
app = make_r2d(['--editable', DIR])
app = make_r2d(["--editable", DIR])
app.initialize()
app.build()
container = app.start_container()
# give the container a chance to start
while container.status != 'running':
while container.status != "running":
time.sleep(1)
try:
with tempfile.NamedTemporaryFile(dir=DIR, prefix='testfile', suffix='.txt'):
status, output = container.exec_run(['sh', '-c', 'ls testfile????????.txt'])
with tempfile.NamedTemporaryFile(dir=DIR, prefix="testfile", suffix=".txt"):
status, output = container.exec_run(["sh", "-c", "ls testfile????????.txt"])
assert status == 0
assert re.match(br'^testfile\w{8}\.txt\n$', output) is not None
assert re.match(br"^testfile\w{8}\.txt\n$", output) is not None
# After exiting the with block the file should stop existing
# in the container as well as locally
status, output = container.exec_run(['sh', '-c', 'ls testfile????????.txt'])
status, output = container.exec_run(["sh", "-c", "ls testfile????????.txt"])
assert status == 2
assert re.match(br'^testfile\w{8}\.txt\n$', output) is None
assert re.match(br"^testfile\w{8}\.txt\n$", output) is None
finally:
# stop the container, we don't care how it stops or
# what the exit code is.
container.stop(timeout=1)
container.reload()
assert container.status == 'exited', container.status
assert container.status == "exited", container.status
container.remove()

Wyświetl plik

@ -6,6 +6,7 @@ import subprocess
import tempfile
import time
def test_env():
"""
Validate that you can define environment variables
@ -13,19 +14,24 @@ def test_env():
ts = str(time.time())
with tempfile.TemporaryDirectory() as tmpdir:
username = os.getlogin()
subprocess.check_call([
'repo2docker',
'-v', '{}:/home/{}'.format(tmpdir, username),
'-e', 'FOO={}'.format(ts),
'--env', 'BAR=baz',
'--',
tmpdir,
'/bin/bash',
'-c', 'echo -n $FOO > ts && echo -n $BAR > bar'
])
subprocess.check_call(
[
"repo2docker",
"-v",
"{}:/home/{}".format(tmpdir, username),
"-e",
"FOO={}".format(ts),
"--env",
"BAR=baz",
"--",
tmpdir,
"/bin/bash",
"-c",
"echo -n $FOO > ts && echo -n $BAR > bar",
]
)
with open(os.path.join(tmpdir, 'ts')) as f:
with open(os.path.join(tmpdir, "ts")) as f:
assert f.read().strip() == ts
with open(os.path.join(tmpdir, 'bar')) as f:
assert f.read().strip() == 'baz'
with open(os.path.join(tmpdir, "bar")) as f:
assert f.read().strip() == "baz"

View file

@ -14,13 +14,13 @@ def test_empty_env_yml(tmpdir):
bp = buildpacks.CondaBuildPack()
py_ver = bp.python_version
# If the environment.yml is empty python_version will get an empty string
assert py_ver == ''
assert py_ver == ""
def test_no_dict_env_yml(tmpdir):
tmpdir.chdir()
q = tmpdir.join("environment.yml")
q.write("numpy\n "
"matplotlib\n")
q.write("numpy\n " "matplotlib\n")
bq = buildpacks.CondaBuildPack()
with pytest.raises(TypeError):
py_ver = bq.python_version
py_ver = bq.python_version

View file

@ -8,13 +8,13 @@ from repo2docker.buildpacks.conda.freeze import set_python
import pytest
V = '3.7'
yaml = YAML(typ='rt')
V = "3.7"
yaml = YAML(typ="rt")
def test_set_python():
with TemporaryDirectory() as d:
env_fname = os.path.join(d, 'some-env.yml')
env_fname = os.path.join(d, "some-env.yml")
# function being tested
set_python(env_fname, V)
@ -25,7 +25,7 @@ def test_set_python():
f.seek(0)
assert "AUTO GENERATED FROM" in f.readline()
for dep in env['dependencies']:
for dep in env["dependencies"]:
# the "- pip:" entry isn't a string, hence this complex if
# statement
if isinstance(dep, str) and dep.startswith("python="):
@ -39,7 +39,7 @@ def test_doesnt_clobber():
# check a file not containing the word GENERATED on the first line is
# left unchanged
with TemporaryDirectory() as d:
env_fname = os.path.join(d, 'some-env.yml')
env_fname = os.path.join(d, "some-env.yml")
with open(env_fname, "w") as f:
f.write("some text here")
@ -53,15 +53,14 @@ def test_python_missing_in_source_env():
# check we raise an exception when python isn't in the source environment
with TemporaryDirectory() as d:
# prep our source environment
source_env_fname = os.path.join(d, 'source-env.yml')
with open(source_env_fname, 'w') as f:
yaml.dump({'dependencies': ['a_package_name=1.2.3']}, f)
source_env_fname = os.path.join(d, "source-env.yml")
with open(source_env_fname, "w") as f:
yaml.dump({"dependencies": ["a_package_name=1.2.3"]}, f)
with patch('repo2docker.buildpacks.conda.freeze.ENV_FILE',
source_env_fname):
target_env_fname = os.path.join(d, 'some-env.yml')
with patch("repo2docker.buildpacks.conda.freeze.ENV_FILE", source_env_fname):
target_env_fname = os.path.join(d, "some-env.yml")
with pytest.raises(ValueError) as e:
set_python(target_env_fname, V)
assert 'python dependency not found' in str(e.value)
assert "python dependency not found" in str(e.value)

View file

@ -13,18 +13,17 @@ URL = "https://github.com/binderhub-ci-repos/repo2docker-ci-clone-depth"
def test_buildpack_labels_rendered():
bp = BuildPack()
assert 'LABEL' not in bp.render()
bp.labels['first_label'] = 'firstlabel'
assert "LABEL" not in bp.render()
bp.labels["first_label"] = "firstlabel"
assert 'LABEL first_label="firstlabel"\n' in bp.render()
bp.labels['second_label'] = 'anotherlabel'
bp.labels["second_label"] = "anotherlabel"
assert 'LABEL second_label="anotherlabel"\n' in bp.render()
@pytest.mark.parametrize('ref, repo, expected_repo_label', [
(None, URL, URL),
('some-ref', None, 'local'),
(None, None, 'local'),
])
@pytest.mark.parametrize(
"ref, repo, expected_repo_label",
[(None, URL, URL), ("some-ref", None, "local"), (None, None, "local")],
)
def test_Repo2Docker_labels(ref, repo, expected_repo_label, tmpdir):
app = Repo2Docker(dry_run=True)
# Add mock BuildPack to app
@ -41,9 +40,9 @@ def test_Repo2Docker_labels(ref, repo, expected_repo_label, tmpdir):
app.initialize()
app.start()
expected_labels = {
'repo2docker.ref': ref,
'repo2docker.repo': expected_repo_label,
'repo2docker.version': __version__,
"repo2docker.ref": ref,
"repo2docker.repo": expected_repo_label,
"repo2docker.version": __version__,
}
assert mock_buildpack().labels == expected_labels
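
The reformatted parametrize is just a table: one tuple per case, names bound in order. Sketched on a trivial function; the fall-back-to-"local" rule is inferred from the three rows above, not lifted from repo2docker:

    import pytest

    @pytest.mark.parametrize(
        "ref, repo, expected_repo_label",
        [
            (None, "https://example.com/r.git", "https://example.com/r.git"),
            ("some-ref", None, "local"),
            (None, None, "local"),
        ],
    )
    def test_repo_label(ref, repo, expected_repo_label):
        assert (repo or "local") == expected_repo_label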

View file

@ -24,22 +24,22 @@ basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def does_build(tmpdir, build_src_dir, mem_limit, mem_allocate_mb):
builddir = tmpdir.join('build')
builddir = tmpdir.join("build")
shutil.copytree(build_src_dir, builddir)
builddir.chdir()
print(os.getcwd(), os.listdir('.'))
mem_allocate_mb_file = os.path.join(builddir, 'mem_allocate_mb')
print(os.getcwd(), os.listdir("."))
mem_allocate_mb_file = os.path.join(builddir, "mem_allocate_mb")
# Cache bust so we actually do a rebuild each time this is run!
with builddir.join('cachebust').open('w') as cachebust:
with builddir.join("cachebust").open("w") as cachebust:
cachebust.write(str(time.time()))
# we don't have an easy way to pass env vars or whatever to
# postBuild from here, so we write a file into the repo that is
# read by the postBuild script!
with open(mem_allocate_mb_file, 'w') as f:
with open(mem_allocate_mb_file, "w") as f:
f.write(str(mem_allocate_mb))
r2d = Repo2Docker(build_memory_limit=str(mem_limit) + 'M')
r2d = Repo2Docker(build_memory_limit=str(mem_limit) + "M")
r2d.initialize()
try:
r2d.build()
@ -50,23 +50,20 @@ def does_build(tmpdir, build_src_dir, mem_limit, mem_allocate_mb):
@pytest.mark.parametrize(
'test, mem_limit, mem_allocate_mb, expected',
"test, mem_limit, mem_allocate_mb, expected",
[
('dockerfile', 128, 256, False),
('dockerfile', 512, 256, True),
('non-dockerfile', 128, 256, False),
('non-dockerfile', 512, 256, True),
]
("dockerfile", 128, 256, False),
("dockerfile", 512, 256, True),
("non-dockerfile", 128, 256, False),
("non-dockerfile", 512, 256, True),
],
)
def test_memlimit_nondockerfile(tmpdir, test, mem_limit, mem_allocate_mb, expected):
"""
Test if memory-limited builds are working for non-dockerfile builds
"""
success = does_build(
tmpdir,
os.path.join(basedir, 'memlimit', test),
mem_limit,
mem_allocate_mb,
tmpdir, os.path.join(basedir, "memlimit", test), mem_limit, mem_allocate_mb
)
assert success == expected
@ -78,7 +75,7 @@ def test_memlimit_same_postbuild():
Until https://github.com/jupyter/repo2docker/issues/160 gets fixed.
"""
filepaths = [
os.path.join(basedir, 'memlimit', t, "postBuild")
os.path.join(basedir, "memlimit", t, "postBuild")
for t in ("dockerfile", "non-dockerfile")
]
file_contents = []
@ -89,15 +86,15 @@ def test_memlimit_same_postbuild():
assert len(set(file_contents)) == 1
@pytest.mark.parametrize('BuildPack', [BaseImage, DockerBuildPack])
@pytest.mark.parametrize("BuildPack", [BaseImage, DockerBuildPack])
def test_memlimit_argument_type(BuildPack):
# check that an exception is raised when the memory limit isn't an int
fake_log_value = {'stream': 'fake'}
fake_log_value = {"stream": "fake"}
fake_client = MagicMock(spec=docker.APIClient)
fake_client.build.return_value = iter([fake_log_value])
with pytest.raises(ValueError) as exc_info:
for line in BuildPack().build(fake_client, 'image-2', "10Gi", {}, [], {}):
for line in BuildPack().build(fake_client, "image-2", "10Gi", {}, [], {}):
pass
assert "The memory limit has to be specified as an" in str(exc_info.value)

View file

@ -14,8 +14,9 @@ import pytest
from repo2docker.app import Repo2Docker
def read_port_mapping_response(request, tmpdir, host=None, port='',
all_ports=False, protocol=None):
def read_port_mapping_response(
request, tmpdir, host=None, port="", all_ports=False, protocol=None
):
"""
Deploy container and test if port mappings work as expected
@ -25,14 +26,14 @@ def read_port_mapping_response(request, tmpdir, host=None, port='',
port: the random host port to bind to
protocol: the protocol to use; valid values are /tcp or /udp
"""
port_protocol = '8888'
port_protocol = "8888"
if protocol:
port_protocol += protocol
host_port = port
if host:
host_port = (host, port)
else:
host = 'localhost'
host = "localhost"
if port:
ports = {port_protocol: host_port}
@ -46,15 +47,15 @@ def read_port_mapping_response(request, tmpdir, host=None, port='',
tmpdir.chdir()
username = os.getlogin()
tmpdir.mkdir('username')
tmpdir.mkdir("username")
r2d = Repo2Docker(
repo=str(tmpdir.mkdir('repo')),
repo=str(tmpdir.mkdir("repo")),
user_id=os.geteuid(),
user_name=username,
all_ports=all_ports,
ports=ports,
run=True,
run_cmd=['python', '-m', 'http.server', '8888'],
run_cmd=["python", "-m", "http.server", "8888"],
)
r2d.initialize()
r2d.build()
@ -64,21 +65,22 @@ def read_port_mapping_response(request, tmpdir, host=None, port='',
# register cleanup first thing so we don't leave it lying around
def _cleanup():
container.reload()
if container.status == 'running':
if container.status == "running":
container.kill()
try:
container.remove()
except docker.errors.NotFound:
pass
request.addfinalizer(_cleanup)
container.reload()
assert container.status == 'running'
port_mapping = container.attrs['NetworkSettings']['Ports']
assert container.status == "running"
port_mapping = container.attrs["NetworkSettings"]["Ports"]
if all_ports:
port = port_mapping['8888/tcp'][0]['HostPort']
port = port_mapping["8888/tcp"][0]["HostPort"]
url = 'http://{}:{}'.format(host, port)
url = "http://{}:{}".format(host, port)
for i in range(5):
try:
r = requests.get(url)
@ -86,14 +88,14 @@ def read_port_mapping_response(request, tmpdir, host=None, port='',
except Exception as e:
print("No response from {}: {}".format(url, e))
container.reload()
assert container.status == 'running'
assert container.status == "running"
time.sleep(3)
continue
else:
break
else:
pytest.fail("Never succeeded in talking to %s" % url)
assert 'Directory listing' in r.text
assert "Directory listing" in r.text
def test_all_port_mapping_response(request, tmpdir):
@ -104,21 +106,9 @@ def test_all_port_mapping_response(request, tmpdir):
@pytest.mark.parametrize(
'host, protocol',
[
(None, None),
('127.0.0.1', None),
(None, '/tcp'),
]
"host, protocol", [(None, None), ("127.0.0.1", None), (None, "/tcp")]
)
def test_port_mapping(request, tmpdir, host, protocol):
"""Test a port mapping"""
port = str(random.randint(50000, 51000))
read_port_mapping_response(
request,
tmpdir,
host=host,
port=port,
protocol=protocol,
)
read_port_mapping_response(request, tmpdir, host=host, port=port, protocol=protocol)
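
The ports dict assembled in read_port_mapping_response follows the docker SDK's port-binding convention: keys are "container_port/protocol", values are either a bare host port or a (host, port) pair when binding a single interface. For example:

    ports = {
        "8888/tcp": "50001",                 # host 50001 -> container 8888
        "8889/tcp": ("127.0.0.1", "50002"),  # bind on loopback only
    }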

View file

@ -33,110 +33,65 @@ def test_simple_matches():
def test_range_matches():
assert semver.create_semver_matcher(
"1.2.3"
) == semver.create_semver_matcher("^1.2.3")
assert semver.create_semver_matcher(
"1.2.3"
) == semver.create_semver_matcher("^1.2.3")
assert semver.create_semver_matcher("1.2") == semver.create_semver_matcher(
"^1.2"
assert semver.create_semver_matcher("1.2.3") == semver.create_semver_matcher(
"^1.2.3"
)
assert semver.create_semver_matcher("1") == semver.create_semver_matcher(
"^1"
assert semver.create_semver_matcher("1.2.3") == semver.create_semver_matcher(
"^1.2.3"
)
assert semver.create_semver_matcher(
"0.0.3"
) == semver.create_semver_matcher("^0.0.3")
assert semver.create_semver_matcher("0") == semver.create_semver_matcher(
"^0"
assert semver.create_semver_matcher("1.2") == semver.create_semver_matcher("^1.2")
assert semver.create_semver_matcher("1") == semver.create_semver_matcher("^1")
assert semver.create_semver_matcher("0.0.3") == semver.create_semver_matcher(
"^0.0.3"
)
assert semver.create_semver_matcher("0") == semver.create_semver_matcher("^0")
def test_match_particular_version():
assert semver.create_semver_matcher("1.2.3").match(
semver.str_to_version("1.5.2")
)
assert semver.create_semver_matcher("1.2.3").match(
semver.str_to_version("1.2.3")
)
assert semver.create_semver_matcher("1.2.3").match(semver.str_to_version("1.5.2"))
assert semver.create_semver_matcher("1.2.3").match(semver.str_to_version("1.2.3"))
assert (
semver.create_semver_matcher("1.2.3").match(
semver.str_to_version("2.0.0")
)
semver.create_semver_matcher("1.2.3").match(semver.str_to_version("2.0.0"))
== False
)
assert (
semver.create_semver_matcher("1.2.3").match(
semver.str_to_version("1.2.2")
)
semver.create_semver_matcher("1.2.3").match(semver.str_to_version("1.2.2"))
== False
)
assert semver.create_semver_matcher("~1.2.3").match(
semver.str_to_version("1.2.4")
)
assert semver.create_semver_matcher("~1.2.3").match(
semver.str_to_version("1.2.3")
)
assert semver.create_semver_matcher("~1.2.3").match(semver.str_to_version("1.2.4"))
assert semver.create_semver_matcher("~1.2.3").match(semver.str_to_version("1.2.3"))
assert (
semver.create_semver_matcher("~1.2.3").match(
semver.str_to_version("1.3")
)
semver.create_semver_matcher("~1.2.3").match(semver.str_to_version("1.3"))
== False
)
assert semver.create_semver_matcher("1.2").match(
semver.str_to_version("1.2.0")
)
assert semver.create_semver_matcher("1.2").match(
semver.str_to_version("1.9.9")
)
assert semver.create_semver_matcher("1.2").match(semver.str_to_version("1.2.0"))
assert semver.create_semver_matcher("1.2").match(semver.str_to_version("1.9.9"))
assert (
semver.create_semver_matcher("1.2").match(
semver.str_to_version("2.0.0")
)
semver.create_semver_matcher("1.2").match(semver.str_to_version("2.0.0"))
== False
)
assert (
semver.create_semver_matcher("1.2").match(
semver.str_to_version("1.1.9")
)
semver.create_semver_matcher("1.2").match(semver.str_to_version("1.1.9"))
== False
)
assert semver.create_semver_matcher("0.2.3").match(
semver.str_to_version("0.2.3")
)
assert semver.create_semver_matcher("0.2.3").match(semver.str_to_version("0.2.3"))
assert (
semver.create_semver_matcher("0.2.3").match(
semver.str_to_version("0.3.0")
)
semver.create_semver_matcher("0.2.3").match(semver.str_to_version("0.3.0"))
== False
)
assert (
semver.create_semver_matcher("0.2.3").match(
semver.str_to_version("0.2.2")
)
semver.create_semver_matcher("0.2.3").match(semver.str_to_version("0.2.2"))
== False
)
assert semver.create_semver_matcher("0").match(
semver.str_to_version("0.0.0")
)
assert semver.create_semver_matcher("0").match(
semver.str_to_version("0.99.0")
)
assert semver.create_semver_matcher("0").match(semver.str_to_version("0.0.0"))
assert semver.create_semver_matcher("0").match(semver.str_to_version("0.99.0"))
assert (
semver.create_semver_matcher("0").match(semver.str_to_version("1.0.0"))
== False
)
assert semver.create_semver_matcher("0.0").match(
semver.str_to_version("0.0.0")
)
assert semver.create_semver_matcher("0.0").match(
semver.str_to_version("0.0.99")
semver.create_semver_matcher("0").match(semver.str_to_version("1.0.0")) == False
)
assert semver.create_semver_matcher("0.0").match(semver.str_to_version("0.0.0"))
assert semver.create_semver_matcher("0.0").match(semver.str_to_version("0.0.99"))
assert (
semver.create_semver_matcher("0.0").match(
semver.str_to_version("0.1.0")
)
semver.create_semver_matcher("0.0").match(semver.str_to_version("0.1.0"))
== False
)
@ -146,19 +101,11 @@ def test_less_than_prefix():
assert repr(semver.create_semver_matcher("<1")) == "<1.0.0"
assert repr(semver.create_semver_matcher("<0.2.3")) == "<0.2.3"
assert semver.create_semver_matcher("<2.0.3").match(
semver.str_to_version("2.0.2")
)
assert semver.create_semver_matcher("<2").match(
semver.str_to_version("0.0.1")
)
assert semver.create_semver_matcher("<2.0.3").match(
semver.str_to_version("0.2.3")
)
assert semver.create_semver_matcher("<2.0.3").match(semver.str_to_version("2.0.2"))
assert semver.create_semver_matcher("<2").match(semver.str_to_version("0.0.1"))
assert semver.create_semver_matcher("<2.0.3").match(semver.str_to_version("0.2.3"))
assert (
semver.create_semver_matcher("<0.2.4").match(
semver.str_to_version("0.2.4")
)
semver.create_semver_matcher("<0.2.4").match(semver.str_to_version("0.2.4"))
== False
)
@ -167,44 +114,30 @@ def test_equal_prefix():
assert repr(semver.create_semver_matcher("=1.2.3")) == "==1.2.3"
assert repr(semver.create_semver_matcher("=1.2")) == "==1.2.0"
assert repr(semver.create_semver_matcher(" =1")) == "==1.0.0"
assert semver.create_semver_matcher("=1.2.3").match(
semver.str_to_version("1.2.3")
)
assert semver.create_semver_matcher("=1.2.3").match(semver.str_to_version("1.2.3"))
assert (
semver.create_semver_matcher("=1.2.3").match(
semver.str_to_version("1.2.4")
)
semver.create_semver_matcher("=1.2.3").match(semver.str_to_version("1.2.4"))
== False
)
assert (
semver.create_semver_matcher("=1.2.3").match(
semver.str_to_version("1.2.2")
)
semver.create_semver_matcher("=1.2.3").match(semver.str_to_version("1.2.2"))
== False
)
def test_fancy_unicode():
assert semver.create_semver_matcher(
"1.3.0"
) == semver.create_semver_matcher(">=1.3.0")
assert semver.create_semver_matcher("≥1.3.0") == semver.create_semver_matcher(
">=1.3.0"
)
def test_largerthan_equal():
assert repr(semver.create_semver_matcher(">= 1.2.3")) == ">= 1.2.3"
assert repr(semver.create_semver_matcher(" >= 1")) == ">= 1.0.0"
assert semver.create_semver_matcher(">=1").match(
semver.str_to_version("1.0.0")
)
assert semver.create_semver_matcher(">=0").match(
semver.str_to_version("0.0.1")
)
assert semver.create_semver_matcher(">=1.2.3").match(
semver.str_to_version("1.2.3")
)
assert semver.create_semver_matcher(">=1").match(semver.str_to_version("1.0.0"))
assert semver.create_semver_matcher(">=0").match(semver.str_to_version("0.0.1"))
assert semver.create_semver_matcher(">=1.2.3").match(semver.str_to_version("1.2.3"))
assert (
semver.create_semver_matcher(">=1.2.3").match(
semver.str_to_version("1.2.2")
)
semver.create_semver_matcher(">=1.2.3").match(semver.str_to_version("1.2.2"))
== False
)
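
Read together, these assertions pin down the specifier semantics: a bare "1.2.3" behaves like the caret form "^1.2.3" (compatible releases: at least 1.2.3 and below 2.0.0), the tilde form "~1.2.3" allows only patch bumps, and bare prefixes such as "0" or "1.2" match anything sharing that prefix. A condensed restatement, with the import path assumed to match the test module:

    from repo2docker import semver

    caret = semver.create_semver_matcher("1.2.3")  # same matcher as "^1.2.3"
    assert caret.match(semver.str_to_version("1.5.2"))
    assert not caret.match(semver.str_to_version("2.0.0"))

    tilde = semver.create_semver_matcher("~1.2.3")  # patch releases only
    assert tilde.match(semver.str_to_version("1.2.4"))
    assert not tilde.match(semver.str_to_version("1.3"))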

View file

@ -17,7 +17,7 @@ def test_subdir(run_repo2docker):
# root of the test repo are invalid
cwd = os.getcwd()
argv = ['--subdir', 'a directory', TEST_REPO]
argv = ["--subdir", "a directory", TEST_REPO]
run_repo2docker(argv)
# check that we restored the current working directory
@ -25,23 +25,17 @@ def test_subdir(run_repo2docker):
def test_subdir_in_image_name():
app = Repo2Docker(
repo=TEST_REPO,
subdir='a directory',
)
app = Repo2Docker(repo=TEST_REPO, subdir="a directory")
app.initialize()
app.build()
escaped_dirname = escapism.escape('a directory', escape_char='-').lower()
escaped_dirname = escapism.escape("a directory", escape_char="-").lower()
assert escaped_dirname in app.output_image_spec
def test_subdir_invalid(caplog):
# test an error is raised when requesting a non-existent subdir
app = Repo2Docker(
repo=TEST_REPO,
subdir='invalid-sub-dir',
)
app = Repo2Docker(repo=TEST_REPO, subdir="invalid-sub-dir")
app.initialize()
with pytest.raises(FileNotFoundError):
app.build() # Just build the image and do not run it.

View file

@ -12,8 +12,8 @@ from repo2docker import Repo2Docker
def test_automatic_username_deduction():
# check we pickup the right username
with mock.patch('os.environ') as mock_env:
expected = 'someusername'
with mock.patch("os.environ") as mock_env:
expected = "someusername"
mock_env.get.return_value = expected
r2d = Repo2Docker()
@ -30,22 +30,30 @@ def test_user():
userid = str(os.geteuid())
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = os.path.realpath(tmpdir)
subprocess.check_call([
'repo2docker',
'-v', '{}:/home/{}'.format(tmpdir, username),
'--user-id', userid,
'--user-name', username,
tmpdir,
'--',
'/bin/bash',
'-c', 'id -u > id && pwd > pwd && whoami > name && echo -n $USER > env_user'.format(ts)
])
subprocess.check_call(
[
"repo2docker",
"-v",
"{}:/home/{}".format(tmpdir, username),
"--user-id",
userid,
"--user-name",
username,
tmpdir,
"--",
"/bin/bash",
"-c",
"id -u > id && pwd > pwd && whoami > name && echo -n $USER > env_user".format(
ts
),
]
)
with open(os.path.join(tmpdir, 'id')) as f:
with open(os.path.join(tmpdir, "id")) as f:
assert f.read().strip() == userid
with open(os.path.join(tmpdir, 'pwd')) as f:
assert f.read().strip() == '/home/{}'.format(username)
with open(os.path.join(tmpdir, 'name')) as f:
with open(os.path.join(tmpdir, "pwd")) as f:
assert f.read().strip() == "/home/{}".format(username)
with open(os.path.join(tmpdir, "name")) as f:
assert f.read().strip() == username
with open(os.path.join(tmpdir, 'name')) as f:
with open(os.path.join(tmpdir, "name")) as f:
assert f.read().strip() == username

View file

@ -10,38 +10,32 @@ import subprocess
def test_capture_cmd_no_capture_success():
# This should succeed
for line in utils.execute_cmd([
'/bin/bash', '-c', 'echo test'
]):
for line in utils.execute_cmd(["/bin/bash", "-c", "echo test"]):
pass
def test_capture_cmd_no_capture_fail():
with pytest.raises(subprocess.CalledProcessError):
for line in utils.execute_cmd([
'/bin/bash', '-c', 'e '
]):
for line in utils.execute_cmd(["/bin/bash", "-c", "e "]):
pass
def test_capture_cmd_capture_success():
# This should succeed
for line in utils.execute_cmd([
'/bin/bash', '-c', 'echo test'
], capture=True):
assert line == 'test\n'
for line in utils.execute_cmd(["/bin/bash", "-c", "echo test"], capture=True):
assert line == "test\n"
def test_capture_cmd_capture_fail():
with pytest.raises(subprocess.CalledProcessError):
for line in utils.execute_cmd([
'/bin/bash', '-c', 'echo test; exit 1 '
], capture=True):
assert line == 'test\n'
for line in utils.execute_cmd(
["/bin/bash", "-c", "echo test; exit 1 "], capture=True
):
assert line == "test\n"
def test_chdir(tmpdir):
d = str(tmpdir.mkdir('cwd'))
d = str(tmpdir.mkdir("cwd"))
cur_cwd = os.getcwd()
with utils.chdir(d):
assert os.getcwd() == d
@ -54,35 +48,35 @@ def test_byte_spec_validation():
assert bs.validate(None, 1) == 1
assert bs.validate(None, 1.0) == 1.0
assert bs.validate(None, '1K') == 1024
assert bs.validate(None, '1M') == 1024 * 1024
assert bs.validate(None, '1G') == 1024 * 1024 * 1024
assert bs.validate(None, '1T') == 1024 * 1024 * 1024 * 1024
assert bs.validate(None, "1K") == 1024
assert bs.validate(None, "1M") == 1024 * 1024
assert bs.validate(None, "1G") == 1024 * 1024 * 1024
assert bs.validate(None, "1T") == 1024 * 1024 * 1024 * 1024
with pytest.raises(traitlets.TraitError):
bs.validate(None, 'NK')
bs.validate(None, "NK")
with pytest.raises(traitlets.TraitError):
bs.validate(None, '1m')
bs.validate(None, "1m")
@pytest.mark.parametrize("input,expected", [
(["8888:8888"], {'8888/tcp': '8888'}),
(["8888:4321"], {'4321/tcp': '8888'}),
(["8888:4321/udp"], {'4321/udp': '8888'}),
(["8888:4321/udp", "8888:4321/tcp"], {'4321/udp': '8888',
'4321/tcp': '8888'}),
(['127.0.0.1:80:8000'], {'8000/tcp': ('127.0.0.1', '80')}),
(["8888:4321", "1234:12345"], {'4321/tcp': '8888', '12345/tcp': '1234'}),
])
@pytest.mark.parametrize(
"input,expected",
[
(["8888:8888"], {"8888/tcp": "8888"}),
(["8888:4321"], {"4321/tcp": "8888"}),
(["8888:4321/udp"], {"4321/udp": "8888"}),
(["8888:4321/udp", "8888:4321/tcp"], {"4321/udp": "8888", "4321/tcp": "8888"}),
(["127.0.0.1:80:8000"], {"8000/tcp": ("127.0.0.1", "80")}),
(["8888:4321", "1234:12345"], {"4321/tcp": "8888", "12345/tcp": "1234"}),
],
)
def test_valid_port_mapping(input, expected):
actual = utils.validate_and_generate_port_mapping(input)
assert actual == expected
@pytest.mark.parametrize("port_spec", [
"a8888:8888", "888:888/abc"
])
@pytest.mark.parametrize("port_spec", ["a8888:8888", "888:888/abc"])
def test_invalid_port_mapping(port_spec):
with pytest.raises(ValueError) as e:
utils.validate_and_generate_port_mapping([port_spec])
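
The parametrized cases double as documentation for the port-spec grammar: "host:container" with an optional "/protocol" suffix and an optional leading interface, expanded into the mapping format the docker SDK expects. Called directly (import path assumed):

    from repo2docker import utils

    assert utils.validate_and_generate_port_mapping(["8888:4321"]) == {
        "4321/tcp": "8888"
    }
    assert utils.validate_and_generate_port_mapping(["127.0.0.1:80:8000"]) == {
        "8000/tcp": ("127.0.0.1", "80")
    }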

View file

@ -6,6 +6,7 @@ import subprocess
import tempfile
import time
def test_volume_abspath():
"""
Validate that you can bind mount a volume onto an absolute dir & write to it
@ -15,18 +16,24 @@ def test_volume_abspath():
tmpdir = os.path.realpath(tmpdir)
username = os.getlogin()
subprocess.check_call([
'repo2docker',
'-v', '{}:/home/{}'.format(tmpdir, username),
'--user-id', str(os.geteuid()),
'--user-name', username,
tmpdir,
'--',
'/bin/bash',
'-c', 'echo -n {} > ts'.format(ts)
])
subprocess.check_call(
[
"repo2docker",
"-v",
"{}:/home/{}".format(tmpdir, username),
"--user-id",
str(os.geteuid()),
"--user-name",
username,
tmpdir,
"--",
"/bin/bash",
"-c",
"echo -n {} > ts".format(ts),
]
)
with open(os.path.join(tmpdir, 'ts')) as f:
with open(os.path.join(tmpdir, "ts")) as f:
assert f.read() == ts
@ -39,18 +46,24 @@ def test_volume_relpath():
ts = str(time.time())
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
subprocess.check_call([
'repo2docker',
'-v', '.:.',
'--user-id', str(os.geteuid()),
'--user-name', os.getlogin(),
tmpdir,
'--',
'/bin/bash',
'-c', 'echo -n {} > ts'.format(ts)
])
subprocess.check_call(
[
"repo2docker",
"-v",
".:.",
"--user-id",
str(os.geteuid()),
"--user-name",
os.getlogin(),
tmpdir,
"--",
"/bin/bash",
"-c",
"echo -n {} > ts".format(ts),
]
)
with open(os.path.join(tmpdir, 'ts')) as f:
with open(os.path.join(tmpdir, "ts")) as f:
assert f.read() == ts
finally:
os.chdir(curdir)

View file

@ -5,5 +5,5 @@ import sys
assert sys.version_info[:2] == (3, 7), sys.version
import jupyter
with open('/tmp/appendix') as f:
assert f.read().strip() == 'appendix'
with open("/tmp/appendix") as f:
assert f.read().strip() == "appendix"

View file

@ -12,7 +12,9 @@ try:
except ImportError:
pass
else:
raise Exception("'nbgitpuller' shouldn't have been installed from requirements3.txt")
raise Exception(
"'nbgitpuller' shouldn't have been installed from requirements3.txt"
)
# Python 3 is the executable used for the notebook server, this should
# have nbgitpuller installed

View file

@ -2,6 +2,6 @@
# Verify that ~/.local/bin & REPO_DIR/.local/bin are on the PATH
import os
assert os.path.expanduser('~/.local/bin') in os.getenv("PATH"), os.getenv("PATH")
assert os.getcwd() == os.environ['REPO_DIR']
assert '{}/.local/bin'.format(os.environ['REPO_DIR']) in os.getenv('PATH')
assert os.path.expanduser("~/.local/bin") in os.getenv("PATH"), os.getenv("PATH")
assert os.getcwd() == os.environ["REPO_DIR"]
assert "{}/.local/bin".format(os.environ["REPO_DIR"]) in os.getenv("PATH")

View file

@ -1,4 +1,3 @@
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
@ -277,6 +276,7 @@ https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
@ -308,11 +308,13 @@ def get_root():
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
@ -325,8 +327,10 @@ def get_root():
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
@ -348,6 +352,7 @@ def get_config_from_root(root):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
@ -372,17 +377,18 @@ HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
@ -390,10 +396,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
@ -418,7 +427,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
LONG_VERSION_PY[
"git"
] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
@ -993,7 +1004,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
@ -1002,7 +1013,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
@ -1010,19 +1021,26 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
@ -1037,8 +1055,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
@ -1046,10 +1063,19 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
@ -1072,17 +1098,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
@ -1091,10 +1116,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
@@ -1105,13 +1132,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
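The date munging on the last line is easiest to see with a sample "%ci" value:

    date = "2019-05-31 11:10:17 +0200"
    date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    # -> "2019-05-31T11:10:17+0200": the first replace() swaps the
    #    date/time separator for "T", the second glues on the offset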
@@ -1167,16 +1194,22 @@ def versions_from_parentdir(parentdir_prefix, root, verbose):
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
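This fallback never consults git at all; it only works for unpacked sdists and tarballs whose directory name carries the version. A minimal sketch, with an invented path:

    import os

    root = "/tmp/build/repo2docker-0.9.0"
    parentdir_prefix = "repo2docker-"
    dirname = os.path.basename(root)  # "repo2docker-0.9.0"
    if dirname.startswith(parentdir_prefix):
        version = dirname[len(parentdir_prefix) :]  # "0.9.0"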
@@ -1205,11 +1238,13 @@ def versions_from_file(filename):
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
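For orientation: the generated _version.py embeds its data as a JSON literal between two markers, roughly like the (abridged, invented) contents below; json.loads() of the captured group then recovers the dict:

    version_json = '''
    {
     "date": "2019-05-31T11:10:17+0200",
     "dirty": false,
     "error": null,
     "full-revisionid": "2414721...",
     "version": "0.9.0"
    }
    ''' # END VERSION_JSON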
@@ -1218,8 +1253,7 @@ def versions_from_file(filename):
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
@@ -1251,8 +1285,7 @@ def render_pep440(pieces):
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
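Two worked renderings, one per branch (pieces invented; the tagged branch sits in elided context above, so treat it as illustrative):

    # with a closest tag:
    #   {"closest-tag": "0.9.0", "distance": 14, "short": "2414721",
    #    "dirty": True}   ->  "0.9.0+14.g2414721.dirty"
    # exception #1, no tags at all:
    #   {"closest-tag": None, "distance": 14, "short": "2414721",
    #    "dirty": False}  ->  "0+untagged.14.g2414721"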
@@ -1366,11 +1399,13 @@ def render_git_describe_long(pieces):
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
@@ -1390,9 +1425,13 @@ def render(pieces, style):
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
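So the style only ever changes the "version" field. With the invented pieces from the render_pep440 example above, the renderers would produce roughly (recalled from versioneer's other render_* helpers, so treat as illustrative):

    # style "pep440"            -> "0.9.0+14.g2414721.dirty"
    # style "git-describe-long" -> "0.9.0-14-g2414721-dirty"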
class VersioneerBadRootError(Exception):
@@ -1415,8 +1454,9 @@ def get_versions(verbose=False):
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
@@ -1470,9 +1510,13 @@ def get_versions(verbose=False):
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
@@ -1521,6 +1565,7 @@ def get_cmdclass():
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
@@ -1553,14 +1598,15 @@ def get_cmdclass():
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
@@ -1581,17 +1627,21 @@ def get_cmdclass():
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
if "py2exe" in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
@@ -1610,13 +1660,17 @@ def get_cmdclass():
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
@@ -1643,8 +1697,10 @@ def get_cmdclass():
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
@@ -1699,11 +1755,13 @@ def do_setup():
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
except (
EnvironmentError,
configparser.NoSectionError,
configparser.NoOptionError,
) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
@@ -1712,15 +1770,18 @@ def do_setup():
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
@@ -1762,8 +1823,10 @@ def do_setup():
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else: