diff -Nru charliecloud-0.20/bin/ch-build charliecloud-0.21/bin/ch-build --- charliecloud-0.20/bin/ch-build 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/bin/ch-build 2020-12-18 20:25:06.000000000 +0000 @@ -11,7 +11,7 @@ $ $(basename "$0") [-b BUILDER] [--builder-info] -t TAG [ARGS ...] CONTEXT -BUILDER is one of: buildah ch-grow docker. +BUILDER is one of: buildah ch-image docker. ARGS are passed unchanged to the underlying builder. EOF ) @@ -48,8 +48,8 @@ buildah*) buildah --version ;; - ch-grow) - "${ch_bin}/ch-grow" --version + ch-image|ch-grow) + "${ch_bin}/ch-image" --version ;; docker) docker --version # no wrapper: sudo not needed for --version @@ -103,8 +103,8 @@ --runtime="$runtime" \ "$@" < $stdin ;; - ch-grow) - "${ch_bin}/ch-grow" build "$@" + ch-image|ch-grow) + "${ch_bin}/ch-image" build "$@" ;; docker) # Coordinate this list with test "build.bats/proxy variables". diff -Nru charliecloud-0.20/bin/ch-builder2tar charliecloud-0.21/bin/ch-builder2tar --- charliecloud-0.20/bin/ch-builder2tar 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/bin/ch-builder2tar 2020-12-18 20:25:06.000000000 +0000 @@ -130,10 +130,10 @@ ;; -ch-grow) +ch-image|ch-grow) echo "exporting" - storage=$("${ch_bin}/ch-grow" storage-path) + storage=$("${ch_bin}/ch-image" storage-path) ( cd "${storage}/img/${image}" && tar cf - . 
) | pv_ > "$tar" ;; diff -Nru charliecloud-0.20/bin/ch-fromhost charliecloud-0.21/bin/ch-fromhost --- charliecloud-0.20/bin/ch-fromhost 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/bin/ch-fromhost 2020-12-18 20:25:06.000000000 +0000 @@ -42,9 +42,7 @@ # shellcheck disable=SC2034 usage=$(cat <&2 rm -Rf --one-file-system "${newroot}" diff -Nru charliecloud-0.20/bin/ch-test charliecloud-0.21/bin/ch-test --- charliecloud-0.20/bin/ch-test 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/bin/ch-test 2020-12-18 20:25:06.000000000 +0000 @@ -64,11 +64,11 @@ builder_check () { printf 'checking builder ...\n' case $CH_BUILDER in - ch-grow) - if ! "${ch_bin}/ch-grow" --dependencies; then - fatal 'builder: ch-grow: missing dependencies' + ch-image|ch-grow) + if ! "${ch_bin}/ch-image" --dependencies; then + fatal 'builder: ch-image: missing dependencies' fi - bl=$(readlink -f "${ch_bin}/ch-grow") + bl=$(readlink -f "${ch_bin}/ch-image") bv=$("$bl" --version) ;; buildah) @@ -113,10 +113,29 @@ builder_choose method='default' fi - printf "%-*s %s (%s)\n" "$width" 'builder:' "$CH_BUILDER" "$method" if [[ $CH_BUILDER == ch-grow ]]; then - vset CH_GROW_STORAGE '' "$CH_GROW_STORAGE" "/var/tmp/$USER/ch-grow" \ - "$width" 'ch-grow storage' + export CH_BUILDER=ch-image + cat <<'EOF' 1>&2 + +WARNING: ch-grow is now called ch-image. We plan to remove the ch-grow name in +Charliecloud version 0.23. + +EOF + fi + if [[ -n $CH_GROW_STORAGE ]]; then + export CH_IMAGE_STORAGE=$CH_GROW_STORAGE + unset CH_GROW_STORAGE + cat <<'EOF' 1>&2 + +WARNING: $CH_GROW_STORAGE is now called $CH_IMAGE_STORAGE. We plan to remove +the old name in Charliecloud version 0.23. 
+ +EOF + fi + printf "%-*s %s (%s)\n" "$width" 'builder:' "$CH_BUILDER" "$method" + if [[ $CH_BUILDER == ch-image ]]; then + vset CH_IMAGE_STORAGE '' "$CH_IMAGE_STORAGE" "/var/tmp/$USER/ch-image" \ + "$width" 'ch-image storage' fi } @@ -228,7 +247,7 @@ width=$1 default=no # Default to pedantic on CI or if user is a contributor. - if [[ -n $ch_contributor || -n $TRAVIS ]]; then + if [[ -n $ch_contributor || -n $CI ]]; then default=yes fi vset ch_pedantic "$pedantic" '' $default "$width" 'pedantic mode' @@ -760,6 +779,12 @@ require_unset imgdir imgdir=${opt#*=} ;; + --is-pedantic) # undocumented; for CI + is_pedantic=yes + ;; + --is-sudo) # undocumented; for CI + is_sudo=yes + ;; --pack-dir) require_unset tardir tardir=$1; shift @@ -910,6 +935,16 @@ printf '\n' fi +if [[ -n $is_pedantic ]]; then + printf 'exiting per --is-pedantic\n' + if [[ -n $ch_pedantic ]]; then exit 0; else exit 1; fi +fi + +if [[ -n $is_sudo ]]; then + printf 'exiting per --is-sudo\n' + if [[ -n $CH_TEST_SUDO ]]; then exit 0; else exit 1; fi +fi + if [[ -n $dry ]];then printf 'exiting per --dry-run\n' exit 0 diff -Nru charliecloud-0.20/bin/Makefile.am charliecloud-0.21/bin/Makefile.am --- charliecloud-0.20/bin/Makefile.am 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/bin/Makefile.am 2020-12-18 20:25:06.000000000 +0000 @@ -33,17 +33,19 @@ ## Python scripts - need text processing -bin_SCRIPTS = ch-run-oci -EXTRA_SCRIPTS = ch-grow -if ENABLE_CH_GROW -bin_SCRIPTS += ch-grow +bin_SCRIPTS = ch-run-oci # scripts to build +EXTRA_SCRIPTS = ch-image ch-grow # more scripts that *may* be built +if ENABLE_CH_IMAGE +bin_SCRIPTS += ch-image ch-grow endif -EXTRA_DIST = ch-grow.py.in ch-run-oci.py.in -CLEANFILES = $(bin_SCRIPTS) +EXTRA_DIST = ch-grow.py.in ch-image.py.in ch-run-oci.py.in +CLEANFILES = $(EXTRA_SCRIPTS) ch-grow: ch-grow.py.in +ch-image: ch-image.py.in ch-run-oci: ch-run-oci.py.in $(bin_SCRIPTS): %: %.py.in + rm -f $@ sed -E 's|%PYTHON_SHEBANG%|@PYTHON_SHEBANG@|' < $< > $@ - chmod +rwx 
$@ # respects umask + chmod +rx,-w $@ # respects umask diff -Nru charliecloud-0.20/configure.ac charliecloud-0.21/configure.ac --- charliecloud-0.20/configure.ac 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/configure.ac 2020-12-18 20:25:06.000000000 +0000 @@ -106,10 +106,10 @@ AS_HELP_STRING([--disable-test], [test suite]), [], [enable_test=yes]) -AC_ARG_ENABLE([ch-grow], - AS_HELP_STRING([--disable-ch-grow], - [ch-grow unprivileged builder & image manager]), - [], [enable_ch_grow=yes]) +AC_ARG_ENABLE([ch-image], + AS_HELP_STRING([--disable-ch-image], + [ch-image unprivileged builder & image manager]), + [], [enable_ch_image=yes]) ## Feature test macros @@ -210,7 +210,7 @@ AS_IF([test -n "$SPHINX"], [ AS_IF([test -z "$sphinx_python"], [ AC_MSG_CHECKING([for sphinx-build Python]) - sphinx_python=$(head -1 "$SPHINX" | sed -E 's/^..//') + sphinx_python=$(head -1 "$SPHINX" | sed -E -e 's/^#!\s*//' -e 's/\s+$//') AC_MSG_RESULT([$sphinx_python]) AC_MSG_CHECKING([if "$sphinx_python" starts with slash]) AS_CASE([$sphinx_python], @@ -228,7 +228,7 @@ DOCUTILS=$sphinx_python # FIXME: output is confusing CH_CHECK_VERSION([DOCUTILS], [$vmin_docutils], [-c 'import docutils; print(docutils.__version__)']) -]) +], [DOCUTILS_VERSION_NOTE='moot b/c no sphinx-build']) # "sphinx-rtd-theme" module vmin_rtd=0.2.4 @@ -247,7 +247,7 @@ AC_MSG_RESULT([$have_rtd]) CH_CHECK_VERSION([RTD], [$vmin_rtd], [-c 'import sphinx_rtd_theme; print(sphinx_rtd_theme.__version__)']) -]) +], [RTD_VERSION_NOTE='moot b/c no sphinx-build']) ## Feature tests - run time @@ -429,7 +429,7 @@ AM_CONDITIONAL([ENABLE_HTML], [test $enable_html = yes]) AM_CONDITIONAL([ENABLE_MAN], [test $enable_man = yes]) AM_CONDITIONAL([ENABLE_TEST], [test $enable_test = yes]) -AM_CONDITIONAL([ENABLE_CH_GROW], [test $enable_ch_grow = yes]) +AM_CONDITIONAL([ENABLE_CH_IMAGE], [test $enable_ch_image = yes]) ## Prepare report. 
@@ -448,14 +448,14 @@ [have_buildah=yes], [have_buildah=no]) -AS_IF([ test $enable_ch_grow = yes \ +AS_IF([ test $enable_ch_image = yes \ && test -n "$PYTHON" \ && test -n "$PYTHON_SHEBANG" \ && test -n "$LARK" \ && test -n "$REQUESTS" \ && test $have_ch_run = yes], - [have_ch_grow=yes], - [have_ch_grow=no]) + [have_ch_image=yes], + [have_ch_image=no]) AS_IF([ test -n "$DOCKER" \ && test -n "$MKTEMP"], @@ -465,7 +465,7 @@ # managing container images AS_IF([ test $have_buildah = yes \ - || test $have_ch_grow = yes \ + || test $have_ch_image = yes \ || test $have_docker = yes], [have_any_builder=yes], [have_any_builder=no]) @@ -582,7 +582,7 @@ HTML documentation ... ${enable_html} man pages ... ${enable_man} test suite ... ${enable_test} - ch-grow(1) ... ${enable_ch_grow} + ch-image(1) ... ${enable_ch_image} Building images via our wrappers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -590,8 +590,8 @@ with Buildah: ${have_buildah} Buildah ≥ $vmin_buildah ... ${BUILDAH_VERSION_NOTE} - with ch-grow(1): ${have_ch_grow} - enabled ... ${enable_ch_grow} + with ch-image(1): ${have_ch_image} + enabled ... ${enable_ch_image} Python shebang line ... ${PYTHON_SHEBANG:-none} Python in shebang ≥ $vmin_python ... ${PYTHON_VERSION_NOTE} "lark-parser" module ≥ $vmin_lark ... 
${LARK_VERSION_NOTE} diff -Nru charliecloud-0.20/debian/changelog charliecloud-0.21/debian/changelog --- charliecloud-0.20/debian/changelog 2020-10-25 17:07:39.000000000 +0000 +++ charliecloud-0.21/debian/changelog 2020-12-20 14:44:37.000000000 +0000 @@ -1,3 +1,13 @@ +charliecloud (0.21-1) unstable; urgency=medium + + * New upstream version 0.21 + * Drop patch move-charliecloud-man-page-to-section7.patch (applied upstream) + * d/charliecloud-builders.{install,manpages}: Add ch-image builder + * Bump Standards-Version to 4.5.1 (no changes required) + * Override Lintian's duplicate-files tag for various example files + + -- Peter Wienemann Sun, 20 Dec 2020 15:44:37 +0100 + charliecloud (0.20-1) unstable; urgency=medium * New upstream version 0.20 diff -Nru charliecloud-0.20/debian/charliecloud-builders.install charliecloud-0.21/debian/charliecloud-builders.install --- charliecloud-0.20/debian/charliecloud-builders.install 2020-10-25 17:07:39.000000000 +0000 +++ charliecloud-0.21/debian/charliecloud-builders.install 2020-12-20 14:44:37.000000000 +0000 @@ -4,6 +4,7 @@ debian/tmp/usr/bin/ch-builder2tar debian/tmp/usr/bin/ch-dir2squash debian/tmp/usr/bin/ch-fromhost +debian/tmp/usr/bin/ch-image debian/tmp/usr/bin/ch-grow debian/tmp/usr/bin/ch-pull2dir debian/tmp/usr/bin/ch-pull2tar diff -Nru charliecloud-0.20/debian/charliecloud-builders.manpages charliecloud-0.21/debian/charliecloud-builders.manpages --- charliecloud-0.20/debian/charliecloud-builders.manpages 2020-10-25 17:07:39.000000000 +0000 +++ charliecloud-0.21/debian/charliecloud-builders.manpages 2020-12-20 14:44:37.000000000 +0000 @@ -4,6 +4,7 @@ debian/tmp/usr/share/man/man1/ch-builder2tar.1 debian/tmp/usr/share/man/man1/ch-dir2squash.1 debian/tmp/usr/share/man/man1/ch-fromhost.1 +debian/tmp/usr/share/man/man1/ch-image.1 debian/tmp/usr/share/man/man1/ch-grow.1 debian/tmp/usr/share/man/man1/ch-pull2dir.1 debian/tmp/usr/share/man/man1/ch-pull2tar.1 diff -Nru 
charliecloud-0.20/debian/charliecloud-doc.lintian-overrides charliecloud-0.21/debian/charliecloud-doc.lintian-overrides --- charliecloud-0.20/debian/charliecloud-doc.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/debian/charliecloud-doc.lintian-overrides 2020-12-20 14:44:37.000000000 +0000 @@ -0,0 +1,2 @@ +# The following files are used by symlink sensitive unit tests +charliecloud-doc: duplicate-files usr/share/doc/charliecloud/examples/copy/dirF/dir19a2/dir19b2/file19c1 usr/share/doc/charliecloud/examples/copy/dirF/dir19a2/dir19b3/file19c1 usr/share/doc/charliecloud/examples/copy/dirF/dir19a2/file19b2 usr/share/doc/charliecloud/examples/copy/dirF/dir19a2/file19b3 usr/share/doc/charliecloud/examples/copy/dirF/dir19a3/file19b1 usr/share/doc/charliecloud/examples/copy/dirF/file19a2 usr/share/doc/charliecloud/examples/copy/dirF/file19a3 usr/share/doc/charliecloud/examples/copy/dirG/filey usr/share/doc/charliecloud/examples/copy/dirG/s_dir1 usr/share/doc/charliecloud/examples/copy/dirG/s_file1 diff -Nru charliecloud-0.20/debian/control charliecloud-0.21/debian/control --- charliecloud-0.20/debian/control 2020-10-25 17:07:39.000000000 +0000 +++ charliecloud-0.21/debian/control 2020-12-20 14:44:37.000000000 +0000 @@ -7,7 +7,7 @@ po-debconf, autoconf-archive, python3-sphinx-rtd-theme -Standards-Version: 4.5.0 +Standards-Version: 4.5.1 Rules-Requires-Root: no Homepage: https://hpc.github.io/charliecloud/ Vcs-Git: https://salsa.debian.org/hpc-team/charliecloud.git diff -Nru charliecloud-0.20/debian/patches/move-charliecloud-man-page-to-section7.patch charliecloud-0.21/debian/patches/move-charliecloud-man-page-to-section7.patch --- charliecloud-0.20/debian/patches/move-charliecloud-man-page-to-section7.patch 2020-10-25 17:07:39.000000000 +0000 +++ charliecloud-0.21/debian/patches/move-charliecloud-man-page-to-section7.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,152 +0,0 @@ -From: Peter Wienemann -From: Reid Priedhorsky -Date: Sun, 25 Oct 
2020 17:38:09 +0100 -Subject: Add patch to move charliecloud man page to section 7 -Forwarded: https://github.com/hpc/charliecloud/pull/880 - ---- - doc/Makefile.am | 8 ++++---- - doc/conf.py | 2 +- - doc/see_also.rst | 2 +- - packaging/fedora/charliecloud.spec | 1 + - test/docs-sane.py.in | 13 ++++++++----- - test/run/build-rpms.bats | 4 ++-- - 6 files changed, 17 insertions(+), 13 deletions(-) - -diff --git a/doc/Makefile.am b/doc/Makefile.am -index 4d599a5..ed99117 100644 ---- a/doc/Makefile.am -+++ b/doc/Makefile.am -@@ -56,8 +56,8 @@ see_also.rst \ - tutorial.rst - - if ENABLE_MAN --man1_MANS = \ --man/charliecloud.1 \ -+man_MANS = \ -+man/charliecloud.7 \ - man/ch-build.1 \ - man/ch-build2dir.1 \ - man/ch-builder2squash.1 \ -@@ -93,7 +93,7 @@ endif - - # NOTE: ./html might be a Git checkout to support "make web", so make sure not - # to delete it. --CLEANFILES = $(man1_MANS) $(nobase_html_DATA) \ -+CLEANFILES = $(man_MANS) $(nobase_html_DATA) \ - _deps.rst html/.buildinfo html/.nojekyll - if ENABLE_HTML - # Automake can't remove directories. 
-@@ -165,6 +165,6 @@ if ENABLE_HTML - HTML_FIRST = html - endif - --$(man1_MANS): man -+$(man_MANS): man - man: mkdir_issue115 ../lib/version.txt _deps.rst $(HTML_FIRST) - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man -diff --git a/doc/conf.py b/doc/conf.py -index a25c7b1..7519727 100644 ---- a/doc/conf.py -+++ b/doc/conf.py -@@ -243,7 +243,7 @@ latex_documents = [ - man_pages = [ - ("charliecloud", "charliecloud", - "Lightweight user-defined software stacks for high-performance computing", -- [], 1), -+ [], 7), - ("ch-build", "ch-build", - "Build an image and place it in the builder's back-end storage", - [], 1), -diff --git a/doc/see_also.rst b/doc/see_also.rst -index 3af105b..2879b60 100644 ---- a/doc/see_also.rst -+++ b/doc/see_also.rst -@@ -1,6 +1,6 @@ - See also - ======== - --charliecloud(1) -+charliecloud(7) - - Full documentation at: -diff --git a/packaging/fedora/charliecloud.spec b/packaging/fedora/charliecloud.spec -index 4ae2ffe..0875e36 100644 ---- a/packaging/fedora/charliecloud.spec -+++ b/packaging/fedora/charliecloud.spec -@@ -115,6 +115,7 @@ ln -s "${sphinxdir}/js" %{buildroot}%{_pkgdocdir}/html/_static/js - %license LICENSE - %doc README.rst %{?el7:README.EL7} - %{_mandir}/man1/ch* -+%{_mandir}/man7/charliecloud* - %{_pkgdocdir}/examples - - # Library files. -diff --git a/test/docs-sane.py.in b/test/docs-sane.py.in -index 964de5a..62ae792 100644 ---- a/test/docs-sane.py.in -+++ b/test/docs-sane.py.in -@@ -6,7 +6,7 @@ - # - # 1. Man page consistency. - # --# a. man/charliecloud.1 exists. -+# a. man/charliecloud.7 exists. - # - # b. Every executable FOO in bin has: - # -@@ -14,7 +14,7 @@ - # - doc/FOO_desc.rst - # - doc/man/FOO.1 - # - a section in doc/command-usage.rst --# - an entry under "See also" in charliecloud.1 -+# - an entry under "See also" in charliecloud.7 - # - # c. There aren't the things in (b) except for the executables (modulo a - # few execeptions for the other documentation source files). 
-@@ -89,17 +89,20 @@ def check_man(): - lose("conf.py: startdocname != name: %s != %s" % (docname, name)) - if (len(authors) != 0): - lose("conf.py: bad authors: %s: %s" % (name, authors)) -- if (section != 1): -- lose("conf.py: bad section: %s: %s != 1" % (name, section)) - if (name != "charliecloud"): -+ if (section != 1): -+ lose("conf.py: bad section: %s: %s != 1" % (name, section)) - if (name not in helps): - lose("conf.py: unexpected man page: %s" % name) - elif (desc + "." != helps[name]): - lose("conf.py: bad summary: %s: %s" % (name, desc)) -+ else: -+ if (section != 7): -+ lose("conf.py: bad section: %s: %s != 7" % (name, section)) - - os.chdir(CH_BASE + "/doc/man") - -- mans = set(glob.glob("*.1")) - { "charliecloud.1" } -+ mans = set(glob.glob("*.1")) - mans_expected = { i + ".1" for i in execs } - lose_lots("unexpected man", mans - mans_expected) - lose_lots("missing man", mans_expected - mans) -diff --git a/test/run/build-rpms.bats b/test/run/build-rpms.bats -index cf4adbe..79ac4e3 100644 ---- a/test/run/build-rpms.bats -+++ b/test/run/build-rpms.bats -@@ -37,7 +37,7 @@ setup () { - [[ $output = *'/usr/bin/ch-run'* ]] - [[ $output = *'/usr/lib64/charliecloud/base.sh'* ]] - [[ $output = *'/usr/share/doc/charliecloud-'*'/examples/lammps/Dockerfile'* ]] -- [[ $output = *'/usr/share/man/man1/charliecloud.1.gz'* ]] -+ [[ $output = *'/usr/share/man/man7/charliecloud.7.gz'* ]] - run ch-run "$img" -- rpm -ql "charliecloud-debuginfo" - echo "$output" - [[ $status -eq 0 ]] -@@ -103,7 +103,7 @@ setup () { - [[ $output = *'/usr/bin/ch-run'* ]] - [[ $output = *'/usr/lib64/charliecloud/base.sh'* ]] - [[ $output = *'/usr/share/doc/charliecloud/examples/lammps/Dockerfile'* ]] -- [[ $output = *'/usr/share/man/man1/charliecloud.1.gz'* ]] -+ [[ $output = *'/usr/share/man/man7/charliecloud.7.gz'* ]] - run ch-run "$img" -- rpm -ql "charliecloud-debuginfo" - echo "$output" - [[ $status -eq 0 ]] diff -Nru charliecloud-0.20/debian/patches/series 
charliecloud-0.21/debian/patches/series --- charliecloud-0.20/debian/patches/series 2020-10-25 17:07:39.000000000 +0000 +++ charliecloud-0.21/debian/patches/series 2020-12-20 14:44:37.000000000 +0000 @@ -1,2 +1 @@ adjust-test-suite-path.patch -move-charliecloud-man-page-to-section7.patch diff -Nru charliecloud-0.20/doc/charliecloud.rst charliecloud-0.21/doc/charliecloud.rst --- charliecloud-0.20/doc/charliecloud.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/charliecloud.rst 2020-12-18 20:25:06.000000000 +0000 @@ -18,6 +18,7 @@ ch-dir2squash(1), ch-fromhost(1), ch-grow(1), +ch-image(1), ch-mount(1), ch-pull2dir(1), ch-pull2tar(1), diff -Nru charliecloud-0.20/doc/ch-build_desc.rst charliecloud-0.21/doc/ch-build_desc.rst --- charliecloud-0.20/doc/ch-build_desc.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/ch-build_desc.rst 2020-12-18 20:25:06.000000000 +0000 @@ -24,7 +24,7 @@ :code:`ch-run` (via :code:`ch-run-oci`) for :code:`RUN` instructions. This requires Buildah v1.10.1+; see the install instructions. - * :code:`ch-grow`: Our internal builder. + * :code:`ch-image`: Our internal builder. Supported builders, privileged: @@ -46,7 +46,7 @@ Environment variable Default - :code:`docker` if Docker is installed; otherwise, :code:`ch-grow`. + :code:`docker` if Docker is installed; otherwise, :code:`ch-image`. 
Other arguments: @@ -95,12 +95,12 @@ $ ch-build -t foo --file=/bar/Dockerfile.baz /bar -Equivalent to the first example, but use :code:`ch-grow` even if Docker is +Equivalent to the first example, but use :code:`ch-image` even if Docker is installed:: - $ ch-build -b ch-grow -t foo /bar + $ ch-build -b ch-image -t foo /bar Equivalent to above:: - $ export CH_BUILDER=ch-grow + $ export CH_BUILDER=ch-image $ ch-build -t foo /bar diff -Nru charliecloud-0.20/doc/ch-fromhost_desc.rst charliecloud-0.21/doc/ch-fromhost_desc.rst --- charliecloud-0.20/doc/ch-fromhost_desc.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/ch-fromhost_desc.rst 2020-12-18 20:25:06.000000000 +0000 @@ -17,10 +17,12 @@ Inject files from the host into the Charliecloud image directory :code:`IMGDIR`. -The purpose of this command is to provide host-specific files, such as GPU -libraries, to a container. It should be run after :code:`ch-tar2dir` and -before :code:`ch-run`. After invocation, the image is no longer portable to -other hosts. +The purpose of this command is to inject files into a container image that are +necessary to run the container on a specific host; e.g., GPU libraries that +are tied to a specific kernel version. **It is not a general copy-to-image +tool**; see further discussion on use cases below. It should be run after +:code:`ch-tar2dir` and before :code:`ch-run`. After invocation, the image is +no longer portable to other hosts. Injection is not atomic; if an error occurs partway through injection, the image is left in an undefined state. Injection is currently implemented using @@ -92,6 +94,34 @@ Print version and exit. +When to use :code:`ch-fromhost` +=============================== + +This command does a lot of heuristic magic; while it *can* copy arbitrary +files into an image, this usage is discouraged and prone to error. Here are +some use cases and the recommended approach: + +1. 
*I have some files on my build host that I want to include in the image.* + Use the :code:`COPY` instruction within your Dockerfile. Note that it's OK + to build an image that meets your specific needs but isn't generally + portable, e.g., only runs on specific micro-architectures you're using. + +2. *I have an already built image and want to install a program I compiled + separately into the image.* Consider whether a building a new derived image + with a Dockerfile is appropriate. Another good option is to bind-mount the + directory containing your program at run time. A less good option is to + :code:`cp(1)` the program into your image, because this permanently alters + the image in a non-reproducible way. + +3. *I have some shared libraries that I need in the image for functionality or + performance, and they aren't available in a place where I can use* + :code:`COPY`. This is the intended use case of :code:`ch-fromhost`. You can + use :code:`--cmd`, :code:`--file`, and/or :code:`--path` to put together a + custom solution. But, please consider filing an issue so we can package + your functionality with a tidy option like :code:`--cray-mpi` or + :code:`--nvidia`. + + :code:`--cray-mpi` dependencies and quirks ========================================== diff -Nru charliecloud-0.20/doc/ch-grow_desc.rst charliecloud-0.21/doc/ch-grow_desc.rst --- charliecloud-0.20/doc/ch-grow_desc.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/ch-grow_desc.rst 2020-12-18 20:25:06.000000000 +0000 @@ -1,423 +1,4 @@ Synopsis ======== -:: - - $ ch-grow [...] build [-t TAG] [-f DOCKERFILE] [...] CONTEXT - $ ch-grow [...] list - $ ch-grow [...] pull [...] IMAGE_REF [IMAGE_DIR] - $ ch-grow [...] storage-path - $ ch-grow { --help | --version | --dependencies } - - -Description -=========== - -:code:`ch-grow` is a tool for building and manipulating container images, but -not running them (for that you want :code:`ch-run`). 
It is completely -unprivileged, with no setuid/setgid/setcap helpers. - -Options that print brief information and then exit: - - :code:`-h`, :code:`--help` - Print help and exit successfully. - - :code:`--dependencies` - Report dependency problems on standard output, if any, and exit. If all is - well, there is no output and the exit is successful; in case of problems, - the exit is unsuccessful. - - :code:`--version` - Print version number and exit successfully. - -Common options placed before the sub-command: - - :code:`--no-cache` - Download everything needed, ignoring the cache. - - :code:`-s`, :code:`--storage DIR` - Set the storage directory (see below for important details). - - :code:`-v`, :code:`--verbose` - Print extra chatter; can be repeated. - - -Storage directory -================= - -:code:`ch-grow` maintains state using normal files and directories, including -unpacked container images, located in its *storage directory*. There is no -notion of storage drivers, graph drivers, etc., to select and/or configure. In -descending order of priority, this directory is located at: - - :code:`-s`, :code:`--storage DIR` - Command line option. - - :code:`$CH_GROW_STORAGE` - Environment variable. - - :code:`/var/tmp/$USER/ch-grow` - Default. - -The storage directory can reside on any filesystem. However, it contains lots -of small files and metadata traffic can be intense. For example, the -Charliecloud test suite uses approximately 400,000 files and directories in -the storage directory as of this writing. Place it on a filesystem appropriate -for this; tmpfs'es such as :code:`/var/tmp` are a good choice if you have -enough RAM (:code:`/tmp` is not recommended because :code:`ch-run` bind-mounts -it into containers by default). - -While you can currently poke around in the storage directory and find unpacked -images runnable with :code:`ch-run`, this is not a supported use case. 
The -supported workflow uses :code:`ch-builder2tar` or :code:`ch-builder2squash` to -obtain a packed image; see the tutorial for details. - -.. warning:: - - Network filesystems, especially Lustre, are typically bad choices for the - storage directory. This is a site-specific question and your local support - will likely have strong opinions. - - -Subcommands -=========== - -:code:`build` -------------- - -Build an image from a Dockerfile and put it in the storage directory. Use -:code:`ch-run(1)` to execute :code:`RUN` instructions. - -Required argument: - - :code:`CONTEXT` - Path to context directory; this is the root of :code:`COPY` and - :code:`ADD` instructions in the Dockerfile. - -Options: - - :code:`--build-arg KEY[=VALUE]` - Set build-time variable :code:`KEY` defined by :code:`ARG` instruction - to :code:`VALUE`. If :code:`VALUE` not specified, use the value of - environment variable :code:`KEY`. - - :code:`-f`, :code:`--file DOCKERFILE` - Use :code:`DOCKERFILE` instead of :code:`CONTEXT/Dockerfile`. Specify a - single hyphen (:code:`-`) to use standard input; note that in this case, - the context directory is still provided, which matches :code:`docker build - -f -` behavior. - - :code:`-n`, :code:`--dry-run` - Don't actually execute any Dockerfile instructions. - - :code:`--no-fakeroot` - Don't try any of the unprivileged build workarounds (see section "Quirks - of a fully unprivileged builds" below). - - :code:`--parse-only` - Stop after parsing the Dockerfile. - - :code:`-t`, :code:`-tag TAG` - Name of image to create. If not specified, use the final component of path - :code:`CONTEXT`. Append :code:`:latest` if no colon present. - -:code:`storage-path` --------------------- - -Print the storage directory path and exit. - -:code:`pull` ------------- - -Pull the image described by the image reference :code:`IMAGE_REF` from a -repository by HTTPS. See the FAQ for the gory details on specifying image -references. 
- -This script does a fair amount of validation and fixing of the layer tarballs -before flattening in order to support unprivileged use despite image problems -we frequently see in the wild. For example, device files are ignored, and file -and directory permissions are increased to a minimum of :code:`rwx------` and -:code:`rw-------` respectively. Note, however, that symlinks pointing outside -the image are permitted, because they are not resolved until runtime within a -container. - -Destination argument: - - :code:`IMAGE_DIR` - If specified, place the unpacked image at this path; it is then ready for - use by :code:`ch-run` or other tools. The storage directory will not - contain a copy of the image, i.e., it is only unpacked once. - -Options: - - :code:`--parse-only` - Parse :code:`IMAGE_REF`, print a parse report, and exit successfully - without talking to the internet or touching the storage directory. - - -Quirks of a fully unprivileged build -==================================== - -:code:`ch-grow` is *fully* unprivileged. It runs all instructions as the -normal user who invokes it, does not use any setuid or setcap helper programs, -and does not use :code:`/etc/subuid` or :code:`/etc/subgid`, in contrast to -the “rootless” mode of some competing builders. This is accomplished by -executing :code:`RUN` instructions with :code:`ch-run -w --uid=0 --gid=0` (and -some other arguments), i.e., your host EUID and EGID both mapped to zero -inside the container, and only one UID (zero) and GID (zero) are available -inside the container. - -Under this arrangement, processes running in the container *appear* to be -running as root, but many privileged system calls will fail without the -workarounds described below. **This affects any fully unprivileged -container build, not just Charliecloud.** - -The most common time to see this is installing packages. For example, here is -RPM failing to :code:`chown(2)` a file, which makes the package update fail: - -.. 
code-block:: none - - Updating : 1:dbus-1.10.24-13.el7_6.x86_64 2/4 - Error unpacking rpm package 1:dbus-1.10.24-13.el7_6.x86_64 - error: unpacking of archive failed on file /usr/libexec/dbus-1/dbus-daemon-launch-helper;5cffd726: cpio: chown - Cleanup : 1:dbus-libs-1.10.24-12.el7.x86_64 3/4 - error: dbus-1:1.10.24-13.el7_6.x86_64: install failed - -This one is (ironically) :code:`apt-get` failing to drop privileges: - -.. code-block:: none - - E: setgroups 65534 failed - setgroups (1: Operation not permitted) - E: setegid 65534 failed - setegid (22: Invalid argument) - E: seteuid 100 failed - seteuid (22: Invalid argument) - E: setgroups 0 failed - setgroups (1: Operation not permitted) - -The solution :code:`ch-grow` uses is to intercept these system calls and fake -a successful result. We accomplish this by altering the Dockerfile to call -:code:`fakeroot(1)` (of which there are several implementations) for -:code:`RUN` instructions that seem to need it. There are two basic steps: - - 1. After :code:`FROM`, install a :code:`fakeroot(1)` implementation. This - sometimes also needs extra steps like turning off the :code:`apt` sandbox - (for Debian Buster) or enabling EPEL (for CentOS/RHEL). - - 2. Prepend :code:`fakeroot` to :code:`RUN` instructions that seem to need - it, e.g. ones that contain :code:`apt`, :code:`apt-get`, :code:`dpkg` for - Debian derivatives and :code:`dnf`, :code:`rpm`, or :code:`yum` for - RPM-based distributions. - -The details are specific to each distribution. :code:`ch-grow` analyzes image -content (e.g., grepping :code:`/etc/debian_version`) to select a -configuration; see :code:`lib/fakeroot.py` for details. :code:`ch-grow` prints -exactly what it is doing. - -To turn off this behavior, use the :code:`--no-fakeroot` option. - - -Compatibility with other Dockerfile interpreters -================================================ - -:code:`ch-grow` is an independent implementation and shares no code with other -Dockerfile interpreters. 
It uses a formal Dockerfile parsing grammar developed -from the `Dockerfile reference documentation -`_ and miscellaneous other -sources, which you can examine in the source code. - -We believe this independence is valuable for several reasons. First, it helps -the community examine Dockerfile syntax and semantics critically, think -rigorously about what is really needed, and build a more robust standard. -Second, it yields disjoint sets of bugs (note that Podman, Buildah, and Docker -all share the same Dockerfile parser). Third, because it is a much smaller -code base, it illustrates how Dockerfiles work more clearly. Finally, it -allows straightforward extensions if needed to support scientific computing. - -:code:`ch-grow` tries hard to be compatible with Docker and other -interpreters, though as an independent implementation, it is not -bug-compatible. - -This section describes differences from the Dockerfile reference that we -expect to be approximately permanent. For an overview of features we have not -yet implemented and our plans, see our `road map -`_ on GitHub. Plain old bugs -are in our `GitHub issues `_. - -None of these are set in stone. We are very interested in feedback on our -assessments and open questions. This helps us prioritize new features and -revise our thinking about what is needed for HPC containers. - -Context directory ------------------ - -The context directory is bind-mounted into the build, rather than copied like -Docker. Thus, the size of the context is immaterial, and the build reads -directly from storage like any other local process would. However, you still -can't access anything outside the context directory. - -Authentication --------------- - -:code:`ch-grow` can authenticate using one-time passwords, e.g. those provided -by a security token. Unlike :code:`docker login`, it does not assume passwords -are persistent. 
- -Environment variables ---------------------- - -Variable substitution happens for *all* instructions, not just the ones listed -in the Dockerfile reference. - -:code:`ARG` and :code:`ENV` cause cache misses upon *definition*, in contrast -with Docker where these variables miss upon *use*, except for certain -cache-excluded variables that never cause misses, listed below. - -Like Docker, :code:`ch-grow` pre-defines the following proxy variables, which -do not require an :code:`ARG` instruction. However, they are available if the -same-named environment variable is defined; :code:`--build-arg` is not -required. Changes to these variables do not cause a cache miss. - -.. code-block:: sh - - HTTP_PROXY - http_proxy - HTTPS_PROXY - https_proxy - FTP_PROXY - ftp_proxy - NO_PROXY - no_proxy - -The following variables are also pre-defined: - -.. code-block:: sh - - PATH=/ch/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - TAR_OPTIONS=--no-same-owner - -Note that :code:`ARG` and :code:`ENV` have different syntax despite very -similar semantics. - -:code:`COPY` ------------- - -Especially for people used to UNIX :code:`cp(1)`, the semantics of the -Dockerfile :code:`COPY` instruction can be confusing. - -Most notably, when a source of the copy is a directory, the *contents* of that -directory, not the directory itself, are copied. This is documented, but it's -a real gotcha because that's not what :code:`cp(1)` does, and it means that -many things you can do in one :code:`cp(1)` command require multiple -:code:`COPY` instructions. - -Also, the reference documentation is incomplete. In our experience, Docker -also behaves as follows; :code:`ch-grow` does the same in an attempt to be -bug-compatible for the :code:`COPY` instructions. - -1. You can use absolute paths in the source; the root is the context - directory. - -2. Destination directories are created if they don't exist in the following - situations: - - 1. If the destination path ends in slash. 
(Documented.) - - 2. If the number of sources is greater than 1, either by wildcard or - explicitly, regardless of whether the destination ends in slash. (Not - documented.) - - 3. If there is a single source and it is a directory. (Not documented.) - -3. Symbolic links are particularly messy (this is not documented): - - 1. If named in sources either explicitly or by wildcard, symlinks are - dereferenced, i.e., the result is a copy of the symlink target, not the - symlink itself. Keep in mind that directory contents are copied, not - directories. - - 2. If within a directory named in sources, symlinks are copied as symlinks. - -We expect the following differences to be permanent: - -* Wildcards use Python glob semantics, not the Go semantics. - -* :code:`COPY --chown` is ignored, because it doesn't make sense in an - unprivileged build. - -Features we do not plan to support ----------------------------------- - -* Parser directives are not supported. We have not identified a need for any - of them. - -* :code:`EXPOSE`: Charliecloud does not use the network namespace, so - containerized processes can simply listen on a host port like other - unprivileged processes. - -* :code:`HEALTHCHECK`: This instruction's main use case is monitoring server - processes rather than applications. Also, implementing it requires a - container supervisor daemon, which we have no plans to add. - -* :code:`MAINTAINER` is deprecated. - -* :code:`STOPSIGNAL` requires a container supervisor daemon process, which we - have no plans to add. - -* :code:`USER` does not make sense for unprivileged builds. - -* :code:`VOLUME`: This instruction is not currently supported. Charliecloud - has good support for bind mounts; we anticipate that it will continue to - focus on that and will not introduce the volume management features that - Docker has. - - -Environment variables -===================== - -.. 
include:: py_env.rst - - -Examples -======== - -:code:`build` -------------- - -Build image :code:`bar` using :code:`./foo/bar/Dockerfile` and context -directory :code:`./foo/bar`:: - - $ ch-grow build -t bar -f ./foo/bar/Dockerfile ./foo/bar - [...] - grown in 4 instructions: bar - -Same, but infer the image name and Dockerfile from the context directory -path:: - - $ ch-grow build ./foo/bar - [...] - grown in 4 instructions: bar - -:code:`pull` ------------- - -Download the Debian Buster image and place it in the storage directory:: - - $ ch-grow pull debian:buster - pulling image: debian:buster - - manifest: downloading - layer 1/1: d6ff36c: downloading - layer 1/1: d6ff36c: listing - validating tarball members - resolving whiteouts - flattening image - layer 1/1: d6ff36c: extracting - done - -Same, except place the image in :code:`/tmp/buster`:: - - $ ch-grow pull debian:buster /tmp/buster - [...] - $ ls /tmp/buster - bin dev home lib64 mnt proc run srv tmp var - boot etc lib media opt root sbin sys usr - -.. LocalWords: tmpfs'es +Deprecated name for :code:`ch-image`; will be removed in version 0.23. diff -Nru charliecloud-0.20/doc/ch-image_desc.rst charliecloud-0.21/doc/ch-image_desc.rst --- charliecloud-0.20/doc/ch-image_desc.rst 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/doc/ch-image_desc.rst 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1,484 @@ +Synopsis +======== + +:: + + $ ch-image [...] build [-t TAG] [-f DOCKERFILE] [...] CONTEXT + $ ch-image [...] list + $ ch-image [...] pull [...] IMAGE_REF [IMAGE_DIR] + $ ch-image [...] storage-path + $ ch-image { --help | --version | --dependencies } + + +Description +=========== + +:code:`ch-image` is a tool for building and manipulating container images, but +not running them (for that you want :code:`ch-run`). It is completely +unprivileged, with no setuid/setgid/setcap helpers. 
+ +Options that print brief information and then exit: + + :code:`-h`, :code:`--help` + Print help and exit successfully. + + :code:`--dependencies` + Report dependency problems on standard output, if any, and exit. If all is + well, there is no output and the exit is successful; in case of problems, + the exit is unsuccessful. + + :code:`--version` + Print version number and exit successfully. + +Common options placed before the sub-command: + + :code:`--no-cache` + Download everything needed, ignoring the cache. + + :code:`-s`, :code:`--storage DIR` + Set the storage directory (see below for important details). + + :code:`--tls-no-verify` + Don't verify TLS certificates of the repository. (Do not use this option + unless you understand the risks.) + + :code:`-v`, :code:`--verbose` + Print extra chatter; can be repeated. + + +Storage directory +================= + +:code:`ch-image` maintains state using normal files and directories, including +unpacked container images, located in its *storage directory*. There is no +notion of storage drivers, graph drivers, etc., to select and/or configure. In +descending order of priority, this directory is located at: + + :code:`-s`, :code:`--storage DIR` + Command line option. + + :code:`$CH_IMAGE_STORAGE` + Environment variable. + + :code:`/var/tmp/$USER/ch-image` + Default. + +The storage directory can reside on any filesystem. However, it contains lots +of small files and metadata traffic can be intense. For example, the +Charliecloud test suite uses approximately 400,000 files and directories in +the storage directory as of this writing. Place it on a filesystem appropriate +for this; tmpfs'es such as :code:`/var/tmp` are a good choice if you have +enough RAM (:code:`/tmp` is not recommended because :code:`ch-run` bind-mounts +it into containers by default). + +While you can currently poke around in the storage directory and find unpacked +images runnable with :code:`ch-run`, this is not a supported use case. 
The +supported workflow uses :code:`ch-builder2tar` or :code:`ch-builder2squash` to +obtain a packed image; see the tutorial for details. + +.. warning:: + + Network filesystems, especially Lustre, are typically bad choices for the + storage directory. This is a site-specific question and your local support + will likely have strong opinions. + + +Subcommands +=========== + +:code:`build` +------------- + +Build an image from a Dockerfile and put it in the storage directory. Use +:code:`ch-run(1)` to execute :code:`RUN` instructions. + +Required argument: + + :code:`CONTEXT` + Path to context directory; this is the root of :code:`COPY` and + :code:`ADD` instructions in the Dockerfile. + +Options: + + :code:`-b`, :code:`--bind SRC[:DST]` + Bind-mount host directory :code:`SRC` at container directory :code:`DST` + during :code:`RUN` instructions. Can be repeated; the default destination + if :code:`DST` is omitted is :code:`/mnt/0`, :code:`/mnt/1`, etc. + + **Note:** This applies only to :code:`RUN` instructions. Other + instructions that modify the image filesystem, e.g. :code:`COPY`, can only + access host files from the context directory. + + :code:`--build-arg KEY[=VALUE]` + Set build-time variable :code:`KEY` defined by :code:`ARG` instruction + to :code:`VALUE`. If :code:`VALUE` not specified, use the value of + environment variable :code:`KEY`. + + :code:`-f`, :code:`--file DOCKERFILE` + Use :code:`DOCKERFILE` instead of :code:`CONTEXT/Dockerfile`. Specify a + single hyphen (:code:`-`) to use standard input; note that in this case, + the context directory is still provided, which matches :code:`docker build + -f -` behavior. + + :code:`--force` + Inject the unprivileged build workarounds; see discussion later in this + section for details on what this does and when you might need it. If a + build fails and :code:`ch-image` thinks :code:`--force` would help, it + will suggest it. 
+ + :code:`-n`, :code:`--dry-run` + Don't actually execute any Dockerfile instructions. + + :code:`--no-force-detect` + Don't try to detect if the workarounds in :code:`--force` would help. + + :code:`--parse-only` + Stop after parsing the Dockerfile. + + :code:`-t`, :code:`-tag TAG` + Name of image to create. If not specified, use the final component of path + :code:`CONTEXT`. Append :code:`:latest` if no colon present. + +:code:`ch-image` is a *fully* unprivileged image builder. It does not use any +setuid or setcap helper programs, and it does not use configuration files +:code:`/etc/subuid` or :code:`/etc/subgid`. This contrasts with the “rootless” +or “`fakeroot `_” modes +of some competing builders, which do require privileged supporting code or +utilities. + +This approach does yield some quirks. We provide built-in workarounds that +should mostly work (i.e., :code:`--force`), but it can be helpful to +understand what is going on. + +:code:`ch-image` executes all instructions as the normal user who invokes it. +For `RUN`, this is accomplished with :code:`ch-run -w --uid=0 --gid=0` (and +some other arguments), i.e., your host EUID and EGID both mapped to zero +inside the container, and only one UID (zero) and GID (zero) are available +inside the container. Under this arrangement, processes running in the +container for each :code:`RUN` *appear* to be running as root, but many +privileged system calls will fail without the workarounds described below. +**This affects any fully unprivileged container build, not just +Charliecloud.** + +The most common time to see this is installing packages. For example, here is +RPM failing to :code:`chown(2)` a file, which makes the package update fail: + +.. 
code-block:: none + + Updating : 1:dbus-1.10.24-13.el7_6.x86_64 2/4 + Error unpacking rpm package 1:dbus-1.10.24-13.el7_6.x86_64 + error: unpacking of archive failed on file /usr/libexec/dbus-1/dbus-daemon-launch-helper;5cffd726: cpio: chown + Cleanup : 1:dbus-libs-1.10.24-12.el7.x86_64 3/4 + error: dbus-1:1.10.24-13.el7_6.x86_64: install failed + +This one is (ironically) :code:`apt-get` failing to drop privileges: + +.. code-block:: none + + E: setgroups 65534 failed - setgroups (1: Operation not permitted) + E: setegid 65534 failed - setegid (22: Invalid argument) + E: seteuid 100 failed - seteuid (22: Invalid argument) + E: setgroups 0 failed - setgroups (1: Operation not permitted) + +By default, nothing is done to avoid these problems, though :code:`ch-image` +does try to detect if the workarounds could help. :code:`--force` activates +the workarounds: :code:`ch-image` injects extra commands to intercept these +system calls and fake a successful result, using :code:`fakeroot(1)`. There +are three basic steps: + + 1. After :code:`FROM`, analyze the image to see what distribution it + contains, which determines the specific workarounds. + + 2. Before the user command in the first :code:`RUN` instruction where the + injection seems needed, install :code:`fakeroot(1)` in the image, if one + is not already installed, as well as any other necessary initialization + commands. For example, we turn off the :code:`apt` sandbox (for Debian + Buster) and configure EPEL but leave it disabled (for CentOS/RHEL). + + 3. Prepend :code:`fakeroot` to :code:`RUN` instructions that seem to need + it, e.g. ones that contain :code:`apt`, :code:`apt-get`, :code:`dpkg` for + Debian derivatives and :code:`dnf`, :code:`rpm`, or :code:`yum` for + RPM-based distributions. + +The details are specific to each distribution. :code:`ch-image` analyzes image +content (e.g., grepping :code:`/etc/debian_version`) to select a +configuration; see :code:`lib/fakeroot.py` for details. 
:code:`ch-image` +prints exactly what it is doing. + +:code:`storage-path` +-------------------- + +Print the storage directory path and exit. + +:code:`pull` +------------ + +Pull the image described by the image reference :code:`IMAGE_REF` from a +repository by HTTPS. See the FAQ for the gory details on specifying image +references. + +This script does a fair amount of validation and fixing of the layer tarballs +before flattening in order to support unprivileged use despite image problems +we frequently see in the wild. For example, device files are ignored, and file +and directory permissions are increased to a minimum of :code:`rwx------` and +:code:`rw-------` respectively. Note, however, that symlinks pointing outside +the image are permitted, because they are not resolved until runtime within a +container. + +Destination argument: + + :code:`IMAGE_DIR` + If specified, place the unpacked image at this path; it is then ready for + use by :code:`ch-run` or other tools. The storage directory will not + contain a copy of the image, i.e., it is only unpacked once. + +Options: + + :code:`--last-layer N` + Unpack only :code:`N` layers, leaving an incomplete image. This option is + intended for debugging. + + :code:`--parse-only` + Parse :code:`IMAGE_REF`, print a parse report, and exit successfully + without talking to the internet or touching the storage directory. + + +Compatibility with other Dockerfile interpreters +================================================ + +:code:`ch-image` is an independent implementation and shares no code with +other Dockerfile interpreters. It uses a formal Dockerfile parsing grammar +developed from the `Dockerfile reference documentation +`_ and miscellaneous other +sources, which you can examine in the source code. + +We believe this independence is valuable for several reasons. 
First, it helps +the community examine Dockerfile syntax and semantics critically, think +rigorously about what is really needed, and build a more robust standard. +Second, it yields disjoint sets of bugs (note that Podman, Buildah, and Docker +all share the same Dockerfile parser). Third, because it is a much smaller +code base, it illustrates how Dockerfiles work more clearly. Finally, it +allows straightforward extensions if needed to support scientific computing. + +:code:`ch-image` tries hard to be compatible with Docker and other +interpreters, though as an independent implementation, it is not +bug-compatible. + +This section describes differences from the Dockerfile reference that we +expect to be approximately permanent. For an overview of features we have not +yet implemented and our plans, see our `road map +`_ on GitHub. Plain old bugs +are in our `GitHub issues `_. + +None of these are set in stone. We are very interested in feedback on our +assessments and open questions. This helps us prioritize new features and +revise our thinking about what is needed for HPC containers. + +Context directory +----------------- + +The context directory is bind-mounted into the build, rather than copied like +Docker. Thus, the size of the context is immaterial, and the build reads +directly from storage like any other local process would. However, you still +can't access anything outside the context directory. + +Authentication +-------------- + +:code:`ch-image` can authenticate using one-time passwords, e.g. those +provided by a security token. Unlike :code:`docker login`, it does not assume +passwords are persistent. + +Environment variables +--------------------- + +Variable substitution happens for *all* instructions, not just the ones listed +in the Dockerfile reference. 
+ +:code:`ARG` and :code:`ENV` cause cache misses upon *definition*, in contrast +with Docker where these variables miss upon *use*, except for certain +cache-excluded variables that never cause misses, listed below. + +Like Docker, :code:`ch-image` pre-defines the following proxy variables, which +do not require an :code:`ARG` instruction. However, they are available if the +same-named environment variable is defined; :code:`--build-arg` is not +required. Changes to these variables do not cause a cache miss. + +.. code-block:: sh + + HTTP_PROXY + http_proxy + HTTPS_PROXY + https_proxy + FTP_PROXY + ftp_proxy + NO_PROXY + no_proxy + +The following variables are also pre-defined: + +.. code-block:: sh + + PATH=/ch/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + TAR_OPTIONS=--no-same-owner + +Note that :code:`ARG` and :code:`ENV` have different syntax despite very +similar semantics. + +:code:`COPY` +------------ + +Especially for people used to UNIX :code:`cp(1)`, the semantics of the +Dockerfile :code:`COPY` instruction can be confusing. + +Most notably, when a source of the copy is a directory, the *contents* of that +directory, not the directory itself, are copied. This is documented, but it's +a real gotcha because that's not what :code:`cp(1)` does, and it means that +many things you can do in one :code:`cp(1)` command require multiple +:code:`COPY` instructions. + +Also, the reference documentation is incomplete. In our experience, Docker +also behaves as follows; :code:`ch-image` does the same in an attempt to be +bug-compatible. + +1. You can use absolute paths in the source; the root is the context + directory. + +2. Destination directories are created if they don't exist in the following + situations: + + 1. If the destination path ends in slash. (Documented.) + + 2. If the number of sources is greater than 1, either by wildcard or + explicitly, regardless of whether the destination ends in slash. (Not + documented.) + + 3. 
If there is a single source and it is a directory. (Not documented.) + +3. Symbolic links behave differently depending on how deep in the copied tree + they are. (Not documented.) + + 1. Symlinks at the top level — i.e., named as the destination or the + source, either explicitly or by wildcards — are dereferenced. They are + followed, and whatever they point to is used as the destination or + source, respectively. + + 2. Symlinks at deeper levels are not dereferenced, i.e., the symlink + itself is copied. + +4. If a directory appears at the same path in source and destination, and is + at the 2nd level or deeper, the source directory's metadata (e.g., + permissions) are copied to the destination directory. (Not documented.) + +5. If an object appears in both the source and destination, and is at the 2nd + level or deeper, and is of different types in the source and destination, + then the source object will overwrite the destination object. (Not + documented.) For example, if :code:`/tmp/foo/bar` is a regular file, and + :code:`/tmp` is the context directory, then the following Dockerfile + snippet will result in a *file* in the container at :code:`/foo/bar` + (copied from :code:`/tmp/foo/bar`); the directory and all its contents will + be lost. + + .. code-block:: docker + + RUN mkdir -p /foo/bar && touch /foo/bar/baz + COPY foo /foo + +We expect the following differences to be permanent: + +* Wildcards use Python glob semantics, not the Go semantics. + +* :code:`COPY --chown` is ignored, because it doesn't make sense in an + unprivileged build. + +Features we do not plan to support +---------------------------------- + +* Parser directives are not supported. We have not identified a need for any + of them. + +* :code:`EXPOSE`: Charliecloud does not use the network namespace, so + containerized processes can simply listen on a host port like other + unprivileged processes. 
+ +* :code:`HEALTHCHECK`: This instruction's main use case is monitoring server + processes rather than applications. Also, implementing it requires a + container supervisor daemon, which we have no plans to add. + +* :code:`MAINTAINER` is deprecated. + +* :code:`STOPSIGNAL` requires a container supervisor daemon process, which we + have no plans to add. + +* :code:`USER` does not make sense for unprivileged builds. + +* :code:`VOLUME`: This instruction is not currently supported. Charliecloud + has good support for bind mounts; we anticipate that it will continue to + focus on that and will not introduce the volume management features that + Docker has. + + +Environment variables +===================== + +.. include:: py_env.rst + + +Examples +======== + +:code:`build` +------------- + +Build image :code:`bar` using :code:`./foo/bar/Dockerfile` and context +directory :code:`./foo/bar`:: + + $ ch-image build -t bar -f ./foo/bar/Dockerfile ./foo/bar + [...] + grown in 4 instructions: bar + +Same, but infer the image name and Dockerfile from the context directory +path:: + + $ ch-image build ./foo/bar + [...] + grown in 4 instructions: bar + +Build using humongous vendor compilers you want to bind-mount instead of +installing into a layer:: + + $ ch-image build --bind /opt/bigvendor:/opt . + $ cat Dockerfile + FROM centos:7 + + RUN /opt/bin/cc hello.c + #COPY /opt/lib/*.so /usr/local/lib # fail: COPY doesn't bind mount + RUN cp /opt/lib/*.so /usr/local/lib # possible workaround + RUN ldconfig + +:code:`pull` +------------ + +Download the Debian Buster image and place it in the storage directory:: + + $ ch-image pull debian:buster + pulling image: debian:buster + + manifest: downloading + layer 1/1: d6ff36c: downloading + layer 1/1: d6ff36c: listing + validating tarball members + resolving whiteouts + flattening image + layer 1/1: d6ff36c: extracting + done + +Same, except place the image in :code:`/tmp/buster`:: + + $ ch-image pull debian:buster /tmp/buster + [...] 
+ $ ls /tmp/buster + bin dev home lib64 mnt proc run srv tmp var + boot etc lib media opt root sbin sys usr + +.. LocalWords: tmpfs'es bigvendor diff -Nru charliecloud-0.20/doc/ch-image.rst charliecloud-0.21/doc/ch-image.rst --- charliecloud-0.20/doc/ch-image.rst 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/doc/ch-image.rst 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1,8 @@ +:orphan: + +ch-image man page ++++++++++++++++++ + +.. include:: ./ch-image_desc.rst +.. include:: ./bugs.rst +.. include:: ./see_also.rst diff -Nru charliecloud-0.20/doc/command-usage.rst charliecloud-0.21/doc/command-usage.rst --- charliecloud-0.20/doc/command-usage.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/command-usage.rst 2020-12-18 20:25:06.000000000 +0000 @@ -54,19 +54,26 @@ .. include:: ./ch-builder2squash_desc.rst +ch-grow ++++++++ + +Deprecated name for :code:`ch-image`; will be removed in version 0.23. + +.. include:: ./ch-grow_desc.rst + ch-fromhost +++++++++++ -Inject files from the host into an image directory. +Inject files from the host into an image directory, with various magic. .. include:: ./ch-fromhost_desc.rst -ch-grow -+++++++ +ch-image +++++++++ Build and manage images; completely unprivileged. -.. include:: ./ch-grow_desc.rst +.. 
include:: ./ch-image_desc.rst ch-mount ++++++++ diff -Nru charliecloud-0.20/doc/conf.py charliecloud-0.21/doc/conf.py --- charliecloud-0.20/doc/conf.py 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/conf.py 2020-12-18 20:25:06.000000000 +0000 @@ -243,7 +243,7 @@ man_pages = [ ("charliecloud", "charliecloud", "Lightweight user-defined software stacks for high-performance computing", - [], 1), + [], 7), ("ch-build", "ch-build", "Build an image and place it in the builder's back-end storage", [], 1), @@ -263,10 +263,13 @@ "Create a SquashFS file from an image directory", [], 1), ("ch-grow", "ch-grow", - "Build and manage images; completely unprivileged", + 'Deprecated name for "ch-image"; will be removed in version 0.23.', [], 1), ("ch-fromhost", "ch-fromhost", - "Inject files from the host into an image directory", + "Inject files from the host into an image directory, with various magic", + [], 1), + ("ch-image", "ch-image", + "Build and manage images; completely unprivileged", [], 1), ("ch-mount", "ch-mount", "Mount a SquashFS image file using FUSE", diff -Nru charliecloud-0.20/doc/dev.rst charliecloud-0.21/doc/dev.rst --- charliecloud-0.20/doc/dev.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/dev.rst 2020-12-18 20:25:06.000000000 +0000 @@ -81,9 +81,6 @@ then open a PR directly. In this case, the PR should be tagged and milestoned, since there is no issue. -Trivial changes (e.g., fix Travis, fix a regression within a release, -code formatting) can be done without an issue or PR. - **Address a single concern.** When possible, issues and PRs should address completely one self-contained change. If there are multiple concerns, make separate issues and/or PRs. For example, PRs should not tidy unrelated code, @@ -95,7 +92,7 @@ approach" are no fun. **Tests must pass.** PRs will not be merged until they pass the tests. 
While -this most saliently includes Travis, the tests should also pass on your +this most saliently includes CI, the tests should also pass on your development box as well as all relevant clusters (if appropriate for the changes). @@ -177,8 +174,8 @@ **Branch history tidiness.** Commit frequently at semantically relevant times, and keep in mind that this history will probably be squashed per above. It is not necessary to rebase or squash to keep branch history tidy. But, don't go -crazy. Commit messages like "try 2" and "fix Travis again" are a bad sign; so -are carefully proofread ones. Commit messages that are brief, technically +crazy. Commit messages like "try 2" and "fix CI again" are a bad sign; so are +carefully proofread ones. Commit messages that are brief, technically relevant, and quick to write are what you want on feature branches. **Keep branches up to date.** Merge master into your branch, rather than @@ -216,20 +213,15 @@ reasons. We are working to improve testing for normal commits on master, but full parity is probably unlikely. -**Travis budget.** Because we're on the free tier, we only get 5 Travis jobs -running at a time. Currently, each job takes about ten minutes, there are -seven of them per tested commit, and PRs double this (once on the branch and -once with a test merge commit). The resource is there for your use, so take -advantage of it, but be mindful of the cost, since your fellow developers -might be trying to get in too. +**Cycles budget.** The resource is there for your use, so take advantage of +it, but be mindful of the various costs of this compute time. Things you can do include testing locally first, cancelling jobs you know will fail or that won't give you additional information, and not pushing every -commit (Travis tests only the most recent commit in a pushed group). +commit (CI tests only the most recent commit in a pushed group). 
-**Iterating with Travis.** When trying to make Travis happy, use a throwaway -branch that you force-push or squash-merge. Don't submit a PR with half a -dozen "fix Travis" commits. +**Iterating.** When trying to make CI happy, force-push or squash-merge. Don't +submit a PR with half a dozen "fix CI" commits. **Purging Docker cache.** :code:`misc/docker-clean.sh` can be used to purge your Docker cache, either by removing all tags or deleting all containers and diff -Nru charliecloud-0.20/doc/faq.rst charliecloud-0.21/doc/faq.rst --- charliecloud-0.20/doc/faq.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/faq.rst 2020-12-18 20:25:06.000000000 +0000 @@ -562,7 +562,7 @@ ------------------------------------ You must specify an image for many use cases, including :code:`FROM` -instructions, the source of an image pull (e.g. :code:`ch-grow pull` or +instructions, the source of an image pull (e.g. :code:`ch-image pull` or :code:`docker pull`), the destination of an image push, and adding image tags. Charliecloud calls this an *image reference*, but there appears to be no established name for this concept. diff -Nru charliecloud-0.20/doc/install.rst charliecloud-0.21/doc/install.rst --- charliecloud-0.20/doc/install.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/install.rst 2020-12-18 20:25:06.000000000 +0000 @@ -63,14 +63,14 @@ By default, all features that can be built will be built and installed. 
You can exclude some features with: - ========================= ======================================================= + ========================== ======================================================= option don't build or install - ========================= ======================================================= + ========================== ======================================================= :code:`--disable-html` HTML documentation :code:`--disable-man` man pages :code:`--disable-tests` test suite - :code:`--disable-ch-grow` :code:`ch-grow` unprivileged builder & image manager - ========================= ======================================================= + :code:`--disable-ch-image` :code:`ch-image` unprivileged builder & image manager + ========================== ======================================================= You can also say :code:`--enable-FOO` to fail the build if :code:`FOO` can't be built. @@ -145,7 +145,7 @@ * Generic RPMs downloadable from our `releases page `_. * `Spack `_; - install with :code:`+builder` to get :code:`ch-grow`. + install with :code:`+builder` to get :code:`ch-image`. * `Fedora/EPEL `_; check for availabile versions with :code:`{yum,dnf} list charliecloud`. @@ -418,7 +418,7 @@ GitLab instance. * Filesystem directory, for builders that support this (e.g., - :code:`ch-grow`). + :code:`ch-image`). 
"lark-parser" Python package ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff -Nru charliecloud-0.20/doc/Makefile.am charliecloud-0.21/doc/Makefile.am --- charliecloud-0.20/doc/Makefile.am 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/Makefile.am 2020-12-18 20:25:06.000000000 +0000 @@ -22,6 +22,8 @@ ch-fromhost.rst \ ch-grow_desc.rst \ ch-grow.rst \ +ch-image_desc.rst \ +ch-image.rst \ ch-mount_desc.rst \ ch-mount.rst \ ch-pull2dir_desc.rst \ @@ -56,8 +58,8 @@ tutorial.rst if ENABLE_MAN -man1_MANS = \ -man/charliecloud.1 \ +man_MANS = \ +man/charliecloud.7 \ man/ch-build.1 \ man/ch-build2dir.1 \ man/ch-builder2squash.1 \ @@ -66,6 +68,7 @@ man/ch-dir2squash.1 \ man/ch-fromhost.1 \ man/ch-grow.1 \ +man/ch-image.1 \ man/ch-mount.1 \ man/ch-pull2dir.1 \ man/ch-pull2tar.1 \ @@ -93,7 +96,7 @@ # NOTE: ./html might be a Git checkout to support "make web", so make sure not # to delete it. -CLEANFILES = $(man1_MANS) $(nobase_html_DATA) \ +CLEANFILES = $(man_MANS) $(nobase_html_DATA) \ _deps.rst html/.buildinfo html/.nojekyll if ENABLE_HTML # Automake can't remove directories. @@ -165,6 +168,6 @@ HTML_FIRST = html endif -$(man1_MANS): man +$(man_MANS): man man: mkdir_issue115 ../lib/version.txt _deps.rst $(HTML_FIRST) $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man diff -Nru charliecloud-0.20/doc/see_also.rst charliecloud-0.21/doc/see_also.rst --- charliecloud-0.20/doc/see_also.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/see_also.rst 2020-12-18 20:25:06.000000000 +0000 @@ -1,6 +1,6 @@ See also ======== -charliecloud(1) +charliecloud(7) Full documentation at: diff -Nru charliecloud-0.20/doc/tutorial.rst charliecloud-0.21/doc/tutorial.rst --- charliecloud-0.20/doc/tutorial.rst 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/doc/tutorial.rst 2020-12-18 20:25:06.000000000 +0000 @@ -30,11 +30,11 @@ :: - $ cd /usr/local/src/charliecloud/examples/serial/hello + $ cd /usr/local/share/doc/charliecloud/examples/hello $ ch-build -t hello . 
Sending build context to Docker daemon 5.632kB [...] - Successfully built 1136de7d4c0a + Successfully built eb5f42d5bb54 $ ch-builder2tar hello /var/tmp 114MiB 0:00:03 [=============================================] 103% -rw-r----- 1 reidpr reidpr 49M Nov 21 14:05 /var/tmp/hello.tar.gz @@ -70,7 +70,7 @@ In this section, we will create and run a simple "hello, world" image. This uses the :code:`hello` example in the Charliecloud source code. Start with:: - $ cd examples/serial/hello + $ cd examples/hello Defining your UDSS ------------------ @@ -86,7 +86,7 @@ .. literalinclude:: ../examples/hello/Dockerfile :language: docker -This creates a minimal Debian Stretch image with :code:`ssh` installed. We +This creates a minimal CentOS 8 image with :code:`ssh` installed. We will encounter more complex Dockerfiles later in this tutorial. .. note:: @@ -94,7 +94,7 @@ Docker does not update the base image unless asked to. Specific images can be updated manually; in this case:: - $ sudo docker pull debian:stretch + $ sudo docker pull centos:8 There are various resources and scripts online to help automate this process, as well as :code:`misc/docker-clean.sh`. @@ -116,12 +116,12 @@ $ ch-build -t hello . Sending build context to Docker daemon 5.632kB - Step 1/4 : FROM debian:stretch - ---> be2868bebaba + Step 1/4 : FROM centos:8 + ---> 0d120b6ccaa8 [...] Step 4/4 : RUN touch /usr/bin/ch-ssh - ---> e5920427a8f2 - Successfully built e5920427a8f2 + ---> eb5f42d5bb54 + Successfully built eb5f42d5bb54 Successfully tagged hello:latest Note that Docker prints each step of the Dockerfile as it's executed. 
@@ -142,9 +142,9 @@ :: $ sudo docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - debian stretch 1742affe03b5 10 days ago 125.1 MB - hello latest 1742affe03b5 10 days ago 139.7 MB + REPOSITORY TAG IMAGE ID CREATED SIZE + centos 8 0d120b6ccaa8 2 months ago 215MB + hello latest eb5f42d5bb54 5 minutes ago 235MB $ sudo docker push # FIXME Running the image with Docker is not generally useful, because Docker's @@ -170,7 +170,7 @@ :: $ ch-builder2tar hello /var/tmp - 57M /var/tmp/hello.tar.gz + 74M /var/tmp/hello.tar.gz Distribute tarball ------------------ diff -Nru charliecloud-0.20/examples/copy/dirF/dir19a2/dir19b2/file19c1 charliecloud-0.21/examples/copy/dirF/dir19a2/dir19b2/file19c1 --- charliecloud-0.20/examples/copy/dirF/dir19a2/dir19b2/file19c1 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirF/dir19a2/dir19b2/file19c1 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirF/dir19a2/dir19b3/file19c1 charliecloud-0.21/examples/copy/dirF/dir19a2/dir19b3/file19c1 --- charliecloud-0.20/examples/copy/dirF/dir19a2/dir19b3/file19c1 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirF/dir19a2/dir19b3/file19c1 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirF/dir19a2/file19b2 charliecloud-0.21/examples/copy/dirF/dir19a2/file19b2 --- charliecloud-0.20/examples/copy/dirF/dir19a2/file19b2 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirF/dir19a2/file19b2 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirF/dir19a2/file19b3 charliecloud-0.21/examples/copy/dirF/dir19a2/file19b3 --- charliecloud-0.20/examples/copy/dirF/dir19a2/file19b3 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirF/dir19a2/file19b3 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirF/dir19a3/file19b1 
charliecloud-0.21/examples/copy/dirF/dir19a3/file19b1 --- charliecloud-0.20/examples/copy/dirF/dir19a3/file19b1 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirF/dir19a3/file19b1 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirF/file19a2 charliecloud-0.21/examples/copy/dirF/file19a2 --- charliecloud-0.20/examples/copy/dirF/file19a2 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirF/file19a2 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirF/file19a3 charliecloud-0.21/examples/copy/dirF/file19a3 --- charliecloud-0.20/examples/copy/dirF/file19a3 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirF/file19a3 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirG/diry/file_ charliecloud-0.21/examples/copy/dirG/diry/file_ --- charliecloud-0.20/examples/copy/dirG/diry/file_ 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirG/diry/file_ 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +diry/file_ diff -Nru charliecloud-0.20/examples/copy/dirG/filey charliecloud-0.21/examples/copy/dirG/filey --- charliecloud-0.20/examples/copy/dirG/filey 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirG/filey 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirG/s_dir1 charliecloud-0.21/examples/copy/dirG/s_dir1 --- charliecloud-0.20/examples/copy/dirG/s_dir1 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirG/s_dir1 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirG/s_dir4/file_ charliecloud-0.21/examples/copy/dirG/s_dir4/file_ --- charliecloud-0.20/examples/copy/dirG/s_dir4/file_ 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirG/s_dir4/file_ 2020-12-18 
20:25:06.000000000 +0000 @@ -0,0 +1 @@ +s_dir4/file_ diff -Nru charliecloud-0.20/examples/copy/dirG/s_file1 charliecloud-0.21/examples/copy/dirG/s_file1 --- charliecloud-0.20/examples/copy/dirG/s_file1 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirG/s_file1 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +new diff -Nru charliecloud-0.20/examples/copy/dirG/s_file4/file_ charliecloud-0.21/examples/copy/dirG/s_file4/file_ --- charliecloud-0.20/examples/copy/dirG/s_file4/file_ 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/examples/copy/dirG/s_file4/file_ 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1 @@ +s_file4/file_ diff -Nru charliecloud-0.20/examples/copy/Dockerfile charliecloud-0.21/examples/copy/Dockerfile --- charliecloud-0.20/examples/copy/Dockerfile 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/copy/Dockerfile 2020-12-18 20:25:06.000000000 +0000 @@ -16,8 +16,12 @@ ## Source: Regular file(s) # Source: one file -# Dest: new file -COPY fileA file1 +# Dest: new file, relative to workdir +COPY fileA file1a + +# Source: one file +# Dest: new file, absolute path +COPY fileA /test/file1b # Source: one file, absolute path (root is context directory) # Dest: new file @@ -29,6 +33,18 @@ COPY fileA file3 # Source: one file +# Dest: symlink to existing file, relative path +RUN echo 'this should be overwritten' > file4 \ + && ln -s file4 symlink-to-file4 +COPY fileA symlink-to-file4 + +# Source: one file +# Dest: symlink to existing file, absolute path +RUN echo 'this should be overwritten' > file5 \ + && ln -s /test/file5 symlink-to-file5 +COPY fileA symlink-to-file5 + +# Source: one file # Dest: existing directory, no trailing slash # # Note: This behavior is inconsistent with the Dockerfile reference, which @@ -42,6 +58,42 @@ COPY fileA dir01b/ # Source: one file +# Dest: symlink to existing directory, relative, no trailing slash +RUN mkdir dir01c \ + && ln -s dir01c symlink-to-dir01c +COPY fileA 
symlink-to-dir01c + +# Source: one file +# Dest: symlink to existing directory, absolute, no trailing slash +RUN mkdir dir01d \ + && ln -s /test/dir01d symlink-to-dir01d +COPY fileA symlink-to-dir01d + +# Source: one file +# Dest: symlink to existing directory, relative, trailing slash +RUN mkdir dir01e \ + && ln -s dir01e symlink-to-dir01e +COPY fileA symlink-to-dir01e/ + +# Source: one file +# Dest: symlink to existing directory, absolute, trailing slash +RUN mkdir dir01f \ + && ln -s /test/dir01f symlink-to-dir01f +COPY fileA symlink-to-dir01f/ + +# Source: one file +# Dest: symlink to existing directory, multi-level, relative, no slash +RUN mkdir -p dir01g/dir \ + && ln -s dir01g symlink-to-dir01g +COPY fileA symlink-to-dir01g/dir + +# Source: one file +# Dest: symlink to existing directory, multi-level, absolute, no slash +RUN mkdir -p dir01h/dir \ + && ln -s /test/dir01h symlink-to-dir01h +COPY fileA symlink-to-dir01h/dir + +# Source: one file # Dest: new directory, one level of creation COPY fileA dir02/ @@ -79,6 +131,30 @@ COPY dirA dir07b/ # Source: one directory +# Dest: symlink to existing directory, relative, no trailing slash +RUN mkdir dir07c \ + && ln -s dir07c symlink-to-dir07c +COPY dirA symlink-to-dir07c + +# Source: one directory +# Dest: symlink to existing directory, absolute, no trailing slash +RUN mkdir dir07d \ + && ln -s /test/dir07d symlink-to-dir07d +COPY dirA symlink-to-dir07d + +# Source: one directory +# Dest: symlink to existing directory, relative, trailing slash +RUN mkdir dir07e \ + && ln -s dir07e symlink-to-dir07e +COPY dirA symlink-to-dir07e/ + +# Source: one directory +# Dest: symlink to existing directory, absolute, trailing slash +RUN mkdir dir07f \ + && ln -s /test/dir07f symlink-to-dir07f +COPY dirA symlink-to-dir07f/ + +# Source: one directory # Dest: new directory, one level, no trailing slash # # Note: Again, the reference seems to imply this shouldn't work. 
@@ -88,6 +164,15 @@ # Dest: new directory, one level, trailing slash COPY dirA dir08b/ +# Source: one directory +# Dest: existing file, 2nd level +# +# Note: While this fails if the existing file is at the top level (which we +# verify in test/build/50_dockerfile.bats), if the existing file is at the 2nd +# level, it's overwritten by the directory. +RUN touch dir08a/dirCb +COPY dirCa dir08a + # Source: two directories, explicit # Dest: existing directory RUN mkdir dir09 @@ -145,6 +230,78 @@ COPY fileB symlink-to-fileB-* dir18/ +## Merge directory trees + +# Set up destination directory tree. +RUN mkdir dir19 \ + && mkdir dir19/dir19a1 \ + && mkdir dir19/dir19a2 \ + && mkdir dir19/dir19a2/dir19b1 \ + && mkdir dir19/dir19a2/dir19b2 \ + && echo old > dir19/file19a1 \ + && echo old > dir19/file19a2 \ + && echo old > dir19/dir19a1/file19b1 \ + && echo old > dir19/dir19a2/file19b1 \ + && echo old > dir19/dir19a2/file19b2 \ + && echo old > dir19/dir19a2/dir19b2/file19c1 \ + && chmod 777 dir19/dir19a2 + +# Copy in the new directory tree. This is supposed to merge the two trees. +# Important considerations, from perspective of destination tree: +# +# 1. File at top level, new. +# 2. File at top level, existing (should overwrite). +# 3. File at 2nd level, new. +# 4. File at 2nd level, existing (should overwrite). +# 5. Directory at top level, new. +# 6. Directory at top level, existing (permissions should overwrite). +# 7. Directory at 2nd level, new. +# 8. Directory at 2nd level, existing (permissions should overwrite). +# +# The directories should be o-rwx so we can see if the permissions were from +# the old or new version. +RUN test $(stat -c '%A' dir19/dir19a2 | cut -c8-) = 'rwx' \ + && stat -c '%n: %A' dir19/dir19a2 +COPY dirF dir19/ +RUN test $(stat -c '%A' dir19/dir19a2 | cut -c8-) != 'rwx' \ + && stat -c '%n: %A' dir19/dir19a2 + + +## Destination: Symlink, 2nd level. 
+ +# Note: This behavior is DIFFERENT from the symlink at 1st level tests above +# (recall we are trying to be bug-compatible with Docker). + +# Set up destination. +RUN mkdir dir20 \ + && echo new > dir20/filex \ + && mkdir dir20/dirx \ + && for i in $(seq 4); do \ + echo file$i > dir20/file$i \ + && ln -s file$i dir20/s_file$i \ + && mkdir dir20/dir$i \ + && echo dir$i/file_ > dir20/dir$i/file_ \ + && ln -s dir$i dir20/s_dir$i; \ + done \ + && ls -lR dir20 + +# Copy in the new directory tree. In all of these cases, the source simply +# overwrites the destination; symlinks are not followed. +# +# name source destination +# ------- ------------ ------------ +# 1. s_file1 file link to file +# 2. s_dir1 file link to dir +# 3. s_file2 link to file link to file +# 4. s_dir2 link to file link to dir +# 5. s_file3 link to dir link to file +# 6. s_dir3 link to dir link to dir +# 7. s_file4 directory link to file +# 8. s_dir4 directory link to dir +# +COPY dirG dir20/ + + ## Wrap up; this output helps to build the expectations in test.bats. 
# Need GNU find, not BusyBox find diff -Nru charliecloud-0.20/examples/copy/test.bats charliecloud-0.21/examples/copy/test.bats --- charliecloud-0.20/examples/copy/test.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/copy/test.bats 2020-12-18 20:25:06.000000000 +0000 @@ -13,6 +13,12 @@ .: dir01a/ dir01b/ +dir01c/ +dir01d/ +dir01e/ +dir01f/ +dir01g/ +dir01h/ dir02/ dir03a/ dir04/ @@ -20,6 +26,10 @@ dir06/ dir07a/ dir07b/ +dir07c/ +dir07d/ +dir07e/ +dir07f/ dir08a/ dir08b/ dir09/ @@ -32,9 +42,26 @@ dir16/ dir17/ dir18/ -file1 +dir19/ +dir20/ +file1a +file1b file2 file3 +file4 +file5 +symlink-to-dir01c@ +symlink-to-dir01d@ +symlink-to-dir01e@ +symlink-to-dir01f@ +symlink-to-dir01g@ +symlink-to-dir01h@ +symlink-to-dir07c@ +symlink-to-dir07d@ +symlink-to-dir07e@ +symlink-to-dir07f@ +symlink-to-file4@ +symlink-to-file5@ symlink-to-fileA ./dir01a: @@ -43,6 +70,30 @@ ./dir01b: fileA +./dir01c: +fileA + +./dir01d: +fileA + +./dir01e: +fileA + +./dir01f: +fileA + +./dir01g: +dir/ + +./dir01g/dir: +fileA + +./dir01h: +dir/ + +./dir01h/dir: +fileA + ./dir02: fileA @@ -70,8 +121,26 @@ ./dir07b: fileAa +./dir07c: +fileAa + +./dir07d: +fileAa + +./dir07e: +fileAa + +./dir07f: +fileAa + ./dir08a: +dirCb/ fileAa +symlink-to-dirCb@ + +./dir08a/dirCb: +fileCba +fileCbb ./dir08b: fileAa @@ -125,6 +194,81 @@ fileB symlink-to-fileB-A symlink-to-fileB-B + +./dir19: +dir19a1/ +dir19a2/ +dir19a3/ +file19a1 +file19a2 +file19a3 + +./dir19/dir19a1: +file19b1 + +./dir19/dir19a2: +dir19b1/ +dir19b2/ +dir19b3/ +file19b1 +file19b2 +file19b3 + +./dir19/dir19a2/dir19b1: + +./dir19/dir19a2/dir19b2: +file19c1 + +./dir19/dir19a2/dir19b3: +file19c1 + +./dir19/dir19a3: +file19b1 + +./dir20: +dir1/ +dir2/ +dir3/ +dir4/ +dirx/ +diry/ +file1 +file2 +file3 +file4 +filex +filey +s_dir1 +s_dir2@ +s_dir3@ +s_dir4/ +s_file1 +s_file2@ +s_file3@ +s_file4/ + +./dir20/dir1: +file_ + +./dir20/dir2: +file_ + +./dir20/dir3: +file_ + +./dir20/dir4: +file_ + +./dir20/dirx: + +./dir20/diry: +file_ 
+ +./dir20/s_dir4: +file_ + +./dir20/s_file4: +file_ EOF } @@ -137,6 +281,12 @@ | sort) < %l\n' | sort) < dirCb l: ./dir14/symlink-to-fileDa -> fileDa l: ./dir15/symlink-to-fileDa -> fileDa l: ./dir16/symlink-to-dirEb -> dirEb +l: ./dir20/s_dir2 -> filey +l: ./dir20/s_dir3 -> diry +l: ./dir20/s_file2 -> filey +l: ./dir20/s_file3 -> diry +l: ./symlink-to-dir01c -> dir01c +l: ./symlink-to-dir01d -> /test/dir01d +l: ./symlink-to-dir01e -> dir01e +l: ./symlink-to-dir01f -> /test/dir01f +l: ./symlink-to-dir01g -> dir01g +l: ./symlink-to-dir01h -> /test/dir01h +l: ./symlink-to-dir07c -> dir07c +l: ./symlink-to-dir07d -> /test/dir07d +l: ./symlink-to-dir07e -> dir07e +l: ./symlink-to-dir07f -> /test/dir07f +l: ./symlink-to-file4 -> file4 +l: ./symlink-to-file5 -> /test/file5 EOF } diff -Nru charliecloud-0.20/examples/Dockerfile.centos7 charliecloud-0.21/examples/Dockerfile.centos7 --- charliecloud-0.20/examples/Dockerfile.centos7 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/Dockerfile.centos7 2020-12-18 20:25:06.000000000 +0000 @@ -6,6 +6,7 @@ # Install our dependencies, ensuring we fail out if any are missing. RUN yum install -y epel-release \ + && yum-config-manager --enable epel \ && yum install -y --setopt=skip_missing_names_on_install=0 \ autoconf \ automake \ diff -Nru charliecloud-0.20/examples/Dockerfile.centos8 charliecloud-0.21/examples/Dockerfile.centos8 --- charliecloud-0.20/examples/Dockerfile.centos8 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/Dockerfile.centos8 2020-12-18 20:25:06.000000000 +0000 @@ -15,9 +15,13 @@ # # 3. Install packages needed to build el8 rpms. 
# -RUN dnf install -y --setopt=install_weak_deps=false epel-release \ - && sed -ie 's/enabled=0/enabled=1/' /etc/yum.repos.d/CentOS-PowerTools.repo \ - && dnf install -y --setopt=install_weak_deps=false --enablerepo=epel-playground \ +RUN dnf install -y --setopt=install_weak_deps=false \ + epel-release \ + 'dnf-command(config-manager)' \ + && dnf config-manager --enable epel \ + && dnf config-manager --enable powertools \ + && dnf install -y --setopt=install_weak_deps=false \ + --enablerepo=epel-playground \ dnf-plugin-ovl \ autoconf \ automake \ diff -Nru charliecloud-0.20/examples/Dockerfile.nvidia charliecloud-0.21/examples/Dockerfile.nvidia --- charliecloud-0.20/examples/Dockerfile.nvidia 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/Dockerfile.nvidia 2020-12-18 20:25:06.000000000 +0000 @@ -1,6 +1,6 @@ # ch-test-scope: full -# ch-test-arch-exclude: aarch64 # only x86-64, ppc64le supported by nVidia -# ch-test-builder-exclude: ch-grow # multi-stage build not yet supported +# ch-test-arch-exclude: aarch64 # only x86-64, ppc64le supported by nVidia +# ch-test-builder-exclude: ch-image # multi-stage build not yet supported # This Dockerfile demonstrates a multi-stage build. With a single-stage build # that brings along the nVidia build environment, the resulting unpacked image diff -Nru charliecloud-0.20/examples/exhaustive/Dockerfile charliecloud-0.21/examples/exhaustive/Dockerfile --- charliecloud-0.20/examples/exhaustive/Dockerfile 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/exhaustive/Dockerfile 2020-12-18 20:25:06.000000000 +0000 @@ -2,14 +2,14 @@ # comprehensiveness of Dockerfile feature support. # # FIXME: That focus is a bit out of date. I think really what is here is the -# ways we want to exercise ch-grow in ways we care about the resulting image. +# ways we want to exercise ch-image in ways we care about the resulting image. # Exercises where we don't care are in test/build/50_dockerfile.bats. 
But, I # don't want to do the refactoring right now. # # See: https://docs.docker.com/engine/reference/builder # # ch-test-scope: standard -# ch-test-builder-include: ch-grow +# ch-test-builder-include: ch-image # Use a moderately complex image reference. FROM registry-1.docker.io:443/library/alpine:3.9 AS stage1 @@ -26,7 +26,7 @@ RUN echo 'a -${chse_2}-' "b -${chse_2}-" "c -${NOTSET:-c}-" "d -${chse_2:+d}-" RUN env -# WORKDIR. See test/run/ch-grow.bats where we validate this all worked OK. +# WORKDIR. See test/build/50_ch-image.bats where we validate this all worked OK. # FIXME: test with variable # # filesystem root diff -Nru charliecloud-0.20/examples/hello/Dockerfile charliecloud-0.21/examples/hello/Dockerfile --- charliecloud-0.20/examples/hello/Dockerfile 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/hello/Dockerfile 2020-12-18 20:25:06.000000000 +0000 @@ -1,10 +1,8 @@ # ch-test-scope: standard -# Unable to convert to a CentOS base until issues #472 and #498 are resolved -FROM debian:stretch +FROM centos:8 -RUN apt-get update \ - && apt-get install -y --no-install-recommends openssh-client \ - && rm -rf /var/lib/apt/lists/* +RUN dnf install -y --setopt=install_weak_deps=false openssh-clients \ + && dnf clean all COPY . hello diff -Nru charliecloud-0.20/examples/hello/README charliecloud-0.21/examples/hello/README --- charliecloud-0.20/examples/hello/README 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/hello/README 2020-12-18 20:25:06.000000000 +0000 @@ -1,5 +1,2 @@ This example is a hello world Charliecloud container. It demonstrates running a command on the host from inside a container. - -A script test.sh is provided to demonstrate the build and run procedure. -Detailed instructions are in the Charliecloud documentation. 
diff -Nru charliecloud-0.20/examples/Makefile.am charliecloud-0.21/examples/Makefile.am --- charliecloud-0.20/examples/Makefile.am 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/Makefile.am 2020-12-18 20:25:06.000000000 +0000 @@ -30,6 +30,19 @@ copy/dirD/fileDa \ copy/dirEa/dirEb/fileEba \ copy/dirEa/dirEb/fileEbb \ +copy/dirF/dir19a3/file19b1 \ +copy/dirF/file19a3 \ +copy/dirF/file19a2 \ +copy/dirF/dir19a2/file19b2 \ +copy/dirF/dir19a2/dir19b2/file19c1 \ +copy/dirF/dir19a2/dir19b3/file19c1 \ +copy/dirF/dir19a2/file19b3 \ +copy/dirG/diry/file_ \ +copy/dirG/filey \ +copy/dirG/s_dir1 \ +copy/dirG/s_dir4/file_ \ +copy/dirG/s_file1 \ +copy/dirG/s_file4/file_ \ copy/fileA \ copy/fileB \ copy/test.bats \ @@ -89,13 +102,17 @@ # of everything manually. # # Note: -T prevents ln(1) from dereferencing and descending into symlinks to -# directories. Without this, new symlinks are created within symlink-to-dir*, +# directories. Without this, new symlinks are created within such directories, # instead of replacing the existing symlink as we wanted. See PR #722. 
all-local: ln -fTs dirCb copy/dirCa/symlink-to-dirCb ln -fTs fileDa copy/dirD/symlink-to-fileDa ln -fTs dirEb copy/dirEa/symlink-to-dirEb + ln -fTs filey copy/dirG/s_dir2 + ln -fTs diry copy/dirG/s_dir3 + ln -fTs filey copy/dirG/s_file2 + ln -fTs diry copy/dirG/s_file3 ln -fTs fileA copy/symlink-to-fileA ln -fTs fileB copy/symlink-to-fileB-A ln -fTs fileB copy/symlink-to-fileB-B @@ -104,6 +121,10 @@ rm -f copy/dirCa/symlink-to-dirCb rm -f copy/dirD/symlink-to-fileDa rm -f copy/dirEa/symlink-to-dirEb + rm -f copy/dirG/s_dir2 + rm -f copy/dirG/s_dir3 + rm -f copy/dirG/s_file2 + rm -f copy/dirG/s_file3 rm -f copy/symlink-to-fileA rm -f copy/symlink-to-fileB-A rm -f copy/symlink-to-fileB-B @@ -112,6 +133,10 @@ ln -fTs dirCb $(DESTDIR)$(examplesdir)/copy/dirCa/symlink-to-dirCb ln -fTs fileDa $(DESTDIR)$(examplesdir)/copy/dirD/symlink-to-fileDa ln -fTs dirEb $(DESTDIR)$(examplesdir)/copy/dirEa/symlink-to-dirEb + ln -fTs filey $(DESTDIR)$(examplesdir)/copy/dirG/s_dir2 + ln -fTs diry $(DESTDIR)$(examplesdir)/copy/dirG/s_dir3 + ln -fTs filey $(DESTDIR)$(examplesdir)/copy/dirG/s_file2 + ln -fTs diry $(DESTDIR)$(examplesdir)/copy/dirG/s_file3 ln -fTs fileA $(DESTDIR)$(examplesdir)/copy/symlink-to-fileA ln -fTs fileB $(DESTDIR)$(examplesdir)/copy/symlink-to-fileB-A ln -fTs fileB $(DESTDIR)$(examplesdir)/copy/symlink-to-fileB-B @@ -120,6 +145,10 @@ rm -f $(DESTDIR)$(examplesdir)/copy/dirCa/symlink-to-dirCb rm -f $(DESTDIR)$(examplesdir)/copy/dirD/symlink-to-fileDa rm -f $(DESTDIR)$(examplesdir)/copy/dirEa/symlink-to-dirEb + rm -f $(DESTDIR)$(examplesdir)/copy/dirG/s_dir2 + rm -f $(DESTDIR)$(examplesdir)/copy/dirG/s_dir3 + rm -f $(DESTDIR)$(examplesdir)/copy/dirG/s_file2 + rm -f $(DESTDIR)$(examplesdir)/copy/dirG/s_file3 rm -f $(DESTDIR)$(examplesdir)/copy/symlink-to-fileA rm -f $(DESTDIR)$(examplesdir)/copy/symlink-to-fileB-A rm -f $(DESTDIR)$(examplesdir)/copy/symlink-to-fileB-B diff -Nru charliecloud-0.20/examples/paraview/Dockerfile 
charliecloud-0.21/examples/paraview/Dockerfile --- charliecloud-0.20/examples/paraview/Dockerfile 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/paraview/Dockerfile 2020-12-18 20:25:06.000000000 +0000 @@ -1,5 +1,5 @@ # ch-test-scope: full -# ch-test-builder-exclude: ch-grow # deferred +# ch-test-builder-exclude: ch-image # deferred FROM openmpi WORKDIR /usr/local/src diff -Nru charliecloud-0.20/examples/spack/Dockerfile charliecloud-0.21/examples/spack/Dockerfile --- charliecloud-0.20/examples/spack/Dockerfile 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/spack/Dockerfile 2020-12-18 20:25:06.000000000 +0000 @@ -63,8 +63,12 @@ RUN spack spec charliecloud # Test: Install Charliecloud. -RUN spack spec charliecloud+docs -RUN spack install charliecloud+docs +# Kludge: here we specify an older python sphinx rtd_theme version because +# newer default version, 0.5.0, introduces a dependency on node-js which doesn't +# appear to build on gcc 4.8 or gcc 8.3 +# (see: https://github.com/spack/spack/issues/19310). +RUN spack spec charliecloud+docs^py-sphinx-rtd-theme@0.4.3 +RUN spack install charliecloud+docs^py-sphinx-rtd-theme@0.4.3 # Clean up. 
RUN spack clean --all diff -Nru charliecloud-0.20/examples/spark/Dockerfile charliecloud-0.21/examples/spark/Dockerfile --- charliecloud-0.20/examples/spark/Dockerfile 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/spark/Dockerfile 2020-12-18 20:25:06.000000000 +0000 @@ -1,4 +1,4 @@ -# ch-test-scope: standard +# ch-test-scope: full # Use Buster because Stretch JRE install fails with: # # tempnam() is so ludicrously insecure as to defy implementation.tempnam: Cannot allocate memory diff -Nru charliecloud-0.20/examples/spark/test.bats charliecloud-0.21/examples/spark/test.bats --- charliecloud-0.20/examples/spark/test.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/examples/spark/test.bats 2020-12-18 20:25:06.000000000 +0000 @@ -23,6 +23,10 @@ scope standard prerequisites_ok spark umask 0077 + + # Unset these Java variables so the container doesn't use host paths. + unset JAVA_BINDIR JAVA_HOME JAVA_ROOT + spark_dir=${TMP_}/spark # runs before each test, so no mktemp spark_config=$spark_dir spark_log=/tmp/sparklog diff -Nru charliecloud-0.20/.github/PERUSEME charliecloud-0.21/.github/PERUSEME --- charliecloud-0.20/.github/PERUSEME 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/.github/PERUSEME 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1,98 @@ +[This file is not called README because files named .github/README.* get +picked up by GitHub and used as the main project README.] + +This directory defines our GitHub Actions test suite setup. + +The basic strategy is to start one “job” per builder; these run in parallel. +Each job then cycles through several different configurations, which vary per +builder. It is configured to “fail fast”, i.e., if one of the jobs fails, the +others will be immediately cancelled. For example, we only run the quick test +suite on one builder, but if it fails everything will stop and you still get +notified quickly. 
+ +The number of concurrent jobs is not clear to me, but I’ve seen 7 and the +documentation [1] implies it’s at least 20 (though I assume there is some +global limit for OSS projects too). Nominally, jobs are started from the left +side of the list, so anything we think is likely to fail fast (e.g., the quick +scope) should be leftward; in practice it seems to be random. + +We could add more matrix dimensions, but then we’d have to deal with ordering +more carefully, and pass the Docker cache manually (or not use it for some +things). + + [1]: https://docs.github.com/en/free-pro-team@latest/actions/reference/usage-limits-billing-and-administration + +Conventions: + + * We install everything to start, then uninstall as needed for more + bare-bones tests. + + * For the “extra things” tests: + + * Docker is the fastest builder, so that’s where we put extra things. + + * We need to retain sudo for uninstalling stuff. + + * I could not figure out how to set a boolean variable for use in “if” + conditions. (I *did* get an environment variable to work, but not using + our set/unset convention, rather the strings “true” and “false”. This + seemed error-prone.) Therefore the extra things tests all use the full + expression. + +Miscellaneous notes and gotchas: + + * Runner specs (as of 2020-11-25), nominal: Azure Standard_DS2_v2 virtual + machine: 2 vCPUs, 7 GiB RAM, 15 GiB SSD storage. The OS image is + bare-bones but there is a lot of software installed in third-party + locations [1]. + + Looking at the actual VM provisioned, the disk specs are a little + different: it’s got an 84GiB root filesystem mounted, and another 9GiB + mounted on /mnt. With a little deleting, maybe we can make room for a + full-scope test. + + It does seem to boot faster than Travis; overall performance is worse; but + total test time is lower (Travis took 50–55 minutes to complete a passing + build). 
+ + [1]: https://github.com/actions/virtual-environments/blob/ubuntu20/20201116.1/images/linux/Ubuntu2004-README.md + + * GitHub doesn’t seem to notice our setup if .github is a symlink. :( + + * GitHub seems to want us to encapsulate some of the steps that are now just + shell scripts into “actions”. I haven’t looked into this. Issue #914. + + * Force-push does start a new build. + + * Commands in “run” blocks aren’t logged by default; you need “set -x” if + you want to see them. However there seems to be a race condition, so the + commands and their output aren’t always interleaved correctly. + + * “docker” does not require “sudo”. + + * There are several places where we configure, make, make install. These + need to be kept in sync. Perhaps there is an opportunity for an “Action” + here? But the configure output validation varies. + + * The .github directory doesn’t have a Makefile.am; the files are listed in + the root Makefile.am. + + * Most variables are strings. It’s easy to get into a situation where you + set a variable to “false” but it’s the string “false” so it’s true. + + * Viewing step output is glitchy: + + * While the job is in progress, sometimes the step headings are links and + sometimes they aren’t. + + * If it does work, you can’t scroll back to the start. + + * The “in progress” throbber seems to often be on the wrong heading. + + * When it’s over, sometimes clicking on a heading opens it but the content + is blank; in this case, clicking a different job and coming back seems + to fix things. + + * Previously we listed $CH_TEST_TARDIR and $CH_TEST_IMGDIR between phases. I + didn’t transfer that over. It must have been useful, so let’s pay + attention to see if it needs to be re-added. 
diff -Nru charliecloud-0.20/.github/workflows/main.yml charliecloud-0.21/.github/workflows/main.yml --- charliecloud-0.20/.github/workflows/main.yml 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/.github/workflows/main.yml 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1,276 @@ +name: test suite + +on: + pull_request: # all pull requests + push: + branches: [ master ] # all commits on master + schedule: + - cron: '0 2 * * 0' # every Sunday at 2:00am UTC (Saturday 7:00pm MST) + +jobs: + main: + runs-on: ubuntu-20.04 + timeout-minutes: 60 + strategy: + fail-fast: true # if any job fails, cancel the rest immediately + matrix: + builder: [none, docker, ch-image, buildah, buildah-runc, buildah-setuid] + keep_sudo: # if false, remove self from sudoers after install/setup + - false + include: + - builder: docker + keep_sudo: true + env: + CH_BUILDER: ${{ matrix.builder }} + CH_TEST_TARDIR: /mnt/tarballs + CH_TEST_IMGDIR: /mnt/images + CH_TEST_PERMDIRS: /mnt/perms_test /run/perms_test + ch_prefix: /var/tmp + + steps: + - uses: actions/checkout@v2 + + - name: early setup & validation + run: | + sudo chmod 1777 /mnt /usr/local/src + [[ -n $CH_BUILDER ]] + echo "ch_makej=-j$(getconf _NPROCESSORS_ONLN)" >> $GITHUB_ENV + # Remove sbin directories from $PATH (see issue #43). Assume none of + # these are the first entry in $PATH. + echo "PATH=$PATH" + path_new=$PATH + for i in /sbin /usr/sbin /usr/local/sbin; do + path_new=${path_new/:$i/} + done + echo "path_new=$path_new" + echo "PATH=$path_new" >> $GITHUB_ENV + + - name: print starting environment + run: | + env | egrep '^(PATH|USER)=' + env | egrep '^(ch|CH)_' + [[ $PATH != */usr/local/sbin* ]] # verify sbin removal; see above + id + pwd + getconf _NPROCESSORS_ONLN + free -m + df -h + + - name: install Bats + # We need (1) old Bats, not bats-core and (2) prior to commit 1735a4f, + # because this is what is provided in distros we need to support and + # it contains a bug we work around (see issue #552). 
+ run: | + cd /usr/local/src + git clone --depth 1 --branch v0.4.0 https://github.com/sstephenson/bats.git + cd bats + sudo ./install.sh /usr/local + command -v bats + bats --version + [[ $(command -v bats) == /usr/local/bin/bats ]] + [[ $(bats --version) == 'Bats 0.4.0' ]] + + - name: install/configure dependencies, all + run: | + # configure doesn't tell us about these. + sudo apt-get install pigz pv + # configure does tell us about these. + sudo apt-get install squashfs-tools squashfuse + # Track newest Sphinx in case it breaks things. + sudo pip3 install sphinx sphinx-rtd-theme + + - name: install/configure dependencies, ch-image + if: ${{ matrix.builder == 'ch-image' }} + run: | + # Use most current Lark rather than the one in Ubuntu b/c new + # versions sometimes break things. + sudo pip3 install lark-parser + + - name: install/configure dependencies, all Buildah + if: ${{ startsWith(matrix.builder, 'buildah') }} + run: | + command -v buildah + buildah --version + command -v runc + runc --version + # As of 2020-11-30, stock registries.conf is pretty simple; it + # includes Docker Hub (docker.io) and then quay.io. Still, use ours + # for stability. + cat /etc/containers/registries.conf + cat <<'EOF' | sudo tee /etc/containers/registries.conf + [registries.search] + registries = ['docker.io'] + EOF + + - name: install/configure dependencies, privileged Buildah + if: ${{ startsWith(matrix.builder, 'buildah-') }} + run: | + sudo usermod --add-subuids 10000-65536 $USER + sudo usermod --add-subgids 10000-65536 $USER + + - name: build/install from Git + run: | + ./autogen.sh + # Remove Autotools to make sure everything works without them. + sudo apt-get remove autoconf automake + # Configure and verify output. 
+ ./configure --prefix=$ch_prefix/from-git + set -x + fgrep 'documentation: yes' config.log + [[ $CH_BUILDER = buildah* ]] && fgrep 'with Buildah: yes' config.log + [[ $CH_BUILDER = docker ]] && fgrep 'with Docker: yes' config.log + [[ $CH_BUILDER = ch-image ]] && fgrep 'with ch-image(1): yes' config.log + fgrep 'at least one builder ... yes' config.log + fgrep 'ch-run(1): yes' config.log + fgrep 'complete test suite: yes' config.log + set +x + # Build and install. + make $ch_makej + sudo make $ch_makej install + bin/ch-run --version + $ch_prefix/from-git/bin/ch-run --version + + - name: late setup & validation + run: | + bin/ch-test --is-pedantic all + bin/ch-test --is-sudo all + + - name: make filesystem permissions fixtures + run: | + bin/ch-test mk-perm-dirs + + - name: configure sudo to user root, group non-root + if: ${{ matrix.keep_sudo }} + run: | + sudo sed -Ei 's/=\(ALL\)/=(ALL:ALL)/g' /etc/sudoers.d/runner + sudo cat /etc/sudoers.d/runner + + - name: remove sudo + if: ${{ ! matrix.keep_sudo }} + run: | + sudo rm /etc/sudoers.d/runner + ! sudo echo hello + + - name: build/install from tarball + if: ${{ matrix.builder == 'docker' && matrix.keep_sudo }} + run: | + # Create and unpack tarball. The wildcard saves us having to put the + # version in a variable. This assumes there isn't already a tarball + # or unpacked directory in $ch_prefix, which is true on the clean + # VMs GitHub gives us. Note that cd fails if it gets more than one + # argument, which helps, but this is probably kind of brittle. + make $ch_makej dist + mv charliecloud-*.tar.gz $ch_prefix + cd $ch_prefix + tar xf charliecloud-*.tar.gz + rm charliecloud-*.tar.gz # else cd fails with "too many arguments" + cd charliecloud-* + pwd + # Configure and verify output. 
+ ./configure --prefix=$ch_prefix/from-tarball + set -x + fgrep 'documentation: yes' config.log + [[ $CH_BUILDER = buildah* ]] && fgrep 'with Buildah: yes' config.log + [[ $CH_BUILDER = docker ]] && fgrep 'with Docker: yes' config.log + [[ $CH_BUILDER = ch-image ]] && fgrep 'with ch-image(1): yes' config.log + fgrep 'at least one builder ... yes' config.log + fgrep 'ch-run(1): yes' config.log + fgrep 'complete test suite: yes' config.log + set +x + # Build and install. + make $ch_makej + sudo make $ch_makej install + bin/ch-run --version + $ch_prefix/from-tarball/bin/ch-run --version + + - name: run test suite (Git WD, quick, squash) + if: ${{ matrix.builder == 'docker' && ! matrix.keep_sudo }} + run: | + bin/ch-test --scope=quick --pack-fmt=squash all + + - name: run test suite (Git WD, standard, squash) + run: | + bin/ch-test --pack-fmt=squash all + + - name: run test suite (installed from Git WD, standard, squash) + if: ${{ matrix.builder == 'docker' && ! matrix.keep_sudo }} + run: | + $ch_prefix/from-git/bin/ch-test --pack-fmt=squash all + + - name: run test suite (installed from tarball, standard, squash) + if: ${{ matrix.builder == 'docker' && matrix.keep_sudo }} + run: | + $ch_prefix/from-tarball/bin/ch-test --pack-fmt=squash all + + - name: rebuild with most things --disable’d + if: ${{ matrix.builder == 'docker' && ! matrix.keep_sudo }} + run: | + make distclean + ./configure --prefix=/doesnotexist \ + --disable-html --disable-man --disable-ch-image + set -x + fgrep 'HTML documentation ... no' config.log + fgrep 'man pages ... no' config.log + fgrep 'ch-image(1) ... no' config.log + fgrep 'with Docker: yes' config.log + fgrep 'with ch-image(1): no' config.log + fgrep 'at least one builder ... yes' config.log + fgrep 'ch-run(1): yes' config.log + fgrep 'complete test suite: yes' config.log + set +x + # Build. + make $ch_makej + bin/ch-run --version + + - name: run test suite (Git WD, standard, squash) + if: ${{ matrix.builder == 'docker' && ! 
matrix.keep_sudo }} + run: | + bin/ch-test --pack-fmt=squash all + + - name: remove non-essential dependencies + if: ${{ matrix.builder == 'docker' && matrix.keep_sudo }} + run: | + set -x + # This breaks lots of dependencies unrelated to our build but YOLO. + sudo dpkg --remove --force-depends \ + pigz \ + pv \ + python3-requests \ + squashfs-tools \ + squashfuse + sudo pip3 uninstall -y sphinx sphinx-rtd-theme + ! python3 -c 'import requests' + ! python3 -c 'import lark' + test -e bin/ch-image + bin/ch-test -f test/build/10_sanity.bats # issue #806 + + - name: rebuild + if: ${{ matrix.builder == 'docker' && matrix.keep_sudo }} + run: | + make distclean + ./configure --prefix=/doesnotexist + set -x + fgrep 'documentation: no' config.log + fgrep 'with Docker: yes' config.log + fgrep 'with ch-image(1): no' config.log + fgrep 'at least one builder ... yes' config.log + fgrep 'ch-run(1): yes' config.log + fgrep 'recommended tests with tarballs: yes' config.log + fgrep 'complete test suite: no' config.log + set +x + # Build and install. 
+ make $ch_makej + bin/ch-run --version + + - name: run test suite (Git WD, standard, tarballs) + if: ${{ matrix.builder == 'docker' && matrix.keep_sudo }} + run: | + bin/ch-test --pack-fmt=tar all + + - name: print ending environment + if: ${{ always() }} + run: | + free -m + df -h + du -sch $CH_TEST_TARDIR/* + du -sch $CH_TEST_IMGDIR/* diff -Nru charliecloud-0.20/lib/base.sh charliecloud-0.21/lib/base.sh --- charliecloud-0.20/lib/base.sh 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/lib/base.sh 2020-12-18 20:25:06.000000000 +0000 @@ -15,14 +15,14 @@ if [ -z "$CH_BUILDER" ]; then if command -v docker > /dev/null 2>&1; then export CH_BUILDER=docker - elif "${ch_bin}/ch-grow" --dependencies > /dev/null 2>&1; then - export CH_BUILDER=ch-grow + elif "${ch_bin}/ch-image" --dependencies > /dev/null 2>&1; then + export CH_BUILDER=ch-image else export CH_BUILDER=none fi fi case $CH_BUILDER in - buildah|buildah-runc|buildah-setuid|ch-grow|docker|none) + buildah|buildah-runc|buildah-setuid|ch-image|ch-grow|docker|none) ;; *) echo "unknown builder: $CH_BUILDER" 1>&2 diff -Nru charliecloud-0.20/lib/build.py charliecloud-0.21/lib/build.py --- charliecloud-0.20/lib/build.py 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/lib/build.py 2020-12-18 20:25:06.000000000 +0000 @@ -1,4 +1,4 @@ -# Implementation of "ch-grow build". +# Implementation of "ch-image build". import abc import ast @@ -6,6 +6,7 @@ import inspect import os import os.path +import pathlib import re import shutil import sys @@ -22,6 +23,9 @@ # Environment object. env = None +# Fakeroot configuration (initialized during FROM). +fakeroot_config = None + # Images that we are building. Each stage gets its own image. In this # dictionary, an image appears exactly once or twice. All images appear with # an int key counting stages up from zero. Images with a name (e.g., "FROM ... @@ -66,6 +70,10 @@ global cli cli = cli_ + # Check argument validity. 
+ if (cli.force and cli.no_force_detect): + ch.FATAL("--force and --no-force-detect are incompatible") + # Infer input file if needed. if (cli.file is None): cli.file = cli.context + "/Dockerfile" @@ -90,8 +98,6 @@ if (v is None): ch.FATAL("--build-arg: %s: no value and not in environment" % kv[0]) return (kv[0], v) - if (cli.build_arg is None): - cli.build_arg = list() cli.build_arg = dict( build_arg_get(i) for i in cli.build_arg ) # Finish CLI initialization. @@ -172,6 +178,13 @@ if (ml.instruction_ct == 0): ch.FATAL("no instructions found: %s" % cli.file) assert (image_i + 1 == image_ct) # should have errored already if not + if (cli.force): + if (fakeroot_config.inject_ct == 0): + assert (not fakeroot_config.init_done) + ch.WARNING("--force specified, but nothing to do") + else: + ch.INFO("--force: init OK & modified %d RUN instructions" + % fakeroot_config.inject_ct) ch.INFO("grown in %d instructions: %s" % (ml.instruction_ct, images[image_i])) @@ -353,13 +366,131 @@ def str_(self): return "%s -> %s" % (self.srcs, repr(self.dst)) + def copy_src_dir(self, src, dst): + """Copy the contents of directory src, named by COPY, either explicitly + or with wildcards, to dst. src might be a symlink, but dst is a + canonical path. Both must be at the top level of the COPY + instruction; i.e., this function must not be called recursively. dst + must exist already and be a directory. Unlike subdirectories, the + metadata of dst will not be altered to match src.""" + def onerror(x): + ch.FATAL("error scanning directory: %s: %s" % (x.filename, x.strerror)) + # Use Path objects in this method because the path arithmetic was + # getting too hard with strings. 
+ src = pathlib.Path(os.path.realpath(src)) + dst = pathlib.Path(dst) + assert (os.path.isdir(src) and not os.path.islink(src)) + assert (os.path.isdir(dst) and not os.path.islink(dst)) + ch.DEBUG("copying named directory: %s -> %s" % (src, dst), v=2) + for (dirpath, dirnames, filenames) in os.walk(src, onerror=onerror): + dirpath = pathlib.Path(dirpath) + subdir = dirpath.relative_to(src) + dst_dir = dst / subdir + # dirnames can contain symlinks, which we handle as files, so we'll + # rebuild it; the walk will not descend into those "directories". + dirnames2 = dirnames.copy() # shallow copy + dirnames[:] = list() # clear in place + for d in dirnames2: + d = pathlib.Path(d) + src_path = dirpath / d + dst_path = dst_dir / d + ch.DEBUG("dir: %s -> %s" % (src_path, dst_path), v=2) + if (os.path.islink(src_path)): + filenames.append(d) # symlink, handle as file + ch.DEBUG("symlink to dir, will handle as file", v=2) + continue + else: + dirnames.append(d) # directory, descend into later + # If destination exists, but isn't a directory, remove it. + if (os.path.exists(dst_path)): + if (os.path.isdir(dst_path) and not os.path.islink(dst_path)): + ch.DEBUG("dst_path exists and is a directory", v=2) + else: + ch.DEBUG("dst_path exists, not a directory, removing", v=2) + ch.unlink(dst_path) + # If destination directory doesn't exist, create it. + if (not os.path.exists(dst_path)): + ch.DEBUG("mkdir dst_path", v=2) + ch.ossafe(os.mkdir, "can't mkdir: %s" % dst_path, dst_path) + # Copy metadata, now that we know the destination exists and is a + # directory. 
+ ch.ossafe(shutil.copystat, + "can't copy metadata: %s -> %s" % (src_path, dst_path), + src_path, dst_path, follow_symlinks=False) + for f in filenames: + f = pathlib.Path(f) + src_path = dirpath / f + dst_path = dst_dir / f + ch.DEBUG("file or symlink via copy2: %s -> %s" + % (src_path, dst_path), v=2) + if (not (os.path.isfile(src_path) or os.path.islink(src_path))): + ch.FATAL("can't COPY: unknown file type: %s" % src_path) + if (os.path.exists(dst_path)): + ch.DEBUG("destination exists, removing", v=2) + if (os.path.isdir(dst_path) and not os.path.islink(dst_path)): + ch.rmtree(dst_path) + else: + ch.unlink(dst_path) + ch.copy2(src_path, dst_path, follow_symlinks=False) + + def copy_src_file(self, src, dst): + """Copy file src, named by COPY either explicitly or with wildcards, to + dst. src might be a symlink, but dst is a canonical path. Both must + be at the top level of the COPY instruction; i.e., this function must + not be called recursively. If dst is a directory, file should go in + that directory named src (i.e., the directory creation magic has + already happened).""" + assert (os.path.isfile(src)) + assert ( not os.path.exists(dst) + or (os.path.isdir(dst) and not os.path.islink(dst)) + or (os.path.isfile(dst) and not os.path.islink(dst))) + ch.DEBUG("copying named file: %s -> %s" % (src, dst), v=2) + ch.copy2(src, dst, follow_symlinks=True) + + def dest_realpath(self, unpack_path, dst): + """Return the canonicalized version of path dst within (canonical) image + path unpack_path. We can't use os.path.realpath() because if dst is + an absolute symlink, we need to use the *image's* root directory, not + the host. 
Thus, we have to resolve symlinks manually.""" + unpack_path = pathlib.Path(unpack_path) + dst_canon = pathlib.Path(unpack_path) + dst = pathlib.Path(dst) + dst_parts = list(reversed(dst.parts)) # easier to operate on end of list + iter_ct = 0 + while (len(dst_parts) > 0): + iter_ct += 1 + if (iter_ct > 100): # arbitrary + ch.FATAL("can't COPY: too many path components") + ch.DEBUG("current destination: %d %s" % (iter_ct, dst_canon), v=2) + #ch.DEBUG("parts remaining: %s" % dst_parts, v=2) + part = dst_parts.pop() + if (part == "/" or part == "//"): # 3 or more slashes yields "/" + ch.DEBUG("skipping root") + continue + cand = dst_canon / part + ch.DEBUG("checking: %s" % cand, v=2) + if (not cand.is_symlink()): + ch.DEBUG("not symlink", v=2) + dst_canon = cand + else: + target = pathlib.Path(os.readlink(cand)) + ch.DEBUG("symlink to: %s" % target, v=2) + assert (len(target.parts) > 0) # POSIX says no empty symlinks + if (target.is_absolute()): + ch.DEBUG("absolute") + dst_canon = pathlib.Path(unpack_path) + else: + ch.DEBUG("relative", v=2) + dst_parts.extend(reversed(target.parts)) + return dst_canon + def execute_(self): # Complain about unsupported stuff. if (self.options.pop("chown", False)): self.unsupported_forever_warn("--chown") # Any remaining options are invalid. self.options_assert_empty() - # Find the source directory. + # Find the context directory. if (self.from_ is None): context = cli.context else: @@ -377,65 +508,49 @@ else: ch.FATAL("COPY --from: stage %s does not exist" % self.from_) context = images[self.from_].unpack_path - ch.DEBUG("context: " + context) - # Do the copy. + context_canon = os.path.realpath(context) + ch.DEBUG("context: %s" % context) + # Expand source wildcards. 
srcs = list() for src in self.srcs: - if (os.path.normpath(src).startswith("..")): - ch.FATAL("can't COPY: %s climbs outside context" % src) for i in glob.glob(context + "/" + src): srcs.append(i) + ch.DEBUG("source: %s" % i) if (len(srcs) == 0): - ch.FATAL("can't COPY: no sources exist") - dst = images[image_i].unpack_path + "/" - if (not self.dst.startswith("/")): - dst += env.workdir + "/" - dst += self.dst + ch.FATAL("can't COPY: no sources found") + # Validate sources are within context directory. (Can't convert to + # canonical paths yet because we need the source path as given.) + for src in srcs: + src_canon = os.path.realpath(src) + if (not os.path.commonpath([src_canon, context_canon]) + .startswith(context_canon)): + ch.FATAL("can't COPY from outside context: %s" % src) + # Locate the destination. + unpack_canon = os.path.realpath(images[image_i].unpack_path) + if (self.dst.startswith("/")): + dst = self.dst + else: + dst = env.workdir + "/" + self.dst + ch.DEBUG("destination, as given: %s" % dst) + dst_canon = self.dest_realpath(unpack_canon, dst) # strips trailing slash + ch.DEBUG("destination, canonical: %s" % dst_canon) + if (not os.path.commonpath([dst_canon, unpack_canon]) + .startswith(unpack_canon)): + ch.FATAL("can't COPY: destination not in image: %s" % dst_canon) + # Create the destination directory if needed. if (dst.endswith("/") or len(srcs) > 1 or os.path.isdir(srcs[0])): - # Create destination directory. - if (dst.endswith("/")): - dst = dst[:-1] - if (os.path.exists(dst) and not os.path.isdir(dst)): - ch.FATAL("can't COPY: %s exists but is not a directory" % dst) - ch.mkdirs(dst) + if (not os.path.exists(dst_canon)): + ch.mkdirs(dst_canon) + elif (not os.path.isdir(dst_canon)): # not symlink b/c realpath() + ch.FATAL("can't COPY: not a directory: %s" % dst_canon) + # Copy each source. for src in srcs: - # Check for symlinks to outside context. 
- src_real = os.path.realpath(src) - context_real = os.path.realpath(context) - if (not os.path.commonpath([src_real, context_real]) \ - .startswith(context_real)): - ch.FATAL("can't COPY: %s climbs outside context via symlink" % src) - # Do the copy. - if (os.path.isfile(src)): # or symlink to file - ch.DEBUG("COPY via copy2 file %s to %s" % (src, dst)) - ch.copy2(src, dst, follow_symlinks=True) - elif (os.path.isdir(src)): # or symlink to directory - # Copy *contents* of src, not src itself. Note: shutil.copytree() - # has a parameter dirs_exist_ok that I think will make this easier - # in Python 3.8. - ch.DEBUG("COPY dir %s to %s" % (src, dst)) - if (not os.path.isdir(dst)): - ch.FATAL("can't COPY: destination not a directory: %s to %s" - % (src, dst)) - for src2_basename in ch.ossafe( - os.listdir, "can't list directory: %s" % src, src): - src2 = src + "/" + src2_basename - if (os.path.islink(src2)): - # Symlinks within directories do not get dereferenced. - ch.DEBUG("symlink via copy2: %s to %s" % (src2, dst)) - ch.copy2(src2, dst, follow_symlinks=False) - elif (os.path.isfile(src2)): # not symlink to file - ch.DEBUG("file via copy2: %s to %s" % (src2, dst)) - ch.copy2(src2, dst) - elif (os.path.isdir(src2)): # not symlink to directory - dst2 = dst + "/" + src2_basename - ch.DEBUG("directory via copytree: %s to %s" % (src2, dst2)) - ch.copytree(src2, dst2, symlinks=True, - ignore_dangling_symlinks=True) - else: - ch.FATAL("can't COPY unknown file type: %s" % src2) + if (os.path.isfile(src)): + self.copy_src_file(src, dst_canon) + elif (os.path.isdir(src)): + self.copy_src_dir(src, dst_canon) else: - ch.FATAL("can't COPY unknown file type: %s" % src) + ch.FATAL("can't COPY: unknown file type: %s" % src) class I_directive(Instruction_Supported_Never): @@ -529,9 +644,10 @@ self.base_image.pull_to_unpacked(fixup=True) image.copy_unpacked(self.base_image) env.reset() - # Inject fakeroot preparatory stuff if needed. 
- if (not cli.no_fakeroot): - fakeroot.inject_first(image.unpack_path, env.env_build) + # Find fakeroot configuration, if any. + global fakeroot_config + fakeroot_config = fakeroot.detect(image.unpack_path, + cli.force, cli.no_force_detect) def str_(self): alias = "AS %s" % self.alias if self.alias else "" @@ -540,17 +656,24 @@ class Run(Instruction): - def cmd_set(self, args): - # This can be called if RUN is erroneously placed before FROM; in this - # case there is no image yet, so don't inject. - if (cli.no_fakeroot or image_i not in images): - self.cmd = args - else: - self.cmd = fakeroot.inject_each(images[image_i].unpack_path, args) - def execute_(self): rootfs = images[image_i].unpack_path - ch.ch_run_modify(rootfs, self.cmd, env.env_build, env.workdir) + fakeroot_config.init_maybe(rootfs, self.cmd, env.env_build) + cmd = fakeroot_config.inject_run(self.cmd) + exit_code = ch.ch_run_modify(rootfs, cmd, env.env_build, env.workdir, + cli.bind, fail_ok=True) + if (exit_code != 0): + if (cli.force): + if (isinstance(fakeroot_config, fakeroot.Fakeroot_Noop)): + ch.ERROR("build failed: --force specified, but no suitable config found") + else: + pass # we did init --force OK but the build still failed + elif (not cli.no_force_detect): + if (fakeroot_config.init_done): + ch.ERROR("build failed: --force may fix it") + else: + ch.ERROR("build failed: current version of --force wouldn't help") + ch.FATAL("build failed: RUN command exited with %d" % exit_code) def str_(self): return str(self.cmd) @@ -560,8 +683,8 @@ def __init__(self, *args): super().__init__(*args) - self.cmd_set([ variables_sub(unescape(i), env.env_build) - for i in ch.tree_terminals(self.tree, "STRING_QUOTED")]) + self.cmd = [ variables_sub(unescape(i), env.env_build) + for i in ch.tree_terminals(self.tree, "STRING_QUOTED")] class I_run_shell(Run): @@ -570,7 +693,7 @@ super().__init__(*args) # FIXME: Can't figure out how to remove continuations at parse time. 
cmd = ch.tree_terminal(self.tree, "LINE").replace("\\\n", "") - self.cmd_set(["/bin/sh", "-c", cmd]) + self.cmd = ["/bin/sh", "-c", cmd] class I_workdir(Instruction): diff -Nru charliecloud-0.20/lib/charliecloud.py charliecloud-0.21/lib/charliecloud.py --- charliecloud-0.20/lib/charliecloud.py 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/lib/charliecloud.py 2020-12-18 20:25:06.000000000 +0000 @@ -55,7 +55,7 @@ ## Globals ## -# FIXME: currently set in ch-grow :P +# FIXME: currently set in ch-image :P CH_BIN = None CH_RUN = None @@ -64,6 +64,9 @@ log_festoon = False # If true, prepend pid and timestamp to chatter. log_fp = sys.stderr # File object to print logs to. +# Verify TLS certificates? Passed to requests. +tls_verify = True + # This is a general grammar for all the parsing we need to do. As such, you # must prepend a start rule before use. GRAMMAR = r""" @@ -192,6 +195,7 @@ "download_cache", "image_subdir", "layer_hashes", + "schema_version", "unpack_dir") def __init__(self, ref, download_cache, unpack_dir, image_subdir=None): @@ -204,6 +208,7 @@ else: self.image_subdir = image_subdir self.layer_hashes = None + self.schema_version = None def __str__(self): return str(self.ref) @@ -265,20 +270,28 @@ # Mount points. file_ensure_exists("%s/etc/hosts" % self.unpack_path) file_ensure_exists("%s/etc/resolv.conf" % self.unpack_path) + for i in range(10): + mkdirs("%s/mnt/%d" % (self.unpack_path, i)) - def flatten(self): + def flatten(self, last_layer=None): "Flatten the layers in the download cache into the unpack directory." 
+ if (last_layer is None): + last_layer = sys.maxsize layers = self.layers_read() self.validate_members(layers) self.whiteouts_resolve(layers) INFO("flattening image") self.unpack_create() for (i, (lh, (fp, members))) in enumerate(layers.items(), start=1): - INFO("layer %d/%d: %s: extracting" % (i, len(layers), lh[:7])) - try: - fp.extractall(path=self.unpack_path, members=members) - except OSError as x: - FATAL("can't extract layer %d: %s" % (i, x.strerror)) + if (i > last_layer): + INFO("layer %d/%d: %s: skipping per --last-layer" + % (i, len(layers), lh[:7])) + else: + INFO("layer %d/%d: %s: extracting" % (i, len(layers), lh[:7])) + try: + fp.extractall(path=self.unpack_path, members=members) + except OSError as x: + FATAL("can't extract layer %d: %s" % (i, x.strerror)) def layer_hashes_load(self): "Load the layer hashes from the manifest file." @@ -293,9 +306,28 @@ FATAL("can't parse manifest file: %s:%d: %s" % (self.manifest_path, x.lineno, x.msg)) try: - self.layer_hashes = [i["digest"].split(":")[1] for i in doc["layers"]] - except (AttributeError, KeyError, IndexError): - FATAL("can't parse manifest file: %s" % self.manifest_path) + schema_version = str(doc['schemaVersion']) + except KeyError: + FATAL("manifest file %s missing expected key 'schemaVersion'" + % self.manifest_path) + if (schema_version == '1'): + DEBUG('loading layer hashes from schema version 1 manifest') + try: + self.layer_hashes = [i["blobSum"].split(":")[1] + for i in doc["fsLayers"]] + except (KeyError, AttributeError, IndexError) as x: + FATAL("can't parse manifest file: %s:%d :%s" + % self.manifest_path, x.lineno, x.msg) + elif (schema_version == '2'): + DEBUG('loading layer hashes from schema version 2 manifest') + try: + self.layer_hashes = [i["digest"].split(":")[1] for i in doc["layers"]] + except (KeyError, AttributeError, IndexError): + FATAL("can't parse manifest file: %s:%d :%s" + % self.manifest_path, x.lineno, x.msg) + else: + FATAL("unsupported manifest schema version: %s" 
% schema_version) + self.schema_version = schema_version def layer_path(self, layer_hash): "Return the path to tarball for layer layer_hash." @@ -320,6 +352,10 @@ if (self.layer_hashes is None): self.layer_hashes_load() layers = collections.OrderedDict() + # Schema version one (v1) allows one or more empty layers for Dockerfile + # entries like CMD (https://github.com/containers/skopeo/issues/393). + # Unpacking an empty layer doesn't accomplish anything so we ignore them. + empty_cnt = 0 for (i, lh) in enumerate(self.layer_hashes, start=1): INFO("layer %d/%d: %s: listing" % (i, len(self.layer_hashes), lh[:7])) path = self.layer_path(lh) @@ -329,14 +365,23 @@ except tarfile.TarError as x: FATAL("cannot open: %s: %s" % (path, x)) members = collections.OrderedDict([(m, None) for m in members_list]) - layers[lh] = TT(fp, members) + if (lh in layers and len(members) > 0): + FATAL("duplicate non-empty layer %s" % lh[:7]) + if (len(members) > 0): + layers[lh] = TT(fp, members) + else: + empty_cnt += 1 + if (self.schema_version == '1'): + DEBUG('reversing layer order for schema version one (v1)') + layers = collections.OrderedDict(reversed(layers.items())) + DEBUG("skipped %d empty layers" % empty_cnt) return layers - def pull_to_unpacked(self, use_cache=True, fixup=False): + def pull_to_unpacked(self, use_cache=True, fixup=False, last_layer=None): """Pull and flatten image. If fixup, then also add the Charliecloud workarounds to the image directory.""" self.download(use_cache) - self.flatten() + self.flatten(last_layer) if (fixup): self.fixup() @@ -351,17 +396,22 @@ # Device or FIFO: Ignore. dev_ct += 1 del members[m] - if (m.islnk()): + elif (m.issym()): + # Symlink: Nothing to change, but accept it. + pass + elif (m.islnk()): # Hard link: Fail if pointing outside top level. (Note that we # let symlinks point wherever they want, because they aren't # interpreted until run time in a container.) 
self.validate_tar_link(self.layer_path(lh), m.name, m.linkname) - if (m.isdir()): - # Fix bad directory permissions (hello, Red Hat). + elif (m.isdir()): + # Directory: Fix bad permissions (hello, Red Hat). m.mode |= 0o700 - if (m.isfile()): - # Fix bad file permissions (HELLO RED HAT!!). + elif (m.isfile()): + # Regular file: Fix bad permissions (HELLO RED HAT!!). m.mode |= 0o600 + else: + FATAL("unknown member type: %s" % m.name) if (dev_ct > 0): INFO("layer %d/%d: %s: ignored %d devices and/or FIFOs" % (i, len(layers), lh[:7], dev_ct)) @@ -440,7 +490,7 @@ FATAL("can't flatten: %s exists but is not a directory" % self.unpack_path) if ( not os.path.isdir(self.unpack_path + "/bin") - or not os.path.isdir(self.unpack_path + "/lib") + or not os.path.isdir(self.unpack_path + "/dev") or not os.path.isdir(self.unpack_path + "/usr")): FATAL("can't flatten: %s exists but does not appear to be an image" % self.unpack_path) @@ -587,7 +637,10 @@ "Set defaults for all empty fields." if (self.host is None): self.host = "registry-1.docker.io" if (self.port is None): self.port = 443 - if (len(self.path) == 0): self.path = ["library"] + if (self.host == "registry-1.docker.io" and len(self.path) == 0): + # FIXME: For Docker Hub only, images with no path need a path of + # "library" substituted. Need to understand/document the rules here. + self.path = ["library"] if (self.tag is None and self.digest is None): self.tag = "latest" def from_tree(self, t): @@ -638,16 +691,22 @@ req.headers["Authorization"] = "Bearer %s" % self.token return req + def __str__(self): + return ("Bearer %s" % self.token[:32]) + class Null_Auth(requests.auth.AuthBase): def __call__(self, req): return req + def __str__(self): + return "no authorization" + def __init__(self, ref): # Need an image ref with all the defaults filled in. 
self.ref = ref.copy() self.ref.defaults_add() - self.auth = None + self.auth = self.Null_Auth() self.session = None if (verbose >= 2): http.client.HTTPConnection.debuglevel = 1 @@ -657,62 +716,85 @@ url_base = "https://%s:%d/v2" % (self.ref.host, self.ref.port) return "/".join((url_base, self.ref.path_full, type_, address)) - def authenticate_maybe(self, url): - """If we need to authenticate, do so using the 401 from url; otherwise - do nothing.""" - if (self.auth is None): - DEBUG("requesting auth parameters") - res = self.get_raw(url, expected_statuses=(401,200)) - if (res.status_code == 200): - self.auth = self.Null_Auth() - else: - if ("WWW-Authenticate" not in res.headers): - FATAL("WWW-Authenticate header not found") - auth = res.headers["WWW-Authenticate"] - if (not auth.startswith("Bearer ")): - FATAL("authentication scheme is not Bearer") - # Apparently parsing the WWW-Authenticate header correctly is - # pretty hard. This is a non-compliant regex kludge [1,2]. - # Alternatives include putting the grammar into Lark (this can be - # gotten by reading the RFCs enough) or using the www-authenticate - # library [3]. - # - # [1]: https://stackoverflow.com/a/1349528 - # [2]: https://stackoverflow.com/a/1547940 - # [3]: https://pypi.org/project/www-authenticate - authd = dict(re.findall(r'(?:(\w+)[:=] ?"?([\w.~:/?#@!$&()*+,;=\'\[\]-]+)"?)+', auth)) - DEBUG("WWW-Authenticate parse: %s" % authd, v=2) - for k in ("realm", "service", "scope"): - if (k not in authd): - FATAL("WWW-Authenticate missing key: %s" % k) - # Request auth token. 
- DEBUG("requesting anonymous auth token") - res = self.get_raw(authd["realm"], expected_statuses=(200,403), - params={"service": authd["service"], - "scope": authd["scope"]}) - if (res.status_code == 403): - INFO("anonymous access rejected") - username = input("Username: ") - password = getpass.getpass("Password: ") - auth = requests.auth.HTTPBasicAuth(username, password) - res = self.get_raw(authd["realm"], auth=auth, - params={"service": authd["service"], - "scope": authd["scope"]}) - token = res.json()["token"] - DEBUG("got token: %s..." % (token[:32])) - self.auth = self.Bearer_Auth(token) + def authenticate_basic(self, res, auth_d): + DEBUG("authenticating using Basic") + if ("realm" not in auth_d): + FATAL("WWW-Authenticate missing realm") + (username, password) = self.credentials_read() + self.auth = requests.auth.HTTPBasicAuth(username, password) + + def authenticate_bearer(self, res, auth_d): + DEBUG("authenticating using Bearer") + for k in ("realm", "service", "scope"): + if (k not in auth_d): + FATAL("WWW-Authenticate missing key: %s" % k) + # First, try for an anonymous auth token. If that fails, try for an + # authenticated token. + DEBUG("requesting anonymous auth token") + res = self.get_raw(auth_d["realm"], [200,403], + params={"service": auth_d["service"], + "scope": auth_d["scope"]}) + if (res.status_code == 403): + INFO("anonymous access rejected") + (username, password) = self.credentials_read() + auth = requests.auth.HTTPBasicAuth(username, password) + res = self.get_raw(auth_d["realm"], [200], auth=auth, + params={"service": auth_d["service"], + "scope": auth_d["scope"]}) + token = res.json()["token"] + DEBUG("received auth token: %s" % (token[:32])) + self.auth = self.Bearer_Auth(token) + + def authorize(self, res): + "Authorize using the WWW-Authenticate header in failed response res." + DEBUG("authorizing") + assert (res.status_code == 401) + # Get authentication instructions. 
+ if ("WWW-Authenticate" not in res.headers): + FATAL("WWW-Authenticate header not found") + auth_h = res.headers["WWW-Authenticate"] + DEBUG("WWW-Authenticate raw: %s" % auth_h) + # Parse the WWW-Authenticate header. Apparently doing this correctly is + # pretty hard. We use a non-compliant regex kludge [1,2]. Alternatives + # include putting the grammar into Lark (this can be gotten by reading + # the RFCs enough) or using the www-authenticate library [3]. + # + # [1]: https://stackoverflow.com/a/1349528 + # [2]: https://stackoverflow.com/a/1547940 + # [3]: https://pypi.org/project/www-authenticate + auth_type = auth_h.split()[0] + auth_d = dict(re.findall(r'(?:(\w+)[:=] ?"?([\w.~:/?#@!$&()*+,;=\'\[\]-]+)"?)+', auth_h)) + DEBUG("WWW-Authenticate parsed: %s %s" % (auth_type, auth_d)) + # Dispatch to proper method. + if (auth_type == "Bearer"): + self.authenticate_bearer(res, auth_d) + elif (auth_type == "Basic"): + self.authenticate_basic(res, auth_d) + else: + FATAL("unknown auth type: %s" % auth_h) def close(self): if (self.session is not None): self.session.close() - def get(self, url, path, headers=dict()): - """GET url, passing headers, including authentication and session magic, - and write the body of the response to path.""" + def credentials_read(self): + username = input("Username: ") + password = getpass.getpass("Password: ") + return (username, password) + + def get(self, url, path, headers=dict(), statuses=[200]): + """GET url, passing headers, and write the body of the response to path. + Use current session if there is one, or start a new one if not. 
+ Authenticate if needed.""" DEBUG("GETting: %s" % url) self.session_init_maybe() - self.authenticate_maybe(url) - res = self.get_raw(url, headers) + DEBUG("auth: %s" % self.auth) + res = self.get_raw(url, statuses+[401], headers) + if (res.status_code == 401): + DEBUG("HTTP 401 unauthorized") + self.authorize(res) + DEBUG("retrying with auth: %s" % self.auth) + res = self.get_raw(url, statuses, headers) try: fp = open_(path, "wb") ossafe(fp.write, "can't write: %s" % path, res.content) @@ -733,22 +815,25 @@ accept = "application/vnd.docker.distribution.manifest.v2+json" self.get(url, path, { "Accept": accept }) - def get_raw(self, url, headers=dict(), auth=None, expected_statuses=(200,), - **kwargs): - """GET url, passing headers, with no magic. If auth is None, use - self.auth (which might also be None). If status is not in - expected_statuses, barf with a fatal error. Pass kwargs unchanged to - requests.session.get().""" + def get_raw(self, url, statuses, headers=dict(), auth=None, **kwargs): + """GET url, expecting a status code in statuses, passing headers. + self.session must be valid. If auth is None, use self.auth (which + might also be None). If status is not in statuses, barf with a fatal + error. Pass kwargs unchanged to requests.session.get().""" if (auth is None): auth = self.auth try: res = self.session.get(url, headers=headers, auth=auth, **kwargs) - if (res.status_code not in expected_statuses): + if (res.status_code not in statuses): FATAL("HTTP GET failed; expected status %s but got %d: %s" - % (" or ".join(str(i) for i in expected_statuses), + % (" or ".join(str(i) for i in statuses), res.status_code, res.reason)) except requests.exceptions.RequestException as x: FATAL("HTTP GET failed: %s" % x) + # Log the rate limit headers if present. 
+ for h in ("RateLimit-Limit", "RateLimit-Remaining"): + if (h in res.headers): + DEBUG("%s: %s" % (h, res.headers[h])) return res def session_init_maybe(self): @@ -756,6 +841,7 @@ if (self.session is None): DEBUG("initializing session") self.session = requests.Session() + self.session.verify = tls_verify class TarFile(tarfile.TarFile): @@ -766,18 +852,19 @@ # class method TarFile.open(), and the source code recommends subclassing # TarFile [2]. # + # It's here because the standard library class has problems with symlinks + # and replacing one file type with another; see issues #819 and #825 as + # well as multiple unfixed Python bugs [e.g. 3,4,5]. We work around this + # with manual deletions. + # # [1]: https://docs.python.org/3/library/tarfile.html # [2]: https://github.com/python/cpython/blob/2bcd0fe7a5d1a3c3dd99e7e067239a514a780402/Lib/tarfile.py#L2159 + # [3]: https://bugs.python.org/issue35483 + # [4]: https://bugs.python.org/issue19974 + # [5]: https://bugs.python.org/issue23228 - def makefile(self, tarinfo, targetpath): - """If targetpath is a symlink, stock makefile() overwrites the *target* - of that symlink rather than replacing the symlink. This is a known, - but long-standing unfixed, bug in Python [1,2]. To work around this, - we manually delete targetpath if it exists and is a symlink. See - issue #819. 
- - [1]: https://bugs.python.org/issue35483 - [2]: https://bugs.python.org/issue19974""" + def clobber(self, targetpath, regulars=False, symlinks=False, dirs=False): + assert (regulars or symlinks or dirs) try: st = os.lstat(targetpath) except FileNotFoundError: @@ -789,17 +876,35 @@ FATAL("can't lstat: %s" % targetpath, targetpath) if (st is not None): if (stat.S_ISREG(st.st_mode)): - pass # regular file; do nothing (will be overwritten) - elif (stat.S_ISDIR(st.st_mode)): - FATAL("can't overwrite directory with regular file: %s" - % targetpath) + if (regulars): + unlink(targetpath) elif (stat.S_ISLNK(st.st_mode)): - unlink(targetpath) + if (symlinks): + unlink(targetpath) + elif (stat.S_ISDIR(st.st_mode)): + if (dirs): + rmtree(targetpath) else: FATAL("invalid file type 0%o in previous layer; see inode(7): %s" % (stat.S_IFMT(st.st_mode), targetpath)) + + def makedir(self, tarinfo, targetpath): + # Note: This gets called a lot, e.g. once for each component in the path + # of the member being extracted. 
+ DEBUG("makedir: %s" % targetpath, v=2) + self.clobber(targetpath, regulars=True, symlinks=True) + super().makedir(tarinfo, targetpath) + + def makefile(self, tarinfo, targetpath): + DEBUG("makefile: %s" % targetpath, v=2) + self.clobber(targetpath, symlinks=True, dirs=True) super().makefile(tarinfo, targetpath) + def makelink(self, tarinfo, targetpath): + DEBUG("makelink: %s -> %s" % (targetpath, tarinfo.linkname), v=2) + self.clobber(targetpath, regulars=True, symlinks=True, dirs=True) + super().makelink(tarinfo, targetpath) + ## Supporting functions ## @@ -820,19 +925,22 @@ def WARNING(*args, **kwargs): log(color="31m", prefix="warning: ", *args, **kwargs) -def ch_run_modify(img, args, env, workdir="/"): - args = [CH_BIN + "/ch-run", "-w", "--cd", workdir, "--uid=0", "--gid=0", - "--no-home", "--no-passwd", img, "--"] + args - cmd(args, env) +def ch_run_modify(img, args, env, workdir="/", binds=[], fail_ok=False): + args = ( [CH_BIN + "/ch-run"] + + ["-w", "-u0", "-g0", "--no-home", "--no-passwd", "--cd", workdir] + + sum([["-b", i] for i in binds], []) + + [img, "--"] + args) + return cmd(args, env, fail_ok) -def cmd(args, env=None): +def cmd(args, env=None, fail_ok=False): DEBUG("environment: %s" % env) DEBUG("executing: %s" % args) color_set("33m", sys.stdout) cp = subprocess.run(args, env=env, stdin=subprocess.DEVNULL) color_reset(sys.stdout) - if (cp.returncode): + if (not fail_ok and cp.returncode): FATAL("command failed with code %d: %s" % (cp.returncode, args[0])) + return cp.returncode def color_reset(*fps): for fp in fps: @@ -909,7 +1017,7 @@ DEBUG("verbose level: %d" % verbose) def mkdirs(path): - DEBUG("ensuring directory: " + path) + DEBUG("ensuring directory: %s" % path) try: os.makedirs(path, exist_ok=True) except OSError as x: @@ -941,12 +1049,18 @@ assert False, "unimplemented" def storage_env(): - """Return path to builder storage as configured by $CH_GROW_STORAGE, or the - default if that's not set.""" + """Return path to builder storage as 
configured by $CH_IMAGE_STORAGE, or + the default if that's not set.""" try: - return os.environ["CH_GROW_STORAGE"] + return os.environ["CH_IMAGE_STORAGE"] except KeyError: - return storage_default() + try: + p = os.environ["CH_GROW_STORAGE"] + WARNING("$CH_GROW_STORAGE is deprecated in favor of $CH_IMAGE_STORAGE") + WARNING("the old name will be removed in Charliecloud version 0.23") + return p + except KeyError: + return storage_default() def storage_default(): # FIXME: Perhaps we should use getpass.getuser() instead of the $USER @@ -957,7 +1071,7 @@ username = os.environ["USER"] except KeyError: FATAL("can't get username: $USER not set") - return "/var/tmp/%s/ch-grow" % username + return "/var/tmp/%s/ch-image" % username def symlink(target, source, clobber=False): if (clobber and os.path.isfile(source)): diff -Nru charliecloud-0.20/lib/fakeroot.py charliecloud-0.21/lib/fakeroot.py --- charliecloud-0.20/lib/fakeroot.py 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/lib/fakeroot.py 2020-12-18 20:25:06.000000000 +0000 @@ -5,16 +5,114 @@ ## Globals ## -# FIXME: document this config -# FIXME: sequence of command vs. one long command? - -DEFAULT_CONFIGS = [ +DEFAULT_CONFIGS = { # General notes: # - # 1. The first match here wins. + # 1. Semantics of these configurations. (Character limits are to support + # tidy code and message formatting.) + # + # a. This is a dictionary of configurations, which themselves are + # dictionaries. + # + # b. Key is an arbitrary tag; user-visible. There's no enforced + # character set but let's stick with [a-z0-9_] for now and limit to + # at most 10 characters. + # + # c. A configuration has the following keys. + # + # name ... Human-readable name for the configuration. Max 46 chars. + # + # match .. Tuple; first item is the name of a file and the second is + # a regular expression. If the regex matches any line in the + # file, that configuration is used for the image. + # + # init ... 
List of tuples containing POSIX shell commands to perform + # fakeroot installation and any other initialization steps. + # + # Item 1: Command to detect if the step is necessary. If the + # command exits successfully, the step is already + # complete; if unsuccessful, it is still needed. The sense + # of the test is so something like "is command FOO + # available?", which seems the most common command, does + # not require negation. + # + # The test should be fairly permissive; e.g., if the image + # already has a fakeroot implementation installed, but + # it's a different one than we would have chosen, the + # command should succeed. + # + # IMPORTANT: This command must have no side effects, + # because it is normally run in all matching images, even + # if --force is not specified. Note that talking to the + # internet is a side effect! + # + # Item 2: Command to do the init step. + # + # I.e., to perform each fakeroot initialization step, + # ch-image does roughly: + # + # if ( ! $CMD_1 ); then + # $CMD_2 + # fi + # + # For both commands, the output is visible to the user but + # is not analyzed. + # + # cmds ... List of RUN command words that need fakeroot injection. + # Each item in the list is matched against each + # whitespace-separated word in the RUN instructions. For + # example, suppose that each is the list "dnf", "rpm", and + # "yum"; consider the following RUN instructions: + # + # RUN ['dnf', 'install', 'foo'] + # RUN dnf install foo + # + # These are fairly standard forms. "dnf" matches both, the + # first on the first element in the list and the second + # after breaking the shell command on whitespace. + # + # RUN true&&dnf install foo + # + # This third example does *not* match (false negative) + # because breaking on whitespace yields "true&&dnf", + # "install", and "foo"; none of these words are "dnf". + # + # RUN echo dnf install foo # - # 2. 
There are three implementations of fakeroot that I could find: + # This final example *does* match (false positive) becaus + # the second word *is* "dnf"; the algorithm isn't smart + # enough to realize that it's an argument to "echo". + # + # The last two illustrate that the algorithm uses simple + # whitespace delimiters, not even a partial shell parser. + # + # each ... List of words to prepend to RUN instructions that match + # cmd_each. For example, if each is ["fr", "-z"], then these + # instructions: + # + # RUN ['dnf', 'install', 'foo'] + # RUN dnf install foo + # + # become: + # + # RUN ['fr', '-z', 'dnf', 'install', 'foo'] + # RUN ['fr', '-z', '/bin/sh', '-c', 'dnf install foo'] + # + # (Note that "/bin/sh -c" is how shell-form RUN instructions + # are executed regardless of --force.) + # + # 2. The first match wins. However, because dictionary ordering can't be + # relied on yet, since it was introduced in Python 3.6 [1], matches + # should be disjoint. + # + # [1]: https://docs.python.org/3/library/stdtypes.html#dict + # + # 3. A matching configuration is considered applicable if any of the + # fakeroot-able commands are present. We do nothing if the config isn't + # applicable. We do not look for other matches. + # + # 4. There are three implementations of fakeroot that I could find: # fakeroot, fakeroot-ng, and pseudo. As of 2020-09-02: # # * fakeroot-ng and pseudo use a daemon process, while fakeroot does @@ -35,7 +133,7 @@ # Generally, we select the first one that seems to work in the order # fakeroot, pseudo, fakeroot-ng. # - # 3. Why grep specified files vs. simpler alternatives? + # 5. Why grep a specified file vs. simpler alternatives? # # * Look at image name: Misses derived images, large number of tags # seems a maintenance headache, :latest changes. @@ -48,19 +146,45 @@ # # 1. CentOS seems to have only fakeroot, which is in EPEL, not the standard # repos. 
- - { "match": ("/etc/redhat-release", r"release 7\."), - "config": { "name": "CentOS/RHEL 7", - "first": ["yum install -y epel-release", - "yum install -y fakeroot"], - "cmds_each": ["dnf", "rpm", "yum"], - "each": ["fakeroot"] } }, - { "match": ("/etc/redhat-release", r"release 8\."), - "config": { "name": "CentOS/RHEL 8", - "first": ["dnf install -y epel-release", - "dnf install -y fakeroot"], - "cmds_each": ["dnf", "rpm", "yum"], - "each": ["fakeroot"] } }, + # + # 2. Enabling EPEL can have undesirable side effects, e.g. different + # version of things in the base repo that breaks other things. Thus, + # when we install EPEL, we don't enable it. Existing EPEL installations + # are left alone. + # + # 3. "yum repolist" has a lot of side effects, e.g. locking the RPM + # database and asking configured repos for something or other. + # + # 4. "dnf config-manager" (CentOS 8) requires installing dnf-plugins-core, + # which requires fakeroot, which we don't have when initializing + # fakeroot. So sed it is. :P + + "rhel7": + { "name": "CentOS/RHEL 7", + "match": ("/etc/redhat-release", r"release 7\."), + "init": [ ("command -v fakeroot > /dev/null", + "set -ex; " + "if ! grep -Eq '\[epel\]' /etc/yum.conf /etc/yum.repos.d/*; then " + "yum install -y epel-release; " + "yum-config-manager --disable epel; " + "fi; " + "yum --enablerepo=epel install -y fakeroot; ") ], + "cmds": ["dnf", "rpm", "yum"], + "each": ["fakeroot"] }, + + "rhel8": + { "name": "CentOS/RHEL 8", + "match": ("/etc/redhat-release", r"release 8\."), + "init": [ ("command -v fakeroot > /dev/null", + "set -ex; " + "if ! 
grep -Eq '\[epel\]' /etc/yum.conf /etc/yum.repos.d/*; then " + "dnf install -y epel-release; " + "ls -lh /etc/yum.repos.d; " + "sed -Ei 's/enabled=1$/enabled=0/g' /etc/yum.repos.d/epel*.repo; " + "fi; " + "dnf --enablerepo=epel install -y fakeroot; ") ], + "cmds": ["dnf", "rpm", "yum"], + "each": ["fakeroot"] }, # Debian notes: # @@ -78,53 +202,146 @@ # # Configuring apt not to use the sandbox seemed cleaner than deleting # this user and eliminates the warning. + # + # 2. If we wanted to test if a fakeroot package was installed, we could say: + # + # dpkg-query -Wf '${Package}\n' \ + # | egrep '^(fakeroot|fakeroot-ng|pseudo)$' + + "debSB": + { "name": "Debian 9 (Stretch) or 10 (Buster)", + "match": ("/etc/debian_version", r"^(9|10)\."), + "init": [ ("apt-config dump | fgrep -q 'APT::Sandbox::User \"root\"'" + " || ! fgrep -q _apt /etc/passwd", + "echo 'APT::Sandbox::User \"root\";'" + " > /etc/apt/apt.conf.d/no-sandbox"), + ("command -v fakeroot > /dev/null", + # update b/c base image ships with no package indexes + "apt-get update && apt-get install -y pseudo") ], + "cmds": ["apt", "apt-get", "dpkg"], + "each": ["fakeroot"] }, + +} + + +## Functions ### + +def detect(image, force, no_force_detect): + f = None + if (no_force_detect): + ch.DEBUG("not detecting --force config, per --no-force-detect") + else: + # Try to find a real fakeroot config. + for (tag, cfg) in DEFAULT_CONFIGS.items(): + try: + f = Fakeroot(image, tag, cfg, force) + break + except Config_Aint_Matched: + pass + # Report findings. 
+ if (f is None): + msg = "--force not available (no suitable config found)" + if (force): + ch.WARNING(msg) + else: + ch.DEBUG(msg) + else: + if (force): + adj = "will use" + else: + adj = "available" + ch.INFO("%s --force: %s: %s" % (adj, f.tag, f.name)) + # Wrap up + if (f is None): + f = Fakeroot_Noop() + return f + + +## Classes ## + +class Config_Aint_Matched(Exception): + pass + +class Fakeroot_Noop(): + + __slots__ = ("init_done", + "inject_ct") + + def __init__(self): + self.init_done = False + self.inject_ct = 0 - { "match": ("/etc/debian_version", r"^(9|10)\."), - "config": { "name": "Debian 9 (Stretch) or 10 (Buster)", - "first": -["echo 'APT::Sandbox::User \"root\";' > /etc/apt/apt.conf.d/no-sandbox", - "apt-get update", # base image ships with no package indexes - "apt-get install -y pseudo"], - "cmds_each": ["apt", "apt-get", "dpkg"], - "each": ["fakeroot"] } } -] - - -## Functions ## - -def config(img): - ch.DEBUG("fakeroot: checking configs: %s" % img) - for c in DEFAULT_CONFIGS: - (path, rx) = c["match"] - path_full = "%s/%s" % (img, path) - ch.DEBUG("fakeroot: checking %s: grep '%s' %s" - % (c["config"]["name"], rx, path)) - if (os.path.isfile(path_full) and ch.grep_p(path_full, rx)): - ch.DEBUG("fakeroot: using config %s" % c["config"]["name"]) - return c["config"] - ch.DEBUG("fakeroot: no config found") - return None - -def inject_each(img, args): - c = config(img) - if (c is None): + def init_maybe(self, img_path, args, env): + pass + + def inject_run(self, args): return args - # Match on words, not substrings. 
- for each in c["cmds_each"]: - for arg in args: - if (each in arg.split()): - return c["each"] + args - return args - -def inject_first(img, env): - c = config(img) - if (c is None): - return - if (os.path.exists("%s/ch/fakeroot-first-run")): - ch.DEBUG("fakeroot: already initialized") - return - ch.INFO("fakeroot: initializing for %s" % c["name"]) - for cl in c["first"]: - ch.INFO("fakeroot: $ %s" % cl) - args = ["/bin/sh", "-c", cl] - ch.ch_run_modify(img, args, env) + +class Fakeroot(): + + __slots__ = ("tag", + "name", + "init", + "cmds", + "each", + "init_done", + "inject_ct", + "inject_p") + + def __init__(self, image_path, tag, cfg, inject_p): + ch.DEBUG("workarounds: testing config: %s" % tag) + file_path = "%s/%s" % (image_path, cfg["match"][0]) + if (not ( os.path.isfile(file_path) + and ch.grep_p(file_path, cfg["match"][1]))): + raise Config_Aint_Matched(tag) + self.tag = tag + self.inject_ct = 0 + self.inject_p = inject_p + for i in ("name", "init", "cmds", "each"): + setattr(self, i, cfg[i]) + self.init_done = False + + def init_maybe(self, img_path, args, env): + if (not self.needs_inject(args)): + ch.DEBUG("workarounds: init: instruction doesn't need injection") + return + if (self.init_done): + ch.DEBUG("workarounds: init: already initialized") + return + for (i, (test_cmd, init_cmd)) in enumerate(self.init, 1): + ch.INFO("workarounds: init step %s: checking: $ %s" % (i, test_cmd)) + args = ["/bin/sh", "-c", test_cmd] + exit_code = ch.ch_run_modify(img_path, args, env, fail_ok=True) + if (exit_code == 0): + ch.INFO("workarounds: init step %d: exit code %d, step not needed" + % (i, exit_code)) + else: + if (not self.inject_p): + ch.INFO("workarounds: init step %d: no --force, skipping" % i) + else: + ch.INFO("workarounds: init step %d: $ %s" % (i, init_cmd)) + args = ["/bin/sh", "-c", init_cmd] + ch.ch_run_modify(img_path, args, env) + self.init_done = True + + def inject_run(self, args): + if (not self.needs_inject(args)): + ch.DEBUG("workarounds: 
RUN: instruction doesn't need injection") + return args + assert (self.init_done) + if (not self.inject_p): + ch.INFO("workarounds: RUN: available here with --force") + return args + args = self.each + args + self.inject_ct += 1 + ch.INFO("workarounds: RUN: new command: %s" % args) + return args + + def needs_inject(self, args): + """Return True if the command in args seems to need fakeroot injection, + False otherwise.""" + for word in self.cmds: + for arg in args: + if (word in arg.split()): # arg words separate by whitespace + return True + return False diff -Nru charliecloud-0.20/lib/misc.py charliecloud-0.21/lib/misc.py --- charliecloud-0.20/lib/misc.py 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/lib/misc.py 2020-12-18 20:25:06.000000000 +0000 @@ -65,7 +65,8 @@ ch.DEBUG("download cache: %s" % image.download_cache) ch.DEBUG("manifest: %s" % image.manifest_path) # Pull! - image.pull_to_unpacked(use_cache=(not cli.no_cache)) + image.pull_to_unpacked(use_cache=(not cli.no_cache), + last_layer=cli.last_layer) # Done. ch.INFO("done") diff -Nru charliecloud-0.20/Makefile.am charliecloud-0.21/Makefile.am --- charliecloud-0.20/Makefile.am 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/Makefile.am 2020-12-18 20:25:06.000000000 +0000 @@ -1,7 +1,7 @@ SUBDIRS = lib bin doc examples misc packaging test -# The Travis stuff isn't really relevant for the tarballs, but they should +# The CI stuff isn't really relevant for the tarballs, but they should # have complete source code. 
-EXTRA_DIST = .travis.yml +EXTRA_DIST = .github/PERUSEME .github/workflows/main.yml EXTRA_DIST += LICENSE README.rst VERSION autogen.sh diff -Nru charliecloud-0.20/misc/grep charliecloud-0.21/misc/grep --- charliecloud-0.20/misc/grep 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/misc/grep 2020-12-18 20:25:06.000000000 +0000 @@ -21,6 +21,7 @@ -o -path ./doc/doctrees \ -o -path ./doc/html \ -o -path ./doc/man \ + -o -path ./packaging/vagrant/.vagrant \ -o -name '*.pyc' \ -o -name configure \ -o -name 'config.*' \ diff -Nru charliecloud-0.20/packaging/fedora/build charliecloud-0.21/packaging/fedora/build --- charliecloud-0.20/packaging/fedora/build 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/packaging/fedora/build 2020-12-18 20:25:06.000000000 +0000 @@ -57,7 +57,7 @@ "--short", "HEAD"])[:-1] except subprocess.CalledProcessError as x: if (x.returncode != 1): raise - # Detached HEAD (e.g. Travis) is also fine; use commit hash. + # Detached HEAD (e.g. CI) is also fine; use commit hash. commit = subprocess.check_output(["git", "rev-parse", "--verify", "HEAD"])[:-1] rpm_release = "0" diff -Nru charliecloud-0.20/packaging/fedora/charliecloud.spec charliecloud-0.21/packaging/fedora/charliecloud.spec --- charliecloud-0.20/packaging/fedora/charliecloud.spec 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/packaging/fedora/charliecloud.spec 2020-12-18 20:25:06.000000000 +0000 @@ -115,6 +115,7 @@ %license LICENSE %doc README.rst %{?el7:README.EL7} %{_mandir}/man1/ch* +%{_mandir}/man7/charliecloud* %{_pkgdocdir}/examples # Library files. diff -Nru charliecloud-0.20/packaging/fedora/lib64.patch charliecloud-0.21/packaging/fedora/lib64.patch --- charliecloud-0.20/packaging/fedora/lib64.patch 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/packaging/fedora/lib64.patch 2020-12-18 20:25:06.000000000 +0000 @@ -68,9 +68,9 @@ . 
"${lib}/base.sh" set -e -diff -ur charliecloud/bin/ch-grow.py.in charliecloud-lib/bin/ch-grow.py.in ---- charliecloud/bin/ch-grow.py.in 2020-04-20 10:34:22.163969775 -0600 -+++ charliecloud-lib/bin/ch-grow.py.in 2020-04-20 11:53:58.006701265 -0600 +diff -ur charliecloud/bin/ch-image.py.in charliecloud-lib/bin/ch-image.py.in +--- charliecloud/bin/ch-image.py.in 2020-04-20 10:34:22.163969775 -0600 ++++ charliecloud-lib/bin/ch-image.py.in 2020-04-20 11:53:58.006701265 -0600 @@ -6,7 +6,7 @@ import sys diff -Nru charliecloud-0.20/packaging/vagrant/Vagrantfile charliecloud-0.21/packaging/vagrant/Vagrantfile --- charliecloud-0.20/packaging/vagrant/Vagrantfile 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/packaging/vagrant/Vagrantfile 2020-12-18 20:25:06.000000000 +0000 @@ -235,8 +235,8 @@ sudo cp tests/registries.conf /etc/containers EOF - # Install ch-grow dependencies. - c.vm.provision "ch-grow", type: "shell", privileged: false, + # Install ch-image dependencies. + c.vm.provision "ch-image", type: "shell", privileged: false, inline: <<-EOF set -e cd /usr/local/src @@ -329,7 +329,7 @@ fi sudo -iu $user -- sh -c "ch-test --pedantic yes -b docker all" sudo -iu $user -- sh -c "ch-test --pedantic yes -b buildah all" - sudo -iu $user -- sh -c "ch-test --pedantic yes -b ch-grow all" + sudo -iu $user -- sh -c "ch-test --pedantic yes -b ch-image all" EOF end diff -Nru charliecloud-0.20/test/build/10_sanity.bats charliecloud-0.21/test/build/10_sanity.bats --- charliecloud-0.20/test/build/10_sanity.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/build/10_sanity.bats 2020-12-18 20:25:06.000000000 +0000 @@ -22,7 +22,7 @@ # yielded hundreds of false positives but zero actual bugs. 
scope quick echo "version: ${ch_version}" - re='^0\.[0-9]+(\.[0-9]+)?(~pre\+[A-Za-z0-9]+(\.[0-9a-f]+(\.dirty)?)?)?$' + re='^0\.[0-9]+(\.[0-9]+)?(~pre\+([A-Za-z0-9]+\.)?([0-9a-f]+(\.dirty)?)?)?$' [[ $ch_version =~ $re ]] } @@ -107,6 +107,7 @@ shellcheck -e SC1090,SC2002,SC2154 "$i" done < <( find "$ch_base" \ \( -name .git \ + -o -name .vagrant \ -o -name build-aux \) -prune \ -o \( -name '*.sh' -print \) \ -o \( -name '*.bash' -print \) \ diff -Nru charliecloud-0.20/test/build/50_ch-grow.bats charliecloud-0.21/test/build/50_ch-grow.bats --- charliecloud-0.20/test/build/50_ch-grow.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/build/50_ch-grow.bats 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -load ../common - -setup () { - scope standard - [[ $CH_BUILDER = ch-grow ]] || skip 'ch-grow only' -} - -@test 'ch-grow common options' { - # no common options - run ch-grow storage-path - echo "$output" - [[ $status -eq 0 ]] - [[ $output != *'verbose level'* ]] - - # before only - run ch-grow -vv storage-path - echo "$output" - [[ $status -eq 0 ]] - [[ $output = *'verbose level: 2'* ]] - - # after only - run ch-grow storage-path -vv - echo "$output" - [[ $status -eq 0 ]] - [[ $output = *'verbose level: 2'* ]] - - # before and after; after wins - run ch-grow -vv storage-path -v - echo "$output" - [[ $status -eq 0 ]] - [[ $output = *'verbose level: 1'* ]] -} - -@test 'ch-grow list' { - run ch-grow list - echo "$output" - [[ $status -eq 0 ]] - [[ $output = *"00_tiny"* ]] -} - -@test 'ch-grow storage-path' { - run ch-grow storage-path - echo "$output" - [[ $status -eq 0 ]] - [[ $output = /* ]] # absolute path - [[ $CH_GROW_STORAGE && $output = "$CH_GROW_STORAGE" ]] # match what we set -} diff -Nru charliecloud-0.20/test/build/50_ch-image.bats charliecloud-0.21/test/build/50_ch-image.bats --- charliecloud-0.20/test/build/50_ch-image.bats 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/test/build/50_ch-image.bats 2020-12-18 20:25:06.000000000 
+0000 @@ -0,0 +1,60 @@ +load ../common + +setup () { + scope standard + [[ $CH_BUILDER = ch-image ]] || skip 'ch-image only' +} + +@test 'ch-image common options' { + # no common options + run ch-image storage-path + echo "$output" + [[ $status -eq 0 ]] + [[ $output != *'verbose level'* ]] + + # before only + run ch-image -vv storage-path + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'verbose level: 2'* ]] + + # after only + run ch-image storage-path -vv + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'verbose level: 2'* ]] + + # before and after; after wins + run ch-image -vv storage-path -v + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'verbose level: 1'* ]] +} + +@test 'ch-image list' { + run ch-image list + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *"00_tiny"* ]] +} + +@test 'ch-image storage-path' { + run ch-image storage-path + echo "$output" + [[ $status -eq 0 ]] + [[ $output = /* ]] # absolute path + [[ $CH_IMAGE_STORAGE && $output = "$CH_IMAGE_STORAGE" ]] # match what we set +} + +@test 'ch-image build --bind' { + run ch-image --no-cache build -t build-bind -f - \ + -b ./fixtures -b ./fixtures:/mnt/9 . <<'EOF' +FROM 00_tiny +RUN mount +RUN ls -lR /mnt +RUN test -f /mnt/0/empty-file +RUN test -f /mnt/9/empty-file +EOF + echo "$output" + [[ $status -eq 0 ]] +} diff -Nru charliecloud-0.20/test/build/50_dockerfile.bats charliecloud-0.21/test/build/50_dockerfile.bats --- charliecloud-0.20/test/build/50_dockerfile.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/build/50_dockerfile.bats 2020-12-18 20:25:06.000000000 +0000 @@ -1,38 +1,37 @@ load ../common - @test 'Dockerfile: syntax quirks' { # These should all yield an output image, but we don't actually care about # it, so re-use the same one. scope standard - [[ $CH_BUILDER = ch-grow ]] || skip 'ch-grow only' # FIXME: other builders? + [[ $CH_BUILDER = ch-image ]] || skip 'ch-image only' # FIXME: other builders? # No newline at end of file. 
printf 'FROM 00_tiny\nRUN echo hello' \ - | ch-grow build -t syntax-quirks -f - . + | ch-image build -t syntax-quirks -f - . # Newline before FROM. - ch-grow build -t syntax-quirks -f - . <<'EOF' + ch-image build -t syntax-quirks -f - . <<'EOF' FROM 00_tiny RUN echo hello EOF # Comment before FROM. - ch-grow build -t syntax-quirks -f - . <<'EOF' + ch-image build -t syntax-quirks -f - . <<'EOF' # foo FROM 00_tiny RUN echo hello EOF # Single instruction. - ch-grow build -t syntax-quirks -f - . <<'EOF' + ch-image build -t syntax-quirks -f - . <<'EOF' FROM 00_tiny EOF # Whitespace around comment hash. - run ch-grow -v build -t syntax-quirks -f - . <<'EOF' + run ch-image -v build -t syntax-quirks -f - . <<'EOF' FROM 00_tiny #no whitespace #before only @@ -49,10 +48,10 @@ @test 'Dockerfile: syntax errors' { scope standard - [[ $CH_BUILDER = ch-grow ]] || skip 'ch-grow only' + [[ $CH_BUILDER = ch-image ]] || skip 'ch-image only' # Bad instruction. Also, -v should give interal blabber about the grammar. - run ch-grow -v build -t foo -f - . <<'EOF' + run ch-image -v build -t foo -f - . <<'EOF' FROM 00_tiny WEIRDAL EOF @@ -65,7 +64,7 @@ [[ $output = *'Expecting: {'* ]] # Bad long option. - run ch-grow build -t foo -f - . <<'EOF' + run ch-image build -t foo -f - . <<'EOF' FROM 00_tiny COPY --chown= foo bar EOF @@ -74,13 +73,13 @@ [[ $output = *"can't parse: -:2,14"* ]] # Empty input. - run ch-grow build -t foo -f /dev/null . + run ch-image build -t foo -f /dev/null . echo "$output" [[ $status -eq 1 ]] [[ $output = *'no instructions found: /dev/null'* ]] # Newline only. - run ch-grow build -t foo -f - . <<'EOF' + run ch-image build -t foo -f - . <<'EOF' EOF echo "$output" @@ -88,7 +87,7 @@ [[ $output = *'no instructions found: -'* ]] # Comment only. - run ch-grow build -t foo -f - . <<'EOF' + run ch-image build -t foo -f - . <<'EOF' # foo EOF echo "$output" @@ -96,7 +95,7 @@ [[ $output = *'no instructions found: -'* ]] # Only newline, then comment. 
- run ch-grow build -t foo -f - . <<'EOF' + run ch-image build -t foo -f - . <<'EOF' # foo EOF @@ -105,7 +104,7 @@ [[ $output = *'no instructions found: -'* ]] # Non-ARG instruction before FROM - run ch-grow build -t foo -f - . <<'EOF' + run ch-image build -t foo -f - . <<'EOF' RUN echo uh oh FROM 00_tiny EOF @@ -117,10 +116,10 @@ @test 'Dockerfile: semantic errors' { scope standard - [[ $CH_BUILDER = ch-grow ]] || skip 'ch-grow only' + [[ $CH_BUILDER = ch-image ]] || skip 'ch-image only' # Repeated instruction option. - run ch-grow build -t foo -f - . <<'EOF' + run ch-image build -t foo -f - . <<'EOF' FROM 00_tiny COPY --chown=foo --chown=bar fixtures/empty-file . EOF @@ -129,7 +128,7 @@ [[ $output = *' 2 COPY: repeated option --chown'* ]] # COPY invalid option. - run ch-grow build -t foo -f - . <<'EOF' + run ch-image build -t foo -f - . <<'EOF' FROM 00_tiny COPY --foo=foo fixtures/empty-file . EOF @@ -138,7 +137,7 @@ [[ $output = *'COPY: invalid option --foo'* ]] # FROM invalid option. - run ch-grow build -t foo -f - . <<'EOF' + run ch-image build -t foo -f - . <<'EOF' FROM --foo=bar 00_tiny EOF echo "$output" @@ -151,10 +150,10 @@ # This test also creates images we don't care about. scope standard - [[ $CH_BUILDER = ch-grow ]] || skip 'ch-grow only' + [[ $CH_BUILDER = ch-image ]] || skip 'ch-image only' # ARG before FROM - run ch-grow build -t not-yet-supported -f - . <<'EOF' + run ch-image build -t not-yet-supported -f - . <<'EOF' ARG foo=bar FROM 00_tiny EOF @@ -163,7 +162,7 @@ [[ $output = *'warning: ARG before FROM not yet supported; see issue #779'* ]] # COPY list form - run ch-grow build -t not-yet-supported -f - . <<'EOF' + run ch-image build -t not-yet-supported -f - . <<'EOF' FROM 00_tiny COPY ["fixtures/empty-file", "."] EOF @@ -172,7 +171,7 @@ [[ $output = *'error: not yet supported: issue #784: COPY list form'* ]] # FROM --platform - run ch-grow build -t not-yet-supported -f - . <<'EOF' + run ch-image build -t not-yet-supported -f - . 
<<'EOF' FROM --platform=foo 00_tiny EOF echo "$output" @@ -180,7 +179,7 @@ [[ $output = *'error: not yet supported: issue #778: FROM --platform'* ]] # other instructions - run ch-grow build -t unsupported -f - . <<'EOF' + run ch-image build -t unsupported -f - . <<'EOF' FROM 00_tiny ADD foo CMD foo @@ -200,7 +199,7 @@ [[ $output = *'warning: not yet supported, ignored: issue #789: SHELL instruction'* ]] # .dockerignore files - run ch-grow build -t not-yet-supported -f - . <<'EOF' + run ch-image build -t not-yet-supported -f - . <<'EOF' FROM 00_tiny EOF echo "$output" @@ -208,14 +207,14 @@ [[ $output = *'warning: not yet supported, ignored: issue #777: .dockerignore file'* ]] # URL (Git repo) contexts - run ch-grow build -t not-yet-supported -f - \ + run ch-image build -t not-yet-supported -f - \ git@github.com:hpc/charliecloud.git <<'EOF' FROM 00_tiny EOF echo "$output" [[ $status -eq 1 ]] [[ $output = *'error: not yet supported: issue #773: URL context'* ]] - run ch-grow build -t not-yet-supported -f - \ + run ch-image build -t not-yet-supported -f - \ https://github.com/hpc/charliecloud.git <<'EOF' FROM 00_tiny EOF @@ -224,7 +223,7 @@ [[ $output = *'error: not yet supported: issue #773: URL context'* ]] # variable expansion modifiers - run ch-grow build -t not-yet-supported -f - . <<'EOF' + run ch-image build -t not-yet-supported -f - . <<'EOF' FROM 00_tiny ARG foo=README COPY fixtures/${foo:+bar} . @@ -233,7 +232,7 @@ [[ $status -eq 1 ]] # shellcheck disable=SC2016 [[ $output = *'error: modifiers ${foo:+bar} and ${foo:-bar} not yet supported (issue #774)'* ]] - run ch-grow build -t not-yet-supported -f - . <<'EOF' + run ch-image build -t not-yet-supported -f - . <<'EOF' FROM 00_tiny ARG foo=README COPY fixtures/${foo:-bar} . @@ -249,10 +248,10 @@ # This test also creates images we don't care about. 
scope standard - [[ $CH_BUILDER = ch-grow ]] || skip 'ch-grow only' + [[ $CH_BUILDER = ch-image ]] || skip 'ch-image only' # parser directives - run ch-grow build -t unsupported -f - . <<'EOF' + run ch-image build -t unsupported -f - . <<'EOF' # escape=foo # syntax=foo #syntax=foo @@ -268,7 +267,7 @@ [[ $(echo "$output" | grep -Fc 'parser directives') -eq 5 ]] # COPY --from - run ch-grow build -t unsupported -f - . <<'EOF' + run ch-image build -t unsupported -f - . <<'EOF' FROM 00_tiny COPY --chown=foo fixtures/empty-file . EOF @@ -277,7 +276,7 @@ [[ $output = *'warning: not supported, ignored: COPY --chown'* ]] # Unsupported instructions - run ch-grow build -t unsupported -f - . <<'EOF' + run ch-image build -t unsupported -f - . <<'EOF' FROM 00_tiny EXPOSE foo HEALTHCHECK foo @@ -377,11 +376,11 @@ @test 'Dockerfile: ARG and ENV values' { - # We use full scope for builders other than ch-grow because (1) with - # ch-grow, we are responsible for --build-arg being implemented correctly + # We use full scope for builders other than ch-image because (1) with + # ch-image, we are responsible for --build-arg being implemented correctly # and (2) Docker and Buildah take a full minute for this test, vs. three - # seconds for ch-grow. - if [[ $CH_BUILDER = ch-grow ]]; then + # seconds for ch-image. + if [[ $CH_BUILDER = ch-image ]]; then scope standard elif [[ $CH_BUILDER = none ]]; then skip 'no builder' @@ -392,7 +391,7 @@ # Note that this test illustrates a number of behavior differences between # the builders. For most of these, but not all, Docker and Buildah have - # the same behavior and ch-grow differs. + # the same behavior and ch-image differs. 
echo '*** default (no --build-arg)' env_expected=$(cat <<'EOF' @@ -440,7 +439,7 @@ diff -u <(echo "$env_expected") <(echo "$env_actual") echo '*** one --build-arg from environment' - if [[ $CH_BUILDER == ch-grow ]]; then + if [[ $CH_BUILDER == ch-image ]]; then env_expected=$(cat <<'EOF' chse_arg1_df=foo1 chse_arg2_df=arg2 @@ -520,7 +519,7 @@ diff -u <(echo "$env_expected") <(echo "$env_actual") echo '*** two --build-arg with substitution' - if [[ $CH_BUILDER == ch-grow ]]; then + if [[ $CH_BUILDER == ch-image ]]; then env_expected=$(cat <<'EOF' chse_arg2_df=bar2 chse_arg3_df=bar3 bar2 @@ -553,7 +552,7 @@ run ch-build --build-arg chse_doesnotexist=foo \ --no-cache -t argenv -f ./Dockerfile.argenv . echo "$output" - if [[ $CH_BUILDER = ch-grow ]]; then + if [[ $CH_BUILDER = ch-image ]]; then [[ $status -eq 1 ]] else [[ $status -eq 0 ]] @@ -565,7 +564,7 @@ run ch-build --build-arg chse_arg1_df \ --no-cache -t argenv -f ./Dockerfile.argenv . echo "$output" - if [[ $CH_BUILDER = ch-grow ]]; then + if [[ $CH_BUILDER = ch-image ]]; then [[ $status -eq 1 ]] [[ $output = *'--build-arg: chse_arg1_df: no value and not in environment'* ]] else @@ -580,7 +579,7 @@ [[ $CH_BUILDER = buildah* ]] && skip 'Buildah untested' # Dockerfile on stdin, so no context directory. - if [[ $CH_BUILDER != ch-grow ]]; then # ch-grow doesn't support this yet + if [[ $CH_BUILDER != ch-image ]]; then # ch-image doesn't support this yet run ch-build -t foo - <<'EOF' FROM 00_tiny COPY doesnotexist . @@ -599,17 +598,17 @@ # SRC not inside context directory. # # Case 1: leading "..". - run ch-build -t foo -f - . <<'EOF' + run ch-build -t foo -f - sotest <<'EOF' FROM 00_tiny -COPY ../foo . +COPY ../common.bash . EOF echo "$output" [[ $status -ne 0 ]] [[ $output = *'outside'*'context'* ]] # Case 2: ".." inside path. - run ch-build -t foo -f - . <<'EOF' + run ch-build -t foo -f - sotest <<'EOF' FROM 00_tiny -COPY foo/../../baz . +COPY lib/../../common.bash . 
EOF echo "$output" [[ $status -ne 0 ]] @@ -668,9 +667,9 @@ EOF echo "$output" [[ $status -ne 0 ]] - if [[ $CH_BUILDER = ch-grow ]]; then + if [[ $CH_BUILDER = ch-image ]]; then # This diagnostic is not fantastic, but it's what we got for now. - [[ $output = *'no sources exist'* ]] + [[ $output = *'no sources found'* ]] else [[ $output = *'doesnotexist:'*'o such file or directory'* ]] fi @@ -702,7 +701,7 @@ echo "$output" [[ $status -ne 0 ]] case $CH_BUILDER in - ch-grow) + ch-image) [[ $output = *'current stage'* ]] ;; docker) @@ -721,7 +720,7 @@ echo "$output" [[ $status -ne 0 ]] case $CH_BUILDER in - ch-grow) + ch-image) [[ $output = *'does not exist'* ]] ;; docker) @@ -740,7 +739,7 @@ echo "$output" [[ $status -ne 0 ]] case $CH_BUILDER in - ch-grow) + ch-image) [[ $output = *'does not exist'* ]] ;; docker) @@ -760,7 +759,7 @@ echo "$output" [[ $status -ne 0 ]] case $CH_BUILDER in - ch-grow) + ch-image) [[ $output = *'does not exist yet'* ]] ;; docker) @@ -780,7 +779,7 @@ echo "$output" [[ $status -ne 0 ]] case $CH_BUILDER in - ch-grow) + ch-image) [[ $output = *'does not exist'* ]] [[ $output != *'does not exist yet'* ]] # so we review test ;; @@ -801,7 +800,7 @@ echo "$output" [[ $status -ne 0 ]] case $CH_BUILDER in - ch-grow) + ch-image) [[ $output = *'invalid negative stage index'* ]] ;; docker) diff -Nru charliecloud-0.20/test/build/50_fakeroot.bats charliecloud-0.21/test/build/50_fakeroot.bats --- charliecloud-0.20/test/build/50_fakeroot.bats 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/test/build/50_fakeroot.bats 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1,467 @@ +load ../common + +# shellcheck disable=SC2034 +tag='ch-image --force' + +setup () { + [[ $CH_BUILDER = ch-image ]] || skip 'ch-image only' +} + +@test "${tag}: no matching distro" { + scope standard + + # without --force + run ch-image -v build -t fakeroot-temp -f - . 
<<'EOF' +FROM alpine:3.9 +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'--force not available (no suitable config found)'* ]] + + # with --force + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM alpine:3.9 +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'--force not available (no suitable config found)'* ]] +} + +@test "${tag}: --no-force-detect" { + scope standard + + run ch-image -v build --no-force-detect -t fakeroot-temp -f - . <<'EOF' +FROM alpine:3.9 +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'not detecting --force config, per --no-force-detect'* ]] + +} + +@test "${tag}: misc errors" { + scope standard + + run ch-image build --force --no-force-detect . + echo "$output" + [[ $status -eq 1 ]] + [[ $output = 'error'*'are incompatible'* ]] +} + +@test "${tag}: multiple RUN" { + scope standard + + # 1. List form of RUN. + # 2. apt-get not at beginning. + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM debian:buster +RUN true +RUN true && apt-get update +RUN ["apt-get", "install", "-y", "hello"] +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $(echo "$output" | grep -Fc 'init step 1: checking: $') -eq 1 ]] + [[ $(echo "$output" | grep -Fc 'init step 1: $') -eq 1 ]] + [[ $(echo "$output" | grep -Fc 'RUN: new command:') -eq 2 ]] + [[ $output = *'init: already initialized'* ]] + [[ $output = *'--force: init OK & modified 2 RUN instructions'* ]] + [[ $output = *'grown in 4 instructions: fakeroot-temp'* ]] +} + +@test "${tag}: CentOS 7: unneeded, no --force, build succeeds" { + scope standard + # no commands that may need it, without --force, build succeeds + # also: correct config, last config tested is the one selected + run ch-image -v build -t fakeroot-temp -f - . 
<<'EOF' +FROM centos:7 +RUN true +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'available --force: rhel7'* ]] + [[ $output = *$'testing config: rhel7\navailable --force'* ]] +} + +@test "${tag}: CentOS 7: unneeded, no --force, build fails" { + scope full + # no commands that may need it, without --force, build fails + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM centos:7 +RUN false +EOF + echo "$output" + [[ $status -eq 1 ]] + [[ $output = *"build failed: current version of --force wouldn't help"* ]] + [[ $output = *'build failed: RUN command exited with 1'* ]] +} + +@test "${tag}: CentOS 7: unneeded, with --force" { + scope full + # no commands that may need it, with --force, warning + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM centos:7 +RUN true +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'warning: --force specified, but nothing to do'* ]] +} + +@test "${tag}: CentOS 7: maybe needed but actually not, no --force" { + scope full + # commands that may need it, but turns out they don’t, without --force + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM centos:7 +RUN yum install -y ed +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'available --force'* ]] + [[ $output = *'RUN: available here with --force'* ]] +} + +@test "${tag}: CentOS 7: maybe needed but actually not, with --force" { + scope full + # commands that may need it, but turns out they don’t, with --force + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM centos:7 +RUN yum install -y ed +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'will use --force'* ]] + [[ $output = *'--force: init OK & modified 1 RUN instructions'* ]] +} + +@test "${tag}: CentOS 7: needed but no --force" { + scope full + # commands that may need it, they do, fail & suggest + run ch-image -v build -t fakeroot-temp -f - . 
<<'EOF' +FROM centos:7 +RUN yum install -y openssh +EOF + echo "$output" + [[ $status -eq 1 ]] + [[ $output = *'available --force'* ]] + [[ $output = *'RUN: available here with --force'* ]] + [[ $output = *'build failed: --force may fix it'* ]] + [[ $output = *'build failed: RUN command exited with 1'* ]] +} + +@test "${tag}: CentOS 7: needed, with --force" { + scope standard + # commands that may need it, they do, --force, success + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM centos:7 +RUN yum install -y openssh +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'will use --force'* ]] + [[ $output = *'--force: init OK & modified 1 RUN instructions'* ]] +} + +@test "${tag}: CentOS 7: EPEL already enabled" { + scope standard + + # 7: install EPEL (no fakeroot) + run ch-image -v build -t centos7-epel1 -f - . <<'EOF' +FROM centos:7 +RUN yum install -y epel-release +RUN yum repolist | egrep '^epel/' +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'available --force'* ]] + echo "$output" | grep -E 'Installing.+: epel-release' + + # 7: install openssh (with fakeroot) + run ch-image -v build --force -t centos7-epel2 -f - . <<'EOF' +FROM centos7-epel1 +RUN yum install -y openssh +RUN yum repolist | egrep '^epel/' +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'will use --force'* ]] + [[ $output = *'--force: init OK & modified 2 RUN instructions'* ]] + ! ( echo "$output" | grep -E '(Updating|Installing).+: epel-release' ) +} + +@test "${tag}: CentOS 8: unneeded, no --force, build succeeds" { + scope standard + # no commands that may need it, without --force, build succeeds + # also: correct config + run ch-image -v build -t fakeroot-temp -f - . 
<<'EOF' +FROM centos:8 +RUN true +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'available --force: rhel8'* ]] +} + +@test "${tag}: CentOS 8: unneeded, no --force, build fails" { + scope standard + # no commands that may need it, without --force, build fails + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM centos:8 +RUN false +EOF + echo "$output" + [[ $status -eq 1 ]] + [[ $output = *"build failed: current version of --force wouldn't help"* ]] + [[ $output = *'build failed: RUN command exited with 1'* ]] +} + +@test "${tag}: CentOS 8: unneeded, with --force" { + scope standard + # no commands that may need it, with --force, warning + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM centos:8 +RUN true +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'warning: --force specified, but nothing to do'* ]] +} + +@test "${tag}: CentOS 8: maybe needed but actually not, no --force" { + scope standard + # commands that may need it, but turns out they don’t, without --force + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM centos:8 +RUN dnf install -y ed +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'available --force'* ]] + [[ $output = *'RUN: available here with --force'* ]] +} + +@test "${tag}: CentOS 8: maybe needed but actually not, with --force" { + scope standard + # commands that may need it, but turns out they don’t, with --force + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM centos:8 +RUN dnf install -y ed +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'will use --force'* ]] + [[ $output = *'--force: init OK & modified 1 RUN instructions'* ]] +} + +@test "${tag}: CentOS 8: needed but no --force" { + scope standard + # commands that may need it, they do, fail & suggest + run ch-image -v build -t fakeroot-temp -f - . 
<<'EOF' +FROM centos:8 +RUN dnf install -y --setopt=install_weak_deps=false openssh +EOF + echo "$output" + [[ $status -eq 1 ]] + [[ $output = *'available --force'* ]] + [[ $output = *'RUN: available here with --force'* ]] + [[ $output = *'build failed: --force may fix it'* ]] + [[ $output = *'build failed: RUN command exited with 1'* ]] +} + +@test "${tag}: CentOS 8: needed, with --force" { + scope standard + # commands that may need it, they do, --force, success + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM centos:8 +RUN dnf install -y --setopt=install_weak_deps=false openssh +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'will use --force'* ]] + [[ $output = *'--force: init OK & modified 1 RUN instructions'* ]] + # validate EPEL is installed but not enabled + ls -lh "$CH_IMAGE_STORAGE"/img/fakeroot-temp/etc/yum.repos.d/epel*.repo + ! grep -Eq 'enabled=1' "$CH_IMAGE_STORAGE"/img/fakeroot-temp/etc/yum.repos.d/epel*.repo +} + +@test "${tag}: CentOS 8: EPEL already installed" { + scope standard + + # install EPEL, no --force + run ch-image -v build -t epel1 -f - . <<'EOF' +FROM centos:8 +RUN dnf install -y epel-release +RUN dnf repolist | egrep '^epel' +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'available --force'* ]] + echo "$output" | grep -E 'Installing.+: epel-release' + + # new image based on that + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM epel1 +RUN dnf install -y openssh +RUN dnf repolist | egrep '^epel' +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'will use --force'* ]] + [[ $output = *'--force: init OK & modified 2 RUN instructions'* ]] + ! 
( echo "$output" | grep -E '(Updating|Installing).+: epel-release' ) + # validate EPEL is installed *and* enabled + ls -lh "$CH_IMAGE_STORAGE"/img/fakeroot-temp/etc/yum.repos.d/epel*.repo + grep -Eq 'enabled=1' "$CH_IMAGE_STORAGE"/img/fakeroot-temp/etc/yum.repos.d/epel*.repo +} + +@test "${tag}: Debian Stretch: unneeded, no --force, build succeeds" { + scope standard + # no commands that may need it, without --force, build succeeds + # also: correct config + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM debian:stretch +RUN true +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'available --force: debSB'* ]] +} + +@test "${tag}: Debian Stretch: unneeded, no --force, build fails" { + scope full + # no commands that may need it, without --force, build fails + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM debian:stretch +RUN false +EOF + echo "$output" + [[ $status -eq 1 ]] + [[ $output = *"build failed: current version of --force wouldn't help"* ]] + [[ $output = *'build failed: RUN command exited with 1'* ]] +} + +@test "${tag}: Debian Stretch: unneeded, with --force" { + scope full + # no commands that may need it, with --force, warning + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM debian:stretch +RUN true +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'warning: --force specified, but nothing to do'* ]] +} + +# FIXME: Not sure how to do this on Debian; any use of apt-get to install +# needs "apt-get update" first, which requires --force. +#@test "${tag}: Debian Stretch: maybe needed but actually not, no --force" { +#} + +# FIXME: Not sure how to do this on Debian; any use of apt-get to install +# needs "apt-get update" first, which requires --force. 
+#@test "${tag}: Debian Stretch: maybe needed but actually not, with --force" { +#} + +@test "${tag}: Debian Stretch: needed but no --force" { + scope full + # commands that may need it, they do, fail & suggest + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM debian:stretch +RUN apt-get update && apt-get install -y openssh-client +EOF + echo "$output" + [[ $status -eq 1 ]] + [[ $output = *'available --force'* ]] + [[ $output = *'RUN: available here with --force'* ]] + [[ $output = *'build failed: --force may fix it'* ]] + [[ $output = *'build failed: RUN command exited with 1'* ]] +} + +@test "${tag}: Debian Stretch: needed, with --force" { + scope full + # commands that may need it, they do, --force, success + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM debian:stretch +RUN apt-get update && apt-get install -y openssh-client +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'will use --force'* ]] + [[ $output = *'--force: init OK & modified 1 RUN instructions'* ]] +} + +@test "${tag}: Debian Buster: unneeded, no --force, build succeeds" { + scope standard + # no commands that may need it, without --force, build succeeds + # also: correct config + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM debian:buster +RUN true +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'available --force: debSB'* ]] +} + +@test "${tag}: Debian Buster: unneeded, no --force, build fails" { + scope full + # no commands that may need it, without --force, build fails + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM debian:buster +RUN false +EOF + echo "$output" + [[ $status -eq 1 ]] + [[ $output = *"build failed: current version of --force wouldn't help"* ]] + [[ $output = *'build failed: RUN command exited with 1'* ]] +} + +@test "${tag}: Debian Buster: unneeded, with --force" { + scope full + # no commands that may need it, with --force, warning + run ch-image -v build --force -t fakeroot-temp -f - . 
<<'EOF' +FROM debian:buster +RUN true +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'warning: --force specified, but nothing to do'* ]] +} + +# FIXME: Not sure how to do this on Debian; any use of apt-get to install +# needs "apt-get update" first, which requires --force. +#@test "${tag}: Debian Stretch: maybe needed but actually not, no --force" { +#} + +# FIXME: Not sure how to do this on Debian; any use of apt-get to install +# needs "apt-get update" first, which requires --force. +#@test "${tag}: Debian Stretch: maybe needed but actually not, with --force" { +#} + +@test "${tag}: Debian Buster: needed but no --force" { + scope full + # commands that may need it, they do, fail & suggest + run ch-image -v build -t fakeroot-temp -f - . <<'EOF' +FROM debian:buster +RUN apt-get update && apt-get install -y openssh-client +EOF + echo "$output" + [[ $status -eq 1 ]] + [[ $output = *'available --force'* ]] + [[ $output = *'RUN: available here with --force'* ]] + [[ $output = *'build failed: --force may fix it'* ]] + [[ $output = *'build failed: RUN command exited with 1'* ]] +} + +@test "${tag}: Debian Buster: needed, with --force" { + scope standard + # commands that may need it, they do, --force, success + run ch-image -v build --force -t fakeroot-temp -f - . <<'EOF' +FROM debian:buster +RUN apt-get update && apt-get install -y openssh-client +EOF + echo "$output" + [[ $status -eq 0 ]] + [[ $output = *'will use --force'* ]] + [[ $output = *'--force: init OK & modified 1 RUN instructions'* ]] +} diff -Nru charliecloud-0.20/test/build/50_pull.bats charliecloud-0.21/test/build/50_pull.bats --- charliecloud-0.20/test/build/50_pull.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/build/50_pull.bats 2020-12-18 20:25:06.000000000 +0000 @@ -7,7 +7,7 @@ retcode_expected=$2 echo "--- parsing: ${ref}" set +e - out=$(ch-grow pull --parse-only "$ref" 2>&1) + out=$(ch-image pull --parse-only "$ref" 2>&1) retcode=$? 
set -e echo "--- return code: ${retcode}" @@ -23,9 +23,9 @@ @test 'image ref parsing' { scope standard - if ( ! ch-grow --dependencies ); then - [[ $CH_BUILDER != ch-grow ]] - skip "ch-grow missing dependencies" + if ( ! ch-image --dependencies ); then + [[ $CH_BUILDER != ch-image ]] + skip "ch-image missing dependencies" fi # simplest @@ -204,32 +204,121 @@ EOF } -@test 'pull image with symlink' { - # Validate that if a prior layer contains a symlink and a subsequent layer - # contains a regular file at the same path, the symlink is replaced with a - # regular file and the symlink target is unchanged. See issue #819. +@test 'pull image with quirky files' { + # Validate that layers replace symlinks correctly. See + # test/Dockerfile.symlink and issues #819 & 825. scope standard - if ( ! ch-grow --dependencies ); then - [[ $CH_BUILDER != ch-grow ]] - skip "ch-grow missing dependencies" + if ( ! ch-image --dependencies ); then + [[ $CH_BUILDER != ch-image ]] + skip "ch-image missing dependencies" fi - img=$BATS_TMPDIR/charliecloud%symlink + img=$BATS_TMPDIR/charliecloud%file-quirks - ch-grow pull charliecloud/symlink "$img" + ch-image pull charliecloud/file-quirks:2020-10-21 "$img" ls -lh "${img}/test" - # /test/target should be a regular file with contents "target" - run stat -c '%F' "${img}/test/target" - [[ $status -eq 0 ]] + output_expected=$(cat <<'EOF' +regular file 'df_member' +symbolic link 'ds_link' -> 'ds_target' +regular file 'ds_target' +directory 'fd_member' +symbolic link 'fs_link' -> 'fs_target' +regular file 'fs_target' +symbolic link 'link_b0rken' -> 'doesnotexist' +symbolic link 'link_imageonly' -> '/test' +symbolic link 'link_self' -> 'link_self' +directory 'sd_link' +regular file 'sd_target' +regular file 'sf_link' +regular file 'sf_target' +symbolic link 'ss_link' -> 'ss_target2' +regular file 'ss_target1' +regular file 'ss_target2' +EOF +) + + cd "${img}/test" + run stat -c '%-14F %N' -- * echo "$output" - [[ $output = 'regular file' ]] - [[ 
$(cat "${img}/test/target") = 'target' ]] + [[ $status -eq 0 ]] + diff -u <(echo "$output_expected") <(echo "$output") + cd - +} + +@test 'pull image with manifest schema v1' { + # Verify we handle images with manifest schema version one (v1). + scope standard + if ( ! ch-image --dependencies ); then + [[ $CH_BUILDER != ch-image ]] + skip "ch-image missing dependencies" + fi - # /test/source should be a regular file with contents "regular" - run stat -c '%F' "${img}/test/source" + unpack=$BATS_TMPDIR + cache=$unpack/dlcache + # We target debian:squeeze because 1) it always returns a v1 manifest + # schema (regardless of media type specified), and 2) it isn't very large, + # thus keeps test time down. + img=debian:squeeze + + ch-image pull --storage="$unpack" \ + --no-cache \ + "$img" [[ $status -eq 0 ]] - echo "$output" - [[ $output = 'regular file' ]] - [[ $(cat "${img}/test/source") = 'regular' ]] + grep -F '"schemaVersion": 1' "${cache}/${img}.manifest.json" +} + +@test 'pull from public repos' { + scope standard + [[ $CH_BUILDER = ch-image ]] || skip 'ch-image only' + if [[ -z $CI ]]; then + # Verify we can reach the public internet, except on CI, where we + # insist this should work. + ping -c3 8.8.8.8 || skip "no public internet (can't ping 8.8.8.8)" + fi + + # These images are selected to be official-ish and small. My rough goal is + # to keep them under 10MiB uncompressed, but this isn't working great. It + # may be worth our while to upload some small test images to these places. + + # Docker Hub: https://hub.docker.com/_/alpine + ch-image pull registry-1.docker.io/library/alpine:latest + + # quay.io: https://quay.io/repository/quay/busybox + ch-image pull quay.io/quay/busybox:latest + + # gitlab.com: https://gitlab.com/pages/hugo + # FIXME: 50 MiB, try to do better; seems to be the slowest repo. 
+ ch-image pull registry.gitlab.com/pages/hugo:latest + + # Google Container Registry: + # https://console.cloud.google.com/gcr/images/google-containers/GLOBAL + # FIXME: "latest" tags do not work, but they do in Docker (issue #896) + ch-image pull gcr.io/google-containers/busybox:1.27 + + # Things not here (yet?): + # + # 1. Harbor (issue #899): Has a demo repo (https://demo.goharbor.io) that + # you can make an account on, but I couldn't find a public repo, and + # the demo repo gets reset every two days. + # + # 2. Docker registry container (https://hub.docker.com/_/registry): Would + # need to set up an instance. + # + # 3. Amazon public repo (issue #901, + # https://aws.amazon.com/blogs/containers/advice-for-customers-dealing-with-docker-hub-rate-limits-and-a-coming-soon-announcement/): + # Does not exist yet; coming "within weeks" of 2020-11-02. + # + # 4. Microsoft Azure registry [1] (issue #902): I could not find any + # public images. It seems that public pull is "currently a preview + # feature" as of 2020-11-06 [2]. + # + # [1]: https://azure.microsoft.com/en-us/services/container-registry + # [2]: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-faq#how-do-i-enable-anonymous-pull-access + # + # 5. JFrog / Artifactory (https://jfrog.com/container-registry/): Could + # not find any public registry. + # + # 6. nVidia NGC (https://ngc.nvidia.com, issue #897): (1) does not work + # and (2) appears to contain only monster images. 
} diff -Nru charliecloud-0.20/test/build/99_cleanup.bats charliecloud-0.21/test/build/99_cleanup.bats --- charliecloud-0.20/test/build/99_cleanup.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/build/99_cleanup.bats 2020-12-18 20:25:06.000000000 +0000 @@ -3,8 +3,7 @@ @test 'nothing unexpected in tarball directory' { scope quick run find "$ch_tardir" -mindepth 1 -maxdepth 1 \ - -not \( -name '_ch-grow' \ - -o -name 'WEIRD_AL_YANKOVIC' \ + -not \( -name 'WEIRD_AL_YANKOVIC' \ -o -name '*.sqfs' \ -o -name '*.tar.gz' \ -o -name '*.tar.xz' \ diff -Nru charliecloud-0.20/test/common.bash charliecloud-0.21/test/common.bash --- charliecloud-0.20/test/common.bash 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/common.bash 2020-12-18 20:25:06.000000000 +0000 @@ -36,14 +36,14 @@ return 0 fi ;; - ch-grow) - if [[ -d ${CH_GROW_STORAGE}/img/${1} ]]; then + ch-image) + if [[ -d ${CH_IMAGE_STORAGE}/img/${1} ]]; then echo "ok" return 0 fi ;; docker) - hash_=$(sudo docker images -q "$1" | sort -u) + hash_=$(docker_ images -q "$1" | sort -u) if [[ $hash_ ]]; then echo "$hash_" return 0 @@ -63,6 +63,17 @@ fi } +# Do we need sudo to run docker? 
+if docker info > /dev/null 2>&1; then + docker_ () { + docker "$@" + } +else + docker_ () { + sudo docker "$@" + } +fi + env_require () { if [[ -z ${!1} ]]; then printf '$%s is empty or not set\n\n' "$1" >&2 @@ -167,8 +178,8 @@ env_require CH_TEST_IMGDIR env_require CH_TEST_PERMDIRS env_require CH_BUILDER -if [[ $CH_BUILDER == ch-grow ]]; then - env_require CH_GROW_STORAGE +if [[ $CH_BUILDER == ch-image ]]; then + env_require CH_IMAGE_STORAGE fi # User-private temporary directory in case multiple users are running the diff -Nru charliecloud-0.20/test/Dockerfile.fakeroot charliecloud-0.21/test/Dockerfile.fakeroot --- charliecloud-0.20/test/Dockerfile.fakeroot 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/Dockerfile.fakeroot 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -# Several images that fail to build without fakeroot injection. -# -# These images don't build on one another; they're a multi-stage only for -# organizational convenience. -# -# ch-test-scope: standard -# ch-test-builder-include: ch-grow - -# CentOS 7 -FROM centos:7 -RUN yum install -y openssh - -# CentOS 8 -FROM centos:8 -RUN dnf install -y --setopt=install_weak_deps=false openssh - -# Debian 9 (Stretch). Notes: -# -# 1. Tests list form of RUN. -# -# 2. fontconfig-config calls chown(1), which looks up users & groups by -# name. This fails if the expected user or group is not present. -# -# 3. openssh-client calls chgrp(2). -# -FROM debian:stretch -RUN ["apt-get", "update"] -RUN ["apt-get", "install", "-y", "fontconfig-config", "openssh-client"] - -# Debian 10 (Buster) -# Note: Tests two apt-get in the same RUN. echo(1) should not get fakeroot. 
-FROM debian:buster -RUN echo hello -RUN apt-get update \ - && apt-get install -y fontconfig-config openssh-client diff -Nru charliecloud-0.20/test/Dockerfile.file-quirks charliecloud-0.21/test/Dockerfile.file-quirks --- charliecloud-0.20/test/Dockerfile.file-quirks 1970-01-01 00:00:00.000000000 +0000 +++ charliecloud-0.21/test/Dockerfile.file-quirks 2020-12-18 20:25:06.000000000 +0000 @@ -0,0 +1,145 @@ +# This Dockerfile is used to test that pull deals with quirky files, e.g. +# replacement by different types (issues #819 and #825)`. Scope is “skip” +# because we pull the image to test it; see test/build/50_pull.bats. +# +# To build and push: +# +# $ VERSION=$(date +%Y-%m-%d) # or other date as appropriate +# $ sudo docker login # if needed +# $ sudo docker build -t file-quirks -f Dockerfile.file-quirks . +# $ sudo docker tag file-quirks:latest charliecloud/file-quirks:$VERSION +# $ sudo docker images | fgrep file-quirks +# $ sudo docker push charliecloud/file-quirks:$VERSION +# +# ch-test-scope: skip + +FROM 00_tiny +WORKDIR /test + + +## Replace symlink with symlink. + +# Set up a symlink & targets. +RUN echo target1 > ss_target1 \ + && echo target2 > ss_target2 \ + && ln -s ss_target1 ss_link +# link and target should both contain "target1" +RUN ls -l \ + && for i in ss_*; do printf '%s : ' $i; cat $i; done +# Overwrite it with a new symlink. +RUN rm ss_link \ + && ln -s ss_target2 ss_link +# Now link should still be a symlink but contain "target2". +RUN ls -l \ + && for i in ss_*; do printf '%s : ' $i; cat $i; done + + +## Replace symlink with regular file (issue #819). + +# Set up a symlink. +RUN echo target > sf_target \ + && ln -s sf_target sf_link +# Link and target should both contain "target". +RUN ls -l \ + && for i in sf_*; do printf '%s : ' $i; cat $i; done +# Overwrite it with a regular file. +RUN rm sf_link \ + && echo regular > sf_link +# Now link should be a regular file and contain "regular". 
+RUN ls -l \ + && for i in sf_*; do printf '%s : ' $i; cat $i; done + + +## Replace regular file with symlink. + +# Set up two regular files. +RUN echo regular > fs_link \ + && echo target > fs_target +# Link should be a regular file and contain "regular". +RUN ls -l \ + && for i in fs_*; do printf '%s : ' $i; cat $i; done +# Overwrite it with a symlink. +RUN rm fs_link \ + && ln -s fs_target fs_link +# Now link should be a symlink; both should contain "target". +RUN ls -l \ + && for i in fs_*; do printf '%s : ' $i; cat $i; done + + +## Replace symlink with directory. + +# Set up a symlink. +RUN echo target > sd_target \ + && ln -s sd_target sd_link +# link and target should both contain "target" +RUN ls -l \ + && for i in sd_*; do printf '%s : ' $i; cat $i; done +# Overwrite it with a directory. +RUN rm sd_link \ + && mkdir sd_link +# Now link should be a directory. +RUN ls -l + + +## Replace directory with symlink. + +# I think this is what's in image ppc64le.neo4j/2.3.5, as reported in issue +# #825, but it doesn't cause the same infinite recursion. + +# Set up a directory and a target. +RUN mkdir ds_link \ + && echo target > ds_target +# It should be a directory. +RUN ls -l +# Overwrite it with a symlink. +RUN rmdir ds_link \ + && ln -s ds_target ds_link +# Now link should be a symlink; both should contain "target". +RUN ls -l \ + && for i in ds_*; do printf '%s : ' $i; cat $i; done + + +## Replace regular file with directory. + +# Set up a file. +RUN echo regular > fd_member +# It should be a file. +RUN ls -l \ + && for i in fd_*; do printf '%s : ' $i; cat $i; done +# Overwrite it with a directory. +RUN rm fd_member \ + && mkdir fd_member +# Now it should be a directory. +RUN ls -l + + +## Replace directory with regular file. + +# Set up a directory. +RUN mkdir df_member +# It should be a directory. +RUN ls -l +# Overwrite it with a file. +RUN rmdir df_member \ + && echo regular > df_member +# Now it should be a file. 
+RUN ls -l \ + && for i in df_*; do printf '%s : ' $i; cat $i; done + + +## Symlink with cycle (https://bugs.python.org/file37774). + +# Set up a symlink pointing to itself. +RUN ln -s link_self link_self +# List. +RUN ls -l + + +## Broken symlinks (https://bugs.python.org/file37774). + +# Set up a symlink pointing to (1) a nonexistent file and (2) a directory that +# only exists in the image. +RUN ln -s doesnotexist link_b0rken \ + && ln -s /test link_imageonly +# List. +RUN ls -l diff -Nru charliecloud-0.20/test/Dockerfile.symlink charliecloud-0.21/test/Dockerfile.symlink --- charliecloud-0.20/test/Dockerfile.symlink 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/Dockerfile.symlink 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -# This Dockerfile is used to test that ch-tug deals with replacement of -# symlink with a regular file correctly (see issue #819). -# -# To build and push: -# -# $ sudo docker login # if needed -# $ sudo docker build -t symlink -f Dockerfile.symlink . -# $ sudo docker tag symlink:latest charliecloud/symlink -# $ sudo docker images | fgrep symlink -# $ sudo docker push charliecloud/symlink -# -# ch-test-scope: skip - -FROM 00_tiny -WORKDIR /test - -# Set up a symlink. -RUN echo target > target \ - && ln -s target source - -# source and target should both contain "target". -RUN ls -l /test \ - && for i in /test/*; do printf '%s : ' $i; cat $i; done - -# Overwrite it with a regular file. -RUN rm source \ - && echo regular > source - -# Now source should be a regular file and contain "regular". -RUN ls -l /test \ - && for i in /test/*; do printf '%s : ' $i; cat $i; done - diff -Nru charliecloud-0.20/test/.dockerignore charliecloud-0.21/test/.dockerignore --- charliecloud-0.20/test/.dockerignore 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/.dockerignore 2020-12-18 20:25:06.000000000 +0000 @@ -1 +1 @@ -# Nothing yet; used for testing ch-grow warnings. +# Nothing yet; used for testing ch-image warnings. 
diff -Nru charliecloud-0.20/test/docs-sane.py.in charliecloud-0.21/test/docs-sane.py.in --- charliecloud-0.20/test/docs-sane.py.in 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/docs-sane.py.in 2020-12-18 20:25:06.000000000 +0000 @@ -6,7 +6,7 @@ # # 1. Man page consistency. # -# a. man/charliecloud.1 exists. +# a. man/charliecloud.7 exists. # # b. Every executable FOO in bin has: # @@ -14,7 +14,7 @@ # - doc/FOO_desc.rst # - doc/man/FOO.1 # - a section in doc/command-usage.rst -# - an entry under "See also" in charliecloud.1 +# - an entry under "See also" in charliecloud.7 # # c. There aren't the things in (b) except for the executables (modulo a # few execeptions for the other documentation source files). @@ -74,7 +74,9 @@ lose_lots("bad summary in command-usage.rst", { "%s: %s" % (p, s) for (p, s) in sect_helps.items() - if (p in helps and summary_unrest(s) != helps[p]) }) + if ( p in helps + and summary_unrest(s) != helps[p]) + and "deprecated" not in s.lower() }) sees = { m.group(0) for m in re.finditer(r"ch-[a-z0-9-]+\(1\)", open("charliecloud.rst").read()) } @@ -89,17 +91,20 @@ lose("conf.py: startdocname != name: %s != %s" % (docname, name)) if (len(authors) != 0): lose("conf.py: bad authors: %s: %s" % (name, authors)) - if (section != 1): - lose("conf.py: bad section: %s: %s != 1" % (name, section)) if (name != "charliecloud"): + if (section != 1): + lose("conf.py: bad section: %s: %s != 1" % (name, section)) if (name not in helps): lose("conf.py: unexpected man page: %s" % name) - elif (desc + "." != helps[name]): + elif (desc + "." 
!= helps[name] and "deprecated" not in desc.lower()): lose("conf.py: bad summary: %s: %s" % (name, desc)) + else: + if (section != 7): + lose("conf.py: bad section: %s: %s != 7" % (name, section)) os.chdir(CH_BASE + "/doc/man") - mans = set(glob.glob("*.1")) - { "charliecloud.1" } + mans = set(glob.glob("*.1")) mans_expected = { i + ".1" for i in execs } lose_lots("unexpected man", mans - mans_expected) lose_lots("missing man", mans_expected - mans) diff -Nru charliecloud-0.20/test/make-auto.d/build.bats.in charliecloud-0.21/test/make-auto.d/build.bats.in --- charliecloud-0.20/test/make-auto.d/build.bats.in 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/make-auto.d/build.bats.in 2020-12-18 20:25:06.000000000 +0000 @@ -1,6 +1,12 @@ @test 'ch-build %(tag)s' { scope %(scope)s - ch-build -t %(tag)s --file="%(path)s" "%(dirname)s" + if [[ $CH_BUILDER = ch-image ]]; then + force=--force + else + force= + fi + # shellcheck disable=SC2086 + ch-build $force -t %(tag)s --file="%(path)s" "%(dirname)s" #sudo docker tag %(tag)s "%(tag)s:$ch_version_docker" builder_ok %(tag)s } diff -Nru charliecloud-0.20/test/Makefile.am charliecloud-0.21/test/Makefile.am --- charliecloud-0.20/test/Makefile.am 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/Makefile.am 2020-12-18 20:25:06.000000000 +0000 @@ -7,8 +7,9 @@ Dockerfile.argenv \ Dockerfile.build2dir \ build/10_sanity.bats \ -build/50_ch-grow.bats \ +build/50_ch-image.bats \ build/50_dockerfile.bats \ +build/50_fakeroot.bats \ build/50_misc.bats \ build/50_pull.bats \ build/99_cleanup.bats \ @@ -44,11 +45,7 @@ # Stuff that doesn't need to be installed. EXTRA_DIST = \ -fixtures/README \ -travis.bash \ -travis-before.bash \ -travis-install.bash \ -travis.yml +fixtures/README # Program and shared library used for testing shared library injection. It's # built according to the rules below. 
In principle, we could use libtool for @@ -84,8 +81,9 @@ ## Python scripts - need text processing docs-sane make-perms-test: %: %.py.in + rm -f $@ sed -E 's|%PYTHON_SHEBANG%|@PYTHON_SHEBANG@|' < $< > $@ - chmod +rwx $@ # respects umask + chmod +rx,-w $@ # respects umask sotest/sotest: sotest/sotest.c sotest/libsotest.so.1.0 sotest/libsotest.so sotest/libsotest.so.1 $(CC) -o $@ $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -L./sotest -lsotest $^ diff -Nru charliecloud-0.20/test/run/build-rpms.bats charliecloud-0.21/test/run/build-rpms.bats --- charliecloud-0.20/test/run/build-rpms.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/run/build-rpms.bats 2020-12-18 20:25:06.000000000 +0000 @@ -37,7 +37,7 @@ [[ $output = *'/usr/bin/ch-run'* ]] [[ $output = *'/usr/lib64/charliecloud/base.sh'* ]] [[ $output = *'/usr/share/doc/charliecloud-'*'/examples/lammps/Dockerfile'* ]] - [[ $output = *'/usr/share/man/man1/charliecloud.1.gz'* ]] + [[ $output = *'/usr/share/man/man7/charliecloud.7.gz'* ]] run ch-run "$img" -- rpm -ql "charliecloud-debuginfo" echo "$output" [[ $status -eq 0 ]] @@ -103,7 +103,7 @@ [[ $output = *'/usr/bin/ch-run'* ]] [[ $output = *'/usr/lib64/charliecloud/base.sh'* ]] [[ $output = *'/usr/share/doc/charliecloud/examples/lammps/Dockerfile'* ]] - [[ $output = *'/usr/share/man/man1/charliecloud.1.gz'* ]] + [[ $output = *'/usr/share/man/man7/charliecloud.7.gz'* ]] run ch-run "$img" -- rpm -ql "charliecloud-debuginfo" echo "$output" [[ $status -eq 0 ]] diff -Nru charliecloud-0.20/test/run/ch-run_uidgid.bats charliecloud-0.21/test/run/ch-run_uidgid.bats --- charliecloud-0.20/test/run/ch-run_uidgid.bats 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/run/ch-run_uidgid.bats 2020-12-18 20:25:06.000000000 +0000 @@ -29,23 +29,29 @@ } @test 'user and group as specified' { + # shellcheck disable=SC2086 g=$(ch-run $uid_args $gid_args "$ch_timg" -- id -un) [[ $GUEST_USER = "$g" ]] + # shellcheck disable=SC2086 g=$(ch-run $uid_args $gid_args "$ch_timg" 
-- id -u) [[ $guest_uid = "$g" ]] + # shellcheck disable=SC2086 g=$(ch-run $uid_args $gid_args "$ch_timg" -- id -gn) [[ $GUEST_GROUP = "$g" ]] + # shellcheck disable=SC2086 g=$(ch-run $uid_args $gid_args "$ch_timg" -- id -g) [[ $guest_gid = "$g" ]] } @test 'chroot escape' { # Try to escape a chroot(2) using the standard approach. + # shellcheck disable=SC2086 ch-run $uid_args $gid_args "$ch_timg" -- /test/chroot-escape } @test '/dev /proc /sys' { # Read some files in /dev, /proc, and /sys that I shouldn't have access to. + # shellcheck disable=SC2086 ch-run $uid_args $gid_args "$ch_timg" -- /test/dev_proc_sys.py } @@ -54,16 +60,17 @@ for d in $CH_TEST_PERMDIRS; do d="${d}/pass" echo "verifying: ${d}" - ch-run --no-home --private-tmp \ - $uid_args $gid_args -b "$d" "$ch_timg" -- \ - /test/fs_perms.py /mnt/0 + # shellcheck disable=SC2086 + ch-run --no-home --private-tmp \ + $uid_args $gid_args -b "$d" "$ch_timg" -- \ + /test/fs_perms.py /mnt/0 done } @test 'mknod(2)' { # Make some device files. If this works, we might be able to later read or # write them to do things we shouldn't. Try on all mount points. - # shellcheck disable=SC2016 + # shellcheck disable=SC2016,SC2086 ch-run $uid_args $gid_args "$ch_timg" -- \ sh -c '/test/mknods $(cat /proc/mounts | cut -d" " -f2)' } @@ -88,11 +95,14 @@ # - We leave the filesystem mounted even if successful, again to make # the test simpler. The rest of the tests will ignore it or maybe # over-mount something else. + # + # shellcheck disable=SC2086 ch-run $uid_args $gid_args "$ch_timg" -- \ sh -c '[ -f /bin/mount -a -x /bin/mount ]' dev=$(findmnt -n -o SOURCE -T /) type=$(findmnt -n -o FSTYPE -T /) opts=$(findmnt -n -o OPTIONS -T /) + # shellcheck disable=SC2086 run ch-run $uid_args $gid_args "$ch_timg" -- \ /bin/mount -n -o "$opts" -t "$type" "$dev" /mnt/0 echo "$output" @@ -128,11 +138,13 @@ @test 'setgroups(2)' { # Can we change our supplemental groups? 
+ # shellcheck disable=SC2086 ch-run $uid_args $gid_args "$ch_timg" -- /test/setgroups } @test 'seteuid(2)' { # Try to seteuid(2) to another UID we shouldn't have access to + # shellcheck disable=SC2086 ch-run $uid_args $gid_args "$ch_timg" -- /test/setuid } @@ -144,5 +156,6 @@ # dynamically, so there may be none running. See your distro's # documentation on how to configure this. See also e.g. issue #840. [[ $(pgrep -c getty) -eq 0 ]] && pedantic_fail 'no getty process found' + # shellcheck disable=SC2086 ch-run $uid_args $gid_args "$ch_timg" -- /test/signal_out.py } diff -Nru charliecloud-0.20/test/travis.bash charliecloud-0.21/test/travis.bash --- charliecloud-0.20/test/travis.bash 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/travis.bash 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ -#!/bin/bash - -# Warning: This script installs software and messes with your "docker" binary. -# Don't run it unless you know what you are doing. - -# We start in the Charliecloud Git working directory. - -set -e -PREFIX=/var/tmp -MAKEJ=-j$(getconf _NPROCESSORS_ONLN) - -# Remove sbin directories from $PATH (see issue #43). Assume none are first. -echo "$PATH" -for i in /sbin /usr/sbin /usr/local/sbin; do - export PATH=${PATH/:$i/} -done -echo "$PATH" - -set -x - -./autogen.sh - -# Remove Autotools to make sure everything works without them. -sudo apt-get remove autoconf automake - -if [[ $MINIMAL_DEPS && $CH_BUILDER != ch-grow ]]; then - # Make sure ch-grow dependencies haven't crept back somehow (issue #806). - ( pip3 freeze | grep -F lark-parser ) && exit 1 - ( pip3 freeze | grep -F requests ) && exit 1 -fi - -if [[ $MINIMAL_CONFIG ]]; then - # Everything except --disable-test, which would defeat the point. 
- disable='--disable-html --disable-man --disable-ch-grow' -fi - -case $TARBALL in - export) - # shellcheck disable=SC2086 - ./configure --prefix="$PREFIX" $disable - make "$MAKEJ" dist - mv charliecloud-*.tar.gz "$PREFIX" - cd "$PREFIX" - tar xf charliecloud-*.tar.gz - rm charliecloud-*.tar.gz - cd charliecloud-* - ;; - archive) - git archive HEAD --prefix=charliecloud/ -o "$PREFIX/charliecloud.tar" - cd "$PREFIX" - tar xf charliecloud.tar - cd charliecloud - ;; - '') - ;; - *) - false - ;; -esac - -# shellcheck disable=SC2086 -./configure --prefix="$PREFIX" $disable -make "$MAKEJ" -bin/ch-run --version - -if [[ $MAKE_INSTALL ]]; then - sudo make "$MAKEJ" install - ch_test="${PREFIX}/bin/ch-test" -else - ch_test=$(readlink -f bin/ch-test) # need absolute path -fi - -"$ch_test" mk-perm-dirs --sudo - -if [[ $SUDO_RM_FIRST ]]; then - sudo rm /etc/sudoers.d/travis -fi -if sudo -v; then - sudo_=--sudo -else - sudo_= -fi - -"$ch_test" build $sudo_ -ls -lha "$CH_TEST_TARDIR" - -if [[ $SUDO_RM_AFTER_BUILD ]]; then - sudo rm /etc/sudoers.d/travis -fi -if sudo -v; then - sudo_=--sudo -else - sudo_= -fi - -"$ch_test" run $sudo_ -ls -lha "$CH_TEST_IMGDIR" -"$ch_test" examples $sudo_ diff -Nru charliecloud-0.20/test/travis-before.bash charliecloud-0.21/test/travis-before.bash --- charliecloud-0.20/test/travis-before.bash 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/travis-before.bash 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -# shellcheck shell=bash - -set -ex - -getconf _NPROCESSORS_ONLN -free -m -df -h -df -h /var/tmp - -export CH_TEST_TARDIR=/var/tmp/tarballs -export CH_TEST_IMGDIR=/var/tmp/images -export CH_TEST_PERMDIRS='/var/tmp/perms_test /run/perms_test' - -unset JAVA_HOME # otherwise Spark tries to use host's Java - -sudo usermod --add-subuids 10000-65536 "$USER" -sudo usermod --add-subgids 10000-65536 "$USER" - -[[ $CH_BUILDER ]] # no default builder for Travis -if [[ $CH_BUILDER != docker ]]; then - export SUDO_RM_FIRST=yes - sudo rm 
"$(command -v docker)" -fi - -set +ex diff -Nru charliecloud-0.20/test/travis-install.bash charliecloud-0.21/test/travis-install.bash --- charliecloud-0.20/test/travis-install.bash 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/travis-install.bash 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -# shellcheck shell=bash - -set -ex - -# Make /usr/local/src writeable for everyone. -sudo chmod 1777 /usr/local/src - -# Remove Travis Bats. We need buggy version provided by Ubuntu (issue #552). -sudo rm /usr/local/bin/bats - -# Allow sudo to user root, group non-root. -sudo sed -Ei 's/=\(ALL\)/=(ALL:ALL)/g' /etc/sudoers.d/travis -sudo cat /etc/sudoers.d/travis - -# Install conditional packages. -if [[ -z $MINIMAL_DEPS ]]; then - sudo apt-get install pigz pv -else - PACK_FMT=tar - if [[ $CH_BUILDER != ch-grow ]]; then - # Remove ch-grow dependency "requests" (issue #806). - sudo dpkg --remove \ - apport \ - cloud-init \ - gce-compute-image-packages \ - python3-apport \ - python3-requests \ - python3-requests-unixsocket \ - ssh-import-id \ - ubuntu-server - fi -fi -if [[ $CH_BUILDER = ch-grow ]]; then - sudo pip3 install lark-parser requests -fi -case $PACK_FMT in - '') # default - export CH_PACK_FMT=squash - sudo apt-get install squashfs-tools squashfuse - ;; - squash-unpack) - export CH_PACK_FMT=squash - sudo apt-get install squashfs-tools - ;; - tar) - export CH_PACK_FMT=tar - # tar already installed - ;; - *) - echo "unknown \$PACK_FMT: $PACK_FMT" 1>&2 - exit 1 - ;; -esac - -# Install Buildah; adapted from upstream instructions [1]. I believe this -# tracks upstream current version fairly well. (I tried to use -# add-app-repository but couldn't get it to work.) 
-# -# [1]: https://github.com/containers/buildah/blob/master/install.md -if [[ $CH_BUILDER = buildah* ]]; then - echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_18.04/ /' | sudo tee /etc/apt/sources.list.d/buildah.list - wget -nv -O /tmp/Release.key https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_18.04/Release.key - sudo apt-key add /tmp/Release.key - sudo apt-get update - sudo apt-get -y install buildah - command -v buildah && buildah --version - sudo ln -s /usr/sbin/runc /usr/bin/runc - command -v runc && runc --version - # As of 2020-04-21, stock registries.conf is pretty simple; it includes - # Docker Hub (docker.io) and then quay.io. Still, use ours for stability. - cat /etc/containers/registries.conf - cat <<'EOF' | sudo tee /etc/containers/registries.conf -[registries.search] - registries = ['docker.io'] -EOF -fi - -# Documentation. -if [[ -z $MINIMAL_DEPS ]]; then - sudo pip3 install sphinx sphinx-rtd-theme -fi - -set +ex diff -Nru charliecloud-0.20/test/travis.yml charliecloud-0.21/test/travis.yml --- charliecloud-0.20/test/travis.yml 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/test/travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,128 +0,0 @@ -os: linux -dist: bionic -language: c -compiler: gcc - -# This defines a "matrix" of jobs. Each combination of environment variables -# defines a different job. They run in parallel, five at a time. We have -# divided the matrix into "stages"; if any stage has a failure, testing stops -# and the remaining stages are skipped. Note that some stages are conditional. -# -# Note: Matrixing of variables that happens at the top level is not supported -# within stages: https://github.com/travis-ci/travis-ci/issues/8295. -# -# We do not do any full-scope tests, because they give a >10-minute gap in -# output, so Travis times out. 
-# -# FIXME: Each job starts with a cold Docker cache, which wastes work heating -# it up in parallel. It would be nice if "make test-build" could be done -# serially before splitting into parallel jobs. -# -# TARBALL= # build in Git checkout -# TARBALL=archive # build from "git archive" tarball -# TARBALL=export # build from "make export" tarball -# MAKE_INSTALL= # run from build directory -# MAKE_INSTALL=yes # "make install"; run that one -# -# Note: $INSTALL is used by Autotools, and setting it to "yes" causes very -# weird errors, e.g.: -# -# make[2]: Entering directory [...] -# /bin/mkdir -p '/var/tmp/lib/charliecloud' -# ../yes base.sh '/var/tmp/lib/charliecloud' -# /bin/bash: line 23: ../yes: No such file or directory -# Makefile:323: recipe for target 'install-dist_pkglibSCRIPTS' failed -# -# Additional options: -# -# CH_BUILDER # which builder to use -# MINIMAL_CONFIG # exclude all optional features with --disable-foo -# MINIMAL_DEPS # minimal dependencies; implies PACK_FMT=tar -# PACK_FMT= # squash pack format, ch-mount/ch-unmount -# PACK_FMT=squash-unpack # squash pack format, but unpack instead of mount -# PACK_FMT=tar # tarball pack format -# SUDO_RM_FIRST # remove sudo before build (implied if non-Docker) -# SUDO_RM_AFTER_BUILD # remove sudo after build - -# Only run the tests on master or in a pull request. In principle, people -# might start a branch and want Travis on it before it becomes a PR. However, -# in practice, this doesn't happen, and the merge is what really matters. 
-if: branch = master OR type = pull_request - -stages: - - quick - - builders - - install - - misc - -_stage_quick: &stage_quick - stage: quick -_stage_builders: &stage_builders - stage: builders -_stage_install: &stage_install - stage: install -_stage_misc: &stage_misc - stage: misc - -jobs: - include: - - - <<: *stage_quick - env: CH_BUILDER=docker CH_TEST_SCOPE=quick - - - <<: *stage_builders - env: CH_BUILDER=none - - <<: *stage_builders - env: CH_BUILDER=ch-grow - - <<: *stage_builders - env: CH_BUILDER=docker -# - <<: *stage_builders -# env: CH_BUILDER=docker PACK_FMT=squash-unpack - - <<: *stage_builders - env: CH_BUILDER=docker PACK_FMT=tar - - - <<: *stage_install - env: CH_BUILDER=none TARBALL=export MAKE_INSTALL=yes - - <<: *stage_install - env: CH_BUILDER=buildah TARBALL=export MAKE_INSTALL=yes - - <<: *stage_install - env: CH_BUILDER=buildah-runc TARBALL=export MAKE_INSTALL=yes - - <<: *stage_install - env: CH_BUILDER=ch-grow TARBALL=export MAKE_INSTALL=yes - - <<: *stage_install - env: CH_BUILDER=docker TARBALL=export MAKE_INSTALL=yes - - - <<: *stage_misc - env: CH_BUILDER=buildah MINIMAL_DEPS=yes - - <<: *stage_misc - env: CH_BUILDER=ch-grow MINIMAL_DEPS=yes - - <<: *stage_misc - env: CH_BUILDER=docker MINIMAL_DEPS=yes - - <<: *stage_misc - env: CH_BUILDER=docker MINIMAL_CONFIG=yes - - <<: *stage_misc - env: CH_BUILDER=docker SUDO_RM_AFTER_BUILD=yes - -addons: - apt: - sources: - - sourceline: 'ppa:projectatomic/ppa' - packages: - - autoconf - - automake - - bats - - python3-pip - - python3-setuptools - -install: - - . test/travis-install.bash # source b/c we're setting variables - -before_script: - - . 
test/travis-before.bash - -script: - - test/travis.bash - -after_script: - - free -m - - df -h diff -Nru charliecloud-0.20/.travis.yml charliecloud-0.21/.travis.yml --- charliecloud-0.20/.travis.yml 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,128 +0,0 @@ -os: linux -dist: bionic -language: c -compiler: gcc - -# This defines a "matrix" of jobs. Each combination of environment variables -# defines a different job. They run in parallel, five at a time. We have -# divided the matrix into "stages"; if any stage has a failure, testing stops -# and the remaining stages are skipped. Note that some stages are conditional. -# -# Note: Matrixing of variables that happens at the top level is not supported -# within stages: https://github.com/travis-ci/travis-ci/issues/8295. -# -# We do not do any full-scope tests, because they give a >10-minute gap in -# output, so Travis times out. -# -# FIXME: Each job starts with a cold Docker cache, which wastes work heating -# it up in parallel. It would be nice if "make test-build" could be done -# serially before splitting into parallel jobs. -# -# TARBALL= # build in Git checkout -# TARBALL=archive # build from "git archive" tarball -# TARBALL=export # build from "make export" tarball -# MAKE_INSTALL= # run from build directory -# MAKE_INSTALL=yes # "make install"; run that one -# -# Note: $INSTALL is used by Autotools, and setting it to "yes" causes very -# weird errors, e.g.: -# -# make[2]: Entering directory [...] 
-# /bin/mkdir -p '/var/tmp/lib/charliecloud' -# ../yes base.sh '/var/tmp/lib/charliecloud' -# /bin/bash: line 23: ../yes: No such file or directory -# Makefile:323: recipe for target 'install-dist_pkglibSCRIPTS' failed -# -# Additional options: -# -# CH_BUILDER # which builder to use -# MINIMAL_CONFIG # exclude all optional features with --disable-foo -# MINIMAL_DEPS # minimal dependencies; implies PACK_FMT=tar -# PACK_FMT= # squash pack format, ch-mount/ch-unmount -# PACK_FMT=squash-unpack # squash pack format, but unpack instead of mount -# PACK_FMT=tar # tarball pack format -# SUDO_RM_FIRST # remove sudo before build (implied if non-Docker) -# SUDO_RM_AFTER_BUILD # remove sudo after build - -# Only run the tests on master or in a pull request. In principle, people -# might start a branch and want Travis on it before it becomes a PR. However, -# in practice, this doesn't happen, and the merge is what really matters. -if: branch = master OR type = pull_request - -stages: - - quick - - builders - - install - - misc - -_stage_quick: &stage_quick - stage: quick -_stage_builders: &stage_builders - stage: builders -_stage_install: &stage_install - stage: install -_stage_misc: &stage_misc - stage: misc - -jobs: - include: - - - <<: *stage_quick - env: CH_BUILDER=docker CH_TEST_SCOPE=quick - - - <<: *stage_builders - env: CH_BUILDER=none - - <<: *stage_builders - env: CH_BUILDER=ch-grow - - <<: *stage_builders - env: CH_BUILDER=docker -# - <<: *stage_builders -# env: CH_BUILDER=docker PACK_FMT=squash-unpack - - <<: *stage_builders - env: CH_BUILDER=docker PACK_FMT=tar - - - <<: *stage_install - env: CH_BUILDER=none TARBALL=export MAKE_INSTALL=yes - - <<: *stage_install - env: CH_BUILDER=buildah TARBALL=export MAKE_INSTALL=yes - - <<: *stage_install - env: CH_BUILDER=buildah-runc TARBALL=export MAKE_INSTALL=yes - - <<: *stage_install - env: CH_BUILDER=ch-grow TARBALL=export MAKE_INSTALL=yes - - <<: *stage_install - env: CH_BUILDER=docker TARBALL=export MAKE_INSTALL=yes - 
- - <<: *stage_misc - env: CH_BUILDER=buildah MINIMAL_DEPS=yes - - <<: *stage_misc - env: CH_BUILDER=ch-grow MINIMAL_DEPS=yes - - <<: *stage_misc - env: CH_BUILDER=docker MINIMAL_DEPS=yes - - <<: *stage_misc - env: CH_BUILDER=docker MINIMAL_CONFIG=yes - - <<: *stage_misc - env: CH_BUILDER=docker SUDO_RM_AFTER_BUILD=yes - -addons: - apt: - sources: - - sourceline: 'ppa:projectatomic/ppa' - packages: - - autoconf - - automake - - bats - - python3-pip - - python3-setuptools - -install: - - . test/travis-install.bash # source b/c we're setting variables - -before_script: - - . test/travis-before.bash - -script: - - test/travis.bash - -after_script: - - free -m - - df -h diff -Nru charliecloud-0.20/VERSION charliecloud-0.21/VERSION --- charliecloud-0.20/VERSION 2020-10-20 16:45:52.000000000 +0000 +++ charliecloud-0.21/VERSION 2020-12-18 20:25:06.000000000 +0000 @@ -1 +1 @@ -0.20 +0.21