diff -Nru zfs-linux-0.7.13/cmd/arcstat/arcstat.py zfs-linux-0.7.13/cmd/arcstat/arcstat.py
--- zfs-linux-0.7.13/cmd/arcstat/arcstat.py	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/arcstat/arcstat.py	2019-04-19 06:54:36.000000000 +0000
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 #
 # Print out ZFS ARC Statistics exported via kstat(1)
 # For a definition of fields, or usage, use arcstat.pl -v
diff -Nru zfs-linux-0.7.13/cmd/arc_summary/arc_summary.py zfs-linux-0.7.13/cmd/arc_summary/arc_summary.py
--- zfs-linux-0.7.13/cmd/arc_summary/arc_summary.py	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/arc_summary/arc_summary.py	2019-04-19 06:54:36.000000000 +0000
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 #
 # $Id: arc_summary.pl,v 388:e27800740aa2 2011-07-08 02:53:29Z jhell $
 #
diff -Nru zfs-linux-0.7.13/cmd/dbufstat/dbufstat.py zfs-linux-0.7.13/cmd/dbufstat/dbufstat.py
--- zfs-linux-0.7.13/cmd/dbufstat/dbufstat.py	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/dbufstat/dbufstat.py	2019-04-19 06:54:36.000000000 +0000
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 #
 # Print out statistics for all cached dmu buffers.  This information
 # is available through the dbufs kstat and may be post-processed as
diff -Nru zfs-linux-0.7.13/cmd/zdb/zdb.c zfs-linux-0.7.13/cmd/zdb/zdb.c
--- zfs-linux-0.7.13/cmd/zdb/zdb.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/zdb/zdb.c	2019-04-19 06:54:36.000000000 +0000
@@ -1982,13 +1982,13 @@
 	aux[0] = '\0';
 
 	if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
-		(void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)",
-		    ZDB_CHECKSUM_NAME(doi.doi_checksum));
+		(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
+		    " (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
 	}
 
 	if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
-		(void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)",
-		    ZDB_COMPRESS_NAME(doi.doi_compress));
+		(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
+		    " (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
 	}
 
 	(void) printf("%10lld  %3u  %5s  %5s  %5s  %6s  %5s  %6s  %s%s\n",
@@ -4472,6 +4472,8 @@
 		} else {
 			zdb_set_skip_mmp(target);
 			error = open_objset(target, DMU_OST_ANY, FTAG, &os);
+			if (error == 0)
+				spa = dmu_objset_spa(os);
 		}
 	}
 	nvlist_free(policy);
@@ -4479,6 +4481,14 @@
 	if (error)
 		fatal("can't open '%s': %s", target, strerror(error));
 
+	/*
+	 * Set the pool failure mode to panic in order to prevent the pool
+	 * from suspending.  A suspended I/O will have no way to resume and
+	 * can prevent the zdb(8) command from terminating as expected.
+	 */
+	if (spa != NULL)
+		spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
+
 	argv++;
 	argc--;
 	if (!dump_opt['R']) {
diff -Nru zfs-linux-0.7.13/cmd/zed/zed.d/zed.rc zfs-linux-0.7.13/cmd/zed/zed.d/zed.rc
--- zfs-linux-0.7.13/cmd/zed/zed.d/zed.rc	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/zed/zed.d/zed.rc	2019-04-19 06:54:36.000000000 +0000
@@ -15,7 +15,7 @@
 # Email will only be sent if ZED_EMAIL_ADDR is defined.
 # Disabled by default; uncomment to enable.
 #
-#ZED_EMAIL_ADDR="root"
+ZED_EMAIL_ADDR="root"
 
 ##
 # Name or path of executable responsible for sending notifications via email;
@@ -41,7 +41,7 @@
 ##
 # Minimum number of seconds between notifications for a similar event.
 #
-#ZED_NOTIFY_INTERVAL_SECS=3600
+ZED_NOTIFY_INTERVAL_SECS=3600
 
 ##
 # Notification verbosity.
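The zdb.c hunk above fixes a classic snprintf append bug: passing sizeof (aux) on every call overstates the remaining room once aux is partially filled, so the second append could write past the end of the buffer. A minimal standalone sketch of the corrected pattern follows; the "fletcher4" and "lz4" values are illustrative only, not taken from the patch.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char aux[64];

	aux[0] = '\0';
	/* Correct: the size argument shrinks as the string grows. */
	(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
	    " (K=%s)", "fletcher4");
	(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
	    " (Z=%s)", "lz4");
	(void) printf("%s\n", aux);
	return (0);
}

With the shrinking size argument, the worst case is truncation of the formatted string rather than an out-of-bounds write.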
diff -Nru zfs-linux-0.7.13/cmd/zfs/zfs_main.c zfs-linux-0.7.13/cmd/zfs/zfs_main.c
--- zfs-linux-0.7.13/cmd/zfs/zfs_main.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/zfs/zfs_main.c	2019-04-19 06:54:36.000000000 +0000
@@ -764,8 +764,7 @@
 	return (!!ret);
 
 usage:
-	if (zhp)
-		zfs_close(zhp);
+	ASSERT3P(zhp, ==, NULL);
 	nvlist_free(props);
 	usage(B_FALSE);
 	return (-1);
@@ -5721,8 +5720,6 @@
 			uint64_t val = 0;
 			time_t time;
 			struct tm t;
-			char sep = scripted ? '\t' : ' ';
-			int sepnum = scripted ? 1 : 2;
 
 			(void) nvpair_value_uint64(nvp2, &val);
 			time = (time_t)val;
@@ -5730,8 +5727,13 @@
 			(void) strftime(tsbuf, DATETIME_BUF_LEN,
 			    gettext(STRFTIME_FMT_STR), &t);
 
-			(void) printf("%-*s%*c%-*s%*c%s\n", nwidth, zname,
-			    sepnum, sep, tagwidth, tagname, sepnum, sep, tsbuf);
+			if (scripted) {
+				(void) printf("%s\t%s\t%s\n", zname,
+				    tagname, tsbuf);
+			} else {
+				(void) printf("%-*s  %-*s  %s\n", nwidth,
+				    zname, tagwidth, tagname, tsbuf);
+			}
 		}
 	}
 }
diff -Nru zfs-linux-0.7.13/cmd/zpool/Makefile.am zfs-linux-0.7.13/cmd/zpool/Makefile.am
--- zfs-linux-0.7.13/cmd/zpool/Makefile.am	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/zpool/Makefile.am	2019-04-19 06:54:36.000000000 +0000
@@ -22,11 +22,11 @@
 	-lm $(LIBBLKID)
 
 zpoolconfdir = $(sysconfdir)/zfs/zpool.d
-zpoolexecdir = $(libexecdir)/zfs/zpool.d
+zpoollibdir = /usr/lib/zfs-linux/zpool.d
 
 EXTRA_DIST = zpool.d/README
 
-dist_zpoolexec_SCRIPTS = \
+dist_zpoollib_SCRIPTS = \
 	zpool.d/enc \
 	zpool.d/encdev \
 	zpool.d/fault_led \
@@ -119,5 +119,5 @@
 	for f in $(zpoolconfdefaults); do \
 		test -f "$(DESTDIR)$(zpoolconfdir)/$${f}" -o \
 		    -L "$(DESTDIR)$(zpoolconfdir)/$${f}" || \
-		    ln -s "$(zpoolexecdir)/$${f}" "$(DESTDIR)$(zpoolconfdir)"; \
+		    ln -s "$(zpoollibdir)/$${f}" "$(DESTDIR)$(zpoolconfdir)"; \
 	done
diff -Nru zfs-linux-0.7.13/cmd/zpool/zpool_main.c zfs-linux-0.7.13/cmd/zpool/zpool_main.c
--- zfs-linux-0.7.13/cmd/zpool/zpool_main.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/zpool/zpool_main.c	2019-04-19 06:54:36.000000000 +0000
@@ -895,7 +895,8 @@
 	if (zpool_read_label(fd, &config, NULL) != 0 || config == NULL) {
 		(void) fprintf(stderr,
 		    gettext("failed to check state for %s\n"), vdev);
-		return (1);
+		ret = 1;
+		goto errout;
 	}
 	nvlist_free(config);
 
@@ -903,7 +904,8 @@
 	if (ret != 0) {
 		(void) fprintf(stderr,
 		    gettext("failed to check state for %s\n"), vdev);
-		return (1);
+		ret = 1;
+		goto errout;
 	}
 
 	if (!inuse)
@@ -3622,7 +3624,7 @@
     nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
 {
 	nvlist_t **oldchild, **newchild;
-	uint_t c, children;
+	uint_t c, children, oldchildren;
 	vdev_stat_t *oldvs, *newvs, *calcvs;
 	vdev_stat_t zerovs = { 0 };
 	char *vname;
@@ -3734,9 +3736,13 @@
 	    &newchild, &children) != 0)
 		return (ret);
 
-	if (oldnv && nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
-	    &oldchild, &c) != 0)
-		return (ret);
+	if (oldnv) {
+		if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
+		    &oldchild, &oldchildren) != 0)
+			return (ret);
+
+		children = MIN(oldchildren, children);
+	}
 
 	for (c = 0; c < children; c++) {
 		uint64_t ishole = B_FALSE, islog = B_FALSE;
@@ -3792,9 +3798,13 @@
 	    &newchild, &children) != 0)
 		return (ret);
 
-	if (oldnv && nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
-	    &oldchild, &c) != 0)
-		return (ret);
+	if (oldnv) {
+		if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
+		    &oldchild, &oldchildren) != 0)
+			return (ret);
+
+		children = MIN(oldchildren, children);
+	}
 
 	if (children > 0) {
 		if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
@@ -7358,7 +7368,7 @@
 } ev_opts_t;
 
 static void
-zpool_do_events_short(nvlist_t *nvl)
+zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
 {
 	char ctime_str[26], str[32], *ptr;
 	int64_t *tv;
@@ -7371,7 +7381,10 @@
 	(void) strncpy(str+7, ctime_str+20, 4);		/* '1993' */
 	(void) strncpy(str+12, ctime_str+11, 8);	/* '21:49:08' */
 	(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
-	(void) printf(gettext("%s "), str);
+	if (opts->scripted)
+		(void) printf(gettext("%s\t"), str);
+	else
+		(void) printf(gettext("%s "), str);
 
 	verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
 	(void) printf(gettext("%s\n"), ptr);
@@ -7635,7 +7648,7 @@
 		if (dropped > 0)
 			(void) printf(gettext("dropped %d events\n"), dropped);
 
-		zpool_do_events_short(nvl);
+		zpool_do_events_short(nvl, opts);
 
 		if (opts->verbose) {
 			zpool_do_events_nvprint(nvl, 8);
diff -Nru zfs-linux-0.7.13/cmd/zpool/zpool_vdev.c zfs-linux-0.7.13/cmd/zpool/zpool_vdev.c
--- zfs-linux-0.7.13/cmd/zpool/zpool_vdev.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/cmd/zpool/zpool_vdev.c	2019-04-19 06:54:36.000000000 +0000
@@ -188,6 +188,7 @@
 	{"ATA     INTEL SSDSC2BB60", 4096},
 	{"ATA     INTEL SSDSC2BB80", 4096},
 	{"ATA     INTEL SSDSC2BW24", 4096},
+	{"ATA     INTEL SSDSC2BW48", 4096},
 	{"ATA     INTEL SSDSC2BP24", 4096},
 	{"ATA     INTEL SSDSC2BP48", 4096},
 	{"NA      SmrtStorSDLKAE9W", 4096},
diff -Nru zfs-linux-0.7.13/config/dkms.m4 zfs-linux-0.7.13/config/dkms.m4
--- zfs-linux-0.7.13/config/dkms.m4	1970-01-01 00:00:00.000000000 +0000
+++ zfs-linux-0.7.13/config/dkms.m4	2019-04-19 06:54:36.000000000 +0000
@@ -0,0 +1,14 @@
+dnl #
+dnl # Prevent manual building in DKMS source tree.
+dnl #
+AC_DEFUN([ZFS_AC_DKMS_INHIBIT], [
+	AC_MSG_CHECKING([for dkms.conf file])
+	AS_IF([test -e dkms.conf], [
+		AC_MSG_ERROR([
+	*** ZFS should not be manually built in the DKMS source tree.
+	*** Remove all ZFS packages before compiling the ZoL sources.
+	*** Running "make install" breaks ZFS packages.])
+	], [
+		AC_MSG_RESULT([not found])
+	])
+])
diff -Nru zfs-linux-0.7.13/config/user.m4 zfs-linux-0.7.13/config/user.m4
--- zfs-linux-0.7.13/config/user.m4	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/config/user.m4	2019-04-19 06:54:36.000000000 +0000
@@ -2,6 +2,7 @@
 dnl # Default ZFS user configuration
 dnl #
 AC_DEFUN([ZFS_AC_CONFIG_USER], [
+	ZFS_AC_DKMS_INHIBIT
 	ZFS_AC_CONFIG_USER_MOUNT_HELPER
 	ZFS_AC_CONFIG_USER_UDEV
 	ZFS_AC_CONFIG_USER_SYSTEMD
diff -Nru zfs-linux-0.7.13/config/zfs-meta.m4 zfs-linux-0.7.13/config/zfs-meta.m4
--- zfs-linux-0.7.13/config/zfs-meta.m4	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/config/zfs-meta.m4	2019-04-19 06:54:36.000000000 +0000
@@ -1,9 +1,10 @@
 dnl #
 dnl # DESCRIPTION:
-dnl # Read meta data from the META file. When building from a git repository
-dnl # the ZFS_META_RELEASE field will be overwritten if there is an annotated
-dnl # tag matching the form ZFS_META_NAME-ZFS_META_VERSION-*.  This allows
-dnl # for working builds to be uniquely identified using the git commit hash.
+dnl # Read meta data from the META file or the debian/changelog file if it
+dnl # exists.  When building from a git repository the ZFS_META_RELEASE field
+dnl # will be overwritten if there is an annotated tag matching the form
+dnl # ZFS_META_NAME-ZFS_META_VERSION-*.  This allows for working builds to be
+dnl # uniquely identified using the git commit hash.
 dnl #
 dnl # The META file format is as follows:
 dnl #   ^[ ]*KEY:[ \t]+VALUE$
@@ -49,6 +50,7 @@
 	_zfs_ac_meta_type="none"
 	if test -f "$META"; then
 		_zfs_ac_meta_type="META file"
+		_dpkg_parsechangelog=$(dpkg-parsechangelog 2>/dev/null)
 
 		ZFS_META_NAME=_ZFS_AC_META_GETVAL([(Name|Project|Package)]);
 		if test -n "$ZFS_META_NAME"; then
@@ -66,8 +68,30 @@
 			AC_SUBST([ZFS_META_VERSION])
 		fi
 
+		if test -n "${_dpkg_parsechangelog}"; then
+			_dpkg_version=$(echo "${_dpkg_parsechangelog}" \
+			    | $AWK '$[]1 == "Version:" { print $[]2; }' \
+			    | cut -d- -f1)
+			if test "${_dpkg_version}" != "$ZFS_META_VERSION"; then
+				AC_MSG_ERROR([
+	*** Version $ZFS_META_VERSION in the META file is different than
+	*** version $_dpkg_version in the debian/changelog file. DKMS and DEB
+	*** packaging require that these files have the same version.
+				])
+			fi
+		fi
+
 		ZFS_META_RELEASE=_ZFS_AC_META_GETVAL([Release]);
-		if test ! -f ".nogitrelease" && git rev-parse --git-dir > /dev/null 2>&1; then
+
+		if test -n "${_dpkg_parsechangelog}"; then
+			_dpkg_release=$(echo "${_dpkg_parsechangelog}" \
+			    | $AWK '$[]1 == "Version:" { print $[]2; }' \
+			    | cut -d- -f2-)
+			if test -n "${_dpkg_release}"; then
+				ZFS_META_RELEASE=${_dpkg_release}
+				_zfs_ac_meta_type="dpkg-parsechangelog"
+			fi
+		elif test ! -f ".nogitrelease" && git rev-parse --git-dir > /dev/null 2>&1; then
 			_match="${ZFS_META_NAME}-${ZFS_META_VERSION}"
 			_alias=$(git describe --match=${_match} 2>/dev/null)
 			_release=$(echo ${_alias}|cut -f3- -d'-'|sed 's/-/_/g')
diff -Nru zfs-linux-0.7.13/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in zfs-linux-0.7.13/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in
--- zfs-linux-0.7.13/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in	2019-04-19 06:54:36.000000000 +0000
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 get_devtype() {
 	local typ
diff -Nru zfs-linux-0.7.13/contrib/dracut/90zfs/module-setup.sh.in zfs-linux-0.7.13/contrib/dracut/90zfs/module-setup.sh.in
--- zfs-linux-0.7.13/contrib/dracut/90zfs/module-setup.sh.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/contrib/dracut/90zfs/module-setup.sh.in	2019-04-19 06:54:36.000000000 +0000
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 check() {
 	# We depend on udev-rules being loaded
diff -Nru zfs-linux-0.7.13/debian/bzr-builder.manifest zfs-linux-0.7.13/debian/bzr-builder.manifest
--- zfs-linux-0.7.13/debian/bzr-builder.manifest	2019-04-19 06:44:44.000000000 +0000
+++ zfs-linux-0.7.13/debian/bzr-builder.manifest	2019-04-19 06:54:37.000000000 +0000
@@ -1,2 +1,2 @@
-# bzr-builder format 0.3 deb-version {debupstream}-N~23
-lp:~mmach/netext73/zfs-linux revid:netbit73@gmail.com-20190419064320-n8tr7tfcoxc6jagv
+# bzr-builder format 0.3 deb-version {debupstream}-N~24
+lp:~mmach/netext73/zfs-linux revid:netbit73@gmail.com-20190419065330-3t230mitamrfgoma
diff -Nru zfs-linux-0.7.13/debian/changelog zfs-linux-0.7.13/debian/changelog
--- zfs-linux-0.7.13/debian/changelog	2019-04-19 06:44:44.000000000 +0000
+++ zfs-linux-0.7.13/debian/changelog	2019-04-19 06:54:37.000000000 +0000
@@ -1,8 +1,8 @@
-zfs-linux (0.7.13-N~23~ubuntu18.10.1) cosmic; urgency=low
+zfs-linux (0.7.13-N~24~ubuntu18.10.1) cosmic; urgency=low
 
   * Auto build.
 
- -- NetBit73 <netbit73@gmail.com>  Fri, 19 Apr 2019 06:44:44 +0000
+ -- NetBit73 <netbit73@gmail.com>  Fri, 19 Apr 2019 06:54:37 +0000
 
 zfs-linux (0.7.13-1~18.04.york0) bionic; urgency=medium
diff -Nru zfs-linux-0.7.13/etc/init.d/zfs-functions.in zfs-linux-0.7.13/etc/init.d/zfs-functions.in
--- zfs-linux-0.7.13/etc/init.d/zfs-functions.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/init.d/zfs-functions.in	2019-04-19 06:54:36.000000000 +0000
@@ -89,7 +89,7 @@
 
 # Paths to what we need
 ZFS="@sbindir@/zfs"
-ZED="@sbindir@/zed"
+ZED="/usr/sbin/zed"
 ZPOOL="@sbindir@/zpool"
 ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache"
 
diff -Nru zfs-linux-0.7.13/etc/init.d/zfs-import.in zfs-linux-0.7.13/etc/init.d/zfs-import.in
--- zfs-linux-0.7.13/etc/init.d/zfs-import.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/init.d/zfs-import.in	2019-04-19 06:54:36.000000000 +0000
@@ -308,8 +308,7 @@
 
 # ----------------------------------------------------
 
-if [ ! -e /sbin/openrc-run ]
-then
+if ! (echo @SHELL@ | grep openrc 1>/dev/null 2>/dev/null); then
 	case "$1" in
 		start)
 			do_start
diff -Nru zfs-linux-0.7.13/etc/init.d/zfs-mount.in zfs-linux-0.7.13/etc/init.d/zfs-mount.in
--- zfs-linux-0.7.13/etc/init.d/zfs-mount.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/init.d/zfs-mount.in	2019-04-19 06:54:36.000000000 +0000
@@ -199,8 +199,7 @@
 
 # ----------------------------------------------------
 
-if [ ! -e /sbin/openrc-run ]
-then
+if ! (echo @SHELL@ | grep openrc 1>/dev/null 2>/dev/null); then
 	case "$1" in
 		start)
 			do_start
diff -Nru zfs-linux-0.7.13/etc/init.d/zfs-share.in zfs-linux-0.7.13/etc/init.d/zfs-share.in
--- zfs-linux-0.7.13/etc/init.d/zfs-share.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/init.d/zfs-share.in	2019-04-19 06:54:36.000000000 +0000
@@ -58,7 +58,7 @@
 
 # ----------------------------------------------------
 
-if [ ! -e /sbin/openrc-run ]; then
+if ! (echo @SHELL@ | grep openrc 1>/dev/null 2>/dev/null); then
 	case "$1" in
 		start)
 			do_start
diff -Nru zfs-linux-0.7.13/etc/init.d/zfs-zed.in zfs-linux-0.7.13/etc/init.d/zfs-zed.in
--- zfs-linux-0.7.13/etc/init.d/zfs-zed.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/init.d/zfs-zed.in	2019-04-19 06:54:36.000000000 +0000
@@ -98,7 +98,7 @@
 
 # ----------------------------------------------------
 
-if [ ! -e /sbin/openrc-run ]; then
+if ! (echo @SHELL@ | grep openrc 1>/dev/null 2>/dev/null); then
 	case "$1" in
 		start)
 			do_start
diff -Nru zfs-linux-0.7.13/etc/systemd/system/50-zfs.preset.in zfs-linux-0.7.13/etc/systemd/system/50-zfs.preset.in
--- zfs-linux-0.7.13/etc/systemd/system/50-zfs.preset.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/systemd/system/50-zfs.preset.in	2019-04-19 06:54:36.000000000 +0000
@@ -6,3 +6,4 @@
 enable zfs-share.service
 enable zfs-zed.service
 enable zfs.target
+enable zfs-load-module.service
diff -Nru zfs-linux-0.7.13/etc/systemd/system/Makefile.am zfs-linux-0.7.13/etc/systemd/system/Makefile.am
--- zfs-linux-0.7.13/etc/systemd/system/Makefile.am	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/systemd/system/Makefile.am	2019-04-19 06:54:36.000000000 +0000
@@ -2,6 +2,7 @@
 	50-zfs.preset
 
 systemdunit_DATA = \
+	zfs-load-module.service \
 	zfs-zed.service \
 	zfs-import-cache.service \
 	zfs-import-scan.service \
@@ -11,6 +12,7 @@
 	zfs.target
 
 EXTRA_DIST = \
+	$(top_srcdir)/etc/systemd/system/zfs-load-module.service.in \
 	$(top_srcdir)/etc/systemd/system/zfs-zed.service.in \
 	$(top_srcdir)/etc/systemd/system/zfs-import-cache.service.in \
 	$(top_srcdir)/etc/systemd/system/zfs-import-scan.service.in \
diff -Nru zfs-linux-0.7.13/etc/systemd/system/zfs-import-cache.service.in zfs-linux-0.7.13/etc/systemd/system/zfs-import-cache.service.in
--- zfs-linux-0.7.13/etc/systemd/system/zfs-import-cache.service.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/systemd/system/zfs-import-cache.service.in	2019-04-19 06:54:36.000000000 +0000
@@ -1,8 +1,12 @@
 [Unit]
 Description=Import ZFS pools by cache file
+After=zfs-load-module.service
+Documentation=man:zpool(8)
 DefaultDependencies=no
 Requires=systemd-udev-settle.service
+Requires=zfs-load-module.service
 After=systemd-udev-settle.service
+After=zfs-load-module.service
 After=cryptsetup.target
 After=systemd-remount-fs.service
 Before=dracut-mount.service
diff -Nru zfs-linux-0.7.13/etc/systemd/system/zfs-import-scan.service.in zfs-linux-0.7.13/etc/systemd/system/zfs-import-scan.service.in
--- zfs-linux-0.7.13/etc/systemd/system/zfs-import-scan.service.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/systemd/system/zfs-import-scan.service.in	2019-04-19 06:54:36.000000000 +0000
@@ -1,8 +1,11 @@
 [Unit]
 Description=Import ZFS pools by device scanning
+Documentation=man:zpool(8)
 DefaultDependencies=no
 Requires=systemd-udev-settle.service
+Requires=zfs-load-module.service
 After=systemd-udev-settle.service
+After=zfs-load-module.service
 After=cryptsetup.target
 Before=dracut-mount.service
 Before=zfs-import.target
diff -Nru zfs-linux-0.7.13/etc/systemd/system/zfs-load-module.service.in zfs-linux-0.7.13/etc/systemd/system/zfs-load-module.service.in
--- zfs-linux-0.7.13/etc/systemd/system/zfs-load-module.service.in	1970-01-01 00:00:00.000000000 +0000
+++ zfs-linux-0.7.13/etc/systemd/system/zfs-load-module.service.in	2019-04-19 06:54:36.000000000 +0000
@@ -0,0 +1,17 @@
+[Unit]
+Description=Install ZFS kernel module
+DefaultDependencies=no
+Requires=systemd-udev-settle.service
+After=systemd-udev-settle.service
+After=cryptsetup.target
+Before=dracut-mount.service
+After=systemd-remount-fs.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/sbin/modprobe zfs
+
+[Install]
+WantedBy=zfs-mount.service
+WantedBy=zfs.target
diff -Nru zfs-linux-0.7.13/etc/systemd/system/zfs-mount.service.in zfs-linux-0.7.13/etc/systemd/system/zfs-mount.service.in
--- zfs-linux-0.7.13/etc/systemd/system/zfs-mount.service.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/systemd/system/zfs-mount.service.in	2019-04-19 06:54:36.000000000 +0000
@@ -1,5 +1,6 @@
 [Unit]
 Description=Mount ZFS filesystems
+Documentation=man:zfs(8)
 DefaultDependencies=no
 After=systemd-udev-settle.service
 After=zfs-import.target
diff -Nru zfs-linux-0.7.13/etc/systemd/system/zfs-share.service.in zfs-linux-0.7.13/etc/systemd/system/zfs-share.service.in
--- zfs-linux-0.7.13/etc/systemd/system/zfs-share.service.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/systemd/system/zfs-share.service.in	2019-04-19 06:54:36.000000000 +0000
@@ -1,7 +1,9 @@
 [Unit]
 Description=ZFS file system shares
+Documentation=man:zfs(8)
 After=nfs-server.service nfs-kernel-server.service
 After=smb.service
+Before=rpc-statd-notify.service
 After=zfs-mount.service
 PartOf=nfs-server.service nfs-kernel-server.service
 PartOf=smb.service
diff -Nru zfs-linux-0.7.13/etc/systemd/system/zfs-zed.service.in zfs-linux-0.7.13/etc/systemd/system/zfs-zed.service.in
--- zfs-linux-0.7.13/etc/systemd/system/zfs-zed.service.in	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/etc/systemd/system/zfs-zed.service.in	2019-04-19 06:54:36.000000000 +0000
@@ -5,7 +5,7 @@
 After=zfs-import-scan.service
 
 [Service]
-ExecStart=@sbindir@/zed -F
+ExecStart=/usr/sbin/zed -F
 Restart=on-abort
 
 [Install]
diff -Nru zfs-linux-0.7.13/include/sys/dsl_pool.h zfs-linux-0.7.13/include/sys/dsl_pool.h
--- zfs-linux-0.7.13/include/sys/dsl_pool.h	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/include/sys/dsl_pool.h	2019-04-19 06:54:36.000000000 +0000
@@ -126,6 +126,7 @@
 	txg_list_t dp_dirty_dirs;
 	txg_list_t dp_sync_tasks;
 	taskq_t *dp_sync_taskq;
+	taskq_t *dp_zil_clean_taskq;
 
 	/*
	 * Protects administrative changes (properties, namespace)
diff -Nru zfs-linux-0.7.13/include/sys/zfs_ioctl.h zfs-linux-0.7.13/include/sys/zfs_ioctl.h
--- zfs-linux-0.7.13/include/sys/zfs_ioctl.h	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/include/sys/zfs_ioctl.h	2019-04-19 06:54:36.000000000 +0000
@@ -328,6 +328,25 @@
 	uint32_t zi_pad;
 } zinject_record_t;
 
+typedef struct zinject_record_V065 {
+	uint64_t	zi_objset;
+	uint64_t	zi_object;
+	uint64_t	zi_start;
+	uint64_t	zi_end;
+	uint64_t	zi_guid;
+	uint32_t	zi_level;
+	uint32_t	zi_error;
+	uint64_t	zi_type;
+	uint32_t	zi_freq;
+	uint32_t	zi_failfast;
+	char		zi_func[MAXNAMELEN];
+	uint32_t	zi_iotype;
+	int32_t		zi_duration;
+	uint64_t	zi_timer;
+	uint32_t	zi_cmd;
+	uint32_t	zi_pad;
+} zinject_record_V065_t;
+
 #define	ZINJECT_NULL		0x1
 #define	ZINJECT_FLUSH_ARC	0x2
 #define	ZINJECT_UNLOAD_SPA	0x4
@@ -420,6 +439,48 @@
 	zfs_stat_t	zc_stat;
 } zfs_cmd_t;
 
+typedef struct zfs_cmd_V065 {
+	char		zc_name[MAXPATHLEN];	/* name of pool or dataset */
+	uint64_t	zc_nvlist_src;		/* really (char *) */
+	uint64_t	zc_nvlist_src_size;
+	uint64_t	zc_nvlist_dst;		/* really (char *) */
+	uint64_t	zc_nvlist_dst_size;
+	boolean_t	zc_nvlist_dst_filled;	/* put an nvlist in dst? */
+	int		zc_pad2;
+
+	/*
+	 * The following members are for legacy ioctls which haven't been
+	 * converted to the new method.
+	 */
+	uint64_t	zc_history;		/* really (char *) */
+	char		zc_value[MAXPATHLEN * 2];
+	char		zc_string[MAXNAMELEN];
+	uint64_t	zc_guid;
+	uint64_t	zc_nvlist_conf;		/* really (char *) */
+	uint64_t	zc_nvlist_conf_size;
+	uint64_t	zc_cookie;
+	uint64_t	zc_objset_type;
+	uint64_t	zc_perm_action;
+	uint64_t	zc_history_len;
+	uint64_t	zc_history_offset;
+	uint64_t	zc_obj;
+	uint64_t	zc_iflags;		/* internal to zfs(7fs) */
+	zfs_share_t	zc_share;
+	dmu_objset_stats_t zc_objset_stats;
+	struct drr_begin zc_begin_record;
+	zinject_record_V065_t zc_inject_record;
+	uint32_t	zc_defer_destroy;
+	uint32_t	zc_flags;
+	uint64_t	zc_action_handle;
+	int		zc_cleanup_fd;
+	uint8_t		zc_simple;
+	uint8_t		zc_pad[3];		/* alignment */
+	uint64_t	zc_sendobj;
+	uint64_t	zc_fromobj;
+	uint64_t	zc_createtxg;
+	zfs_stat_t	zc_stat;
+} zfs_cmd_V065_t;
+
 typedef struct zfs_useracct {
 	char zu_domain[256];
 	uid_t zu_rid;
diff -Nru zfs-linux-0.7.13/include/sys/zil_impl.h zfs-linux-0.7.13/include/sys/zil_impl.h
--- zfs-linux-0.7.13/include/sys/zil_impl.h	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/include/sys/zil_impl.h	2019-04-19 06:54:36.000000000 +0000
@@ -124,7 +124,6 @@
 	list_t		zl_lwb_list;	/* in-flight log write list */
 	kmutex_t	zl_vdev_lock;	/* protects zl_vdev_tree */
 	avl_tree_t	zl_vdev_tree;	/* vdevs to flush in zil_commit() */
-	taskq_t		*zl_clean_taskq; /* runs lwb and itx clean tasks */
 	avl_tree_t	zl_bp_tree;	/* track bps during log parse */
 	clock_t		zl_replay_time;	/* lbolt of when replay started */
 	uint64_t	zl_replay_blks;	/* number of log blocks replayed */
diff -Nru zfs-linux-0.7.13/lib/libspl/include/sys/kstat.h zfs-linux-0.7.13/lib/libspl/include/sys/kstat.h
--- zfs-linux-0.7.13/lib/libspl/include/sys/kstat.h	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/lib/libspl/include/sys/kstat.h	2019-04-19 06:54:36.000000000 +0000
@@ -59,7 +59,7 @@
  *	kcid = ioctl(kd, KSTAT_IOC_WRITE, kstat_t *);
  */
 
-#define	KSTAT_STRLEN	31	/* 30 chars + NULL; must be 16 * n - 1 */
+#define	KSTAT_STRLEN	255	/* 254 chars + NULL; must be 16 * n - 1 */
 
 /*
  * The generic kstat header
diff -Nru zfs-linux-0.7.13/lib/libzfs/libzfs_dataset.c zfs-linux-0.7.13/lib/libzfs/libzfs_dataset.c
--- zfs-linux-0.7.13/lib/libzfs/libzfs_dataset.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/lib/libzfs/libzfs_dataset.c	2019-04-19 06:54:36.000000000 +0000
@@ -27,7 +27,7 @@
  * Copyright (c) 2012 Pawel Jakub Dawidek.
  * Copyright (c) 2013 Martin Matuska. All rights reserved.
  * Copyright (c) 2013 Steven Hartland. All rights reserved.
- * Copyright 2016 Nexenta Systems, Inc.
+ * Copyright 2017 Nexenta Systems, Inc.
 * Copyright 2016 Igor Kozhukhov
 * Copyright 2017 RackTop Systems.
 */
@@ -2611,9 +2611,14 @@
 	case ZFS_PROP_COMPRESSRATIO:
 		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
 			return (-1);
-		(void) snprintf(propbuf, proplen, "%llu.%02llux",
-		    (u_longlong_t)(val / 100),
-		    (u_longlong_t)(val % 100));
+		if (literal)
+			(void) snprintf(propbuf, proplen, "%llu.%02llu",
+			    (u_longlong_t)(val / 100),
+			    (u_longlong_t)(val % 100));
+		else
+			(void) snprintf(propbuf, proplen, "%llu.%02llux",
+			    (u_longlong_t)(val / 100),
+			    (u_longlong_t)(val % 100));
 		break;
 
 	case ZFS_PROP_TYPE:
@@ -3502,6 +3507,11 @@
 			    "pool must be upgraded to set this "
 			    "property or value"));
 			return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
+
+		case ERANGE:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "invalid property value(s) specified"));
+			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
 #ifdef _ILP32
 		case EOVERFLOW:
 			/*
@@ -3761,6 +3771,9 @@
 		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
 	}
 
+	if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
+		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
 	ret = lzc_promote(zhp->zfs_name, snapname, sizeof (snapname));
 
 	if (ret != 0) {
@@ -4090,6 +4103,10 @@
 	(void) snprintf(errbuf, sizeof (errbuf),
 	    dgettext(TEXT_DOMAIN, "cannot rename to '%s'"), target);
 
+	/* make sure source name is valid */
+	if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
+		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
 	/*
	 * Make sure the target name is valid
	 */
diff -Nru zfs-linux-0.7.13/lib/libzfs/libzfs_pool.c zfs-linux-0.7.13/lib/libzfs/libzfs_pool.c
--- zfs-linux-0.7.13/lib/libzfs/libzfs_pool.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/lib/libzfs/libzfs_pool.c	2019-04-19 06:54:36.000000000 +0000
@@ -51,6 +51,7 @@
 #include "zfeature_common.h"
 
 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
+static boolean_t zpool_vdev_is_interior(const char *name);
 
 typedef struct prop_flags {
 	int create:1;	/* Validate property on creation */
@@ -2125,10 +2126,7 @@
 			break;
 		}
 
-		verify(strncmp(type, VDEV_TYPE_RAIDZ,
-		    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
-		    strncmp(type, VDEV_TYPE_MIRROR,
-		    strlen(VDEV_TYPE_MIRROR)) == 0);
+		verify(zpool_vdev_is_interior(type));
 
 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &id) == 0);
@@ -2235,10 +2233,13 @@
 /*
  * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
  */
-boolean_t
+static boolean_t
 zpool_vdev_is_interior(const char *name)
 {
 	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
+	    strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
+	    strncmp(name,
+	    VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
 	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
 		return (B_TRUE);
 	return (B_FALSE);
@@ -2510,6 +2511,7 @@
 {
 	zfs_cmd_t zc = {"\0"};
 	char msg[1024];
+	char *pathname;
 	nvlist_t *tgt;
 	boolean_t avail_spare, l2cache, islog;
 	libzfs_handle_t *hdl = zhp->zpool_hdl;
@@ -2533,8 +2535,9 @@
 	if (avail_spare)
 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
 
-	if (flags & ZFS_ONLINE_EXPAND ||
-	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
+	if ((flags & ZFS_ONLINE_EXPAND ||
+	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
+	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
 		uint64_t wholedisk = 0;
 
 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
diff -Nru zfs-linux-0.7.13/lib/libzfs/libzfs_util.c zfs-linux-0.7.13/lib/libzfs/libzfs_util.c
--- zfs-linux-0.7.13/lib/libzfs/libzfs_util.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/lib/libzfs/libzfs_util.c	2019-04-19 06:54:36.000000000 +0000
@@ -50,6 +50,8 @@
 #include
 #include
 
+static int zfs_major_ver, zfs_minor_ver;
+
 #include "libzfs_impl.h"
 #include "zfs_prop.h"
 #include "zfeature_common.h"
@@ -745,12 +747,23 @@
 static int
 libzfs_module_loaded(const char *module)
 {
+	FILE *fp;
 	const char path_prefix[] = "/sys/module/";
 	char path[256];
 
 	memcpy(path, path_prefix, sizeof (path_prefix) - 1);
 	strcpy(path + sizeof (path_prefix) - 1, module);
 
+	strcpy(path + sizeof (path_prefix) - 1 + strlen(module), "/version");
+	fp = fopen(path, "r");
+	if (fp) {
+		if (fscanf(fp, "%d.%d", &zfs_major_ver, &zfs_minor_ver) != 2) {
+			zfs_major_ver = 0;
+			zfs_minor_ver = 0;
+		}
+		fclose(fp);
+	}
+
 	return (access(path, F_OK) == 0);
 }
@@ -952,6 +965,15 @@
 	int load = 0, fd;
 	hrtime_t start;
 
+	/*
+	 * If inside a container, set the timeout to zero (LP: #1760173);
+	 * however, this can be overridden by ZFS_MODULE_TIMEOUT in case
+	 * the user explicitly wants to set the timeout for backward
+	 * compatibility.
+	 */
+	if (access("/run/systemd/container", R_OK) == 0)
+		timeout = 0;
+
 	/* Optionally request module loading */
 	if (!libzfs_module_loaded(module)) {
 		load_str = getenv("ZFS_MODULE_LOADING");
@@ -1355,7 +1377,7 @@
 zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
 {
 	if (len == 0)
-		len = 16 * 1024;
+		len = 256 * 1024;
 	zc->zc_nvlist_dst_size = len;
 	zc->zc_nvlist_dst =
 	    (uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
@@ -1446,7 +1468,120 @@
 int
 zfs_ioctl(libzfs_handle_t *hdl, int request, zfs_cmd_t *zc)
 {
-	return (ioctl(hdl->libzfs_fd, request, zc));
+	zfs_cmd_V065_t zc_065;
+	int rc;
+
+	if ((zfs_major_ver * 100) + zfs_minor_ver >= 7)
+		return (ioctl(hdl->libzfs_fd, request, zc));
+
+	memcpy(zc_065.zc_name, zc->zc_name, sizeof zc_065.zc_name);
+	zc_065.zc_nvlist_src = zc->zc_nvlist_src;
+	zc_065.zc_nvlist_src_size = zc->zc_nvlist_src_size;
+	zc_065.zc_nvlist_dst = zc->zc_nvlist_dst;
+	zc_065.zc_nvlist_dst_size = zc->zc_nvlist_dst_size;
+	zc_065.zc_nvlist_dst_filled = zc->zc_nvlist_dst_filled;
+	zc_065.zc_pad2 = zc->zc_pad2;
+	zc_065.zc_history = zc->zc_history;
+	memcpy(zc_065.zc_value, zc->zc_value, sizeof zc_065.zc_value);
+	memcpy(zc_065.zc_string, zc->zc_string, sizeof zc_065.zc_string);
+	zc_065.zc_guid = zc->zc_guid;
+	zc_065.zc_nvlist_conf = zc->zc_nvlist_conf;
+	zc_065.zc_nvlist_conf_size = zc->zc_nvlist_conf_size;
+	zc_065.zc_cookie = zc->zc_cookie;
+	zc_065.zc_objset_type = zc->zc_objset_type;
+	zc_065.zc_perm_action = zc->zc_perm_action;
+	zc_065.zc_history_len = zc->zc_history_len;
+	zc_065.zc_history_offset = zc->zc_history_offset;
+	zc_065.zc_obj = zc->zc_obj;
+	zc_065.zc_iflags = zc->zc_iflags;
+	zc_065.zc_share = zc->zc_share;
+	zc_065.zc_objset_stats = zc->zc_objset_stats;
+	zc_065.zc_begin_record = zc->zc_begin_record;
+
+	zc_065.zc_inject_record.zi_objset = zc->zc_inject_record.zi_objset;
+	zc_065.zc_inject_record.zi_object = zc->zc_inject_record.zi_object;
+	zc_065.zc_inject_record.zi_start = zc->zc_inject_record.zi_start;
+	zc_065.zc_inject_record.zi_end = zc->zc_inject_record.zi_end;
+	zc_065.zc_inject_record.zi_guid = zc->zc_inject_record.zi_guid;
+	zc_065.zc_inject_record.zi_level = zc->zc_inject_record.zi_level;
+	zc_065.zc_inject_record.zi_error = zc->zc_inject_record.zi_error;
+	zc_065.zc_inject_record.zi_type = zc->zc_inject_record.zi_type;
+	zc_065.zc_inject_record.zi_freq = zc->zc_inject_record.zi_freq;
+	zc_065.zc_inject_record.zi_failfast = zc->zc_inject_record.zi_failfast;
+	memcpy(zc_065.zc_inject_record.zi_func, zc->zc_inject_record.zi_func, sizeof zc_065.zc_inject_record.zi_func);
+	zc_065.zc_inject_record.zi_iotype = zc->zc_inject_record.zi_iotype;
+	zc_065.zc_inject_record.zi_duration = zc->zc_inject_record.zi_duration;
+	zc_065.zc_inject_record.zi_timer = zc->zc_inject_record.zi_timer;
+	zc_065.zc_inject_record.zi_cmd = zc->zc_inject_record.zi_cmd;
+	zc_065.zc_inject_record.zi_pad = zc->zc_inject_record.zi_pad;
+
+	zc_065.zc_defer_destroy = zc->zc_defer_destroy;
+	zc_065.zc_flags = zc->zc_flags;
+	zc_065.zc_action_handle = zc->zc_action_handle;
+	zc_065.zc_cleanup_fd = zc->zc_cleanup_fd;
+	zc_065.zc_simple = zc->zc_simple;
+	memcpy(zc_065.zc_pad, zc->zc_pad, sizeof zc_065.zc_pad);
+	zc_065.zc_sendobj = zc->zc_sendobj;
+	zc_065.zc_fromobj = zc->zc_fromobj;
+	zc_065.zc_createtxg = zc->zc_createtxg;
+	zc_065.zc_stat = zc->zc_stat;
+
+	rc = ioctl(hdl->libzfs_fd, request, &zc_065);
+
+	memcpy(zc->zc_name, zc_065.zc_name, sizeof zc->zc_name);
+	zc->zc_nvlist_src = zc_065.zc_nvlist_src;
+	zc->zc_nvlist_src_size = zc_065.zc_nvlist_src_size;
+	zc->zc_nvlist_dst = zc_065.zc_nvlist_dst;
+	zc->zc_nvlist_dst_size = zc_065.zc_nvlist_dst_size;
+	zc->zc_nvlist_dst_filled = zc_065.zc_nvlist_dst_filled;
+	zc->zc_pad2 = zc_065.zc_pad2;
+	zc->zc_history = zc_065.zc_history;
+	memcpy(zc->zc_value, zc_065.zc_value, sizeof zc->zc_value);
+	memcpy(zc->zc_string, zc_065.zc_string, sizeof zc->zc_string);
+	zc->zc_guid = zc_065.zc_guid;
+	zc->zc_nvlist_conf = zc_065.zc_nvlist_conf;
+	zc->zc_nvlist_conf_size = zc_065.zc_nvlist_conf_size;
+	zc->zc_cookie = zc_065.zc_cookie;
+	zc->zc_objset_type = zc_065.zc_objset_type;
+	zc->zc_perm_action = zc_065.zc_perm_action;
+	zc->zc_history_len = zc_065.zc_history_len;
+	zc->zc_history_offset = zc_065.zc_history_offset;
+	zc->zc_obj = zc_065.zc_obj;
+	zc->zc_iflags = zc_065.zc_iflags;
+	zc->zc_share = zc_065.zc_share;
+	zc->zc_objset_stats = zc_065.zc_objset_stats;
+	zc->zc_begin_record = zc_065.zc_begin_record;
+
+	zc->zc_inject_record.zi_objset = zc_065.zc_inject_record.zi_objset;
+	zc->zc_inject_record.zi_object = zc_065.zc_inject_record.zi_object;
+	zc->zc_inject_record.zi_start = zc_065.zc_inject_record.zi_start;
+	zc->zc_inject_record.zi_end = zc_065.zc_inject_record.zi_end;
+	zc->zc_inject_record.zi_guid = zc_065.zc_inject_record.zi_guid;
+	zc->zc_inject_record.zi_level = zc_065.zc_inject_record.zi_level;
+	zc->zc_inject_record.zi_error = zc_065.zc_inject_record.zi_error;
+	zc->zc_inject_record.zi_type = zc_065.zc_inject_record.zi_type;
+	zc->zc_inject_record.zi_freq = zc_065.zc_inject_record.zi_freq;
+	zc->zc_inject_record.zi_failfast = zc_065.zc_inject_record.zi_failfast;
+	memcpy(zc->zc_inject_record.zi_func, zc_065.zc_inject_record.zi_func, sizeof zc->zc_inject_record.zi_func);
+	zc->zc_inject_record.zi_iotype = zc_065.zc_inject_record.zi_iotype;
+	zc->zc_inject_record.zi_duration = zc_065.zc_inject_record.zi_duration;
+	zc->zc_inject_record.zi_timer = zc_065.zc_inject_record.zi_timer;
+	zc->zc_inject_record.zi_nlanes = 0;
+	zc->zc_inject_record.zi_cmd = zc_065.zc_inject_record.zi_cmd;
+	zc->zc_inject_record.zi_pad = zc_065.zc_inject_record.zi_pad;
+
+	zc->zc_defer_destroy = zc_065.zc_defer_destroy;
+	zc->zc_flags = zc_065.zc_flags;
+	zc->zc_action_handle = zc_065.zc_action_handle;
+	zc->zc_cleanup_fd = zc_065.zc_cleanup_fd;
+	zc->zc_simple = zc_065.zc_simple;
+	memcpy(zc->zc_pad, zc_065.zc_pad, sizeof zc->zc_pad);
+	zc->zc_sendobj = zc_065.zc_sendobj;
+	zc->zc_fromobj = zc_065.zc_fromobj;
+	zc->zc_createtxg = zc_065.zc_createtxg;
+	zc->zc_stat = zc_065.zc_stat;
+
+	return rc;
 }
 
 /*
diff -Nru zfs-linux-0.7.13/lib/libzfs/Makefile.am zfs-linux-0.7.13/lib/libzfs/Makefile.am
--- zfs-linux-0.7.13/lib/libzfs/Makefile.am	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/lib/libzfs/Makefile.am	2019-04-19 06:54:36.000000000 +0000
@@ -31,6 +31,7 @@
 
 libzfs_la_LIBADD = \
 	$(top_builddir)/lib/libzfs_core/libzfs_core.la \
+	$(top_builddir)/lib/libuutil/libuutil.la \
 	$(top_builddir)/lib/libshare/libshare.la \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
 	$(top_builddir)/lib/libzpool/libzpool.la
diff -Nru zfs-linux-0.7.13/lib/libzfs_core/Makefile.am zfs-linux-0.7.13/lib/libzfs_core/Makefile.am
--- zfs-linux-0.7.13/lib/libzfs_core/Makefile.am	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/lib/libzfs_core/Makefile.am	2019-04-19 06:54:36.000000000 +0000
@@ -16,7 +16,8 @@
 	$(KERNEL_C)
 
 libzfs_core_la_LIBADD = \
-	$(top_builddir)/lib/libnvpair/libnvpair.la
+	$(top_builddir)/lib/libnvpair/libnvpair.la \
+	$(top_builddir)/lib/libuutil/libuutil.la
 
 libzfs_core_la_LDFLAGS = -version-info 1:0:0
diff -Nru zfs-linux-0.7.13/lib/libzpool/util.c zfs-linux-0.7.13/lib/libzpool/util.c
--- zfs-linux-0.7.13/lib/libzpool/util.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/lib/libzpool/util.c	2019-04-19 06:54:36.000000000 +0000
@@ -118,7 +118,7 @@
 	    vs->vs_space ? 6 : 0, vs->vs_space ? avail : "",
 	    rops, wops, rbytes, wbytes, rerr, werr, cerr);
 
-	free(v0);
+	umem_free(v0, sizeof (*v0));
 
 	if (nvlist_lookup_nvlist_array(nv, ctype, &child, &children) != 0)
 		return;
@@ -137,7 +137,7 @@
 		if (nvlist_lookup_uint64(cnv, ZPOOL_CONFIG_NPARITY, &np) == 0)
 			tname[strlen(tname)] = '0' + np;
 		show_vdev_stats(tname, ctype, cnv, indent + 2);
-		free(tname);
+		umem_free(tname, len);
 	}
 }
diff -Nru zfs-linux-0.7.13/man/man5/zfs-module-parameters.5 zfs-linux-0.7.13/man/man5/zfs-module-parameters.5
--- zfs-linux-0.7.13/man/man5/zfs-module-parameters.5	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/man/man5/zfs-module-parameters.5	2019-04-19 06:54:36.000000000 +0000
@@ -1802,6 +1802,18 @@
 .sp
 .ne 2
 .na
+\fBzfs_sync_taskq_batch_pct\fR (int)
+.ad
+.RS 12n
+This controls the number of threads used by the dp_sync_taskq. The default
+value of 75% will create a maximum of one thread per cpu.
+.sp
+Default value: \fB75\fR.
+.RE
+
+.sp
+.ne 2
+.na
 \fBzfs_sync_pass_dont_compress\fR (int)
 .ad
 .RS 12n
@@ -2077,6 +2089,42 @@
 .RE
 
 .sp
+.ne 2
+.na
+\fBzfs_zil_clean_taskq_maxalloc\fR (int)
+.ad
+.RS 12n
+The maximum number of taskq entries that are allowed to be cached. When this
+limit is exceeded itx's will be cleaned synchronously.
+.sp
+Default value: \fB1048576\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_zil_clean_taskq_minalloc\fR (int)
+.ad
+.RS 12n
+The number of taskq entries that are pre-populated when the taskq is first
+created and are immediately available for use.
+.sp
+Default value: \fB1024\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBzfs_zil_clean_taskq_nthr_pct\fR (int)
+.ad
+.RS 12n
+This controls the number of threads used by the dp_zil_clean_taskq. The
+default value of 100% will create a maximum of one thread per cpu.
+.sp
+Default value: \fB100\fR.
+.RE
+
+.sp
 .ne 2
 .na
 \fBzil_replay_disable\fR (int)
diff -Nru zfs-linux-0.7.13/man/man8/zfs.8 zfs-linux-0.7.13/man/man8/zfs.8
--- zfs-linux-0.7.13/man/man8/zfs.8	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/man/man8/zfs.8	2019-04-19 06:54:36.000000000 +0000
@@ -1057,6 +1057,10 @@
 for more information on these algorithms.
 .Pp
 Changing this property affects only newly-written data.
+.Pp
+Salted checksum algorithms
+.Pq Cm edonr , skein
+are currently not supported for any filesystem on the boot pools.
 .It Xo
 .Sy compression Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy gzip Ns | Ns
 .Sy gzip- Ns Em N Ns | Ns Sy lz4 Ns | Ns Sy lzjb Ns | Ns Sy zle
@@ -2792,16 +2796,24 @@
 .Op Fl o Ar options
 .Fl a | Ar filesystem
 .Xc
-Mounts ZFS file systems.
+Mounts a ZFS file system on the path given by its
+.Sy mountpoint
+property, provided the path exists and is empty. If
+.Sy mountpoint
+is set to
+.Em legacy ,
+the file system should instead be mounted using
+.Xr mount 8 .
 .Bl -tag -width "-O"
 .It Fl O
-Perform an overlay mount.
+Perform an overlay mount, which allows mounting on a non-empty
+.Sy mountpoint .
 See
 .Xr mount 8
 for more information.
 .It Fl a
 Mount all available ZFS file systems.
-Invoked automatically as part of the boot process.
+Invoked automatically as part of the boot process if configured.
 .It Ar filesystem
 Mount the specified filesystem.
 .It Fl o Ar options
diff -Nru zfs-linux-0.7.13/man/man8/zpool.8 zfs-linux-0.7.13/man/man8/zpool.8
--- zfs-linux-0.7.13/man/man8/zpool.8	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/man/man8/zpool.8	2019-04-19 06:54:36.000000000 +0000
@@ -1431,10 +1431,15 @@
 .Oo Oo Ar pool Ns ... Oc Ns | Ns Oo Ar pool vdev Ns ... Oc Ns | Ns Oo Ar vdev Ns ... Oc Oc
 .Op Ar interval Op Ar count
 .Xc
-Displays I/O statistics for the given pools/vdevs. You can pass in a
-list of pools, a pool and list of vdevs in that pool, or a list of any
-vdevs from any pool. If no items are specified, statistics for every
-pool in the system are shown.
+Displays logical I/O statistics for the given pools/vdevs. Physical I/Os may
+be observed via
+.Xr iostat 1 .
+If writes are located nearby, they may be merged into a single
+larger operation. Additional I/O may be generated depending on the level of
+vdev redundancy.
+To filter output, you may pass in a list of pools, a pool and list of vdevs
+in that pool, or a list of any vdevs from any pool. If no items are specified,
+statistics for every pool in the system are shown.
 When given an
 .Ar interval ,
 the statistics are printed every
diff -Nru zfs-linux-0.7.13/module/icp/asm-x86_64/aes/aes_intel.S zfs-linux-0.7.13/module/icp/asm-x86_64/aes/aes_intel.S
--- zfs-linux-0.7.13/module/icp/asm-x86_64/aes/aes_intel.S	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/icp/asm-x86_64/aes/aes_intel.S	2019-04-19 06:54:36.000000000 +0000
@@ -207,7 +207,7 @@
 	shufps	$0b10001100, %xmm0, %xmm4
 	pxor	%xmm4, %xmm0
 	pxor	%xmm1, %xmm0
-	movaps	%xmm0, (%rcx)
+	movups	%xmm0, (%rcx)
 	add	$0x10, %rcx
 	ret
 	nop
@@ -224,18 +224,18 @@
 	pxor	%xmm4, %xmm0
 	pxor	%xmm1, %xmm0
 
-	movaps	%xmm2, %xmm5
-	movaps	%xmm2, %xmm6
+	movups	%xmm2, %xmm5
+	movups	%xmm2, %xmm6
 	pslldq	$4, %xmm5
 	pshufd	$0b11111111, %xmm0, %xmm3
 	pxor	%xmm3, %xmm2
 	pxor	%xmm5, %xmm2
 
-	movaps	%xmm0, %xmm1
+	movups	%xmm0, %xmm1
 	shufps	$0b01000100, %xmm0, %xmm6
-	movaps	%xmm6, (%rcx)
+	movups	%xmm6, (%rcx)
 	shufps	$0b01001110, %xmm2, %xmm1
-	movaps	%xmm1, 0x10(%rcx)
+	movups	%xmm1, 0x10(%rcx)
 	add	$0x20, %rcx
 	ret
 	SET_SIZE(_key_expansion_192a)
@@ -250,13 +250,13 @@
 	pxor	%xmm4, %xmm0
 	pxor	%xmm1, %xmm0
 
-	movaps	%xmm2, %xmm5
+	movups	%xmm2, %xmm5
 	pslldq	$4, %xmm5
 	pshufd	$0b11111111, %xmm0, %xmm3
 	pxor	%xmm3, %xmm2
 	pxor	%xmm5, %xmm2
 
-	movaps	%xmm0, (%rcx)
+	movups	%xmm0, (%rcx)
 	add	$0x10, %rcx
 	ret
 	SET_SIZE(_key_expansion_192b)
@@ -270,7 +270,7 @@
 	shufps	$0b10001100, %xmm2, %xmm4
 	pxor	%xmm4, %xmm2
 	pxor	%xmm1, %xmm2
-	movaps	%xmm2, (%rcx)
+	movups	%xmm2, (%rcx)
 	add	$0x10, %rcx
 	ret
 	SET_SIZE(_key_expansion_256b)
@@ -327,7 +327,7 @@
 	jz	.Lenc_key_invalid_param
 
 	movups	(%USERCIPHERKEY), %xmm0	// user key (first 16 bytes)
-	movaps	%xmm0, (%AESKEY)
+	movups	%xmm0, (%AESKEY)
 	lea	0x10(%AESKEY), %rcx	// key addr
 	pxor	%xmm4, %xmm4		// xmm4 is assumed 0 in _key_expansion_x
@@ -341,7 +341,7 @@
 #endif	/* OPENSSL_INTERFACE */
 
 	movups	0x10(%USERCIPHERKEY), %xmm2	// other user key (2nd 16 bytes)
-	movaps	%xmm2, (%rcx)
+	movups	%xmm2, (%rcx)
 	add	$0x10, %rcx
 
 	aeskeygenassist $0x1, %xmm2, %xmm1	// expand the key
@@ -525,10 +525,10 @@
 .align 4
 .Ldec_key_reorder_loop:
-	movaps	(%AESKEY), %xmm0
-	movaps	(%ROUNDS64), %xmm1
-	movaps	%xmm0, (%ROUNDS64)
-	movaps	%xmm1, (%AESKEY)
+	movups	(%AESKEY), %xmm0
+	movups	(%ROUNDS64), %xmm1
+	movups	%xmm0, (%ROUNDS64)
+	movups	%xmm1, (%AESKEY)
 	lea	0x10(%AESKEY), %AESKEY
 	lea	-0x10(%ROUNDS64), %ROUNDS64
 	cmp	%AESKEY, %ROUNDS64
@@ -536,11 +536,11 @@
 .align 4
 .Ldec_key_inv_loop:
-	movaps	(%rcx), %xmm0
+	movups	(%rcx), %xmm0
 	// Convert an encryption round key to a form usable for decryption
 	// with the "AES Inverse Mix Columns" instruction
 	aesimc	%xmm0, %xmm1
-	movaps	%xmm1, (%rcx)
+	movups	%xmm1, (%rcx)
 	lea	0x10(%rcx), %rcx
 	cmp	%ENDAESKEY, %rcx
 	jnz	.Ldec_key_inv_loop
@@ -602,7 +602,7 @@
 ENTRY_NP(aes_encrypt_intel)
 
 	movups	(%INP), %STATE		// input
-	movaps	(%KEYP), %KEY		// key
+	movups	(%KEYP), %KEY		// key
 #ifdef	OPENSSL_INTERFACE
 	mov	240(%KEYP), %NROUNDS32	// round count
 #else	/* OpenSolaris Interface */
@@ -618,41 +618,41 @@
 
 	// AES 256
 	lea	0x20(%KEYP), %KEYP
-	movaps	-0x60(%KEYP), %KEY
+	movups	-0x60(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	-0x50(%KEYP), %KEY
+	movups	-0x50(%KEYP), %KEY
 	aesenc	%KEY, %STATE
 
 .align 4
 .Lenc192:
 	// AES 192 and 256
-	movaps	-0x40(%KEYP), %KEY
+	movups	-0x40(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	-0x30(%KEYP), %KEY
+	movups	-0x30(%KEYP), %KEY
 	aesenc	%KEY, %STATE
 
 .align 4
 .Lenc128:
 	// AES 128, 192, and 256
-	movaps	-0x20(%KEYP), %KEY
+	movups	-0x20(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	-0x10(%KEYP), %KEY
+	movups	-0x10(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	(%KEYP), %KEY
+	movups	(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	0x10(%KEYP), %KEY
+	movups	0x10(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	0x20(%KEYP), %KEY
+	movups	0x20(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	0x30(%KEYP), %KEY
+	movups	0x30(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	0x40(%KEYP), %KEY
+	movups	0x40(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	0x50(%KEYP), %KEY
+	movups	0x50(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	0x60(%KEYP), %KEY
+	movups	0x60(%KEYP), %KEY
 	aesenc	%KEY, %STATE
-	movaps	0x70(%KEYP), %KEY
+	movups	0x70(%KEYP), %KEY
 	aesenclast %KEY, %STATE		// last round
 	movups	%STATE, (%OUTP)		// output
@@ -685,7 +685,7 @@
 ENTRY_NP(aes_decrypt_intel)
 
 	movups	(%INP), %STATE		// input
-	movaps	(%KEYP), %KEY		// key
+	movups	(%KEYP), %KEY		// key
 #ifdef	OPENSSL_INTERFACE
 	mov	240(%KEYP), %NROUNDS32	// round count
 #else	/* OpenSolaris Interface */
@@ -701,41 +701,41 @@
 
 	// AES 256
 	lea	0x20(%KEYP), %KEYP
-	movaps	-0x60(%KEYP), %KEY
+	movups	-0x60(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	-0x50(%KEYP), %KEY
+	movups	-0x50(%KEYP), %KEY
 	aesdec	%KEY, %STATE
 
 .align 4
 .Ldec192:
 	// AES 192 and 256
-	movaps	-0x40(%KEYP), %KEY
+	movups	-0x40(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	-0x30(%KEYP), %KEY
+	movups	-0x30(%KEYP), %KEY
 	aesdec	%KEY, %STATE
 
 .align 4
 .Ldec128:
 	// AES 128, 192, and 256
-	movaps	-0x20(%KEYP), %KEY
+	movups	-0x20(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	-0x10(%KEYP), %KEY
+	movups	-0x10(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	(%KEYP), %KEY
+	movups	(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	0x10(%KEYP), %KEY
+	movups	0x10(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	0x20(%KEYP), %KEY
+	movups	0x20(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	0x30(%KEYP), %KEY
+	movups	0x30(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	0x40(%KEYP), %KEY
+	movups	0x40(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	0x50(%KEYP), %KEY
+	movups	0x50(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	0x60(%KEYP), %KEY
+	movups	0x60(%KEYP), %KEY
 	aesdec	%KEY, %STATE
-	movaps	0x70(%KEYP), %KEY
+	movups	0x70(%KEYP), %KEY
 	aesdeclast %KEY, %STATE		// last round
 	movups	%STATE, (%OUTP)		// output
diff -Nru zfs-linux-0.7.13/module/icp/asm-x86_64/modes/gcm_intel.S zfs-linux-0.7.13/module/icp/asm-x86_64/modes/gcm_intel.S
--- zfs-linux-0.7.13/module/icp/asm-x86_64/modes/gcm_intel.S	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/icp/asm-x86_64/modes/gcm_intel.S	2019-04-19 06:54:36.000000000 +0000
@@ -150,7 +150,7 @@
 	// Byte swap 16-byte input
 	//
 	lea	.Lbyte_swap16_mask(%rip), %rax
-	movaps	(%rax), %xmm10
+	movups	(%rax), %xmm10
 	pshufb	%xmm10, %xmm0
 	pshufb	%xmm10, %xmm1
diff -Nru zfs-linux-0.7.13/module/icp/spi/kcf_spi.c zfs-linux-0.7.13/module/icp/spi/kcf_spi.c
--- zfs-linux-0.7.13/module/icp/spi/kcf_spi.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/icp/spi/kcf_spi.c	2019-04-19 06:54:36.000000000 +0000
@@ -111,7 +111,7 @@
 crypto_register_provider(crypto_provider_info_t *info,
     crypto_kcf_provider_handle_t *handle)
 {
-	char ks_name[KSTAT_STRLEN];
+	char *ks_name;
 	kcf_provider_desc_t *prov_desc = NULL;
 	int ret = CRYPTO_ARGUMENTS_BAD;
@@ -238,12 +238,12 @@
	 * This kstat is deleted, when the provider unregisters.
	 */
 	if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
-		(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
+		ks_name = kmem_asprintf("%s_%s",
 		    "NONAME", "provider_stats");
 	} else {
-		(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
-		    "NONAME", 0,
-		    prov_desc->pd_prov_id, "provider_stats");
+		ks_name = kmem_asprintf("%s_%d_%u_%s",
+		    "NONAME", 0, prov_desc->pd_prov_id,
+		    "provider_stats");
 	}
 
 	prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
@@ -261,6 +261,7 @@
 		prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
 		kstat_install(prov_desc->pd_kstat);
 	}
+	strfree(ks_name);
 }
 
 if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
diff -Nru zfs-linux-0.7.13/module/nvpair/nvpair.c zfs-linux-0.7.13/module/nvpair/nvpair.c
--- zfs-linux-0.7.13/module/nvpair/nvpair.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/nvpair/nvpair.c	2019-04-19 06:54:36.000000000 +0000
@@ -21,7 +21,7 @@
 /*
  * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2015, 2017 by Delphix. All rights reserved.
  */
 
 #include
@@ -916,6 +916,8 @@
 	/* calculate sizes of the nvpair elements and the nvpair itself */
 	name_sz = strlen(name) + 1;
+	if (name_sz >= 1ULL << (sizeof (nvp->nvp_name_sz) * NBBY - 1))
+		return (EINVAL);
 
 	nvp_sz = NVP_SIZE_CALC(name_sz, value_sz);
@@ -1242,6 +1244,7 @@
 	data_type_t type = NVP_TYPE(nvp);
 
 	if ((type == DATA_TYPE_BYTE_ARRAY) ||
+	    (type == DATA_TYPE_INT8_ARRAY) ||
 	    (type == DATA_TYPE_UINT8_ARRAY) ||
 	    (type == DATA_TYPE_INT16_ARRAY) ||
 	    (type == DATA_TYPE_UINT16_ARRAY) ||
@@ -2200,8 +2203,10 @@
 
 	nvlist_init(embedded, embedded->nvl_nvflag, priv);
 
-	if (nvs->nvs_recursion >= nvpair_max_recursion)
+	if (nvs->nvs_recursion >= nvpair_max_recursion) {
+		nvlist_free(embedded);
 		return (EINVAL);
+	}
 	nvs->nvs_recursion++;
 	if ((err = nvs_operation(nvs, embedded, NULL)) != 0)
 		nvlist_free(embedded);
diff -Nru zfs-linux-0.7.13/module/zfs/abd.c zfs-linux-0.7.13/module/zfs/abd.c
--- zfs-linux-0.7.13/module/zfs/abd.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/abd.c	2019-04-19 06:54:36.000000000 +0000
@@ -571,7 +571,7 @@
 abd_free_struct(abd_t *abd)
 {
 	kmem_cache_free(abd_cache, abd);
-	ABDSTAT_INCR(abdstat_struct_size, -sizeof (abd_t));
+	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
 }
 
 /*
@@ -618,7 +618,7 @@
 	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
 	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
 	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
-	    abd->abd_size - P2ROUNDUP(abd->abd_size, PAGESIZE));
+	    (int)abd->abd_size - (int)P2ROUNDUP(abd->abd_size, PAGESIZE));
 
 	abd_free_struct(abd);
 }
diff -Nru zfs-linux-0.7.13/module/zfs/bpobj.c zfs-linux-0.7.13/module/zfs/bpobj.c
--- zfs-linux-0.7.13/module/zfs/bpobj.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/bpobj.c	2019-04-19 06:54:36.000000000 +0000
@@ -261,7 +261,7 @@
 	}
 	if (free) {
 		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os, bpo->bpo_object,
-		    (i + 1) * sizeof (blkptr_t), -1ULL, tx));
+		    (i + 1) * sizeof (blkptr_t), DMU_OBJECT_END, tx));
 	}
 	if (err || !bpo->bpo_havesubobj || bpo->bpo_phys->bpo_subobjs == 0)
 		goto out;
@@ -339,7 +339,7 @@
 		if (free) {
 			VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os,
 			    bpo->bpo_phys->bpo_subobjs,
-			    (i + 1) * sizeof (uint64_t), -1ULL, tx));
+			    (i + 1) * sizeof (uint64_t), DMU_OBJECT_END, tx));
 		}
 
 out:
diff -Nru zfs-linux-0.7.13/module/zfs/dmu.c zfs-linux-0.7.13/module/zfs/dmu.c
--- zfs-linux-0.7.13/module/zfs/dmu.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/dmu.c	2019-04-19 06:54:36.000000000 +0000
@@ -887,7 +887,7 @@
 	if (err)
 		return (err);
 	ASSERT(offset < UINT64_MAX);
-	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
+	ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
 	dnode_free_range(dn, offset, size, tx);
 	dnode_rele(dn, FTAG);
 	return (0);
diff -Nru zfs-linux-0.7.13/module/zfs/dmu_send.c zfs-linux-0.7.13/module/zfs/dmu_send.c
--- zfs-linux-0.7.13/module/zfs/dmu_send.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/dmu_send.c	2019-04-19 06:54:36.000000000 +0000
@@ -224,9 +224,6 @@
 	    (object == dsp->dsa_last_data_object &&
 	    offset > dsp->dsa_last_data_offset));
 
-	if (length != -1ULL && offset + length < offset)
-		length = -1ULL;
-
 	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
@@ -243,19 +240,22 @@
 	if (dsp->dsa_pending_op == PENDING_FREE) {
 		/*
-		 * There should never be a PENDING_FREE if length is -1
-		 * (because dump_dnode is the only place where this
-		 * function is called with a -1, and only after flushing
-		 * any pending record).
+		 * There should never be a PENDING_FREE if length is
+		 * DMU_OBJECT_END (because dump_dnode is the only place where
+		 * this function is called with a DMU_OBJECT_END, and only after
+		 * flushing any pending record).
 		 */
-		ASSERT(length != -1ULL);
+		ASSERT(length != DMU_OBJECT_END);
 		/*
 		 * Check to see whether this free block can be aggregated
 		 * with pending one.
 		 */
 		if (drrf->drr_object == object && drrf->drr_offset +
 		    drrf->drr_length == offset) {
-			drrf->drr_length += length;
+			if (offset + length < offset)
+				drrf->drr_length = DMU_OBJECT_END;
+			else
+				drrf->drr_length += length;
 			return (0);
 		} else {
 			/* not a continuation.  Push out pending record */
@@ -269,9 +269,12 @@
 	dsp->dsa_drr->drr_type = DRR_FREE;
 	drrf->drr_object = object;
 	drrf->drr_offset = offset;
-	drrf->drr_length = length;
+	if (offset + length < offset)
+		drrf->drr_length = DMU_OBJECT_END;
+	else
+		drrf->drr_length = length;
 	drrf->drr_toguid = dsp->dsa_toguid;
-	if (length == -1ULL) {
+	if (length == DMU_OBJECT_END) {
 		if (dump_record(dsp, NULL, 0) != 0)
 			return (SET_ERROR(EINTR));
 	} else {
@@ -530,7 +533,7 @@
 
 	/* Free anything past the end of the file. */
 	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
-	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
+	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
 		return (SET_ERROR(EINTR));
 	if (dsp->dsa_err != 0)
 		return (SET_ERROR(EINTR));
@@ -666,7 +669,9 @@
 	} else if (BP_IS_HOLE(bp)) {
 		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
 		uint64_t offset = zb->zb_blkid * span;
-		err = dump_free(dsa, zb->zb_object, offset, span);
+		/* Don't dump free records for offsets > DMU_OBJECT_END */
+		if (zb->zb_blkid == 0 || span <= DMU_OBJECT_END / zb->zb_blkid)
+			err = dump_free(dsa, zb->zb_object, offset, span);
 	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
 		return (0);
 	} else if (type == DMU_OT_DNODE) {
@@ -2562,7 +2567,7 @@
 {
 	int err;
 
-	if (drrf->drr_length != -1ULL &&
+	if (drrf->drr_length != DMU_OBJECT_END &&
 	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
 		return (SET_ERROR(EINVAL));
diff -Nru zfs-linux-0.7.13/module/zfs/dsl_pool.c zfs-linux-0.7.13/module/zfs/dsl_pool.c
--- zfs-linux-0.7.13/module/zfs/dsl_pool.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/dsl_pool.c	2019-04-19 06:54:36.000000000 +0000
@@ -135,6 +135,36 @@
  */
 int zfs_sync_taskq_batch_pct = 75;
 
+/*
+ * These tunables determine the behavior of how zil_itxg_clean() is
+ * called via zil_clean() in the context of spa_sync(). When an itxg
+ * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
+ * If the dispatch fails, the call to zil_itxg_clean() will occur
+ * synchronously in the context of spa_sync(), which can negatively
+ * impact the performance of spa_sync() (e.g. in the case of the itxg
+ * list having a large number of itxs that needs to be cleaned).
+ *
+ * Thus, these tunables can be used to manipulate the behavior of the
+ * taskq used by zil_clean(); they determine the number of taskq entries
+ * that are pre-populated when the taskq is first created (via the
+ * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
+ * taskq entries that are cached after an on-demand allocation (via the
+ * "zfs_zil_clean_taskq_maxalloc").
+ *
+ * The idea being, we want to try reasonably hard to ensure there will
+ * already be a taskq entry pre-allocated by the time that it is needed
+ * by zil_clean(). This way, we can avoid the possibility of an
+ * on-demand allocation of a new taskq entry from failing, which would
+ * result in zil_itxg_clean() being called synchronously from zil_clean()
+ * (which can adversely affect performance of spa_sync()).
+ *
+ * Additionally, the number of threads used by the taskq can be
+ * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
+ */
+int zfs_zil_clean_taskq_nthr_pct = 100;
+int zfs_zil_clean_taskq_minalloc = 1024;
+int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
+
 int
 dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
 {
@@ -176,6 +206,12 @@
 	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
 	    TASKQ_THREADS_CPU_PCT);
 
+	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
+	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
+	    zfs_zil_clean_taskq_minalloc,
+	    zfs_zil_clean_taskq_maxalloc,
+	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
+
 	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
 	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
 
@@ -334,6 +370,7 @@
 	txg_list_destroy(&dp->dp_sync_tasks);
 	txg_list_destroy(&dp->dp_dirty_dirs);
 
+	taskq_destroy(dp->dp_zil_clean_taskq);
 	taskq_destroy(dp->dp_sync_taskq);
 
 	/*
@@ -1142,5 +1179,18 @@
 module_param(zfs_sync_taskq_batch_pct, int, 0644);
 MODULE_PARM_DESC(zfs_sync_taskq_batch_pct,
 	"max percent of CPUs that are used to sync dirty data");
+
+module_param(zfs_zil_clean_taskq_nthr_pct, int, 0644);
+MODULE_PARM_DESC(zfs_zil_clean_taskq_nthr_pct,
+	"max percent of CPUs that are used per dp_sync_taskq");
+
+module_param(zfs_zil_clean_taskq_minalloc, int, 0644);
+MODULE_PARM_DESC(zfs_zil_clean_taskq_minalloc,
+	"number of taskq entries that are pre-populated");
+
+module_param(zfs_zil_clean_taskq_maxalloc, int, 0644);
+MODULE_PARM_DESC(zfs_zil_clean_taskq_maxalloc,
+	"max number of taskq entries that are cached");
+
 /* END CSTYLED */
 #endif
diff -Nru zfs-linux-0.7.13/module/zfs/spa.c zfs-linux-0.7.13/module/zfs/spa.c
--- zfs-linux-0.7.13/module/zfs/spa.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/spa.c	2019-04-19 06:54:36.000000000 +0000
@@ -1574,7 +1574,7 @@
 static void
 spa_load_l2cache(spa_t *spa)
 {
-	nvlist_t **l2cache;
+	nvlist_t **l2cache = NULL;
 	uint_t nl2cache;
 	int i, j, oldnvdevs;
 	uint64_t guid;
@@ -1658,7 +1658,9 @@
 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
 	    DATA_TYPE_NVLIST_ARRAY) == 0);
 
-	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
+	if (sav->sav_count > 0)
+		l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
+		    KM_SLEEP);
 	for (i = 0; i < sav->sav_count; i++)
 		l2cache[i] = vdev_config_generate(spa,
 		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
diff -Nru zfs-linux-0.7.13/module/zfs/spa_stats.c zfs-linux-0.7.13/module/zfs/spa_stats.c
--- zfs-linux-0.7.13/module/zfs/spa_stats.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/spa_stats.c	2019-04-19 06:54:36.000000000 +0000
@@ -144,7 +144,7 @@
 spa_read_history_init(spa_t *spa)
 {
 	spa_stats_history_t *ssh = &spa->spa_stats.read_history;
-	char name[KSTAT_STRLEN];
+	char *name;
 	kstat_t *ksp;
 
 	mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
@@ -155,7 +155,7 @@
 	ssh->size = 0;
 	ssh->private = NULL;
 
-	(void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
+	name = kmem_asprintf("zfs/%s", spa_name(spa));
 
 	ksp = kstat_create(name, 0, "reads", "misc",
 	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
@@ -170,6 +170,7 @@
 		    spa_read_history_data, spa_read_history_addr);
 		kstat_install(ksp);
 	}
+	strfree(name);
 }
 
 static void
@@ -367,7 +368,7 @@
 spa_txg_history_init(spa_t *spa)
 {
 	spa_stats_history_t *ssh = &spa->spa_stats.txg_history;
-	char name[KSTAT_STRLEN];
+	char *name;
 	kstat_t *ksp;
 
 	mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
@@ -378,7 +379,7 @@
 	ssh->size = 0;
 	ssh->private = NULL;
 
-	(void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
+	name = kmem_asprintf("zfs/%s", spa_name(spa));
 
 	ksp = kstat_create(name, 0, "txgs", "misc",
 	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
@@ -393,6 +394,7 @@
 		    spa_txg_history_data, spa_txg_history_addr);
 		kstat_install(ksp);
 	}
+	strfree(name);
 }
 
 static void
@@ -600,7 +602,7 @@
 spa_tx_assign_init(spa_t *spa)
 {
 	spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
-	char name[KSTAT_STRLEN];
+	char *name;
 	kstat_named_t *ks;
 	kstat_t *ksp;
 	int i;
@@ -611,7 +613,7 @@
 	ssh->size = ssh->count * sizeof (kstat_named_t);
 	ssh->private = kmem_alloc(ssh->size, KM_SLEEP);
 
-	(void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
+	name = kmem_asprintf("zfs/%s", spa_name(spa));
 
 	for (i = 0; i < ssh->count; i++) {
 		ks = &((kstat_named_t *)ssh->private)[i];
@@ -634,6 +636,7 @@
 		ksp->ks_update = spa_tx_assign_update;
 		kstat_install(ksp);
 	}
+	strfree(name);
 }
 
 static void
@@ -680,12 +683,12 @@
 spa_io_history_init(spa_t *spa)
 {
 	spa_stats_history_t *ssh = &spa->spa_stats.io_history;
-	char name[KSTAT_STRLEN];
+	char *name;
 	kstat_t *ksp;
 
 	mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
 
-	(void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
+	name = kmem_asprintf("zfs/%s", spa_name(spa));
 
 	ksp = kstat_create(name, 0, "io", "disk", KSTAT_TYPE_IO, 1, 0);
 	ssh->kstat = ksp;
@@ -696,6 +699,7 @@
 		ksp->ks_update = spa_io_history_update;
 		kstat_install(ksp);
 	}
+	strfree(name);
 }
 
 static void
@@ -825,7 +829,7 @@
 spa_mmp_history_init(spa_t *spa)
 {
 	spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
-	char name[KSTAT_STRLEN];
+	char *name;
 	kstat_t *ksp;
 
 	mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
@@ -836,7 +840,7 @@
 	ssh->size = 0;
 	ssh->private = NULL;
 
-	(void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
+	name = kmem_asprintf("zfs/%s", spa_name(spa));
 
 	ksp = kstat_create(name, 0, "multihost", "misc",
 	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
@@ -851,6 +855,7 @@
 		    spa_mmp_history_data, spa_mmp_history_addr);
 		kstat_install(ksp);
 	}
+	strfree(name);
 }
 
 static void
diff -Nru zfs-linux-0.7.13/module/zfs/vdev_disk.c zfs-linux-0.7.13/module/zfs/vdev_disk.c
--- zfs-linux-0.7.13/module/zfs/vdev_disk.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/vdev_disk.c	2019-04-19 06:54:36.000000000 +0000
@@ -100,7 +100,7 @@
 vdev_disk_error(zio_t *zio)
 {
 #ifdef ZFS_DEBUG
-	printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
+	printk(KERN_WARNING "ZFS: zio error=%d type=%d offset=%llu size=%llu "
 	    "flags=%x\n", zio->io_error, zio->io_type,
 	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
 	    zio->io_flags);
diff -Nru zfs-linux-0.7.13/module/zfs/zfs_fm.c zfs-linux-0.7.13/module/zfs/zfs_fm.c
--- zfs-linux-0.7.13/module/zfs/zfs_fm.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/zfs_fm.c	2019-04-19 06:54:36.000000000 +0000
@@ -455,8 +455,8 @@
 typedef struct zfs_ecksum_info {
 	/* histograms of set and cleared bits by bit number in a 64-bit word */
-	uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
-	uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];
+	uint32_t zei_histogram_set[sizeof (uint64_t) * NBBY];
+	uint32_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];
 
 	/* inline arrays of bits set and cleared. */
 	uint64_t zei_bits_set[ZFM_MAX_INLINE];
@@ -481,7 +481,7 @@
 } zfs_ecksum_info_t;
 
 static void
-update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
+update_histogram(uint64_t value_arg, uint32_t *hist, uint32_t *count)
 {
 	size_t i;
 	size_t bits = 0;
@@ -490,8 +490,7 @@
 	/* We store the bits in big-endian (largest-first) order */
 	for (i = 0; i < 64; i++) {
 		if (value & (1ull << i)) {
-			if (hist[63 - i] < UINT16_MAX)
-				hist[63 - i]++;
+			hist[63 - i]++;
 			++bits;
 		}
 	}
@@ -649,6 +648,7 @@
 	if (badabd == NULL || goodabd == NULL)
 		return (eip);
 
+	ASSERT3U(nui64s, <=, UINT32_MAX);
 	ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
 	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
 	ASSERT3U(size, <=, UINT32_MAX);
@@ -759,10 +759,10 @@
 	} else {
 		fm_payload_set(ereport,
 		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
-		    DATA_TYPE_UINT16_ARRAY,
+		    DATA_TYPE_UINT32_ARRAY,
 		    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
 		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
-		    DATA_TYPE_UINT16_ARRAY,
+		    DATA_TYPE_UINT32_ARRAY,
 		    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
 		    NULL);
 	}
diff -Nru zfs-linux-0.7.13/module/zfs/zfs_ioctl.c zfs-linux-0.7.13/module/zfs/zfs_ioctl.c
--- zfs-linux-0.7.13/module/zfs/zfs_ioctl.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/zfs_ioctl.c	2019-04-19 06:54:36.000000000 +0000
@@ -1474,6 +1474,7 @@
 	nvlist_t *config, *props = NULL;
 	nvlist_t *rootprops = NULL;
 	nvlist_t *zplprops = NULL;
+	char *spa_name = zc->zc_name;
 
 	if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
 	    zc->zc_iflags, &config)))
@@ -1489,6 +1490,7 @@
 	if (props) {
 		nvlist_t *nvl = NULL;
 		uint64_t version = SPA_VERSION;
+		char *tname;
 
 		(void) nvlist_lookup_uint64(props,
 		    zpool_prop_to_name(ZPOOL_PROP_VERSION), &version);
@@ -1511,6 +1513,10 @@
 		    zplprops, NULL);
 		if (error != 0)
 			goto pool_props_bad;
+
+		if (nvlist_lookup_string(props,
+		    zpool_prop_to_name(ZPOOL_PROP_TNAME), &tname) == 0)
+			spa_name = tname;
 	}
 
 	error = spa_create(zc->zc_name, config, props, zplprops);
@@ -1518,9 +1524,9 @@
 	/*
	 * Set the remaining root properties
	 */
-	if (!error && (error = zfs_set_prop_nvlist(zc->zc_name,
+	if (!error && (error = zfs_set_prop_nvlist(spa_name,
 	    ZPROP_SRC_LOCAL, rootprops, NULL)) != 0)
-		(void) spa_destroy(zc->zc_name);
+		(void) spa_destroy(spa_name);
 
 pool_props_bad:
 	nvlist_free(rootprops);
@@ -3738,9 +3744,12 @@
 	boolean_t recursive = zc->zc_cookie & 1;
 	char *at;
 
+	/* "zfs rename" from and to ...%recv datasets should both fail */
+	zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
 	zc->zc_value[sizeof (zc->zc_value) - 1] = '\0';
-	if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 ||
-	    strchr(zc->zc_value, '%'))
+	if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 ||
+	    dataset_namecheck(zc->zc_value, NULL, NULL) != 0 ||
+	    strchr(zc->zc_name, '%') || strchr(zc->zc_value, '%'))
 		return (SET_ERROR(EINVAL));
 
 	at = strchr(zc->zc_name, '@');
@@ -4993,6 +5002,11 @@
 	char *cp;
 	int error;
 
+	zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
+	if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 ||
+	    strchr(zc->zc_name, '%'))
+		return (SET_ERROR(EINVAL));
+
 	error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
 	if (error != 0)
 		return (error);
diff -Nru zfs-linux-0.7.13/module/zfs/zil.c zfs-linux-0.7.13/module/zfs/zil.c
--- zfs-linux-0.7.13/module/zfs/zil.c	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/module/zfs/zil.c	2019-04-19 06:54:36.000000000 +0000
@@ -1442,8 +1442,7 @@
 		return;
 	}
 	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
-	ASSERT(itxg->itxg_txg != 0);
-	ASSERT(zilog->zl_clean_taskq != NULL);
+
ASSERT3U(itxg->itxg_txg, !=, 0); clean_me = itxg->itxg_itxs; itxg->itxg_itxs = NULL; itxg->itxg_txg = 0; @@ -1454,8 +1453,11 @@ * free it in-line. This should be rare. Note, using TQ_SLEEP * created a bad performance problem. */ - if (taskq_dispatch(zilog->zl_clean_taskq, - (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0) + ASSERT3P(zilog->zl_dmu_pool, !=, NULL); + ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); + taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, + (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP); + if (id == TASKQID_INVALID) zil_itxg_clean(clean_me); } @@ -1928,13 +1930,10 @@ { zilog_t *zilog = dmu_objset_zil(os); - ASSERT(zilog->zl_clean_taskq == NULL); ASSERT(zilog->zl_get_data == NULL); ASSERT(list_is_empty(&zilog->zl_lwb_list)); zilog->zl_get_data = get_data; - zilog->zl_clean_taskq = taskq_create("zil_clean", 1, defclsyspri, - 2, 2, TASKQ_PREPOPULATE); return (zilog); } @@ -1969,8 +1968,6 @@ if (txg < spa_freeze_txg(zilog->zl_spa)) VERIFY(!zilog_is_dirty(zilog)); - taskq_destroy(zilog->zl_clean_taskq); - zilog->zl_clean_taskq = NULL; zilog->zl_get_data = NULL; /* diff -Nru zfs-linux-0.7.13/module/zfs/zio.c zfs-linux-0.7.13/module/zfs/zio.c --- zfs-linux-0.7.13/module/zfs/zio.c 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/module/zfs/zio.c 2019-04-19 06:54:36.000000000 +0000 @@ -1621,6 +1621,7 @@ if (NSEC_TO_TICK(diff) == 0) { /* Our delay is less than a jiffy - just spin */ zfs_sleep_until(zio->io_target_timestamp); + zio_interrupt(zio); } else { /* * Use taskq_dispatch_delay() in the place of diff -Nru zfs-linux-0.7.13/module/zfs/zvol.c zfs-linux-0.7.13/module/zfs/zvol.c --- zfs-linux-0.7.13/module/zfs/zvol.c 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/module/zfs/zvol.c 2019-04-19 06:54:36.000000000 +0000 @@ -984,6 +984,16 @@ zvol_write(zvr); } } else { + /* + * The SCST driver, and possibly others, may issue READ I/Os + * with a length of zero bytes. These empty I/Os contain no + * data and require no additional handling. 
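+	 * Completing them immediately with BIO_END_IO() is enough;
+	 * a bio must always be ended, or its submitter would wait
+	 * on it indefinitely.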
+ */ + if (size == 0) { + BIO_END_IO(bio, 0); + goto out; + } + zvr = kmem_alloc(sizeof (zv_request_t), KM_SLEEP); zvr->zv = zv; zvr->bio = bio; @@ -2226,12 +2236,6 @@ mutex_enter(&zv->zv_state_lock); - /* If in use, leave alone */ - if (zv->zv_open_count > 0) { - mutex_exit(&zv->zv_state_lock); - continue; - } - if (strcmp(zv->zv_name, oldname) == 0) { zvol_rename_minor(zv, newname); } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 && diff -Nru zfs-linux-0.7.13/tests/runfiles/linux.run zfs-linux-0.7.13/tests/runfiles/linux.run --- zfs-linux-0.7.13/tests/runfiles/linux.run 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/tests/runfiles/linux.run 2019-04-19 06:54:36.000000000 +0000 @@ -172,7 +172,7 @@ [tests/functional/cli_root/zfs_send] tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos', 'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos', - 'zfs_send_007_pos'] + 'zfs_send_007_pos', 'zfs_send_sparse'] tags = ['functional', 'cli_root', 'zfs_send'] [tests/functional/cli_root/zfs_set] @@ -253,7 +253,7 @@ 'zpool_create_features_001_pos', 'zpool_create_features_002_pos', 'zpool_create_features_003_pos', 'zpool_create_features_004_neg', 'zpool_create_features_005_pos', - 'create-o_ashift'] + 'create-o_ashift', 'zpool_create_tempname'] tags = ['functional', 'cli_root', 'zpool_create'] [tests/functional/cli_root/zpool_destroy] diff -Nru zfs-linux-0.7.13/tests/zfs-tests/include/libtest.shlib zfs-linux-0.7.13/tests/zfs-tests/include/libtest.shlib --- zfs-linux-0.7.13/tests/zfs-tests/include/libtest.shlib 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/tests/zfs-tests/include/libtest.shlib 2019-04-19 06:54:36.000000000 +0000 @@ -351,6 +351,41 @@ log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark } +# +# Create a temporary clone result of an interrupted resumable 'zfs receive' +# $1 Destination filesystem name. Must not exist, will be created as the result +# of this function along with its %recv temporary clone +# $2 Source filesystem name. Must not exist, will be created and destroyed +# +function create_recv_clone +{ + typeset recvfs="$1" + typeset sendfs="${2:-$TESTPOOL/create_recv_clone}" + typeset snap="$sendfs@snap1" + typeset incr="$sendfs@snap2" + typeset mountpoint="$TESTDIR/create_recv_clone" + typeset sendfile="$TESTDIR/create_recv_clone.zsnap" + + [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined." + + datasetexists $recvfs && log_fail "Recv filesystem must not exist." + datasetexists $sendfs && log_fail "Send filesystem must not exist." 
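+
+	# Send $snap in full, then deliver only the first 10K of the
+	# incremental stream: the failed 'zfs recv -s' below keeps the
+	# partial state in the $recvfs/%recv temporary clone.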
+ + log_must zfs create -o mountpoint="$mountpoint" $sendfs + log_must zfs snapshot $snap + log_must eval "zfs send $snap | zfs recv -u $recvfs" + log_must mkfile 1m "$mountpoint/data" + log_must zfs snapshot $incr + log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 > $sendfile" + log_mustnot eval "zfs recv -su $recvfs < $sendfile" + log_must zfs destroy -r $sendfs + log_must rm -f "$sendfile" + + if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then + log_fail "Error creating temporary $recvfs/%recv clone" + fi +} + function default_mirror_setup { default_mirror_setup_noexit $1 $2 $3 diff -Nru zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh --- zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh 2019-04-19 06:54:36.000000000 +0000 @@ -40,6 +40,7 @@ # pool, fs, snapshot,volume # (4) too many arguments. # (5) invalid options +# (6) temporary %recv datasets # # STRATEGY: # 1. Create an array of invalid arguments @@ -50,11 +51,14 @@ verify_runnable "both" snap=$TESTPOOL/$TESTFS@$TESTSNAP +clone=$TESTPOOL/$TESTCLONE +recvfs=$TESTPOOL/recvfs set -A args "" \ "$TESTPOOL/blah" \ "$TESTPOOL" "$TESTPOOL/$TESTFS" "$snap" \ "$TESTPOOL/$TESTVOL" "$TESTPOOL $TESTPOOL/$TESTFS" \ - "$clone $TESTPOOL/$TESTFS" "- $clone" "-? $clone" + "$clone $TESTPOOL/$TESTFS" "- $clone" "-? $clone" \ + "$recvfs/%recv" function cleanup { @@ -62,6 +66,10 @@ log_must zfs destroy $clone fi + if datasetexists $recvfs; then + log_must zfs destroy -r $recvfs + fi + if snapexists $snap; then destroy_snapshot $snap fi @@ -70,10 +78,7 @@ log_assert "'zfs promote' will fail with invalid arguments. 
" log_onexit cleanup -snap=$TESTPOOL/$TESTFS@$TESTSNAP -clone=$TESTPOOL/$TESTCLONE -log_must zfs snapshot $snap -log_must zfs clone $snap $clone +create_recv_clone $recvfs typeset -i i=0 while (( i < ${#args[*]} )); do diff -Nru zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh --- zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh 2019-04-19 06:54:36.000000000 +0000 @@ -77,8 +77,8 @@ $TESTPOOL/$TESTFS1 $TESTPOOL/${TESTFS1}%x \ $TESTPOOL/$TESTFS1 $TESTPOOL/${TESTFS1}%p \ $TESTPOOL/$TESTFS1 $TESTPOOL/${TESTFS1}%s \ - $TESTPOOL/$TESTFS@snapshot \ - $TESTPOOL/$TESTFS@snapshot/fs + $TESTPOOL/$TESTFS@snapshot $TESTPOOL/$TESTFS@snapshot/fs \ + $TESTPOOL/$RECVFS/%recv $TESTPOOL/renamed.$$ # # cleanup defined in zfs_rename.kshlib diff -Nru zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.cfg zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.cfg --- zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.cfg 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.cfg 2019-04-19 06:54:36.000000000 +0000 @@ -36,3 +36,4 @@ export CNT=2048 export VOL_R_PATH=$ZVOL_RDEVDIR/$TESTPOOL/$TESTVOL export VOLDATA=$TESTDIR2/voldata.rename +export RECVFS=recvfs diff -Nru zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.kshlib zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.kshlib --- zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.kshlib 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.kshlib 2019-04-19 06:54:36.000000000 +0000 @@ -63,6 +63,8 @@ log_must cp $DATA $(get_prop mountpoint $TESTPOOL/$TESTVOL)/$TESTFILE0 fi + # Create temporary %recv clone + create_recv_clone $TESTPOOL/$RECVFS } function rename_dataset # src dest @@ -110,6 +112,9 @@ log_must zfs destroy -fR $TESTPOOL/$TESTFS@snapshot fi + if datasetexists $TESTPOOL/$RECVFS; then + log_must zfs destroy -r $TESTPOOL/$RECVFS + fi } function cmp_data #<$1 src data, $2 tgt data> diff -Nru zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_send/Makefile.am zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_send/Makefile.am --- zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_send/Makefile.am 2019-04-19 06:44:41.000000000 +0000 +++ zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_send/Makefile.am 2019-04-19 06:54:36.000000000 +0000 @@ -8,7 +8,8 @@ zfs_send_004_neg.ksh \ zfs_send_005_pos.ksh \ zfs_send_006_pos.ksh \ - zfs_send_007_pos.ksh + zfs_send_007_pos.ksh \ + zfs_send_sparse.ksh dist_pkgdata_DATA = \ zfs_send.cfg diff -Nru zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_sparse.ksh zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_sparse.ksh --- zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_sparse.ksh 1970-01-01 00:00:00.000000000 +0000 +++ zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_sparse.ksh 2019-04-19 06:54:36.000000000 +0000 @@ -0,0 +1,83 @@ +#!/bin/ksh -p +# 
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2017, loli10K. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# 'zfs send' should be able to send (big) sparse files correctly.
+#
+# STRATEGY:
+# 1. Create sparse files of various sizes
+# 2. Snapshot and send these sparse files
+# 3. Verify these files are received correctly and that we don't trigger any
+#    issue like the one described in https://github.com/zfsonlinux/zfs/pull/6760
+#
+
+verify_runnable "both"
+
+function cleanup
+{
+	datasetexists $SENDFS && log_must zfs destroy -r $SENDFS
+	datasetexists $RECVFS && log_must zfs destroy -r $RECVFS
+}
+
+#
+# Write 1 random byte at $offset of the "source" file in the $sendfs dataset
+# Snapshot and send the $sendfs dataset to $recvfs
+# Compare the received file with its source
+#
+function write_compare_files # <sendfs> <recvfs> <offset>
+{
+	typeset sendfs="$1"
+	typeset recvfs="$2"
+	typeset offset="$3"
+
+	# create source filesystem
+	log_must zfs create $sendfs
+	# write sparse file
+	sendfile="$(get_prop mountpoint $sendfs)/data.bin"
+	log_must dd if=/dev/urandom of=$sendfile bs=1 count=1 seek=$offset
+	# send/receive the file
+	log_must zfs snapshot $sendfs@snap
+	log_must eval "zfs send $sendfs@snap | zfs receive $recvfs"
+	# compare sparse files
+	recvfile="$(get_prop mountpoint $recvfs)/data.bin"
+	log_must cmp $sendfile $recvfile $offset $offset
+	sendsz=$(stat -c '%s' $sendfile)
+	recvsz=$(stat -c '%s' $recvfile)
+	if [[ $sendsz -ne $recvsz ]]; then
+		log_fail "$sendfile ($sendsz) and $recvfile ($recvsz) differ."
+	fi
+	# cleanup
+	log_must zfs destroy -r $sendfs
+	log_must zfs destroy -r $recvfs
+}
+
+log_assert "'zfs send' should be able to send (big) sparse files correctly."
+log_onexit cleanup
+
+SENDFS="$TESTPOOL/sendfs"
+RECVFS="$TESTPOOL/recvfs"
+OFF_T_MAX="$(echo '2 ^ 40 * 8 - 1' | bc)"
+
+for i in {1..60}; do
+	offset=$(echo "2 ^ $i" | bc)
+	is_32bit && [[ $offset -ge $OFF_T_MAX ]] && continue
+	write_compare_files $SENDFS $RECVFS $offset
+done
+
+log_pass "'zfs send' sends (big) sparse files correctly."
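For reference, the failure mode that zfs_send_sparse.ksh guards against can be reproduced by hand roughly as follows. This is a minimal sketch rather than part of the patch: the pool name "tank" and the 8 GiB offset are arbitrary assumptions, and any offset past the first block will do.

	# Write one random byte far into an otherwise empty (sparse) file,
	# round-trip it through send/receive, then compare apparent sizes.
	zfs create tank/sendfs
	dd if=/dev/urandom of=/tank/sendfs/data.bin bs=1 count=1 seek=$((8 << 30))
	zfs snapshot tank/sendfs@snap
	zfs send tank/sendfs@snap | zfs receive tank/recvfs
	stat -c '%s' /tank/sendfs/data.bin /tank/recvfs/data.bin
	# Both sizes should match; a mismatch is the symptom tracked in
	# https://github.com/zfsonlinux/zfs/pull/6760.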
diff -Nru zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am
--- zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am	2019-04-19 06:44:41.000000000 +0000
+++ zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am	2019-04-19 06:54:36.000000000 +0000
@@ -30,7 +30,8 @@
 	zpool_create_features_003_pos.ksh \
 	zpool_create_features_004_neg.ksh \
 	zpool_create_features_005_pos.ksh \
-	create-o_ashift.ksh
+	create-o_ashift.ksh \
+	zpool_create_tempname.ksh
 
 dist_pkgdata_DATA = \
 	zpool_create.cfg \
diff -Nru zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_tempname.ksh zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_tempname.ksh
--- zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_tempname.ksh	1970-01-01 00:00:00.000000000 +0000
+++ zfs-linux-0.7.13/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_tempname.ksh	2019-04-19 06:54:36.000000000 +0000
@@ -0,0 +1,68 @@
+#!/bin/ksh -p
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2018, loli10K. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# 'zpool create -t <tempname>' can create a pool with the specified temporary
+# name. The pool should be present in the namespace as <tempname> until exported
+#
+# STRATEGY:
+# 1. Create a pool with '-t' option
+# 2. Verify the pool is created with the specified temporary name
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+	destroy_pool $TESTPOOL
+	destroy_pool $TEMPPOOL
+}
+
+log_assert "'zpool create -t <tempname>' can create a pool with the specified" \
+	" temporary name."
+log_onexit cleanup
+
+TEMPPOOL="tempname.$$"
+typeset poolprops=('comment=text' 'ashift=12' 'listsnapshots=on' 'autoexpand=on'
+    'autoreplace=on' 'delegation=off' 'failmode=continue')
+typeset fsprops=('canmount=off' 'mountpoint=none' 'utf8only=on'
+    'casesensitivity=mixed' 'version=1' 'normalization=formKD')
+
+for poolprop in "${poolprops[@]}"; do
+	for fsprop in "${fsprops[@]}"; do
+		# 1. Create a pool with '-t' option
+		log_must zpool create $TESTPOOL -t $TEMPPOOL \
+			-O $fsprop -o $poolprop $DISKS
+		# 2. Verify the pool is created with the specified temporary name
+		log_must poolexists $TEMPPOOL
+		log_mustnot poolexists $TESTPOOL
+		propname="$(awk -F= '{print $1}' <<< $fsprop)"
+		propval="$(awk -F= '{print $2}' <<< $fsprop)"
+		log_must test "$(get_prop $propname $TEMPPOOL)" == "$propval"
+		propname="$(awk -F= '{print $1}' <<< $poolprop)"
+		propval="$(awk -F= '{print $2}' <<< $poolprop)"
+		log_must test "$(get_pool_prop $propname $TEMPPOOL)" == "$propval"
+		# Cleanup
+		destroy_pool $TEMPPOOL
+	done
+done
+
+log_pass "'zpool create -t <tempname>' successfully creates pools with" \
+	" temporary names"
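For context, the temporary-name behaviour exercised by zpool_create_tempname.ksh looks like the following in practice. This is an illustrative session rather than part of the patch; the device path /dev/sdb is an assumption.

	# Create a pool permanently named "tank" that is visible as
	# "tmptank" until it is exported and re-imported.
	zpool create -t tmptank tank /dev/sdb
	zpool list tmptank	# present under the temporary name only
	zpool export tmptank
	zpool import tank	# comes back under its permanent name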