diff -Nru glusterfs-3.12.14/ChangeLog glusterfs-3.12.15/ChangeLog --- glusterfs-3.12.14/ChangeLog 2018-09-06 17:07:18.427428638 +0000 +++ glusterfs-3.12.15/ChangeLog 2018-10-13 07:07:40.766279669 +0000 @@ -1,4 +1,178 @@ ===== git log ==== +commit a1cf0f2c8ba4dcff39f6fa39a9904a5598b724d5 +Author: Jiffin Tony Thottan +Date: Sat Oct 13 11:12:10 2018 +0530 + + Release notes for Gluster 3.12.15 + + Change-Id: Ifc5dfc21f6dbd4c91c38618a298eecb5fa9aaf65 + BUG: 1638188 + Signed-off-by: Jiffin Tony Thottan + +commit 166e66c67428e6263e77d969e3447832e85d5f23 +Author: Ravishankar N +Date: Wed Oct 10 17:57:33 2018 +0530 + + afr: prevent winding inodelks twice for arbiter volumes + + Backport of https://review.gluster.org/#/c/glusterfs/+/21380/ + + Problem: + In an arbiter volume, if there is a pending data heal of a file only on + arbiter brick, self-heal takes inodelks twice due to a code-bug but unlocks + it only once, leaving behind a stale lock on the brick. This causes + the next write to the file to hang. + + Fix: + Fix the code-bug to take lock only once. This bug was introduced master + with commit eb472d82a083883335bc494b87ea175ac43471ff + + Thanks to Pranith Kumar K for finding the RCA. + + fixes: bz#1637989 + Change-Id: I15ad969e10a6a3c4bd255e2948b6be6dcddc61e1 + BUG: 1637989 + Signed-off-by: Ravishankar N + +commit f030db7bec36f0d97f2beacb3306d31379e4a79f +Author: Pranith Kumar K +Date: Mon Aug 27 11:46:33 2018 +0530 + + cluster/afr: Delegate metadata heal with pending xattrs to SHD + + Problem: + When metadata-self-heal is triggered on the mount, it blocks + lookup until metadata-self-heal completes. But that can lead + to hangs when lot of clients are accessing a directory which + needs metadata heal and all of them trigger heals waiting + for other clients to complete heal. + + Fix: + Only when the heal is needed but the pending xattrs are not set, + trigger metadata heal that could block lookup. This is the only + case where different clients may give different metadata to the + clients without heals, which should be avoided. + + Updates bz#1625588 + Change-Id: I6089e9fda0770a83fb287941b229c882711f4e66 + Signed-off-by: Pranith Kumar K + +commit a570ee702d968d1733a3e31b259d4d0fbf5bca3c +Author: Pranith Kumar K +Date: Mon Aug 27 12:40:16 2018 +0530 + + cluster/afr: Delegate name-heal when possible + + Problem: + When name-self-heal is triggered on the mount, it blocks + lookup until name-self-heal completes. But that can lead + to hangs when lot of clients are accessing a directory which + needs name heal and all of them trigger heals waiting + for other clients to complete heal. + + Fix: + When a name-heal is needed but quorum number of names have the + file and pending xattrs exist on the parent, then better to + delegate the heal to SHD which will be completed as part of + entry-heal of the parent directory. We could also do the same + for quorum-number of names not present but we don't have + any known use-case where this is a frequent occurrence so + not changing that part at the moment. 
When there is a gfid + mismatch or missing gfid it is important to complete the heal + so that next rename doesn't assume everything is fine and + perform a rename etc + + fixes bz#1625588 + Change-Id: I8b002c85dffc6eb6f2833e742684a233daefeb2c + Signed-off-by: Pranith Kumar K + +commit 9cde9d7153d03893b7e6aef3a62320f7b03ced80 +Author: Poornima G +Date: Mon Nov 13 12:55:06 2017 +0530 + + dht: Fill first_up_subvol before use in dht_opendir + + Reported by: Sam McLeod + + Change-Id: Ic8f9b46b173796afd70aff1042834b03ac3e80b2 + BUG: 1512371 + Signed-off-by: Poornima G + +commit 9f32a0adc51820948d8e1bd34e1805869bbf2ad2 +Author: Kaleb S. KEITHLEY +Date: Mon Oct 2 10:44:59 2017 -0400 + + packaging: manual systemctl daemon reload required after install + + Use the %systemd_{post,preun,postun_with_restart} macros provided + + Reported-by: Sam McLeod + + Changes from the following patches from master branch are included: + * https://review.gluster.org/#/c/18418 + * https://review.gluster.org/#/c/18432 + + Change-Id: Ibb33a748fc4226864019765b59d1a154b7297bae + BUG: 1497989 + Signed-off-by: Kaleb S. KEITHLEY + +commit 76788178ba725442c95541e37e56d4a83da2bb78 +Author: Ravishankar N +Date: Thu Sep 27 17:26:52 2018 +0530 + + afr: fix incorrect reporting of directory split-brain + + Backport of https://review.gluster.org/#/c/glusterfs/+/21135/ + + Problem: + When a directory has dirty xattrs due to failed post-ops or when + replace/reset brick is performed, AFR does a conservative merge as + expected, but heal-info reports it as split-brain because there are no + clear sources. + + Fix: + Modify pending flag to contain information about pending heals and + split-brains. For directories, if spit-brain flag is not set,just show + them as needing heal and not being in split-brain. + + Fixes: bz#1633625 + Change-Id: I09ef821f6887c87d315ae99e6b1de05103cd9383 + BUG: 1633625 + Signed-off-by: Ravishankar N + +commit ca5adfb65b08841714431e97751a0c0c63a4bbdf +Author: hari gowtham +Date: Wed Apr 11 17:38:26 2018 +0530 + + glusterd: volume inode/fd status broken with brick mux + + backport of:https://review.gluster.org/#/c/19846/6 + + Problem: + The values for inode/fd was populated from the ctx received + from the server xlator. + Without brickmux, every brick from a volume belonged to a + single brick from the volume. + So searching the server and populating it worked. + + With brickmux, a number of bricks can be confined to a single + process. These bricks can be from different volumes too (if + we use the max-bricks-per-process option). + If they are from different volumes, using the server xlator + to populate causes problem. + + Fix: + Use the brick to validate and populate the inode/fd status. + + >Signed-off-by: hari gowtham + >Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd + >fixes: bz#1566067 + + Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd + BUG: 1569336 + fixes: bz#1569336 + Signed-off-by: hari gowtham + commit fe5b6bc8522b3539a97765b243ad37ef227c05b6 Author: Jiffin Tony Thottan Date: Thu Sep 6 21:39:15 2018 +0530 diff -Nru glusterfs-3.12.14/configure glusterfs-3.12.15/configure --- glusterfs-3.12.14/configure 2018-09-06 17:06:29.157297845 +0000 +++ glusterfs-3.12.15/configure 2018-10-13 07:06:51.334122033 +0000 @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for glusterfs 3.12.14. +# Generated by GNU Autoconf 2.69 for glusterfs 3.12.15. # # Report bugs to . # @@ -590,8 +590,8 @@ # Identity of this package. 
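The configure, Makefile and spec hunks from this point on are the routine 3.12.14 to 3.12.15 version bump, together with the switch to the stock RPM systemd scriptlet macros described in the "packaging: manual systemctl daemon reload required after install" entry above. As a minimal post-upgrade sanity check on an RPM-based node (the package and unit names are the ones shipped by the spec below; the host itself is assumed):

  rpm -q glusterfs-server          # expect a 3.12.15 build
  glusterfs --version | head -n1   # running binary should also report 3.12.15
  # the point of the packaging change above is that the %systemd_* scriptlet
  # macros take care of the daemon reload, so running it by hand should no
  # longer be necessary, though it remains harmless
  systemctl daemon-reload
  systemctl status glusterd

The version-bump hunks themselves continue below.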
PACKAGE_NAME='glusterfs' PACKAGE_TARNAME='glusterfs' -PACKAGE_VERSION='3.12.14' -PACKAGE_STRING='glusterfs 3.12.14' +PACKAGE_VERSION='3.12.15' +PACKAGE_STRING='glusterfs 3.12.15' PACKAGE_BUGREPORT='gluster-users@gluster.org' PACKAGE_URL='https://github.com/gluster/glusterfs.git' @@ -1530,7 +1530,7 @@ # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures glusterfs 3.12.14 to adapt to many kinds of systems. +\`configure' configures glusterfs 3.12.15 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1600,7 +1600,7 @@ if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of glusterfs 3.12.14:";; + short | recursive ) echo "Configuration of glusterfs 3.12.15:";; esac cat <<\_ACEOF @@ -1792,7 +1792,7 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -glusterfs configure 3.12.14 +glusterfs configure 3.12.15 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2447,7 +2447,7 @@ This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by glusterfs $as_me 3.12.14, which was +It was created by glusterfs $as_me 3.12.15, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -3315,7 +3315,7 @@ # Define the identity of the package. PACKAGE='glusterfs' - VERSION='3.12.14' + VERSION='3.12.15' cat >>confdefs.h <<_ACEOF @@ -17470,7 +17470,7 @@ # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by glusterfs $as_me 3.12.14, which was +This file was extended by glusterfs $as_me 3.12.15, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -17537,7 +17537,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -glusterfs config.status 3.12.14 +glusterfs config.status 3.12.15 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff -Nru glusterfs-3.12.14/contrib/umountd/Makefile glusterfs-3.12.15/contrib/umountd/Makefile --- glusterfs-3.12.14/contrib/umountd/Makefile 2018-09-06 17:06:59.839379298 +0000 +++ glusterfs-3.12.15/contrib/umountd/Makefile 2018-10-13 07:07:21.718218955 +0000 @@ -212,7 +212,7 @@ GFAPI_EXTRA_LDFLAGS = -Wl,--version-script=$(top_srcdir)/api/src/gfapi.map GFAPI_LIBS = -lacl GFAPI_LT_VERSION = 0:0:0 -GFAPI_VERSION = 7.3.12.14 +GFAPI_VERSION = 7.3.12.15 GF_CFLAGS = -g -O2 -g -rdynamic -Wformat -Werror=format-security -Werror=implicit-function-declaration GF_CPPFLAGS = -I/usr/include/uuid -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS) -include $(top_builddir)/config.h -include $(top_builddir)/site.h -I$(top_srcdir)/libglusterfs/src -I$(top_builddir)/libglusterfs/src GF_DISTRIBUTION = Redhat @@ -276,10 +276,10 @@ PACKAGE_BUGREPORT = gluster-users@gluster.org PACKAGE_NAME = glusterfs PACKAGE_RELEASE = 0 -PACKAGE_STRING = glusterfs 3.12.14 +PACKAGE_STRING = glusterfs 3.12.15 PACKAGE_TARNAME = glusterfs PACKAGE_URL = https://github.com/gluster/glusterfs.git -PACKAGE_VERSION = 3.12.14 +PACKAGE_VERSION = 3.12.15 PATH_SEPARATOR = : PKGCONFIG_UUID = uuid PKG_CONFIG = /usr/bin/pkg-config @@ -327,7 +327,7 @@ USE_POSIX_ACLS = 1 UUID_CFLAGS = -I/usr/include/uuid UUID_LIBS = -luuid -VERSION = 3.12.14 +VERSION = 3.12.15 XML2_CONFIG = /usr/bin/xml2-config XML_CPPFLAGS = -I/usr/include/libxml2 XML_LIBS = -lxml2 -lz -lm -ldl diff -Nru glusterfs-3.12.14/debian/changelog glusterfs-3.12.15/debian/changelog --- glusterfs-3.12.14/debian/changelog 2018-09-06 13:19:51.000000000 +0000 +++ glusterfs-3.12.15/debian/changelog 2018-10-13 13:19:51.000000000 +0000 @@ -1,3 +1,9 @@ +glusterfs (3.12.15-ubuntu1~cosmic1) cosmic; urgency=medium + + * GlusterFS 3.12.15 GA + + -- GlusterFS GlusterFS deb packages Sat, 13 Oct 2018 09:19:51 -0400 + glusterfs (3.12.14-ubuntu1~cosmic1) cosmic; urgency=medium * GlusterFS 3.12.14 GA diff -Nru glusterfs-3.12.14/glusterfsd/src/glusterfsd-mgmt.c glusterfs-3.12.15/glusterfsd/src/glusterfsd-mgmt.c --- glusterfs-3.12.14/glusterfsd/src/glusterfsd-mgmt.c 2018-09-06 17:06:24.518285528 +0000 +++ glusterfs-3.12.15/glusterfsd/src/glusterfsd-mgmt.c 2018-10-13 07:06:43.477096955 +0000 @@ -1053,14 +1053,14 @@ glusterfs_ctx_t *ctx = NULL; glusterfs_graph_t *active = NULL; xlator_t *this = NULL; - xlator_t *any = NULL; - xlator_t *xlator = NULL; + xlator_t *server_xl = NULL; + xlator_t *brick_xl = NULL; dict_t *dict = NULL; dict_t *output = NULL; - char *volname = NULL; char *xname = NULL; uint32_t cmd = 0; char *msg = NULL; + char *brickname = NULL; GF_ASSERT (req); this = THIS; @@ -1088,32 +1088,26 @@ goto out; } - ret = dict_get_str (dict, "volname", &volname); + ret = dict_get_str (dict, "brick-name", &brickname); if (ret) { - gf_log (this->name, GF_LOG_ERROR, "Couldn't get volname"); + gf_log (this->name, GF_LOG_ERROR, "Couldn't get brickname from" + " dict"); goto out; } ctx = glusterfsd_ctx; GF_ASSERT (ctx); active = ctx->active; - any = active->first; + server_xl = active->first; - ret = gf_asprintf (&xname, "%s-server", volname); - if (-1 == ret) { - gf_log (this->name, GF_LOG_ERROR, "Out of memory"); - goto out; - 
} - - xlator = xlator_search_by_name (any, xname); - if (!xlator) { + brick_xl = get_xlator_by_name (server_xl, brickname); + if (!brick_xl) { gf_log (this->name, GF_LOG_ERROR, "xlator %s is not loaded", xname); ret = -1; goto out; } - output = dict_new (); switch (cmd & GF_CLI_STATUS_MASK) { case GF_CLI_STATUS_MEM: @@ -1123,15 +1117,17 @@ break; case GF_CLI_STATUS_CLIENTS: - ret = xlator->dumpops->priv_to_dict (xlator, output); + ret = server_xl->dumpops->priv_to_dict (server_xl, + output, brickname); break; case GF_CLI_STATUS_INODE: - ret = xlator->dumpops->inode_to_dict (xlator, output); + ret = server_xl->dumpops->inode_to_dict (brick_xl, + output); break; case GF_CLI_STATUS_FD: - ret = xlator->dumpops->fd_to_dict (xlator, output); + ret = server_xl->dumpops->fd_to_dict (brick_xl, output); break; case GF_CLI_STATUS_CALLPOOL: @@ -1307,7 +1303,7 @@ "Error setting volname to dict"); goto out; } - ret = node->dumpops->priv_to_dict (node, output); + ret = node->dumpops->priv_to_dict (node, output, NULL); break; case GF_CLI_STATUS_INODE: diff -Nru glusterfs-3.12.14/glusterfs.spec glusterfs-3.12.15/glusterfs.spec --- glusterfs-3.12.14/glusterfs.spec 2018-09-06 17:07:00.233380344 +0000 +++ glusterfs-3.12.15/glusterfs.spec 2018-10-13 07:07:22.043219992 +0000 @@ -10,19 +10,19 @@ ## # if you wish to compile an rpm with debugging... -# rpmbuild -ta glusterfs-3.12.14.tar.gz --with debug +# rpmbuild -ta glusterfs-3.12.15.tar.gz --with debug %{?_with_debug:%global _with_debug --enable-debug} # if you wish to compile an rpm to run all processes under valgrind... -# rpmbuild -ta glusterfs-3.12.14.tar.gz --with valgrind +# rpmbuild -ta glusterfs-3.12.15.tar.gz --with valgrind %{?_with_valgrind:%global _with_valgrind --enable-valgrind} # if you wish to compile an rpm with cmocka unit testing... -# rpmbuild -ta glusterfs-3.12.14.tar.gz --with cmocka +# rpmbuild -ta glusterfs-3.12.15.tar.gz --with cmocka %{?_with_cmocka:%global _with_cmocka --enable-cmocka} # if you wish to compile an rpm without rdma support, compile like this... -# rpmbuild -ta glusterfs-3.12.14.tar.gz --without rdma +# rpmbuild -ta glusterfs-3.12.15.tar.gz --without rdma %{?_without_rdma:%global _without_rdma --disable-ibverbs} # No RDMA Support on s390(x) @@ -31,15 +31,15 @@ %endif # if you wish to compile an rpm without epoll... -# rpmbuild -ta glusterfs-3.12.14.tar.gz --without epoll +# rpmbuild -ta glusterfs-3.12.15.tar.gz --without epoll %{?_without_epoll:%global _without_epoll --disable-epoll} # if you wish to compile an rpm without fusermount... -# rpmbuild -ta glusterfs-3.12.14.tar.gz --without fusermount +# rpmbuild -ta glusterfs-3.12.15.tar.gz --without fusermount %{?_without_fusermount:%global _without_fusermount --disable-fusermount} # if you wish to compile an rpm without geo-replication support, compile like this... -# rpmbuild -ta glusterfs-3.12.14.tar.gz --without georeplication +# rpmbuild -ta glusterfs-3.12.15.tar.gz --without georeplication %{?_without_georeplication:%global _without_georeplication --disable-georeplication} # Disable geo-replication on EL5, as its default Python is too old @@ -48,15 +48,15 @@ %endif # if you wish to compile an rpm with the legacy gNFS server xlator -# rpmbuild -ta glusterfs-3.12.14.tar.gz --with gnfs +# rpmbuild -ta glusterfs-3.12.15.tar.gz --with gnfs %{?_with_gnfs:%global _with_gnfs --enable-gnfs} # if you wish to compile an rpm without the OCF resource agents... 
-# rpmbuild -ta glusterfs-3.12.14.tar.gz --without ocf +# rpmbuild -ta glusterfs-3.12.15.tar.gz --without ocf %{?_without_ocf:%global _without_ocf --without-ocf} # if you wish to build rpms without syslog logging, compile like this -# rpmbuild -ta glusterfs-3.12.14.tar.gz --without syslog +# rpmbuild -ta glusterfs-3.12.15.tar.gz --without syslog %{?_without_syslog:%global _without_syslog --disable-syslog} # disable syslog forcefully as rhel <= 6 doesn't have rsyslog or rsyslog-mmcount @@ -68,7 +68,7 @@ %endif # if you wish to compile an rpm without the BD map support... -# rpmbuild -ta glusterfs-3.12.14.tar.gz --without bd +# rpmbuild -ta glusterfs-3.12.15.tar.gz --without bd %{?_without_bd:%global _without_bd --disable-bd-xlator} %if ( 0%{?rhel} && 0%{?rhel} < 6 || 0%{?sles_version} ) @@ -111,28 +111,34 @@ %endif %if ( 0%{?_with_systemd:1} ) -%global _init_enable() /bin/systemctl enable %1.service ; -%global _init_disable() /bin/systemctl disable %1.service ; -%global _init_restart() /bin/systemctl try-restart %1.service ; -%global _init_start() /bin/systemctl start %1.service ; -%global _init_stop() /bin/systemctl stop %1.service ; -%global _init_install() install -D -p -m 0644 %1 %{buildroot}%{_unitdir}/%2.service ; +%global service_start() /bin/systemctl --quiet start %1.service || : \ +%{nil} +%global service_stop() /bin/systemctl --quiet stop %1.service || :\ +%{nil} +%global service_install() install -D -p -m 0644 %1.service %{buildroot}%2 \ +%{nil} # can't seem to make a generic macro that works -%global _init_glusterd %{_unitdir}/glusterd.service -%global _init_glusterfsd %{_unitdir}/glusterfsd.service -%global _init_glustereventsd %{_unitdir}/glustereventsd.service -%global _init_glusterfssharedstorage %{_unitdir}/glusterfssharedstorage.service +%global glusterd_svcfile %{_unitdir}/glusterd.service +%global glusterfsd_svcfile %{_unitdir}/glusterfsd.service +%global glustereventsd_svcfile %{_unitdir}/glustereventsd.service +%global glusterfssharedstorage_svcfile %{_unitdir}/glusterfssharedstorage.service %else -%global _init_enable() /sbin/chkconfig --add %1 ; -%global _init_disable() /sbin/chkconfig --del %1 ; -%global _init_restart() /sbin/service %1 condrestart &>/dev/null ; -%global _init_start() /sbin/service %1 start &>/dev/null ; -%global _init_stop() /sbin/service %1 stop &>/dev/null ; -%global _init_install() install -D -p -m 0755 %1 %{buildroot}%{_sysconfdir}/init.d/%2 ; +%global systemd_post() /sbin/chkconfig --add %1 >/dev/null 2>&1 || : \ +%{nil} +%global systemd_preun() /sbin/chkconfig --del %1 >/dev/null 2>&1 || : \ +%{nil} +%global systemd_postun_with_restart() /sbin/service %1 condrestart >/dev/null 2>&1 || : \ +%{nil} +%global service_start() /sbin/service %1 start >/dev/null 2>&1 || : \ +%{nil} +%global service_stop() /sbin/service %1 stop >/dev/null 2>&1 || : \ +%{nil} +%global service_install() install -D -p -m 0755 %1.init %{buildroot}%2 \ +%{nil} # can't seem to make a generic macro that works -%global _init_glusterd %{_sysconfdir}/init.d/glusterd -%global _init_glusterfsd %{_sysconfdir}/init.d/glusterfsd -%global _init_glustereventsd %{_sysconfdir}/init.d/glustereventsd +%global glusterd_svcfile %{_sysconfdir}/init.d/glusterd +%global glusterfsd_svcfile %{_sysconfdir}/init.d/glusterfsd +%global glustereventsd_svcfile %{_sysconfdir}/init.d/glustereventsd %endif %if ( 0%{_for_fedora_koji_builds} ) @@ -177,7 +183,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist} %else Name: glusterfs -Version: 3.12.14 +Version: 3.12.15 Release: 0.0%{?dist} %endif License: 
GPLv2 or LGPLv3+ @@ -191,7 +197,7 @@ Source7: glusterfsd.service Source8: glusterfsd.init %else -Source0: glusterfs-3.12.14.tar.gz +Source0: glusterfs-3.12.15.tar.gz %endif BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) @@ -755,7 +761,7 @@ # Install glusterfsd .service or init.d file %if ( 0%{_for_fedora_koji_builds} ) -%_init_install %{glusterfsd_service} glusterfsd +%service_install glusterfsd %{glusterfsd_svcfile} %endif install -D -p -m 0644 extras/glusterfs-logrotate \ @@ -804,7 +810,7 @@ /sbin/ldconfig %if ( 0%{!?_without_syslog:1} ) %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) -%_init_restart rsyslog +%systemd_postun_with_restart rsyslog %endif %endif exit 0 @@ -814,7 +820,7 @@ %if ( 0%{!?_without_events:1} ) %post events -%_init_restart glustereventsd +%systemd_post glustereventsd %endif %if ( 0%{?rhel} == 5 ) @@ -826,7 +832,7 @@ %if ( 0%{!?_without_georeplication:1} ) %post geo-replication if [ $1 -ge 1 ]; then - %_init_restart glusterd + %systemd_postun_with_restart glusterd fi exit 0 %endif @@ -836,9 +842,9 @@ %post server # Legacy server -%_init_enable glusterd +%systemd_post glusterd %if ( 0%{_for_fedora_koji_builds} ) -%_init_enable glusterfsd +%systemd_post glusterfsd %endif # ".cmd_log_history" is renamed to "cmd_history.log" in GlusterFS-3.7 . # While upgrading glusterfs-server package form GlusterFS version <= 3.6 to @@ -896,7 +902,7 @@ # glusterd _was_ running, we killed it, it exited after *.upgrade=on, # so start it again - %_init_start glusterd + %service_start glusterd else glusterd --xlator-option *.upgrade=on -N @@ -921,9 +927,9 @@ %if ( 0%{!?_without_events:1} ) %preun events if [ $1 -eq 0 ]; then - if [ -f %_init_glustereventsd ]; then - %_init_stop glustereventsd - %_init_disable glustereventsd + if [ -f %glustereventsd_svcfile ]; then + %service_stop glustereventsd + %systemd_preun glustereventsd fi fi exit 0 @@ -931,20 +937,20 @@ %preun server if [ $1 -eq 0 ]; then - if [ -f %_init_glusterfsd ]; then - %_init_stop glusterfsd + if [ -f %glusterfsd_svcfile ]; then + %service_stop glusterfsd fi - %_init_stop glusterd - if [ -f %_init_glusterfsd ]; then - %_init_disable glusterfsd + %service_stop glusterd + if [ -f %glusterfsd_svcfile ]; then + %systemd_preun glusterfsd fi - %_init_disable glusterd + %systemd_postun_with_restart glusterd fi if [ $1 -ge 1 ]; then - if [ -f %_init_glusterfsd ]; then - %_init_restart glusterfsd + if [ -f %glusterfsd_svcfile ]; then + %systemd_postun_with_restart glusterfsd fi - %_init_restart glusterd + %systemd_postun_with_restart glusterd fi exit 0 @@ -955,7 +961,7 @@ /sbin/ldconfig %if ( 0%{!?_without_syslog:1} ) %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) -%_init_restart rsyslog +%systemd_postun_with_restart rsyslog %endif %endif @@ -1212,12 +1218,12 @@ %endif # init files -%_init_glusterd +%glusterd_svcfile %if ( 0%{_for_fedora_koji_builds} ) -%_init_glusterfsd +%glusterfsd_svcfile %endif %if ( 0%{?_with_systemd:1} ) -%_init_glusterfssharedstorage +%glusterfssharedstorage_svcfile %endif # binaries diff -Nru glusterfs-3.12.14/glusterfs.spec.in glusterfs-3.12.15/glusterfs.spec.in --- glusterfs-3.12.14/glusterfs.spec.in 2018-09-06 17:06:24.517285525 +0000 +++ glusterfs-3.12.15/glusterfs.spec.in 2018-10-13 07:06:43.475096949 +0000 @@ -111,28 +111,34 @@ %endif %if ( 0%{?_with_systemd:1} ) -%global _init_enable() /bin/systemctl enable %1.service ; -%global _init_disable() /bin/systemctl disable %1.service ; -%global _init_restart() /bin/systemctl try-restart %1.service ; -%global 
_init_start() /bin/systemctl start %1.service ; -%global _init_stop() /bin/systemctl stop %1.service ; -%global _init_install() install -D -p -m 0644 %1 %{buildroot}%{_unitdir}/%2.service ; +%global service_start() /bin/systemctl --quiet start %1.service || : \ +%{nil} +%global service_stop() /bin/systemctl --quiet stop %1.service || :\ +%{nil} +%global service_install() install -D -p -m 0644 %1.service %{buildroot}%2 \ +%{nil} # can't seem to make a generic macro that works -%global _init_glusterd %{_unitdir}/glusterd.service -%global _init_glusterfsd %{_unitdir}/glusterfsd.service -%global _init_glustereventsd %{_unitdir}/glustereventsd.service -%global _init_glusterfssharedstorage %{_unitdir}/glusterfssharedstorage.service +%global glusterd_svcfile %{_unitdir}/glusterd.service +%global glusterfsd_svcfile %{_unitdir}/glusterfsd.service +%global glustereventsd_svcfile %{_unitdir}/glustereventsd.service +%global glusterfssharedstorage_svcfile %{_unitdir}/glusterfssharedstorage.service %else -%global _init_enable() /sbin/chkconfig --add %1 ; -%global _init_disable() /sbin/chkconfig --del %1 ; -%global _init_restart() /sbin/service %1 condrestart &>/dev/null ; -%global _init_start() /sbin/service %1 start &>/dev/null ; -%global _init_stop() /sbin/service %1 stop &>/dev/null ; -%global _init_install() install -D -p -m 0755 %1 %{buildroot}%{_sysconfdir}/init.d/%2 ; +%global systemd_post() /sbin/chkconfig --add %1 >/dev/null 2>&1 || : \ +%{nil} +%global systemd_preun() /sbin/chkconfig --del %1 >/dev/null 2>&1 || : \ +%{nil} +%global systemd_postun_with_restart() /sbin/service %1 condrestart >/dev/null 2>&1 || : \ +%{nil} +%global service_start() /sbin/service %1 start >/dev/null 2>&1 || : \ +%{nil} +%global service_stop() /sbin/service %1 stop >/dev/null 2>&1 || : \ +%{nil} +%global service_install() install -D -p -m 0755 %1.init %{buildroot}%2 \ +%{nil} # can't seem to make a generic macro that works -%global _init_glusterd %{_sysconfdir}/init.d/glusterd -%global _init_glusterfsd %{_sysconfdir}/init.d/glusterfsd -%global _init_glustereventsd %{_sysconfdir}/init.d/glustereventsd +%global glusterd_svcfile %{_sysconfdir}/init.d/glusterd +%global glusterfsd_svcfile %{_sysconfdir}/init.d/glusterfsd +%global glustereventsd_svcfile %{_sysconfdir}/init.d/glustereventsd %endif %if ( 0%{_for_fedora_koji_builds} ) @@ -755,7 +761,7 @@ # Install glusterfsd .service or init.d file %if ( 0%{_for_fedora_koji_builds} ) -%_init_install %{glusterfsd_service} glusterfsd +%service_install glusterfsd %{glusterfsd_svcfile} %endif install -D -p -m 0644 extras/glusterfs-logrotate \ @@ -804,7 +810,7 @@ /sbin/ldconfig %if ( 0%{!?_without_syslog:1} ) %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) -%_init_restart rsyslog +%systemd_postun_with_restart rsyslog %endif %endif exit 0 @@ -814,7 +820,7 @@ %if ( 0%{!?_without_events:1} ) %post events -%_init_restart glustereventsd +%systemd_post glustereventsd %endif %if ( 0%{?rhel} == 5 ) @@ -826,7 +832,7 @@ %if ( 0%{!?_without_georeplication:1} ) %post geo-replication if [ $1 -ge 1 ]; then - %_init_restart glusterd + %systemd_postun_with_restart glusterd fi exit 0 %endif @@ -836,9 +842,9 @@ %post server # Legacy server -%_init_enable glusterd +%systemd_post glusterd %if ( 0%{_for_fedora_koji_builds} ) -%_init_enable glusterfsd +%systemd_post glusterfsd %endif # ".cmd_log_history" is renamed to "cmd_history.log" in GlusterFS-3.7 . 
# While upgrading glusterfs-server package form GlusterFS version <= 3.6 to @@ -896,7 +902,7 @@ # glusterd _was_ running, we killed it, it exited after *.upgrade=on, # so start it again - %_init_start glusterd + %service_start glusterd else glusterd --xlator-option *.upgrade=on -N @@ -921,9 +927,9 @@ %if ( 0%{!?_without_events:1} ) %preun events if [ $1 -eq 0 ]; then - if [ -f %_init_glustereventsd ]; then - %_init_stop glustereventsd - %_init_disable glustereventsd + if [ -f %glustereventsd_svcfile ]; then + %service_stop glustereventsd + %systemd_preun glustereventsd fi fi exit 0 @@ -931,20 +937,20 @@ %preun server if [ $1 -eq 0 ]; then - if [ -f %_init_glusterfsd ]; then - %_init_stop glusterfsd + if [ -f %glusterfsd_svcfile ]; then + %service_stop glusterfsd fi - %_init_stop glusterd - if [ -f %_init_glusterfsd ]; then - %_init_disable glusterfsd + %service_stop glusterd + if [ -f %glusterfsd_svcfile ]; then + %systemd_preun glusterfsd fi - %_init_disable glusterd + %systemd_postun_with_restart glusterd fi if [ $1 -ge 1 ]; then - if [ -f %_init_glusterfsd ]; then - %_init_restart glusterfsd + if [ -f %glusterfsd_svcfile ]; then + %systemd_postun_with_restart glusterfsd fi - %_init_restart glusterd + %systemd_postun_with_restart glusterd fi exit 0 @@ -955,7 +961,7 @@ /sbin/ldconfig %if ( 0%{!?_without_syslog:1} ) %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) -%_init_restart rsyslog +%systemd_postun_with_restart rsyslog %endif %endif @@ -1212,12 +1218,12 @@ %endif # init files -%_init_glusterd +%glusterd_svcfile %if ( 0%{_for_fedora_koji_builds} ) -%_init_glusterfsd +%glusterfsd_svcfile %endif %if ( 0%{?_with_systemd:1} ) -%_init_glusterfssharedstorage +%glusterfssharedstorage_svcfile %endif # binaries diff -Nru glusterfs-3.12.14/libglusterfs/src/client_t.c glusterfs-3.12.15/libglusterfs/src/client_t.c --- glusterfs-3.12.14/libglusterfs/src/client_t.c 2018-09-06 17:06:24.522285539 +0000 +++ glusterfs-3.12.15/libglusterfs/src/client_t.c 2018-10-13 07:06:43.480096965 +0000 @@ -743,10 +743,13 @@ clienttable->cliententries[count].next_free) continue; client = clienttable->cliententries[count].client; - memset(key, 0, sizeof key); - snprintf (key, sizeof key, "conn%d", count++); - fdtable_dump_to_dict (client->server_ctx.fdtable, - key, dict); + if (!strcmp (client->bound_xl->name, this->name)) { + memset(key, 0, sizeof (key)); + snprintf (key, sizeof (key), "conn%d", count++); + fdtable_dump_to_dict (client->server_ctx. + fdtable, + key, dict); + } } } UNLOCK(&clienttable->lock); @@ -859,25 +862,30 @@ clienttable->cliententries[count].next_free) continue; client = clienttable->cliententries[count].client; - memset(key, 0, sizeof key); - if (client->bound_xl && client->bound_xl->itable) { - /* Presently every brick contains only - * one bound_xl for all connections. - * This will lead to duplicating of - * the inode lists, if listing is - * done for every connection. This - * simple check prevents duplication - * in the present case. If need arises - * the check can be improved. - */ - if (client->bound_xl == prev_bound_xl) - continue; - prev_bound_xl = client->bound_xl; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "conn%d", count); - inode_table_dump_to_dict (client->bound_xl->itable, - key, dict); + if (!strcmp (client->bound_xl->name, this->name)) { + memset(key, 0, sizeof (key)); + if (client->bound_xl && client->bound_xl-> + itable) { + /* Presently every brick contains only + * one bound_xl for all connections. 
+ * This will lead to duplicating of + * the inode lists, if listing is + * done for every connection. This + * simple check prevents duplication + * in the present case. If need arises + * the check can be improved. + */ + if (client->bound_xl == prev_bound_xl) + continue; + prev_bound_xl = client->bound_xl; + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "conn%d", + count); + inode_table_dump_to_dict (client-> + bound_xl->itable, + key, dict); + } } } } diff -Nru glusterfs-3.12.14/libglusterfs/src/xlator.h glusterfs-3.12.15/libglusterfs/src/xlator.h --- glusterfs-3.12.14/libglusterfs/src/xlator.h 2018-09-06 17:06:24.543285595 +0000 +++ glusterfs-3.12.15/libglusterfs/src/xlator.h 2018-10-13 07:06:43.503097038 +0000 @@ -873,7 +873,8 @@ typedef int32_t (*dumpop_fdctx_t) (xlator_t *this, fd_t *fd); -typedef int32_t (*dumpop_priv_to_dict_t) (xlator_t *this, dict_t *dict); +typedef int32_t (*dumpop_priv_to_dict_t) (xlator_t *this, dict_t *dict, + char *brickname); typedef int32_t (*dumpop_inode_to_dict_t) (xlator_t *this, dict_t *dict); diff -Nru glusterfs-3.12.14/tests/afr.rc glusterfs-3.12.15/tests/afr.rc --- glusterfs-3.12.14/tests/afr.rc 2018-09-06 17:06:24.556285629 +0000 +++ glusterfs-3.12.15/tests/afr.rc 2018-10-13 07:06:43.516097079 +0000 @@ -2,7 +2,7 @@ function create_brick_xattrop_entry { local xattrop_dir=$(afr_get_index_path $1) - local base_entry=`ls $xattrop_dir` + local base_entry=`ls $xattrop_dir|grep xattrop` local gfid_str local params=`echo "$@" | cut -d' ' -f2-` echo $params @@ -89,3 +89,11 @@ { ls $1/.glusterfs/indices/xattrop | wc -l } + +function get_quorum_type() +{ + local m="$1" + local v="$2" + local repl_id="$3" + cat $m/.meta/graphs/active/$v-replicate-$repl_id/private|grep quorum-type|awk '{print $3}' +} diff -Nru glusterfs-3.12.14/tests/basic/afr/client-side-heal.t glusterfs-3.12.15/tests/basic/afr/client-side-heal.t --- glusterfs-3.12.14/tests/basic/afr/client-side-heal.t 2018-09-06 17:06:24.557285632 +0000 +++ glusterfs-3.12.15/tests/basic/afr/client-side-heal.t 2018-10-13 07:06:43.516097079 +0000 @@ -17,6 +17,7 @@ echo "some data" > $M0/datafile EXPECT 0 echo $? TEST touch $M0/mdatafile +TEST touch $M0/mdatafile-backend-direct-modify TEST mkdir $M0/dir #Kill a brick and perform I/O to have pending heals. @@ -29,6 +30,7 @@ #pending metadata heal TEST chmod +x $M0/mdatafile +TEST chmod +x $B0/${V0}0/mdatafile-backend-direct-modify #pending entry heal. Also causes pending metadata/data heals on file{1..5} TEST touch $M0/dir/file{1..5} @@ -40,9 +42,12 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 #Medatada heal via explicit lookup must not happen -TEST ls $M0/mdatafile +TEST getfattr -d -m. -e hex $M0/mdatafile +TEST ls $M0/mdatafile-backend-direct-modify -#Inode refresh must not trigger data and entry heals. +TEST [[ "$(stat -c %A $B0/${V0}0/mdatafile-backend-direct-modify)" != "$(stat -c %A $B0/${V0}1/mdatafile-backend-direct-modify)" ]] + +#Inode refresh must not trigger data metadata and entry heals. #To trigger inode refresh for sure, the volume is unmounted and mounted each time. #Check that data heal does not happen. 
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 @@ -52,7 +57,6 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; TEST ls $M0/dir - #No heal must have happened EXPECT 8 get_pending_heal_count $V0 @@ -61,21 +65,25 @@ TEST $CLI volume set $V0 cluster.metadata-self-heal on TEST $CLI volume set $V0 cluster.entry-self-heal on -#Metadata heal is triggered by lookup without need for inode refresh. -TEST ls $M0/mdatafile -EXPECT 7 get_pending_heal_count $V0 - -#Inode refresh must trigger data and entry heals. +#Inode refresh must trigger data metadata and entry heals. #To trigger inode refresh for sure, the volume is unmounted and mounted each time. EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; +TEST ls $M0/mdatafile-backend-direct-modify + +TEST [[ "$(stat -c %A $B0/${V0}0/mdatafile-backend-direct-modify)" == "$(stat -c %A $B0/${V0}1/mdatafile-backend-direct-modify)" ]] + + +TEST getfattr -d -m. -e hex $M0/mdatafile +EXPECT_WITHIN $HEAL_TIMEOUT 7 get_pending_heal_count $V0 + TEST cat $M0/datafile EXPECT_WITHIN $HEAL_TIMEOUT 6 get_pending_heal_count $V0 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; TEST ls $M0/dir -EXPECT 5 get_pending_heal_count $V0 +EXPECT_WITHIN $HEAL_TIMEOUT 5 get_pending_heal_count $V0 TEST cat $M0/dir/file1 TEST cat $M0/dir/file2 @@ -83,5 +91,5 @@ TEST cat $M0/dir/file4 TEST cat $M0/dir/file5 -EXPECT 0 get_pending_heal_count $V0 +EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0 cleanup; diff -Nru glusterfs-3.12.14/tests/basic/afr/name-self-heal.t glusterfs-3.12.15/tests/basic/afr/name-self-heal.t --- glusterfs-3.12.14/tests/basic/afr/name-self-heal.t 1970-01-01 00:00:00.000000000 +0000 +++ glusterfs-3.12.15/tests/basic/afr/name-self-heal.t 2018-10-13 07:06:43.519097089 +0000 @@ -0,0 +1,112 @@ +#!/bin/bash +#Self-heal tests + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc +cleanup; + +#Check that when quorum is not enabled name-heal happens correctly +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1} +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume start $V0 +TEST $CLI volume heal $V0 disable +TEST $CLI volume set $V0 cluster.entry-self-heal off +TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; + +TEST touch $M0/a +TEST touch $M0/c +TEST kill_brick $V0 $H0 $B0/brick0 +TEST touch $M0/b +TEST rm -f $M0/a +TEST rm -f $M0/c +TEST touch $M0/c #gfid mismatch case +c_gfid=$(gf_get_gfid_xattr $B0/brick1/c) +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +TEST ! stat $M0/a +TEST ! stat $B0/brick0/a +TEST ! 
stat $B0/brick1/a + +TEST stat $M0/b +TEST stat $B0/brick0/b +TEST stat $B0/brick1/b +TEST [[ "$(gf_get_gfid_xattr $B0/brick0/b)" == "$(gf_get_gfid_xattr $B0/brick1/b)" ]] + +TEST stat $M0/c +TEST stat $B0/brick0/c +TEST stat $B0/brick1/c +TEST [[ "$(gf_get_gfid_xattr $B0/brick0/c)" == "$c_gfid" ]] + +cleanup; + +#Check that when quorum is enabled name-heal happens as expected +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2} +TEST $CLI volume set $V0 performance.stat-prefetch off +TEST $CLI volume start $V0 +TEST $CLI volume heal $V0 disable +TEST $CLI volume set $V0 cluster.entry-self-heal off +TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; + +TEST touch $M0/a +TEST touch $M0/c +TEST kill_brick $V0 $H0 $B0/brick0 +TEST touch $M0/b +TEST rm -f $M0/a +TEST rm -f $M0/c +TEST touch $M0/c #gfid mismatch case +c_gfid=$(gf_get_gfid_xattr $B0/brick1/c) +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +TEST ! stat $M0/a +TEST ! stat $B0/brick0/a +TEST ! stat $B0/brick1/a +TEST ! stat $B0/brick2/a + +TEST stat $M0/b +TEST ! stat $B0/brick0/b #Name heal shouldn't be triggered +TEST stat $B0/brick1/b +TEST stat $B0/brick2/b + +TEST stat $M0/c +TEST stat $B0/brick0/c +TEST stat $B0/brick1/c +TEST stat $B0/brick2/c +TEST [[ "$(gf_get_gfid_xattr $B0/brick0/c)" == "$c_gfid" ]] + +TEST $CLI volume set $V0 cluster.quorum-type none +EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "none" get_quorum_type $M0 $V0 0 +TEST stat $M0/b +TEST stat $B0/brick0/b #Name heal should be triggered +TEST stat $B0/brick1/b +TEST stat $B0/brick2/b +TEST [[ "$(gf_get_gfid_xattr $B0/brick0/b)" == "$(gf_get_gfid_xattr $B0/brick1/b)" ]] +TEST $CLI volume set $V0 cluster.quorum-type auto +EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "auto" get_quorum_type $M0 $V0 0 + +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +#Missing parent xattrs cases +TEST $CLI volume heal $V0 enable +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 +TEST $CLI volume heal $V0 +EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0 + +TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; +TEST $CLI volume heal $V0 disable +#In cases where a good parent doesn't have pending xattrs and a file, +#name-heal will be triggered +TEST gf_rm_file_and_gfid_link $B0/brick1 c +TEST stat $M0/c +TEST stat $B0/brick0/c +TEST stat $B0/brick1/c +TEST stat $B0/brick2/c +TEST [[ "$(gf_get_gfid_xattr $B0/brick0/c)" == "$c_gfid" ]] +cleanup diff -Nru glusterfs-3.12.14/tests/basic/volume-status.t glusterfs-3.12.15/tests/basic/volume-status.t --- glusterfs-3.12.14/tests/basic/volume-status.t 2018-09-06 17:06:24.575285680 +0000 +++ glusterfs-3.12.15/tests/basic/volume-status.t 2018-10-13 07:06:43.533097134 +0000 @@ -6,6 +6,14 @@ cleanup; +function gluster_fd_status () { + gluster volume status $V0 fd | sed -n '/Brick :/ p' | wc -l +} + +function gluster_inode_status () { + gluster volume status $V0 inode | sed -n '/Connection / p' | wc -l +} + TEST glusterd TEST pidof glusterd TEST $CLI volume info; @@ -21,6 +29,10 @@ ## Mount FUSE TEST $GFS -s $H0 --volfile-id $V0 $M0; +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "8" gluster_fd_status + +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1024" gluster_inode_status + ##Wait for connection establishment between nfs server and brick process 
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; diff -Nru glusterfs-3.12.14/tests/bugs/glusterfs/bug-906646.t glusterfs-3.12.15/tests/bugs/glusterfs/bug-906646.t --- glusterfs-3.12.14/tests/bugs/glusterfs/bug-906646.t 2018-09-06 17:06:24.601285749 +0000 +++ glusterfs-3.12.15/tests/bugs/glusterfs/bug-906646.t 2018-10-13 07:06:43.558097213 +0000 @@ -13,7 +13,6 @@ TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11 TEST $CLI volume start $V0 -TEST $CLI volume set $V0 cluster.self-heal-daemon off TEST $CLI volume set $V0 cluster.background-self-heal-count 0 ## Mount FUSE with caching disabled @@ -82,10 +81,15 @@ # restart the brick process TEST $CLI volume start $V0 force -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 `expr $brick_id - 1` +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3 -cat $pth >/dev/null +TEST $CLI volume heal $V0 +EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0 # check backends - xattr should not be present anywhere EXPECT 1 xattr_query_check ${backend_paths_array[0]} "trusted.name" EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name" diff -Nru glusterfs-3.12.14/tests/bugs/readdir-ahead/bug-1512437.t glusterfs-3.12.15/tests/bugs/readdir-ahead/bug-1512437.t --- glusterfs-3.12.14/tests/bugs/readdir-ahead/bug-1512437.t 1970-01-01 00:00:00.000000000 +0000 +++ glusterfs-3.12.15/tests/bugs/readdir-ahead/bug-1512437.t 2018-10-13 07:06:43.565097236 +0000 @@ -0,0 +1,23 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +cleanup; + +TEST glusterd + +TEST $CLI volume create $V0 $H0:$B0/${V0}1 +TEST $CLI volume start $V0 + +TEST $CLI volume set $V0 parallel-readdir on +TEST $CLI volume set $V0 readdir-optimize on + +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 +TEST mkdir -p $M0/subdir1/subdir2; +umount $M0 +TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 +count=`ls -1 $M0/subdir1 | wc -l` +TEST [ $count -eq 1 ] + +cleanup; diff -Nru glusterfs-3.12.14/tests/bugs/replicate/bug-1626994-info-split-brain.t glusterfs-3.12.15/tests/bugs/replicate/bug-1626994-info-split-brain.t --- glusterfs-3.12.14/tests/bugs/replicate/bug-1626994-info-split-brain.t 1970-01-01 00:00:00.000000000 +0000 +++ glusterfs-3.12.15/tests/bugs/replicate/bug-1626994-info-split-brain.t 2018-10-13 07:06:43.569097249 +0000 @@ -0,0 +1,62 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc + +cleanup; + +# Test to check dirs having dirty xattr do not show up in info split-brain. 
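The scenario this new test covers, per the "afr: fix incorrect reporting of directory split-brain" entry in the ChangeLog: a directory that only carries trusted.afr.dirty (a failed post-op, or a replaced/reset brick) needs a conservative merge, not split-brain resolution, so heal info should list it while heal info split-brain should not. A rough manual sketch of that distinction on an already-running replica 3 volume, assuming a volume named demo with bricks under /bricks/demo{0,1,2} and a directory dir (all names illustrative; unlike the test below this does not create the matching indices/xattrop entry, so heal info output may differ):

  # dirty-only: every brick carries the dirty flag, no brick blames another
  setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 /bricks/demo0/dir
  setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 /bricks/demo1/dir
  setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 /bricks/demo2/dir
  getfattr -d -m trusted.afr -e hex /bricks/demo0/dir
  gluster volume heal demo info               # may list dir as needing heal
  gluster volume heal demo info split-brain   # should stay empty after this fix

The test itself, which follows, drives the same cases (dirty-only, replaced-brick blame, and a genuine mutual-blame split-brain) through the test-framework variables ($V0, $B0, ...).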
+ +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}; +TEST $CLI volume set $V0 self-heal-daemon off +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2 +TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +# Create base entry in indices/xattrop +echo "Data" > $M0/FILE +rm -f $M0/FILE +EXPECT "1" count_index_entries $B0/${V0}0 +EXPECT "1" count_index_entries $B0/${V0}1 +EXPECT "1" count_index_entries $B0/${V0}2 + +TEST mkdir $M0/dirty_dir +TEST mkdir $M0/pending_dir + +# Set dirty xattrs on all bricks to simulate the case where entry transaction +# succeeded only the pre-op phase. +TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}0/dirty_dir +TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/dirty_dir +TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}2/dirty_dir +create_brick_xattrop_entry $B0/${V0}0 dirty_dir +# Should not show up as split-brain. +EXPECT "0" afr_get_split_brain_count $V0 + +# replace/reset brick case where the new brick has dirty and the other 2 bricks +# blame it should not be reported as split-brain. +TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}0 +TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}1 +TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}2 +create_brick_xattrop_entry $B0/${V0}0 "/" +# Should not show up as split-brain. +EXPECT "0" afr_get_split_brain_count $V0 + +# Set pending xattrs on all bricks blaming each other to simulate the case of +# entry split-brain. +TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/pending_dir +TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}1/pending_dir +TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}2/pending_dir +create_brick_xattrop_entry $B0/${V0}0 pending_dir +# Should show up as split-brain. +EXPECT "1" afr_get_split_brain_count $V0 + +cleanup; diff -Nru glusterfs-3.12.14/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t glusterfs-3.12.15/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t --- glusterfs-3.12.14/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t 1970-01-01 00:00:00.000000000 +0000 +++ glusterfs-3.12.15/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t 2018-10-13 07:06:43.569097249 +0000 @@ -0,0 +1,44 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc + +cleanup; + +# Test to check that data self-heal does not leave any stale lock. 
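This test corresponds to the "afr: prevent winding inodelks twice for arbiter volumes" ChangeLog entry: data self-heal took the inodelk twice but released it only once, so a stale lock stayed on the brick and the next write to the file hung. Beyond the write-must-succeed check the test performs below, a leftover lock can also be inspected by hand through a brick statedump; a rough sketch, assuming a volume named demo and the default statedump directory /var/run/gluster (the exact dump layout varies between releases):

  gluster volume heal demo                    # kick off index heal
  gluster volume heal demo info               # wait until 0 entries remain
  gluster volume statedump demo               # one dump file per brick process
  grep -l inodelk /var/run/gluster/*.dump.*   # dumps still recording inode locks
  # an active lock still held on the healed file after heal info reports 0
  # entries would be the stale lock this fix removes

The test below reproduces the original hang scenario (arbiter brick down during a write, then healed) and checks that a subsequent write succeeds.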
+ +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}; +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2 +TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +# Create base entry in indices/xattrop +echo "Data" > $M0/FILE + +# Kill arbiter brick and write to FILE. +TEST kill_brick $V0 $H0 $B0/${V0}2 +echo "arbiter down" >> $M0/FILE +EXPECT 2 get_pending_heal_count $V0 + +# Bring it back up and let heal complete. +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 +TEST $CLI volume heal $V0 +EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 + +# write to the FILE must succeed. +echo "this must succeed" >> $M0/FILE +TEST [ $? -eq 0 ] +cleanup; diff -Nru glusterfs-3.12.14/VERSION glusterfs-3.12.15/VERSION --- glusterfs-3.12.14/VERSION 2018-09-06 17:07:17.993427486 +0000 +++ glusterfs-3.12.15/VERSION 2018-10-13 07:07:40.396278490 +0000 @@ -1 +1 @@ -v3.12.14-0 \ No newline at end of file +v3.12.15-0 \ No newline at end of file diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr-common.c glusterfs-3.12.15/xlators/cluster/afr/src/afr-common.c --- glusterfs-3.12.14/xlators/cluster/afr/src/afr-common.c 2018-09-06 17:06:24.637285844 +0000 +++ glusterfs-3.12.15/xlators/cluster/afr/src/afr-common.c 2018-10-13 07:06:43.592097322 +0000 @@ -2308,8 +2308,6 @@ */ for (i = 0; i < priv->child_count; i++) { if (!replies[i].valid || replies[i].op_ret == -1) { - if (priv->child_up[i]) - can_interpret = _gf_false; continue; } @@ -2577,6 +2575,42 @@ return 0; } +gf_boolean_t +afr_is_pending_set (xlator_t *this, dict_t *xdata, int type) +{ + int idx = -1; + afr_private_t *priv = NULL; + void *pending_raw = NULL; + int *pending_int = NULL; + int i = 0; + + priv = this->private; + idx = afr_index_for_transaction_type (type); + + if (dict_get_ptr (xdata, AFR_DIRTY, &pending_raw) == 0) { + if (pending_raw) { + pending_int = pending_raw; + + if (ntoh32 (pending_int[idx])) + return _gf_true; + } + } + + for (i = 0; i < priv->child_count; i++) { + if (dict_get_ptr (xdata, priv->pending_key[i], + &pending_raw)) + continue; + if (!pending_raw) + continue; + pending_int = pending_raw; + + if (ntoh32 (pending_int[idx])) + return _gf_true; + } + + return _gf_false; +} + static gf_boolean_t afr_can_start_metadata_self_heal(call_frame_t *frame, xlator_t *this) { @@ -2603,6 +2637,14 @@ continue; } + if (afr_is_pending_set (this, replies[i].xdata, + AFR_METADATA_TRANSACTION)) { + /* Let shd do the heal so that lookup is not blocked + * on getting metadata lock/doing the heal */ + start = _gf_false; + break; + } + if (gf_uuid_compare (stbuf.ia_gfid, replies[i].poststat.ia_gfid)) { start = _gf_false; break; @@ -2704,21 +2746,52 @@ afr_private_t *priv = NULL; call_frame_t *heal = NULL; int i = 0, first = -1; - gf_boolean_t need_heal = _gf_false; + gf_boolean_t 
name_state_mismatch = _gf_false; struct afr_reply *replies = NULL; int ret = 0; + unsigned char *par_readables = NULL; + unsigned char *success = NULL; + int32_t op_errno = 0; + uuid_t gfid = {0}; local = frame->local; replies = local->replies; priv = this->private; + par_readables = alloca0(priv->child_count); + success = alloca0(priv->child_count); + + ret = afr_inode_read_subvol_get (local->loc.parent, this, par_readables, + NULL, NULL); + if (ret < 0 || AFR_COUNT (par_readables, priv->child_count) == 0) { + /* In this case set par_readables to all 1 so that name_heal + * need checks at the end of this function will flag missing + * entry when name state mismatches*/ + memset (par_readables, 1, priv->child_count); + } for (i = 0; i < priv->child_count; i++) { if (!replies[i].valid) continue; + if (replies[i].op_ret == 0) { + if (uuid_is_null (gfid)) { + gf_uuid_copy (gfid, + replies[i].poststat.ia_gfid); + } + success[i] = 1; + } else { + if ((replies[i].op_errno != ENOTCONN) && + (replies[i].op_errno != ENOENT) && + (replies[i].op_errno != ESTALE)) { + op_errno = replies[i].op_errno; + } + } + + /*gfid is missing, needs heal*/ if ((replies[i].op_ret == -1) && - (replies[i].op_errno == ENODATA)) - need_heal = _gf_true; + (replies[i].op_errno == ENODATA)) { + goto name_heal; + } if (first == -1) { first = i; @@ -2726,30 +2799,53 @@ } if (replies[i].op_ret != replies[first].op_ret) { - need_heal = _gf_true; - break; + name_state_mismatch = _gf_true; } - if (gf_uuid_compare (replies[i].poststat.ia_gfid, - replies[first].poststat.ia_gfid)) { - need_heal = _gf_true; - break; - } + if (replies[i].op_ret == 0) { + /* Rename after this lookup may succeed if we don't do + * a name-heal and the destination may not have pending xattrs + * to indicate which name is good and which is bad so always do + * this heal*/ + if (gf_uuid_compare (replies[i].poststat.ia_gfid, + gfid)) { + goto name_heal; + } + } } - if (need_heal) { - heal = afr_frame_create (this, NULL); - if (!heal) - goto metadata_heal; - - ret = synctask_new (this->ctx->env, afr_lookup_selfheal_wrap, - afr_refresh_selfheal_done, heal, frame); - if (ret) { - AFR_STACK_DESTROY (heal); - goto metadata_heal; + if (name_state_mismatch) { + if (!priv->quorum_count) + goto name_heal; + if (!afr_has_quorum (success, this)) + goto name_heal; + if (op_errno) + goto name_heal; + for (i = 0; i < priv->child_count; i++) { + if (!replies[i].valid) + continue; + if (par_readables[i] && replies[i].op_ret < 0 && + replies[i].op_errno != ENOTCONN) { + goto name_heal; + } } - return ret; - } + } + + goto metadata_heal; + +name_heal: + heal = afr_frame_create (this, NULL); + if (!heal) + goto metadata_heal; + + ret = synctask_new (this->ctx->env, afr_lookup_selfheal_wrap, + afr_refresh_selfheal_done, heal, frame); + if (ret) { + AFR_STACK_DESTROY (heal); + goto metadata_heal; + } + return ret; + metadata_heal: ret = afr_lookup_metadata_heal_check (frame, this); @@ -5700,7 +5796,7 @@ int afr_selfheal_locked_metadata_inspect (call_frame_t *frame, xlator_t *this, inode_t *inode, gf_boolean_t *msh, - gf_boolean_t *pending) + unsigned char *pending) { int ret = -1; unsigned char *locked_on = NULL; @@ -5749,7 +5845,7 @@ int afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this, inode_t *inode, gf_boolean_t *dsh, - gf_boolean_t *pflag) + unsigned char *pflag) { int ret = -1; unsigned char *data_lock = NULL; @@ -5810,7 +5906,7 @@ int afr_selfheal_locked_entry_inspect (call_frame_t *frame, xlator_t *this, inode_t *inode, - gf_boolean_t *esh, 
gf_boolean_t *pflag) + gf_boolean_t *esh, unsigned char *pflag) { int ret = -1; int source = -1; @@ -5861,7 +5957,7 @@ sinks, healed_sinks, locked_replies, &source, pflag); - if ((ret == 0) && source < 0) + if ((ret == 0) && (*pflag & PFLAG_SBRAIN)) ret = -EIO; *esh = afr_decide_heal_info (priv, sources, ret); } @@ -5884,7 +5980,7 @@ gf_boolean_t *entry_selfheal, gf_boolean_t *data_selfheal, gf_boolean_t *metadata_selfheal, - gf_boolean_t *pending) + unsigned char *pending) { int ret = -1; @@ -5954,7 +6050,7 @@ gf_boolean_t data_selfheal = _gf_false; gf_boolean_t metadata_selfheal = _gf_false; gf_boolean_t entry_selfheal = _gf_false; - gf_boolean_t pending = _gf_false; + unsigned char pending = 0; dict_t *dict = NULL; int ret = -1; int op_errno = 0; @@ -5974,7 +6070,7 @@ goto out; } - if (pending) { + if (pending & PFLAG_PENDING) { size = strlen ("-pending") + 1; gf_asprintf (&substr, "-pending"); if (!substr) diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr.h glusterfs-3.12.15/xlators/cluster/afr/src/afr.h --- glusterfs-3.12.14/xlators/cluster/afr/src/afr.h 2018-09-06 17:06:24.642285857 +0000 +++ glusterfs-3.12.15/xlators/cluster/afr/src/afr.h 2018-10-13 07:06:43.598097341 +0000 @@ -36,6 +36,9 @@ #define ARBITER_BRICK_INDEX 2 +#define PFLAG_PENDING (1 << 0) +#define PFLAG_SBRAIN (1 << 1) + typedef int (*afr_lock_cbk_t) (call_frame_t *frame, xlator_t *this); typedef int (*afr_read_txn_wind_t) (call_frame_t *frame, xlator_t *this, int subvol); @@ -1284,4 +1287,7 @@ int afr_set_inode_local (xlator_t *this, afr_local_t *local, inode_t *inode); + +gf_boolean_t +afr_is_pending_set (xlator_t *this, dict_t *xdata, int type); #endif /* __AFR_H__ */ diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-common.c glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-common.c --- glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-common.c 2018-09-06 17:06:24.640285852 +0000 +++ glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-common.c 2018-10-13 07:06:43.595097332 +0000 @@ -1539,7 +1539,7 @@ afr_transaction_type type, unsigned char *locked_on, unsigned char *sources, unsigned char *sinks, uint64_t *witness, - gf_boolean_t *pflag) + unsigned char *pflag) { afr_private_t *priv = NULL; int i = 0; @@ -1567,7 +1567,7 @@ for (i = 0; i < priv->child_count; i++) { for (j = 0; j < priv->child_count; j++) if (matrix[i][j]) - *pflag = _gf_true; + *pflag |= PFLAG_PENDING; if (*pflag) break; } @@ -1649,6 +1649,8 @@ if (locked_on[i]) sinks[i] = 1; } + if (pflag) + *pflag |= PFLAG_SBRAIN; } /* One more class of witness similar to dirty in v2 is where no pending @@ -2174,44 +2176,6 @@ return 0; } - -gf_boolean_t -afr_is_pending_set (xlator_t *this, dict_t *xdata, int type) -{ - int idx = -1; - afr_private_t *priv = NULL; - void *pending_raw = NULL; - int *pending_int = NULL; - int i = 0; - - priv = this->private; - idx = afr_index_for_transaction_type (type); - - if (dict_get_ptr (xdata, AFR_DIRTY, &pending_raw) == 0) { - if (pending_raw) { - pending_int = pending_raw; - - if (ntoh32 (pending_int[idx])) - return _gf_true; - } - } - - for (i = 0; i < priv->child_count; i++) { - if (dict_get_ptr (xdata, priv->pending_key[i], - &pending_raw)) - continue; - if (!pending_raw) - continue; - pending_int = pending_raw; - - if (ntoh32 (pending_int[idx])) - return _gf_true; - } - - return _gf_false; -} - - gf_boolean_t afr_is_data_set (xlator_t *this, dict_t *xdata) { diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-data.c 
glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-data.c --- glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-data.c 2018-09-06 17:06:24.640285852 +0000 +++ glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-data.c 2018-10-13 07:06:43.595097332 +0000 @@ -611,7 +611,7 @@ unsigned char *sources, unsigned char *sinks, unsigned char *healed_sinks, unsigned char *undid_pending, - struct afr_reply *replies, gf_boolean_t *pflag) + struct afr_reply *replies, unsigned char *pflag) { int ret = -1; int source = -1; @@ -752,7 +752,7 @@ afr_selfheal_restore_time (frame, this, fd->inode, source, healed_sinks, locked_replies); - if (!is_arbiter_the_only_sink || !empty_file) { + if (!is_arbiter_the_only_sink && !empty_file) { ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name, 0, 0, data_lock); if (ret < priv->child_count) { diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-entry.c glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-entry.c --- glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-entry.c 2018-09-06 17:06:24.640285852 +0000 +++ glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-entry.c 2018-10-13 07:06:43.596097335 +0000 @@ -496,7 +496,7 @@ unsigned char *sources, unsigned char *sinks, unsigned char *healed_sinks, struct afr_reply *replies, int *source_p, - gf_boolean_t *pflag) + unsigned char *pflag) { int ret = -1; int source = -1; diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal.h glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal.h --- glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal.h 2018-09-06 17:06:24.641285855 +0000 +++ glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal.h 2018-10-13 07:06:43.596097335 +0000 @@ -172,7 +172,7 @@ afr_transaction_type type, unsigned char *locked_on, unsigned char *sources, unsigned char *sinks, uint64_t *witness, - gf_boolean_t *flag); + unsigned char *flag); int afr_selfheal_fill_matrix (xlator_t *this, int **matrix, int subvol, int idx, dict_t *xdata); @@ -286,7 +286,7 @@ unsigned char *sources, unsigned char *sinks, unsigned char *healed_sinks, unsigned char *undid_pending, - struct afr_reply *replies, gf_boolean_t *flag); + struct afr_reply *replies, unsigned char *flag); int __afr_selfheal_metadata_prepare (call_frame_t *frame, xlator_t *this, @@ -296,7 +296,7 @@ unsigned char *healed_sinks, unsigned char *undid_pending, struct afr_reply *replies, - gf_boolean_t *flag); + unsigned char *flag); int __afr_selfheal_entry_prepare (call_frame_t *frame, xlator_t *this, inode_t *inode, unsigned char *locked_on, @@ -304,7 +304,7 @@ unsigned char *sinks, unsigned char *healed_sinks, struct afr_reply *replies, int *source_p, - gf_boolean_t *flag); + unsigned char *flag); int afr_selfheal_unlocked_inspect (call_frame_t *frame, xlator_t *this, diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-metadata.c glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-metadata.c --- glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-metadata.c 2018-09-06 17:06:24.640285852 +0000 +++ glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-metadata.c 2018-10-13 07:06:43.596097335 +0000 @@ -318,7 +318,8 @@ unsigned char *locked_on, unsigned char *sources, unsigned char *sinks, unsigned char *healed_sinks, unsigned char *undid_pending, - struct afr_reply *replies, gf_boolean_t *pflag) + struct afr_reply *replies, + unsigned char *pflag) { int ret = -1; int source = -1; diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-name.c 
diff -Nru glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-name.c glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-name.c
--- glusterfs-3.12.14/xlators/cluster/afr/src/afr-self-heal-name.c 2018-09-06 17:06:24.641285855 +0000
+++ glusterfs-3.12.15/xlators/cluster/afr/src/afr-self-heal-name.c 2018-10-13 07:06:43.596097335 +0000
@@ -634,20 +634,26 @@
                         continue;
 
                 if ((replies[i].op_ret == -1) &&
-                    (replies[i].op_errno == ENODATA))
+                    (replies[i].op_errno == ENODATA)) {
                         *need_heal = _gf_true;
+                        break;
+                }
 
                 if (first_idx == -1) {
                         first_idx = i;
                         continue;
                 }
 
-                if (replies[i].op_ret != replies[first_idx].op_ret)
+                if (replies[i].op_ret != replies[first_idx].op_ret) {
                         *need_heal = _gf_true;
+                        break;
+                }
 
                 if (gf_uuid_compare (replies[i].poststat.ia_gfid,
-                                     replies[first_idx].poststat.ia_gfid))
+                                     replies[first_idx].poststat.ia_gfid)) {
                         *need_heal = _gf_true;
+                        break;
+                }
         }
 
         if (inode)
diff -Nru glusterfs-3.12.14/xlators/cluster/dht/src/dht-common.c glusterfs-3.12.15/xlators/cluster/dht/src/dht-common.c
--- glusterfs-3.12.14/xlators/cluster/dht/src/dht-common.c 2018-09-06 17:06:24.645285865 +0000
+++ glusterfs-3.12.15/xlators/cluster/dht/src/dht-common.c 2018-10-13 07:06:43.600097348 +0000
@@ -5051,6 +5051,7 @@
                 op_errno = ENOMEM;
                 goto err;
         }
+        local->first_up_subvol = dht_first_up_subvol (this);
 
         if (!xdata) {
                 xdata = dict_new ();
@@ -5073,6 +5074,10 @@
         call_count = local->call_cnt = conf->subvolume_cnt;
         subvolumes = conf->subvolumes;
 
+        /* In case of parallel-readdir, the readdir-ahead will be loaded
+         * below dht, in this case, if we want to enable or disable SKIP_DIRs
+         * it has to be done in opendir, so that prefetching logic in
+         * readdir-ahead, honors it */
         for (i = 0; i < call_count; i++) {
                 if (conf->readdir_optimize == _gf_true) {
                         if (subvolumes[i] != local->first_up_subvol) {
diff -Nru glusterfs-3.12.14/xlators/mgmt/glusterd/src/glusterd-handler.c glusterfs-3.12.15/xlators/mgmt/glusterd/src/glusterd-handler.c
--- glusterfs-3.12.14/xlators/mgmt/glusterd/src/glusterd-handler.c 2018-09-06 17:06:24.717286057 +0000
+++ glusterfs-3.12.15/xlators/mgmt/glusterd/src/glusterd-handler.c 2018-10-13 07:06:43.670097571 +0000
@@ -5222,6 +5222,10 @@
         brick_req->op = GLUSTERD_BRICK_STATUS;
         brick_req->name = "";
 
+        ret = dict_set_str (dict, "brick-name", brickinfo->path);
+        if (ret)
+                goto out;
+
         ret = dict_set_int32 (dict, "cmd", GF_CLI_STATUS_CLIENTS);
         if (ret)
                 goto out;
diff -Nru glusterfs-3.12.14/xlators/mgmt/glusterd/src/glusterd-op-sm.c glusterfs-3.12.15/xlators/mgmt/glusterd/src/glusterd-op-sm.c
--- glusterfs-3.12.14/xlators/mgmt/glusterd/src/glusterd-op-sm.c 2018-09-06 17:06:24.724286075 +0000
+++ glusterfs-3.12.15/xlators/mgmt/glusterd/src/glusterd-op-sm.c 2018-10-13 07:06:43.677097593 +0000
@@ -612,6 +612,9 @@
                         goto out;
                 brick_req->op = GLUSTERD_BRICK_STATUS;
                 brick_req->name = "";
+                ret = dict_set_str (dict, "brick-name", brickinfo->path);
+                if (ret)
+                        goto out;
         }
                 break;
         case GD_OP_REBALANCE:
diff -Nru glusterfs-3.12.14/xlators/nfs/server/src/nfs.c glusterfs-3.12.15/xlators/nfs/server/src/nfs.c
--- glusterfs-3.12.14/xlators/nfs/server/src/nfs.c 2018-09-06 17:06:24.752286149 +0000
+++ glusterfs-3.12.15/xlators/nfs/server/src/nfs.c 2018-10-13 07:06:43.704097680 +0000
@@ -1604,7 +1604,7 @@
 }
 
 int
-nfs_priv_to_dict (xlator_t *this, dict_t *dict)
+nfs_priv_to_dict (xlator_t *this, dict_t *dict, char *brickname)
 {
         int ret = -1;
         struct nfs_state *priv = NULL;
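The dht_opendir change above fills local->first_up_subvol before the fan-out loop that decides which subvolumes should skip directory entries when readdir-optimize is enabled; previously the field was still unset at that point, so the comparison never selected the intended subvolume. A simplified, hypothetical model of that decision (not DHT source; struct and helper names are invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct subvol {
        const char *name;
        int         up;
};

/* Stand-in for dht_first_up_subvol(): first subvolume that is online. */
static struct subvol *
first_up_subvol (struct subvol *subvols, int count)
{
        for (int i = 0; i < count; i++)
                if (subvols[i].up)
                        return &subvols[i];
        return NULL;
}

int
main (void)
{
        struct subvol subvols[] = {
                { "subvol-0", 0 }, { "subvol-1", 1 }, { "subvol-2", 1 }
        };
        int count = 3;

        /* Must be computed before the loop, as the opendir fix does. */
        struct subvol *first_up = first_up_subvol (subvols, count);

        for (int i = 0; i < count; i++) {
                /* Every subvolume except the first up one skips dir entries. */
                int skip_dirs = (&subvols[i] != first_up);
                printf ("%s: skip directory entries = %d\n",
                        subvols[i].name, skip_dirs);
        }
        return 0;
}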
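The glusterd and nfs changes above, together with the server.c hunk that follows, thread the requested brick's name down to the brick process so that client status is gathered only from connections bound to that brick; under brick multiplexing a single process hosts transports for several bricks, possibly from different volumes. A self-contained sketch of the filtering idea (not the server xlator; the struct fields merely stand in for xprt->xl_private->bound_xl->name and peerinfo->identifier):

#include <stdio.h>
#include <string.h>

struct transport {
        const char *bound_brick;   /* brick this connection is bound to */
        const char *client;        /* client address for reporting */
};

/* Report only the clients attached to the requested brick. */
static int
count_clients_for_brick (const struct transport *xprts, int n,
                         const char *brickname)
{
        int count = 0;

        for (int i = 0; i < n; i++) {
                if (strcmp (brickname, xprts[i].bound_brick) != 0)
                        continue;   /* connection belongs to another brick */
                printf ("client%d.hostname = %s\n", count, xprts[i].client);
                count++;
        }
        return count;
}

int
main (void)
{
        struct transport xprts[] = {
                { "vol0-brick-0", "10.0.0.1:1023" },
                { "vol1-brick-0", "10.0.0.2:1021" },
                { "vol0-brick-0", "10.0.0.3:1019" },
        };

        count_clients_for_brick (xprts, 3, "vol0-brick-0");
        return 0;
}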
diff -Nru glusterfs-3.12.14/xlators/protocol/server/src/server.c glusterfs-3.12.15/xlators/protocol/server/src/server.c
--- glusterfs-3.12.14/xlators/protocol/server/src/server.c 2018-09-06 17:06:24.775286210 +0000
+++ glusterfs-3.12.15/xlators/protocol/server/src/server.c 2018-10-13 07:06:43.726097750 +0000
@@ -225,7 +225,7 @@
 
 int
-server_priv_to_dict (xlator_t *this, dict_t *dict)
+server_priv_to_dict (xlator_t *this, dict_t *dict, char *brickname)
 {
         server_conf_t *conf = NULL;
         rpc_transport_t *xprt = NULL;
@@ -245,39 +245,47 @@
         pthread_mutex_lock (&conf->mutex);
         {
                 list_for_each_entry (xprt, &conf->xprt_list, list) {
-                        peerinfo = &xprt->peerinfo;
-                        memset (key, 0, sizeof (key));
-                        snprintf (key, sizeof (key), "client%d.hostname",
-                                  count);
-                        ret = dict_set_str (dict, key, peerinfo->identifier);
-                        if (ret)
-                                goto unlock;
-
-                        memset (key, 0, sizeof (key));
-                        snprintf (key, sizeof (key), "client%d.bytesread",
-                                  count);
-                        ret = dict_set_uint64 (dict, key,
-                                               xprt->total_bytes_read);
-                        if (ret)
-                                goto unlock;
-
-                        memset (key, 0, sizeof (key));
-                        snprintf (key, sizeof (key), "client%d.byteswrite",
-                                  count);
-                        ret = dict_set_uint64 (dict, key,
-                                               xprt->total_bytes_write);
-                        if (ret)
-                                goto unlock;
-
-                        memset (key, 0, sizeof (key));
-                        snprintf (key, sizeof (key), "client%d.opversion",
-                                  count);
-                        ret = dict_set_uint32 (dict, key,
-                                               peerinfo->max_op_version);
-                        if (ret)
-                                goto unlock;
+                        if (!strcmp (brickname,
+                                     xprt->xl_private->bound_xl->name)) {
+                                peerinfo = &xprt->peerinfo;
+                                memset (key, 0, sizeof (key));
+                                snprintf (key, sizeof (key),
+                                          "client%d.hostname",
+                                          count);
+                                ret = dict_set_str (dict, key,
+                                                    peerinfo->identifier);
+                                if (ret)
+                                        goto unlock;
+
+                                memset (key, 0, sizeof (key));
+                                snprintf (key, sizeof (key),
+                                          "client%d.bytesread",
+                                          count);
+                                ret = dict_set_uint64 (dict, key,
+                                                       xprt->total_bytes_read);
+                                if (ret)
+                                        goto unlock;
+
+                                memset (key, 0, sizeof (key));
+                                snprintf (key, sizeof (key),
+                                          "client%d.byteswrite",
+                                          count);
+                                ret = dict_set_uint64 (dict, key,
+                                                       xprt->total_bytes_write);
+                                if (ret)
+                                        goto unlock;
+
+                                memset (key, 0, sizeof (key));
+                                snprintf (key, sizeof (key),
+                                          "client%d.opversion",
+                                          count);
+                                ret = dict_set_uint32 (dict, key,
+                                                       peerinfo->max_op_version);
+                                if (ret)
+                                        goto unlock;
 
-                        count++;
+                                count++;
+                        }
                 }
         }
 unlock: