-rwxr-xr-x  build-aux/pkg-version                                 4
-rwxr-xr-x  buildrpm38.sh                                        95
-rw-r--r--  configure.ac                                        140
-rw-r--r--  extras/init.d/glusterd-FbRedhat.in                  214
-rw-r--r--  extras/init.d/glusterd.functions-FbRedhat.in        630
-rw-r--r--  extras/systemd/Makefile.am                           18
-rw-r--r--  extras/systemd/glusterd-gfproxyd.service             16
-rw-r--r--  extras/systemd/glusterd-gfproxyd.service.in          16
-rw-r--r--  extras/systemd/glusterd-mgmt.service                 15
-rw-r--r--  extras/systemd/glusterd-mgmt.service.in              15
-rw-r--r--  extras/systemd/glusterd-nfsd.service                 16
-rw-r--r--  extras/systemd/glusterd-nfsd.service.in              16
-rw-r--r--  extras/systemd/glusterd-shd.service                  17
-rw-r--r--  extras/systemd/glusterd-shd.service.in               17
-rw-r--r--  extras/systemd/glusterd.service.in                   17
-rw-r--r--  extras/systemd/glustereventsd.service                14
-rw-r--r--  extras/systemd/glusterfssharedstorage.service        13
-rw-r--r--  glusterfs.spec.in                                   576
-rw-r--r--  libglusterfs/src/compat.h                             2
-rwxr-xr-x  makerelease38.sh                                    153
-rw-r--r--  xlators/mount/fuse/utils/Makefile.am                  3
-rw-r--r--  xlators/mount/fuse/utils/umount.fuse.glusterfs.in    31
22 files changed, 1907 insertions, 131 deletions
diff --git a/build-aux/pkg-version b/build-aux/pkg-version
index 83d4a5f9136..6590fbc0525 100755
--- a/build-aux/pkg-version
+++ b/build-aux/pkg-version
@@ -18,7 +18,7 @@ get_version()
sub(/^v/,"") ; print $1
}'
- echo $PKG_VERSION | awk "$AWK_VERSION" | tr -cd '[:alnum:].'
+ echo $PKG_VERSION | awk "$AWK_VERSION"
}
get_release()
@@ -37,7 +37,7 @@ get_release()
else if (NF == 4) print $2, $3, "git" substr($4, 2)
}'
- echo $PKG_VERSION | awk "$AWK_RELEASE" | tr -cd '[:alnum:].'
+ echo $PKG_VERSION | awk "$AWK_RELEASE"
}
if test "x$1" = "x--full"; then
diff --git a/buildrpm38.sh b/buildrpm38.sh
new file mode 100755
index 00000000000..45f25da7a51
--- /dev/null
+++ b/buildrpm38.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# Simple script to clean-house and build some RPMs
+#
+
+RPMBUILD_BIN='/usr/bin/rpmbuild'
+
+function usage {
+cat << EOF
+
+Usage: $0 <release tag> <extra rpm build flags>
+
+e.g. "$0 4" builds RPMs with a version of 3.8_fb-4.
+
+e.g. "$0 4 --with asan" builds RPMS with a version of 3.8_fb-4 with ASAN turned on
+
+EOF
+exit 1
+}
+
+(( $# == 0 )) && usage
+
+echo -n "Stashing uncommitted files..."
+if STASH_OUTPUT=$(git stash); then
+ if echo $STASH_OUTPUT | grep -q "No local changes"; then
+ echo "No changes found"
+ else
+ # Make sure we clean up even if someone exits early on failure.
+ trap "git stash pop" EXIT
+ echo DONE
+ fi
+else
+ echo "Failed to stash uncommitted files, aborting!" && exit 1
+fi
+
+RELEASE_TAG=$1
+echo -n "Updating glusterfs.spec.in file..."
+if sed -i "s@%global release fb_release@%global release $RELEASE_TAG@g" glusterfs.spec.in; then
+ echo DONE
+else
+ echo FAILED && exit 1
+fi
+
+EXTRA_RPM_BUILD_FLAGS=${@:2}
+
+# We need to patch find-debuginfo.sh to prevent symbol stripping
+# while still building a debuginfo RPM which contains our source.
+# This makes debugging MUCH easier. This patch works for both
+# CentOS 5 & 6
+
+# Don't sudo for the svcscm user as this will break Jenkins
+[ $USER == "svcscm" ] || sudo ./patch-find-debuginfo.sh
+
+echo -n "Checking for .rpmmacros...."
+if grep -q "%_topdir" ~/.rpmmacros; then
+ echo DONE
+else
+ echo "not found"
+ echo "Adding _topdir to .rpmmacros..."
+ echo "%_topdir /home/$USER/local/rpmbuild" >> ~/.rpmmacros
+fi
+
+echo -n "Checking for ~/local/rpmbuild directory..."
+if [ -d ~/local/rpmbuild ]; then
+ echo DONE
+else
+ echo "not found"
+ echo "Creating rpmbuild directories..."
+ mkdir -vp ~/local/rpmbuild/BUILD
+ mkdir -vp ~/local/rpmbuild/BUILDROOT
+ mkdir -vp ~/local/rpmbuild/RPMS
+ mkdir -vp ~/local/rpmbuild/SOURCES
+ mkdir -vp ~/local/rpmbuild/SPECS
+ mkdir -vp ~/local/rpmbuild/SRPMS
+fi
+
+echo "Building GlusterFS..."
+source ./build_env
+./build.sh
+
+echo "Creating tarball for rpmbuild..."
+make -j dist
+echo -n "Restoring glusterfs.spec.in..."
+git checkout glusterfs.spec.in &> /dev/null
+echo DONE
+
+MY_TARBALL=~/local/rpmbuild/glusterfs-3.8.15_fb.tar.gz
+cp $(basename $MY_TARBALL) $MY_TARBALL
+MY_RPM_BUILD_FLAGS="--with fbextras --without georeplication"
+ALL_RPM_BUILD_FLAGS="$MY_RPM_BUILD_FLAGS $EXTRA_RPM_BUILD_FLAGS"
+if ! $RPMBUILD_BIN -tb $MY_TARBALL $ALL_RPM_BUILD_FLAGS; then
+ exit 1
+fi
+
+exit 0
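For reference, a typical invocation of the script above, matching its usage text:

    ./buildrpm38.sh 7 --with asan

This stashes any local edits (restored via the EXIT trap), rewrites the spec's fb_release placeholder to 7, runs build.sh and "make dist", then hands the tarball to rpmbuild -tb with "--with fbextras --without georeplication" plus the extra flags.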
diff --git a/configure.ac b/configure.ac
index ad502e33526..fe4ff5c1daa 100644
--- a/configure.ac
+++ b/configure.ac
@@ -6,8 +6,7 @@ dnl General Public License, version 3 or any later version (LGPLv3 or
dnl later), or the GNU General Public License, version 2 (GPLv2), in all
dnl cases as published by the Free Software Foundation.
-AC_INIT([glusterfs],
- [m4_esyscmd([build-aux/pkg-version --version])],
+AC_INIT([glusterfs],[3.8_fb],
[gluster-users@gluster.org],,[https://github.com/gluster/glusterfs.git])
AC_SUBST([PACKAGE_RELEASE],
@@ -33,7 +32,7 @@ if libtool --help 2>&1 | grep -q quiet; then
AM_LIBTOOLFLAGS="--quiet";
fi
-AC_CONFIG_HEADERS([config.h site.h])
+AC_CONFIG_HEADERS([config.h])
AC_CONFIG_FILES([Makefile
libglusterfs/Makefile
@@ -106,6 +105,8 @@ AC_CONFIG_FILES([Makefile
xlators/debug/trace/src/Makefile
xlators/debug/error-gen/Makefile
xlators/debug/error-gen/src/Makefile
+ xlators/debug/delay-gen/Makefile
+ xlators/debug/delay-gen/src/Makefile
xlators/debug/io-stats/Makefile
xlators/debug/io-stats/src/Makefile
xlators/protocol/Makefile
@@ -204,12 +205,17 @@ AC_CONFIG_FILES([Makefile
extras/init.d/glusterd-Redhat
extras/init.d/glusterd-FreeBSD
extras/init.d/glusterd-SuSE
+ extras/init.d/glusterd-FbRedhat
+ extras/init.d/glusterd.functions-FbRedhat
extras/ganesha/Makefile
extras/ganesha/config/Makefile
extras/ganesha/scripts/Makefile
extras/ganesha/ocf/Makefile
extras/systemd/Makefile
- extras/systemd/glusterd.service
+ extras/systemd/glusterd-mgmt.service
+ extras/systemd/glusterd-nfsd.service
+ extras/systemd/glusterd-gfproxyd.service
+ extras/systemd/glusterd-shd.service
extras/run-gluster.tmpfiles
extras/benchmarking/Makefile
extras/hook-scripts/Makefile
@@ -257,6 +263,12 @@ AC_CONFIG_FILES([Makefile
tests/basic/fuse/Makefile
tests/basic/gfapi/Makefile])
+if test "xBUILD_FBEXTRAS" = "xyes"; then
+AC_CONFIG_FILES([
+ xlators/mount/fuse/utils/umount.fuse.glusterfs
+ ])
+fi
+
AC_CANONICAL_HOST
AC_PROG_CC
@@ -279,19 +291,35 @@ if test "x$enable_debug" = "xyes"; then
CFLAGS="${CFLAGS} -g -O0 -DDEBUG"
else
BUILD_DEBUG=no
- CFLAGS="${CFLAGS} -g"
fi
-AC_ARG_WITH([fbextras], AC_HELP_STRING([--with-fbextras], [Enable Facebook specific extras.]))
+AC_ARG_WITH([fbextras],
+ AC_HELP_STRING([--with-fbextras],
+ [Enable Facebook specific extras.]))
if test "x$with_fbextras" = "xyes"; then
BUILD_FBEXTRAS=yes
else
BUILD_FBEXTRAS=no
fi
-AC_ARG_ENABLE([privport_prefer], AC_HELP_STRING([--disable-privport_prefer], [Disable preferred usage of privleged ports.]))
-if test "x$enable_privport_prefer" = "xno"; then
- CFLAGS="${CFLAGS} -DNO_PRIVPORT"
+AC_ARG_WITH([systemd],
+ AC_HELP_STRING([--with-systemd],
+ [Enable systemd support.]))
+if test "x$with_systemd" = "xyes"; then
+ WITH_SYSTEMD=1
+else
+ WITH_SYSTEMD=0
+fi
+
+AC_ARG_ENABLE([mempool],
+ AC_HELP_STRING([--enable-mempool],
+ [Enable the Gluster memory pooler.]))
+MEMPOOL_CFLAGS=""
+if test "x$enable_mempool" = "xyes"; then
+ MEMPOOL_CFLAGS="-DENABLE_MEMPOOL"
+ USE_MEMPOOL="yes"
+else
+ USE_MEMPOOL="no"
fi
case $host_os in
@@ -488,6 +516,16 @@ AC_SUBST(ZLIB_LIBS)
AC_CHECK_HEADERS([linux/falloc.h])
+AC_CHECK_HEADER([tirpc/rpc/rpc.h])
+
+case $host_os in
+ darwin*)
+ if ! test "`/usr/bin/sw_vers | grep ProductVersion: | cut -f 2 | cut -d. -f2`" -ge 5; then
+ AC_MSG_ERROR([You need at least OS X 10.5 (Leopard) to build Glusterfs])
+ fi
+ ;;
+esac
+
AC_CHECK_HEADERS([linux/oom.h], AC_DEFINE(HAVE_LINUX_OOM_H, 1, [have linux/oom.h]))
dnl Mac OS X does not have spinlocks
@@ -672,9 +710,14 @@ AC_SUBST(RDMA_SUBDIR)
# SYNCDAEMON section
+enable_georeplication=yes
AC_ARG_ENABLE([georeplication],
AC_HELP_STRING([--disable-georeplication],
- [Do not install georeplication components]))
+ [Do not install georeplication components]),
+ [enable_georeplication=no],[])
+
+echo "enable_georeplication=$enable_georeplication"
+enable_georeplication=no
BUILD_SYNCDAEMON=no
case $host_os in
@@ -689,9 +732,9 @@ case $host_os in
enable_georeplication=no
;;
esac
+SYNCDAEMON_SUBDIR=geo-replication
SYNCDAEMON_COMPILE=0
if test "x$enable_georeplication" != "xno"; then
- SYNCDAEMON_SUBDIR=geo-replication
SYNCDAEMON_COMPILE=1
BUILD_SYNCDAEMON="yes"
@@ -715,10 +758,7 @@ AC_SUBST(SYNCDAEMON_COMPILE)
AC_SUBST(SYNCDAEMON_SUBDIR)
# end SYNCDAEMON section
-# only install scripts from extras/geo-rep when enabled
-if test "x$enable_georeplication" != "xno"; then
- GEOREP_EXTRAS_SUBDIR=geo-rep
-fi
+GEOREP_EXTRAS_SUBDIR=geo-rep
AC_SUBST(GEOREP_EXTRAS_SUBDIR)
AM_CONDITIONAL(USE_GEOREP, test "x$enable_georeplication" != "xno")
@@ -843,8 +883,7 @@ fi
AC_SUBST(HAVE_LINKAT)
dnl check for Monotonic clock
-AC_CHECK_LIB([rt], [clock_gettime], ,
- AC_MSG_WARN([System doesn't have monotonic clock using contrib]))
+AC_CHECK_FUNC([clock_gettime], [has_monotonic_clock=yes], AC_CHECK_LIB([rt], [clock_gettime], , AC_MSG_WARN([System does not have monotonic clock using contrib])))
dnl Check for argp
AC_CHECK_HEADER([argp.h], AC_DEFINE(HAVE_ARGP, 1, [have argp]))
@@ -922,12 +961,16 @@ fi
if test "x$ac_cv_file__etc_redhat_release" = "xyes"; then
GF_DISTRIBUTION=Redhat
fi
+if uname -a | grep fbk &> /dev/null; then
+ GF_DISTRIBUTION=FbRedhat
+fi
AC_SUBST(GF_DISTRIBUTION)
GF_HOST_OS=""
GF_LDFLAGS="-rdynamic"
+ASAN_CFLAGS=""
BUILD_ASAN=no
if test "x$with_asan" = "xyes"; then
echo -n "checking for address sanitizer (ASAN) support... "
@@ -938,7 +981,7 @@ if test "x$with_asan" = "xyes"; then
if test $ret -eq 0 ; then
echo "yes"
BUILD_ASAN=yes
- GF_CFLAGS="$GF_CFLAGS -DASAN -fsanitize=address -O0 -ggdb"
+ ASAN_CFLAGS="-DASAN -fsanitize=address -O0 -ggdb"
GF_LDFLAGS="-gdb -static-libasan $GF_LDFLAGS"
else
echo "no"
@@ -947,17 +990,18 @@ if test "x$with_asan" = "xyes"; then
fi
fi
+TSAN_CFLAGS=""
BUILD_TSAN=no
if test "x$with_tsan" = "xyes"; then
echo -n "checking for thread sanitizer (TSAN) support... "
AC_LANG_CONFTEST([AC_LANG_PROGRAM()])
- $CC conftest.c $CFLAGS -fsanitize=address -o conftest > /dev/null 2> /dev/null
+ $CC conftest.c $CFLAGS -fsanitize=thread -o conftest > /dev/null 2> /dev/null
ret=$?
rm -f conftest.o conftest
if test $ret -eq 0 ; then
echo "yes"
BUILD_TSAN=yes
- GF_CFLAGS="$GF_CFLAGS -fsanitize=thread -O0 -ggdb -fPIC -pie"
+ TSAN_CFLAGS="-fsanitize=thread -O0 -ggdb -fPIC -pie"
GF_LDFLAGS="-gdb -static-libtsan $GF_LDFLAGS"
else
echo "no"
@@ -966,6 +1010,7 @@ if test "x$with_tsan" = "xyes"; then
fi
fi
+JEMALLOC_CFLAGS=""
BUILD_JEMALLOC=no
if test "x$with_jemalloc" = "xyes"; then
echo -n "checking for jemalloc support... "
@@ -984,42 +1029,9 @@ if test "x$with_jemalloc" = "xyes"; then
fi
fi
-TESTER_CFLAGS=""
-dnl include tirpc for FB builds
if test "x$BUILD_FBEXTRAS" = "xyes"; then
- TIRPC_CFLAGS="-I/usr/include/tirpc"
- GF_LDFLAGS="-ltirpc $GF_LDFLAGS"
- GF_CFLAGS="$TIRPC_CFLAGS $GF_CFLAGS -DIPV6_DEFAULT -DGF_FBEXTRAS"
- TESTER_CFLAGS="$TESTER_CFLAGS -ltirpc"
-fi
-
-dnl check for gcc -Werror=format-security
-saved_CFLAGS=$CFLAGS
-CFLAGS="-Wformat -Werror=format-security"
-AC_MSG_CHECKING([whether $CC accepts -Werror=format-security])
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], [cc_werror_format_security=yes], [cc_werror_format_security=no])
-echo $cc_werror_format_security
-if test "x$cc_werror_format_security" = "xno"; then
- CFLAGS="$saved_CFLAGS"
-else
- CFLAGS="$saved_CFLAGS $CFLAGS"
- GF_CFLAGS="$GF_CFLAGS $CFLAGS"
-fi
-
-dnl check for gcc -Werror=implicit-function-declaration
-saved_CFLAGS=$CFLAGS
-saved_GF_CFLAGS=$GF_CFLAGS
-CFLAGS="-Werror=implicit-function-declaration"
-GF_CFLAGS="-Werror=implicit-function-declaration"
-AC_MSG_CHECKING([whether $CC accepts -Werror=implicit-function-declaration])
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], [cc_werror_implicit=yes], [cc_werror_implicit=no])
-echo $cc_werror_implicit
-if test "x$cc_werror_implicit" = "xno"; then
- CFLAGS="$saved_CFLAGS"
- GF_CFLAGS="$saved_GF_CFLAGS"
-else
- CFLAGS="$saved_CFLAGS $CFLAGS"
- GF_CFLAGS="$saved_GF_CFLAGS $GF_CFLAGS"
+ TIRPC_CFLAGS="-I/usr/include/tirpc -DIPV6_DEFAULT"
+ GF_LDFLAGS="-ltirpc $GF_LDFLAGS"
fi
dnl clang is mostly GCC-compatible, but its version is much lower,
@@ -1083,6 +1095,9 @@ prefix=$old_prefix
case $host_os in
linux*)
GF_HOST_OS="GF_LINUX_HOST_OS"
+ GF_CFLAGS="${GF_CFLAGS} $MEMPOOL_CFLAGS $ASAN_CFLAGS $TSAN_CFLAGS \
+ $JEMALLOC_CFLAGS $TIRPC_CFLAGS"
+ GF_GLUSTERFS_CFLAGS="${GF_CFLAGS}"
GF_FUSE_CFLAGS="-DFUSERMOUNT_DIR=\\\"\$(bindir)\\\""
GLUSTERD_WORKDIR="${LOCALSTATEDIR}/lib/glusterd"
;;
@@ -1179,17 +1194,14 @@ fi
AC_SUBST(UMOUNTD_SUBDIR)
+AC_CHECK_PROG(NFUSR, nfusr, "yes", "no")
+AC_SUBST(NFUSR)
+
# enable debug section
AC_ARG_ENABLE([debug],
AC_HELP_STRING([--enable-debug],
[Enable debug build options.]))
-AC_ARG_ENABLE([mempool],
- AC_HELP_STRING([--disable-mempool],
- [Disable the Gluster memory pooler.]))
-if test "x$enable_mempool" = "xno"; then
- CFLAGS="${CFLAGS} -DDISABLE_MEMPOOL"
-fi
# syslog section
AC_ARG_ENABLE([syslog],
@@ -1378,7 +1390,7 @@ CONTRIBDIR='$(top_srcdir)/contrib'
AC_SUBST(CONTRIBDIR)
GF_CPPDEFINES='-D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS)'
-GF_CPPINCLUDES='-include $(top_builddir)/config.h -include $(top_builddir)/site.h -I$(top_srcdir)/libglusterfs/src -I$(top_builddir)/libglusterfs/src'
+GF_CPPINCLUDES='-include $(top_builddir)/config.h -I$(top_srcdir)/libglusterfs/src -I$(top_builddir)/libglusterfs/src'
GF_CPPFLAGS="$GF_CPPFLAGS $GF_CPPDEFINES $GF_CPPINCLUDES"
AC_SUBST([GF_CPPFLAGS])
@@ -1386,13 +1398,14 @@ AM_CONDITIONAL([GF_LINUX_HOST_OS], test "${GF_HOST_OS}" = "GF_LINUX_HOST_OS")
AM_CONDITIONAL([GF_DARWIN_HOST_OS], test "${GF_HOST_OS}" = "GF_DARWIN_HOST_OS")
AM_CONDITIONAL([GF_BSD_HOST_OS], test "${GF_HOST_OS}" = "GF_BSD_HOST_OS")
AM_CONDITIONAL([GF_FBEXTRAS], test "${BUILD_FBEXTRAS}" = "yes")
+AM_CONDITIONAL([GF_WITH_SYSTEMD], test "${WITH_SYSTEMD}" = "1")
+AC_SUBST(WITH_SYSTEMD)
AC_SUBST(GLUSTERD_WORKDIR)
AM_CONDITIONAL([GF_INSTALL_GLUSTERD_WORKDIR], test ! -d ${GLUSTERD_WORKDIR} && test -d ${sysconfdir}/glusterd )
AC_SUBST(GLUSTERD_VOLFILE)
AC_SUBST(GLUSTERFS_LIBEXECDIR)
AC_SUBST(GLUSTERFSD_MISCDIR)
-AC_SUBST(TESTER_CFLAGS)
dnl pkg-config versioning
dnl
@@ -1457,4 +1470,5 @@ echo "Experimental xlators : $BUILD_EXPERIMENTAL"
echo "ASAN enabled : $BUILD_ASAN"
echo "TSAN enabled : $BUILD_TSAN"
echo "jemalloc enabled : $BUILD_JEMALLOC"
+echo "mempool enabled : $USE_MEMPOOL"
echo
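Putting the new configure switches together, a developer build exercising all of them might look like (a sketch; flag spellings as defined above):

    ./configure --with-fbextras --with-systemd --enable-mempool --with-asan

--with-fbextras pulls in the tirpc headers and -DIPV6_DEFAULT, --with-systemd flips the GF_WITH_SYSTEMD conditional so the split glusterd-*.service units get installed, --enable-mempool adds -DENABLE_MEMPOOL, and --with-asan compiles with -fsanitize=address, with ASAN and mempool status reported in the closing summary.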
diff --git a/extras/init.d/glusterd-FbRedhat.in b/extras/init.d/glusterd-FbRedhat.in
new file mode 100644
index 00000000000..18be537cb6b
--- /dev/null
+++ b/extras/init.d/glusterd-FbRedhat.in
@@ -0,0 +1,214 @@
+#!/bin/bash
+#
+# Script to manage the various Gluster systemd services
+#
+#
+# ** DO NOT INCLUDE /etc/rc.d/init.d/functions **
+# ** This will break all automation because of systemctl redirect **
+#
+
+. /etc/rc.d/init.d/glusterd.functions
+
+PATH="$PATH:/usr/local/bin:/usr/bin:/bin:/usr/sbin"
+
+# BEGIN functions copied out of /etc/rc.d/init.d/functions
+if [ -z "${BOOTUP:-}" ]; then
+ if [ -f /etc/sysconfig/init ]; then
+ . /etc/sysconfig/init
+ else
+ # This all seem confusing? Look in /etc/sysconfig/init,
+ # or in /usr/share/doc/initscripts-*/sysconfig.txt
+ BOOTUP=color
+ RES_COL=60
+ MOVE_TO_COL="echo -en \\033[${RES_COL}G"
+ SETCOLOR_SUCCESS="echo -en \\033[1;32m"
+ SETCOLOR_FAILURE="echo -en \\033[1;31m"
+ SETCOLOR_WARNING="echo -en \\033[1;33m"
+ SETCOLOR_NORMAL="echo -en \\033[0;39m"
+ LOGLEVEL=1
+ fi
+ if [ "$CONSOLETYPE" = "serial" ]; then
+ BOOTUP=serial
+ MOVE_TO_COL=
+ SETCOLOR_SUCCESS=
+ SETCOLOR_FAILURE=
+ SETCOLOR_WARNING=
+ SETCOLOR_NORMAL=
+ fi
+fi
+
+success() {
+ [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_success
+ return 0
+}
+
+# Log that something failed
+failure() {
+ local rc=$?
+ [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_failure
+ [ -x /bin/plymouth ] && /bin/plymouth --details
+ return $rc
+}
+
+
+# Run some action. Log its output.
+action() {
+ local STRING rc
+
+ STRING=$1
+ echo -n "$STRING "
+ shift
+ "$@" && success $"$STRING" || failure $"$STRING"
+ rc=$?
+ echo
+ return $rc
+}
+
+echo_success() {
+ [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+ echo -n "["
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_SUCCESS
+ echo -n $" OK "
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+ echo -n "]"
+ echo -ne "\r"
+ return 0
+}
+
+echo_failure() {
+ [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+ echo -n "["
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_FAILURE
+ echo -n $"FAILED"
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+ echo -n "]"
+ echo -ne "\r"
+ return 1
+}
+# END copy from /etc/rc.d/init.d/functions
+
+_start_service()
+{
+ _service=$1
+ _desc=$2
+ [ -z "$_desc" ] && _desc=$_service
+ action "Starting $_desc..." /bin/systemctl start "$_service"
+}
+
+_stop_service()
+{
+ _service=$1
+ _desc=$2
+ [ -z "$_desc" ] && _desc=$_service
+ action "Stopping $_desc..." /bin/systemctl stop "$_service"
+}
+
+# SHD Management Functions
+start_shd()
+{
+ _start_service glusterd-shd "self-heal daemon"
+}
+
+stop_shd()
+{
+ _stop_service glusterd-shd "self-heal daemon"
+}
+
+restart_shd()
+{
+ stop_shd
+ start_shd
+}
+
+# NFSd Management Functions
+start_nfsd()
+{
+ _start_service glusterd-nfsd "nfs daemon"
+}
+
+stop_nfsd()
+{
+ _stop_service glusterd-nfsd "nfs daemon"
+}
+
+restart_nfsd()
+{
+ stop_nfsd
+ start_nfsd
+}
+
+# GFProxyd Management Functions
+start_gfproxyd()
+{
+ _start_service glusterd-gfproxyd "gfproxy daemon"
+}
+
+stop_gfproxyd()
+{
+ _stop_service glusterd-gfproxyd "gfproxy daemon"
+}
+
+restart_gfproxyd()
+{
+ stop_gfproxyd
+ start_gfproxyd
+}
+
+# mgmt daemon Management Functions
+start_mgmt()
+{
+ _start_service glusterd-mgmt "mgmt daemon"
+}
+
+stop_mgmt()
+{
+ _stop_service glusterd-mgmt "mgmt daemon"
+ pkill -f '^/usr/sbin/glusterd.*'
+}
+
+restart_mgmt()
+{
+ stop_mgmt
+ start_mgmt
+}
+
+start()
+{
+ start_mgmt
+ start_nfsd
+ start_shd
+ #start_gfproxyd
+}
+
+stop()
+{
+ stop_mgmt
+ stop_nfsd
+ stop_shd
+ #stop_gfproxyd
+ # Stop the bricks, since they are not managed by systemd
+ pkill -f '^/usr/sbin/glusterfsd.*'
+}
+
+restart()
+{
+ stop
+ start
+}
+
+case $1 in
+ start) start ;;
+ start_nfsd) start_nfsd ;;
+ start_shd) start_shd ;;
+ stop) stop ;;
+ stop_nfsd) stop_nfsd ;;
+ stop_shd) stop_shd ;;
+ #stop_gfproxyd) stop_gfproxyd ;;
+ restart) restart;;
+ restart_nfsd) restart_nfsd ;;
+ restart_shd) restart_shd ;;
+ #restart_gfproxyd) restart_gfproxyd ;;
+ checkquorum) check_quorum ;;
+esac
+
+exit 0
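Because the script dispatches on $1, individual daemons can be cycled without disturbing the rest, e.g.:

    /etc/init.d/glusterd restart_shd   # bounce only the self-heal daemon
    /etc/init.d/glusterd stop_nfsd     # stop only the NFS daemon

Note that the unconditional "exit 0" means the checkquorum verb reports its verdict through the script's output rather than its exit status.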
diff --git a/extras/init.d/glusterd.functions-FbRedhat.in b/extras/init.d/glusterd.functions-FbRedhat.in
new file mode 100644
index 00000000000..c3843ac8594
--- /dev/null
+++ b/extras/init.d/glusterd.functions-FbRedhat.in
@@ -0,0 +1,630 @@
+
+# Include this file only on CentOS 6, it breaks things on CentOS 7
+CENTOS_RELEASE=$(/usr/lib/rpm/redhat/dist.sh --distnum)
+if [ "$CENTOS_RELEASE" == "6" ]; then
+ . /etc/rc.d/init.d/functions
+fi
+
+PATH="$PATH:/usr/local/bin:/usr/bin:/bin:/usr/sbin"
+
+GLUSTERD_DATA_DIR="/data/gluster_vols"
+GLUSTERD_CONF_DIR="/var/lib/glusterd"
+GLUSTERD_PORT=24007
+# Can be over-written by the glusterfs.root_mount SMC service/tier property
+GLUSTER_ROOT_MOUNT="/mnt/groot"
+# Can be over-written by the glusterfs.root_volume SMC service/tier property
+GLUSTER_ROOT_VOLUME="groot"
+HOST_IP=$(host $HOSTNAME | awk '{print $NF}')
+OOM_SCORE_ADJ="-1000"
+NICENESS="-10"
+
+NETCAT_OPTS="-w1 -6"
+
+# Simple port probe.
+probe_glusterd()
+{
+ return $(nc $NETCAT_OPTS $1 $2 < /dev/null)
+}
+
+#
+# FUNCTION wait_for_smc
+#
+# DESCRIPTION: As you might guess, this function just sits around and waits
+# for the SMC proxy to start, or it times out, whichever comes first.
+#
+wait_for_smc()
+{
+ CNT=0
+ while ! (smcc tiers $HOSTNAME &> /dev/null) && (( $CNT < $SMC_TIMEOUT ))
+ do
+ echo "Gluster init waiting for SMC proxy..." && sleep 1
+ CNT=$(($CNT+1))
+ done
+ if (( $CNT >= $SMC_TIMEOUT )); then
+ echo_failure; echo "Timed out waiting on SMC"
+ return 1
+ else
+ echo_success && echo "SMC proxy is alive!"
+ return 0
+ fi
+}
+
+#
+# FUNCTION set_smc_tier
+#
+# DESCRIPTION: Tries to find the storage.gluster SMC tier for this host;
+# if it finds the tier it will set a few environment variables to their
+# SMC values.
+#
+set_smc_tier()
+{
+ [ -n "$GLUSTER_SMC_TIER" ] && return 0
+ wait_for_smc || return 1
+ if GLUSTER_SMC_TIER=$(smcc tiers $HOSTNAME | \
+ /bin/grep "storage.gluster" 2> /dev/null); then
+ TIER_VOL_DIR=$(smcc getprop $GLUSTER_SMC_TIER \
+ glusterfs.data_dir 2>/dev/null) && GLUSTERD_DATA_DIR="$TIER_VOL_DIR"
+ TIER_ROOT_VOLUME=$(smcc getprop $GLUSTER_SMC_TIER \
+ glusterfs.root_volume 2>/dev/null) && [ -n "$TIER_ROOT_VOLUME" ] &&
+ GLUSTER_ROOT_VOLUME="$TIER_ROOT_VOLUME"
+ SVC_ROOT_VOLUME=$(smcc getsvcprop $GLUSTER_SMC_TIER \
+ $HOSTNAME glusterfs.root_volume 2>/dev/null) &&
+ [ -n "$SVC_ROOT_VOLUME" ] && GLUSTER_ROOT_VOLUME="$SVC_ROOT_VOLUME"
+ TIER_ROOT_MOUNT=$(smcc getprop $GLUSTER_SMC_TIER glusterfs.root_mount \
+ 2> /dev/null) && [ -n "$TIER_ROOT_MOUNT" ] &&
+ GLUSTER_ROOT_MOUNT="$TIER_ROOT_MOUNT"
+ SVC_ROOT_MOUNT=$(smcc getsvcprop $GLUSTER_SMC_TIER $HOSTNAME \
+ glusterfs.root_mount 2> /dev/null) && [ -n "$SVC_ROOT_MOUNT" ] &&
+ GLUSTER_ROOT_MOUNT="$SVC_ROOT_MOUNT"
+ SVC_UUID=$(smcc getsvcprop $GLUSTER_SMC_TIER $HOSTNAME glusterfs.uuid \
+ 2> /dev/null)
+ NICE_DAEMON=$(smcc getprop $GLUSTER_SMC_TIER \
+ glusterfs.nice_daemon 2> /dev/null)
+ WARM_INODE_CACHE=$(smcc getprop $GLUSTER_SMC_TIER \
+ glusterfs.warm_inode_cache 2> /dev/null)
+ # Fatal if we don't find any services
+ TIER_SERVICES=($(smcc ls $GLUSTER_SMC_TIER | /bin/cut -d: -f1)) || return 1
+ return 0
+ fi
+ return 1
+}
+
+# FUNCTION nice_daemon
+#
+# DESCRIPTION: Nice the glusterfsd (brick) and glusterd (management)
+# daemons. Also, adjust their OOM scores to prevent the OOM killer
+# from killing them in low-memory conditions.
+#
+# Also consider adjusting vm.min_free_kbytes kernel property via
+# /etc/sysctl.conf and disabling swap (swapoff -a).
+#
+nice_daemon()
+{
+ set_smc_tier || return 1
+ if [ "$NICE_DAEMON" == "1" ]; then
+ sleep 2
+ renice $NICENESS -g $(pgrep -x glusterfsd) &> /dev/null && \
+ echo_success && echo "Nice'ing glusterfsd..."
+ renice $NICENESS -g $(pgrep -x glusterd) &> /dev/null && \
+ echo_success && echo "Nice'ing glusterd..."
+ for p in $(pgrep -x glusterfsd);do echo $OOM_SCORE_ADJ > \
+ /proc/$p/oom_score_adj;done
+ for p in $(pgrep -x glusterd);do echo $OOM_SCORE_ADJ > \
+ /proc/$p/oom_score_adj;done
+ echo_success && echo "Adjusting OOM score..."
+ fi
+}
+
+#
+# FUNCTION set_bricks
+#
+# DESCRIPTION: Populates "$BRICKS" with a list of hostnames which are
+# found to be in the groot volume. Currently this won't work for clusters
+# with more than one volume.
+#
+set_bricks()
+{
+ if [ -z "$BRICKS" ]; then
+ if ! BRICKS=($(gluster volume info all | grep -E "^Brick[0-9]+:" |
+ awk '{print $NF}' | cut -d: -f1)); then
+ echo "Unable to find any bricks."
+ return 1
+ else
+ echo "Found ${#BRICKS[@]} bricks..."
+ fi
+ fi
+ return 0
+}
+
+set_hosted_vols()
+{
+ local ALL_VOLS=($(\ls $GLUSTERD_CONF_DIR/vols))
+ for VOL in ${ALL_VOLS[@]}; do
+ if grep ${HOSTNAME/.facebook.com/} $GLUSTERD_CONF_DIR/vols/$VOL/info &> /dev/null; then
+ HOSTED_VOLS+=($VOL)
+ fi
+ done
+}
+
+#
+# FUNCTION set_replica_cnt
+#
+# DESCRIPTION: Sets $REPLICA_CNT to the current replication factor for the
+# cluster.
+#
+set_replica_cnt()
+{
+ set_hosted_vols
+ if [ -n "$REPLICA_CNT" ]; then
+ return 0
+ fi
+
+ for VOL in ${HOSTED_VOLS[@]}; do
+ REPLICA_CNT=$(grep disperse_count /var/lib/glusterd/vols/$VOL/info | cut -d= -f2)
+ if (( ${REPLICA_CNT:-0} > 0 )); then
+ return 0
+ fi
+ done
+
+ if BRICK_NO_STR=$(gluster volume info all | grep -E \
+ "Number of Bricks: [0-9]+ x [0-9] = [0-9]+"); then
+ REPLICA_CNT=$(echo $BRICK_NO_STR | grep "Number of Bricks" |
+ awk '{print $6}')
+ elif BRICK_NO_STR=$(gluster volume info all | grep -E \
+ "Number of Bricks: [0-9]+"); then
+ REPLICA_CNT=$(echo $BRICK_NO_STR | grep "Number of Bricks" |
+ awk '{print $NF}')
+ else
+ echo "Unable to determine number of brick!"
+ return 1
+ fi
+ return 0
+}
+
+#
+# FUNCTION set_node_index
+#
+# DESCRIPTION: Sets $NODE_INDEX to the position this node has in the
+# brick list given by the "volume info all" command. We will use this
+# for quorum calculations.
+#
+set_node_index()
+{
+ set_bricks || return 1
+ if [ -n "$NODE_INDEX" ]; then
+ return 0
+ fi
+ local POS=0
+ local BRICK=""
+ for BRICK in ${BRICKS[@]}
+ do
+ if echo $BRICK | grep -E "[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}" &> /dev/null; then
+ BRICK=$(host $BRICK | awk '{print $NF}')
+ fi
+ BRICK_IP=$(host $BRICK | awk '{print $NF}')
+ if [ "$BRICK_IP" = "$HOST_IP" ]; then
+ NODE_INDEX=$POS
+ return 0
+ fi
+ POS=$(($POS+1))
+ done
+ return 1
+}
+
+#
+# FUNCTION set_replicas
+#
+# DESCRIPTION: Sets $REPLICAS to a list of hosts which are replicas
+# of this host.
+#
+set_replicas()
+{
+ set_replica_cnt || return 1
+ set_bricks || return 1
+ if ! set_node_index; then
+ echo "$HOSTNAME not a member of any replica group."
+ return 2
+ fi
+ local MODULUS=$((($NODE_INDEX+1) % $REPLICA_CNT))
+ local START_POS=0
+ if (( $MODULUS == 0 )); then
+ START_POS=$(($NODE_INDEX-$REPLICA_CNT+1))
+ else
+ START_POS=$(($NODE_INDEX-$MODULUS+1))
+ fi
+ local OFFSET=0
+ while (( $OFFSET < $REPLICA_CNT ))
+ do
+ POS=$(($OFFSET+$START_POS))
+ if (( $POS != $NODE_INDEX )); then
+ REPLICAS+=(${BRICKS[$POS]})
+ fi
+ OFFSET=$(($OFFSET + 1))
+ done
+}
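A worked example of the grouping arithmetic above, assuming a six-brick volume with REPLICA_CNT=3 and this host at NODE_INDEX=4:

    MODULUS   = (4+1) % 3 = 2          # non-zero branch
    START_POS = 4 - 2 + 1 = 3          # replica group spans positions 3..5
    REPLICAS  = (BRICKS[3] BRICKS[5])  # our own index (4) is skipped

i.e. the function yields exactly the other two members of the second replica-3 group.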
+
+#
+# FUNCTION set_live_replica
+#
+# DESCRIPTION: Sets $LIVE_REPLICA to a host from the $REPLICAS list which is
+# confirmed to be "alive" by way of a probe sent to the host's Gluster
+# management port (we can't use the brick port since it is dynamic).
+#
+set_live_replica()
+{
+ set_replicas || return 0
+ local REPLICA=""
+ for REPLICA in ${REPLICAS[@]}
+ do
+ echo -n "Checking host $REPLICA..."
+ if probe_glusterd $REPLICA $GLUSTERD_PORT; then
+ echo "ALIVE, setting as replica host."
+ LIVE_REPLICA=$REPLICA
+ return 0
+ else
+ echo "DEAD"
+ fi
+ done
+ return 1
+}
+
+#
+# FUNCTION: probe_peer
+#
+# DESCRIPTION: This function will find a working host in the host's SMC tier
+# to probe.
+#
+probe_peer()
+{
+ for HOST in ${TIER_SERVICES[@]};
+ do
+ if [ ! "$HOST" == "$HOSTNAME" ] &&
+ probe_glusterd $HOST $GLUSTERD_PORT; then
+ if gluster peer probe $HOST &> /dev/null; then
+ echo_success && echo "Probed @ $HOST"
+ return 0
+ else
+ echo_failure; echo "Failed to probe $HOST"
+ fi
+ fi
+ done
+ return 1
+}
+
+#
+# FUNCTION: sync_uuid_smc
+#
+# DESCRIPTION: This function will copy the host's UUID into SMC for later
+# use, e.g. re-adding a node to a cluster after re-imaging.
+#
+sync_uuid_smc()
+{
+ if ! smcc setsvcprop $GLUSTER_SMC_TIER $HOSTNAME glusterfs.uuid $1 &> \
+ /dev/null; then
+ echo_failure; echo "Failed to save UUID to SMC"
+ return 1
+ fi
+ echo_success && echo "Sync'd UUID to SMC"
+ return 0
+}
+
+#
+# FUNCTION: smartmount_vol
+#
+# DESCRIPTION: This function figures out how to mount a Gluster volume in
+# an SMC tier by trying to find a host which has a working daemon. Once
+# a working daemon is found it will attempt to mount against that node.
+# After the initial mount is made and the cluster topology is
+# downloaded to the client, this host is no longer required.
+#
+smartmount_vol()
+{
+ set_smc_tier || return 1
+ /sbin/modprobe fuse || { echo "Failed to load FUSE!"; return 1; }
+ local VOLUME="$1"
+ local MOUNT="$2"
+ rpm -q nmap &> /dev/null || yum -y -q install nmap &> /dev/null
+ for HOST in ${TIER_SERVICES[@]};
+ do
+ if probe_glusterd $HOST $GLUSTERD_PORT; then
+ echo_success && echo "Found GlusterFS host @ $HOST"
+ if grep -E "^[[:graph:]]+ $MOUNT fuse.glusterfs" /proc/mounts &> /dev/null; then
+ echo_success && echo "$MOUNT already mounted"
+ return 0
+ elif mkdir -p "$GLUSTER_ROOT_MOUNT" &> /dev/null &&
+ mount -t glusterfs $HOST:/"$VOLUME" "$MOUNT" &&
+ sleep 1 && cat /proc/mounts | grep "$MOUNT" &> /dev/null; then
+ echo_success && echo "Mounted GlusterFS $VOLUME @ $MOUNT"
+ return 0
+ else
+ echo_failure; echo "Failed to mount from $HOST"
+ fi
+ fi
+ done
+}
+
+#
+# FUNCTION: patch_etc_services
+#
+# DESCRIPTION: Patch /etc/services from the get-go so we don't
+# steal fbagent's port. cfengine can handle this as well, but
+# it takes some time to run, so we don't want to take a chance
+# given how vital it is.
+#
+patch_etc_services()
+{
+ if ! grep "fbagent.*988" /etc/services &> /dev/null; then
+ grep "fbagent.*988/tcp" /etc/services || \
+ echo "fbagent 988/tcp" >> /etc/services
+ grep "fbagent.*988/udp" /etc/services || \
+ echo "fbagent 988/udp" >> /etc/services
+ echo_success && echo "Added fbagent to /etc/services"
+ fi
+}
+
+#
+# FUNCTION: heal_volume
+#
+# DESCRIPTION: Heal volume will traverse a given volume stat'ing each
+# file in order to trigger a self-heal & ensure the file is re-mirrored
+# to a host which has been re-imaged or otherwise become out of sync.
+#
+heal_volume()
+{
+ set_smc_tier || return 1
+ local VOLUME="$(echo $1 | sed 's/\./_/g')"
+ local CONCURRENT_HEALS="2"
+ [ -n "$2" ] && CONCURRENT_HEALS="$2"
+ local TMP_MOUNT="/tmp/$VOLUME.healer"
+ [ -d "$TMP_MOUNT" ] || mkdir -p $TMP_MOUNT
+ cat /proc/mounts | grep "$TMP_MOUNT" &> /dev/null && umount "$TMP_MOUNT"
+ if smartmount_vol "$VOLUME" "$TMP_MOUNT"; then
+ umount "$TMP_MOUNT"
+ smartmount_vol "$VOLUME" "$TMP_MOUNT"
+ cd "$TMP_MOUNT"
+ for ((CNT=1; CNT<=$CONCURRENT_HEALS; CNT++))
+ do
+ for ENTRY in $(ls | sed -n "$CNT~""$CONCURRENT_HEALS""p");do
+ echo "Healing $ENTRY..." &&
+ ( [ -d "$ENTRY" ] && \
+ ls "$ENTRY"/* | xargs -n50 -P1 stat >/dev/null ) ||
+ stat "$ENTRY" &> /dev/null
+ done &
+ done
+ cd /
+ wait
+ # Don't umount here, as the actual heals are backgrounded by
+ # the FUSE client. If we umount now they will go unfinished.
+ # (Don't worry, this all goes away as of v3.3).
+ echo_success && echo "Healed $VOLUME"
+ else
+ echo_failure; echo "Failed to heal $VOLUME"
+ return 1
+ fi
+}
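For example, healing the groot volume with four parallel walkers would be invoked as:

    heal_volume groot 4

Note the first argument has its dots rewritten to underscores (sed 's/\./_/g') before use, and the second argument defaults to 2 concurrent heal streams when omitted.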
+
+#
+# FUNCTION: check_config
+#
+# DESCRIPTION: This function verifies the host's Gluster configuration and, if
+# necessary, will restore the host's UUID & re-sync the configuration from a
+# working node in the cluster. Afterwards it will re-create the volume
+# directories and trigger a self-heal on all files.
+#
+# NOTE: This function will only run if the node is *not* Gluster MService
+# managed, as the MService handles these functions and then some. It's
+# here for cases where we are testing out new configs but still want to be
+# resilient through re-imaging cycles. For long-term production use the
+# MService should be used.
+#
+check_config()
+{
+ # If the host isn't listed in a storage.gluster.* tier do nothing
+ set_smc_tier || return 0
+ # If tier uses Gluster MService don't do anything, the MService
+ # will handle these functions
+ smcc getprop $GLUSTER_SMC_TIER fbd_package 2>&1 |
+ grep -E "gluster_mservice|antfarm" &> /dev/null && return 0
+ LOCAL_UUID=$(cat $GLUSTERD_CONF_DIR/glusterd.info 2> /dev/null | cut -d= -f2)
+
+ if [ -n "$SVC_UUID" ]; then
+ # We have a storaged UUID in SMC, two cases, either
+ # we have been re-imaged, or we just need to sync it to SMC
+ if ! grep "UUID=$SVC_UUID" $GLUSTERD_CONF/glusterd.info &> /dev/null; then
+ # SMC UUID doesn't match, restore it!
+ echo "UUID=$SVC_UUID" > $GLUSTERD_CONF/glusterd.info
+ echo_success && echo "Restored UUID from SMC"
+ start_daemon
+ sleep 5
+ probe_peer
+ sleep 5
+ stop
+ sleep 5
+ start_daemon
+ sleep 5
+ if VOL_DIRS=($(gluster volume info | grep -Eo \
+ "(^Brick[0-9]+: $HOSTNAME)|(^Brick[0-9]+: $(echo $HOSTNAME |
+ sed 's/.facebook.com//g')):$GLUSTERD_DATA_DIR.*" |
+ cut -d: -f3)); then
+ stop
+ start_daemon
+ for VOL_DIR in ${VOL_DIRS[@]}; do
+ mkdir -p "$VOL_DIR"
+ heal_volume "${VOL_DIR##*/}"
+ done
+ echo_success && echo "Created volume dirs"
+ else
+ echo_failure; echo "No volume dirs found"
+ fi
+ fi
+ else
+ # We don't have any UUID stored in SMC, either we need to record it
+ # or this is a completely fresh install.
+ if [ -z "$LOCAL_UUID" ]; then
+ # Not even a local UUID, fresh install case
+ start_daemon
+ sleep 5
+ if ! LOCAL_UUID=$(cat $GLUSTERD_CONF_DIR/glusterd.info | cut -d= -f2); then
+ echo_failure; echo "UUID not generated"
+ return 1
+ fi
+ stop
+ fi
+ sync_uuid_smc $LOCAL_UUID
+ fi
+ return 0
+}
+
+#
+# FUNCTION: mount_root
+#
+# DESCRIPTION: Mount root will attempt to find a defined "root" volume which
+# is assigned to this host and mount it.
+#
+mount_root()
+{
+ if ! set_smc_tier; then
+ echo_failure; echo "Mounting root not possible, no GFS SMC tier found"
+ return 1
+ fi
+ if [ -z "$SVC_UUID" ]; then
+ echo_failure;echo "Not mounting, no UUID in SMC, new node?"
+ return 1
+ fi
+ if smartmount_vol $GLUSTER_ROOT_VOLUME $GLUSTER_ROOT_MOUNT; then
+ return 0
+ else
+ echo_failure; echo \
+ "WARNING: GlusterFS not mounted @ $GLUSTER_ROOT_MOUNT" && return 1
+ fi
+}
+
+#
+# FUNCTION: warm_inode_cache
+#
+# DESCRIPTION: This function effectively "pre-warms" the inode cache of a
+# Gluster host by simply doing an ls -lR on the data directory. This is
+# very useful for hosts which run with only 1 spindle as the number of
+# meta-data requests which flood a host which participates in a cluster
+# with large numbers of files creates head contention. The result of this
+# contention can be a cluster which is unresponsive and/or laggy. Loading
+# this meta-data into memory ahead of time eliminates this problem.
+#
+warm_inode_cache()
+{
+ # Don't fail here, attempt to run with defaults
+ set_smc_tier
+ if [ "$WARM_INODE_CACHE" == "1" ] && [ -n "$GLUSTERD_DATA_DIR" ] && \
+ [ -d "$GLUSTERD_DATA_DIR" ]; then
+ echo -n "Warming inode cache ($GLUSTERD_DATA_DIR)..."
+ mkdir -p $GLUSTERD_DATA_DIR
+ if CNT=$(ls -lR $GLUSTERD_DATA_DIR | wc -l); then
+ echo -n "$CNT entries"
+ echo_success && echo ""
+ else
+ echo_failure && echo ""
+ fi
+ fi
+ return 0
+}
+
+#
+# FUNCTION: check_quorum
+#
+# DESCRIPTION: Checks the quorum status of the local node. Returns non-zero
+# if the node quorum margin is <= 0, where node margin is defined by how many
+# nodes can be downed before we have a loss of quorum. This will principally
+# be used by FBAR to easily figure out if it can remediate a Gluster node
+# (it can call this via SSH).
+#
+check_quorum()
+{
+ # Return 0 here so FBAR knows it's ok to take a spare or otherwise
+ # dead node.
+ if ! pgrep glusterd &> /dev/null; then
+ echo "glusterd not running!"
+ return 1
+ fi
+ set_replica_cnt || return 1
+ set_replicas
+ local REPLICAS_RET_CODE=$?
+ if (( $REPLICAS_RET_CODE == 2 )); then
+ return 0
+ elif (( $REPLICAS_RET_CODE != 0 )); then
+ return 1
+ fi
+
+ local REDUNDANCY_CNT=0
+ for VOL in ${HOSTED_VOLS[@]}; do
+ REDUNDANCY_CNT=$(grep redundancy_count /var/lib/glusterd/vols/$VOL/info | cut -d= -f2)
+ if (( ${REDUNDANCY_CNT:-0} > 0 )); then
+ break;
+ fi
+ done
+ if ! (( ${REDUNDANCY_CNT:-0} > 0 )); then
+ REDUNDANCY_CNT=${#REPLICAS[@]}
+ QUORUM_THRESHOLD=$(((${REDUNDANCY_CNT}+1)/2+1))
+ echo "Quorum threshold: $QUORUM_THRESHOLD"
+ else
+ QUORUM_THRESHOLD=$((${REDUNDANCY_CNT}/2))
+ echo "Quorum threshold (EC @ 50% of ${REDUNDANCY_CNT} redundant bricks): $QUORUM_THRESHOLD"
+ fi
+
+ local LIVING_BRICKS=$REPLICA_CNT
+ local CHECK_LIST=(${REPLICAS[@]})
+ CHECK_LIST+=($HOSTNAME)
+ local CHECK_HOST=""
+ local DEAD_BRICKS=0
+ for CHECK_HOST in ${CHECK_LIST[@]}
+ do
+ echo -n "Replica $CHECK_HOST: "
+ if ! probe_glusterd $CHECK_HOST $GLUSTERD_PORT; then
+ echo "DEAD"
+ LIVING_BRICKS=$(($LIVING_BRICKS-1))
+ DEAD_BRICKS=$(($DEAD_BRICKS+1))
+ else
+ echo "ALIVE"
+ fi
+ done
+ QUORUM_MARGIN=$(($QUORUM_THRESHOLD-$DEAD_BRICKS))
+ echo "Quorum margin: $QUORUM_MARGIN"
+ if (( $QUORUM_MARGIN > 0 )); then
+ return 0
+ else
+ return 1
+ fi
+}
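To make the margin arithmetic concrete: for a plain 3-way replica group (no redundancy_count in the volume info), REDUNDANCY_CNT falls back to ${#REPLICAS[@]} = 2, so:

    QUORUM_THRESHOLD = (2+1)/2 + 1 = 2     # integer division
    1 dead brick  -> margin = 2 - 1 = 1    -> quorum intact, returns 0
    2 dead bricks -> margin = 2 - 2 = 0    -> quorum lost, returns 1

i.e. FBAR may safely take at most one member of each replica-3 group at a time.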
+
+#
+# FUNCTION: fsdiff
+#
+# DESCRIPTION: Does a quick sanity check on the file sets between the local node
+# and one of its partner nodes. This function will return a list of all files
+# which differ in size. Keep in mind this will be approximate on running live
+# hosts since the script can't get a perfect snapshot of each FS. On a node
+# which is about to be re-integrated into the cluster however it will give a
+# good view of how much data is out of sync.
+#
+fsdiff()
+{
+ WORK_DIR="/tmp/gfsdiff"
+ set_smc_tier
+ if ! set_node_index; then
+ echo "$HOSTNAME not a member of any replica group."
+ exit 1
+ fi
+ set_replicas || { echo "No replicas found!"; return 1; }
+ set_live_replica || { echo "No live replica found!"; return 1; }
+ mkdir -p $WORK_DIR
+ echo -n "Getting local file list for $HOSTNAME..."
+ find $GLUSTERD_DATA_DIR -type f -printf '%s\t%p\n' |
+ sort > $WORK_DIR/$HOSTNAME.lst
+ echo "DONE"
+ echo -n "Getting file list for $LIVE_REPLICA..."
+ ssh root@$LIVE_REPLICA "find $GLUSTERD_DATA_DIR -type f -printf '%s\t%p\n'" \
+ | sort > $WORK_DIR/$LIVE_REPLICA.lst
+ echo "DONE"
+ echo "Finding differences..."
+ comm -1 -3 $WORK_DIR/$LIVE_REPLICA.lst $WORK_DIR/$HOSTNAME.lst |
+ awk '{print $NF}'
+}
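fsdiff is typically run on the node about to be re-integrated; assuming root SSH to the live replica works, a session looks roughly like:

    . /etc/rc.d/init.d/glusterd.functions
    fsdiff > /tmp/out-of-sync.lst

Since "comm -1 -3 replica.lst local.lst" suppresses lines unique to the replica and lines common to both, the output is the set of paths whose size+path entry exists only locally, i.e. files the replica is missing or holds at a different size.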
diff --git a/extras/systemd/Makefile.am b/extras/systemd/Makefile.am
index 3f0ec89537a..9e350775111 100644
--- a/extras/systemd/Makefile.am
+++ b/extras/systemd/Makefile.am
@@ -1,7 +1,15 @@
-CLEANFILES = glusterd.service
-EXTRA_DIST = glusterd.service.in
-if USE_SYSTEMD
-# systemddir is already defined through configure.ac
-systemd_DATA = glusterd.service
+CLEANFILES =
+
+SYSTEMD_DIR = @systemddir@
+
+if GF_WITH_SYSTEMD
+install-exec-local:
+ @if [ -d $(SYSTEMD_DIR) ]; then \
+ $(mkdir_p) $(DESTDIR)$(SYSTEMD_DIR); \
+ $(INSTALL_PROGRAM) glusterd-mgmt.service $(DESTDIR)$(SYSTEMD_DIR)/; \
+ $(INSTALL_PROGRAM) glusterd-nfsd.service $(DESTDIR)$(SYSTEMD_DIR)/; \
+ $(INSTALL_PROGRAM) glusterd-gfproxyd.service $(DESTDIR)$(SYSTEMD_DIR)/; \
+ $(INSTALL_PROGRAM) glusterd-shd.service $(DESTDIR)$(SYSTEMD_DIR)/; \
+ fi
endif
diff --git a/extras/systemd/glusterd-gfproxyd.service b/extras/systemd/glusterd-gfproxyd.service
new file mode 100644
index 00000000000..051181fff34
--- /dev/null
+++ b/extras/systemd/glusterd-gfproxyd.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=GlusterFS GFProxy Daemon
+After=network.target rpcbind.service
+Before=network-online.target
+
+[Service]
+Type=forking
+EnvironmentFile=/var/lib/glusterd/glusterd.info
+ExecStartPre=/bin/mkdir -p /var/lib/glusterd/gfproxy/run
+ExecStart=/usr/sbin/glusterfs -s localhost --volfile-id gfproxy/groot -p /var/lib/glusterd/gfproxy/run/gfproxy.pid -l /var/log/glusterfs/gfproxyd.log -S /var/lib/glusterd/gfproxy/run/${UUID}.socket
+PIDFile=/var/lib/glusterd/gfproxy/run/gfproxy.pid
+ExecStop=/bin/pkill -f '^/usr/sbin/glusterfs.*gluster/gfproxy'
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterd-gfproxyd.service.in b/extras/systemd/glusterd-gfproxyd.service.in
new file mode 100644
index 00000000000..f6af14416b3
--- /dev/null
+++ b/extras/systemd/glusterd-gfproxyd.service.in
@@ -0,0 +1,16 @@
+[Unit]
+Description=GlusterFS GFProxy Daemon
+After=network.target rpcbind.service
+Before=network-online.target
+
+[Service]
+Type=forking
+EnvironmentFile=@localstatedir@/lib/glusterd/glusterd.info
+ExecStartPre=/bin/mkdir -p @localstatedir@/lib/glusterd/gfproxy/run
+ExecStart=@prefix@/sbin/glusterfs -s localhost --volfile-id gfproxy/groot -p @localstatedir@/lib/glusterd/gfproxy/run/gfproxy.pid -l /var/log/glusterfs/gfproxyd.log -S @localstatedir@/lib/glusterd/gfproxy/run/${UUID}.socket
+PIDFile=@localstatedir@/lib/glusterd/gfproxy/run/gfproxy.pid
+ExecStop=/bin/pkill -f '^@prefix@/sbin/glusterfs.*gluster/gfproxy'
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterd-mgmt.service b/extras/systemd/glusterd-mgmt.service
new file mode 100644
index 00000000000..62352137413
--- /dev/null
+++ b/extras/systemd/glusterd-mgmt.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=GlusterFS Management Daemon
+After=network.target rpcbind.service
+Before=network-online.target
+
+[Service]
+Type=forking
+PIDFile=/var/lib/glusterd/run/glusterd.pid
+LimitNOFILE=65536
+ExecStart=/usr/sbin/glusterd -p /var/lib/glusterd/run/glusterd.pid --skip-shd-start --skip-nfsd-start
+KillMode=process
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterd-mgmt.service.in b/extras/systemd/glusterd-mgmt.service.in
new file mode 100644
index 00000000000..5e6e3911a0a
--- /dev/null
+++ b/extras/systemd/glusterd-mgmt.service.in
@@ -0,0 +1,15 @@
+[Unit]
+Description=GlusterFS Management Daemon
+After=network.target rpcbind.service
+Before=network-online.target
+
+[Service]
+Type=forking
+PIDFile=@localstatedir@/lib/glusterd/run/glusterd.pid
+LimitNOFILE=65536
+ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/lib/glusterd/run/glusterd.pid --skip-shd-start --skip-nfsd-start
+KillMode=process
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterd-nfsd.service b/extras/systemd/glusterd-nfsd.service
new file mode 100644
index 00000000000..111b56f9cfa
--- /dev/null
+++ b/extras/systemd/glusterd-nfsd.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=GlusterFS NFS Daemon
+After=network.target rpcbind.service
+Before=network-online.target
+
+[Service]
+Type=forking
+EnvironmentFile=/var/lib/glusterd/glusterd.info
+ExecStartPre=-/bin/pkill -f 'glusterfs.*gluster/nfs'
+ExecStart=/usr/sbin/glusterfs -s localhost --volfile-id gluster/nfs -p /var/lib/glusterd/nfs/run/nfs.pid -l /var/log/glusterfs/nfs.log -S /var/lib/glusterd/nfs/run/${UUID}.socket
+PIDFile=/var/lib/glusterd/nfs/run/nfs.pid
+ExecStop=-/bin/pkill -f 'glusterfs.*gluster/nfs'
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterd-nfsd.service.in b/extras/systemd/glusterd-nfsd.service.in
new file mode 100644
index 00000000000..45de2a4dc26
--- /dev/null
+++ b/extras/systemd/glusterd-nfsd.service.in
@@ -0,0 +1,16 @@
+[Unit]
+Description=GlusterFS NFS Daemon
+After=network.target rpcbind.service
+Before=network-online.target
+
+[Service]
+Type=forking
+EnvironmentFile=@localstatedir@/lib/glusterd/glusterd.info
+ExecStartPre=-/bin/pkill -f 'glusterfs.*gluster/nfs'
+ExecStart=@prefix@/sbin/glusterfs -s localhost --volfile-id gluster/nfs -p @localstatedir@/lib/glusterd/nfs/run/nfs.pid -l /var/log/glusterfs/nfs.log -S @localstatedir@/lib/glusterd/nfs/run/${UUID}.socket
+PIDFile=@localstatedir@/lib/glusterd/nfs/run/nfs.pid
+ExecStop=-/bin/pkill -f 'glusterfs.*gluster/nfs'
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterd-shd.service b/extras/systemd/glusterd-shd.service
new file mode 100644
index 00000000000..0b606f947cd
--- /dev/null
+++ b/extras/systemd/glusterd-shd.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=GlusterFS Self-Heal Daemon
+After=network.target rpcbind.service
+Before=network-online.target
+
+[Service]
+Type=forking
+EnvironmentFile=/var/lib/glusterd/glusterd.info
+LimitNOFILE=65536
+ExecStartPre=-/bin/pkill -f '^/usr/sbin/glusterfs.*gluster/glustershd'
+ExecStart=/usr/sbin/glusterfs -s localhost --volfile-id gluster/glustershd -p /var/lib/glusterd/glustershd/run/glustershd.pid -l /var/log/glusterfs/glustershd.log --xlator-option *replicate*.node-uuid=${UUID} -S /var/lib/glusterd/glustershd/run/${UUID}.socket
+PIDFile=/var/lib/glusterd/glustershd/run/glustershd.pid
+ExecStop=/bin/pkill -f '^/usr/sbin/glusterfs.*gluster/glustershd'
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterd-shd.service.in b/extras/systemd/glusterd-shd.service.in
new file mode 100644
index 00000000000..bd6933d0247
--- /dev/null
+++ b/extras/systemd/glusterd-shd.service.in
@@ -0,0 +1,17 @@
+[Unit]
+Description=GlusterFS Self-Heal Daemon
+After=network.target rpcbind.service
+Before=network-online.target
+
+[Service]
+Type=forking
+EnvironmentFile=@localstatedir@/lib/glusterd/glusterd.info
+LimitNOFILE=65536
+ExecStartPre=-/bin/pkill -f '^@prefix@/sbin/glusterfs.*gluster/glustershd'
+ExecStart=@prefix@/sbin/glusterfs -s localhost --volfile-id gluster/glustershd -p @localstatedir@/lib/glusterd/glustershd/run/glustershd.pid -l /var/log/glusterfs/glustershd.log --xlator-option *replicate*.node-uuid=${UUID} -S @localstatedir@/lib/glusterd/glustershd/run/${UUID}.socket
+PIDFile=@localstatedir@/lib/glusterd/glustershd/run/glustershd.pid
+ExecStop=/bin/pkill -f '^@prefix@/sbin/glusterfs.*gluster/glustershd'
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
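With the monolithic glusterd.service replaced by the four units above, day-to-day operation goes through systemctl per daemon, e.g.:

    systemctl daemon-reload
    systemctl start glusterd-mgmt glusterd-nfsd glusterd-shd
    systemctl status glusterd-shd    # inspect the self-heal daemon alone

The FbRedhat init script earlier in this change wraps exactly these systemctl calls, so both entry points stay consistent.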
diff --git a/extras/systemd/glusterd.service.in b/extras/systemd/glusterd.service.in
deleted file mode 100644
index 26694cfc8ab..00000000000
--- a/extras/systemd/glusterd.service.in
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=GlusterFS, a clustered file-system server
-Requires=rpcbind.service
-After=network.target rpcbind.service
-Before=network-online.target
-
-[Service]
-Type=forking
-PIDFile=@localstatedir@/run/glusterd.pid
-LimitNOFILE=65536
-Environment="LOG_LEVEL=INFO"
-EnvironmentFile=-@sysconfdir@/sysconfig/glusterd
-ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS
-KillMode=process
-
-[Install]
-WantedBy=multi-user.target
diff --git a/extras/systemd/glustereventsd.service b/extras/systemd/glustereventsd.service
new file mode 100644
index 00000000000..73307282013
--- /dev/null
+++ b/extras/systemd/glustereventsd.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Gluster Events Notifier
+After=syslog.target network.target
+
+[Service]
+Environment=PYTHONPATH=/usr/local/lib/python2.7/site-packages:$PYTHONPATH
+Type=simple
+ExecStart=/usr/local/sbin/glustereventsd --pid-file /var/run/glustereventsd.pid
+ExecReload=/bin/kill -SIGUSR2 $MAINPID
+KillMode=control-group
+PIDFile=/var/run/glustereventsd.pid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterfssharedstorage.service b/extras/systemd/glusterfssharedstorage.service
new file mode 100644
index 00000000000..95da3dfbabd
--- /dev/null
+++ b/extras/systemd/glusterfssharedstorage.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Mount glusterfs sharedstorage
+Requires=glusterd.service remote-fs-pre.target local-fs.target
+
+[Service]
+Type=forking
+ExecStart=/usr/local/libexec/glusterfs/mount-shared-storage.sh
+Restart=on-failure
+RestartSec=3
+RestartForceExitStatus=1
+
+[Install]
+WantedBy=multi-user.target
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 4237db448df..4ae3b8fe7e3 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1,5 +1,14 @@
%global _hardened_build 1
+%undefine _missing_build_ids_terminate_build
+
+%if 0%{?rhel} == 7
+# CentOS 7 would force ".el7.centos", we want to avoid that.
+ %define dist .el7
+%endif
+
+%global release fb_release
+
%global _for_fedora_koji_builds 0
# uncomment and add '%' to use the prereltag for pre-releases
@@ -13,10 +22,6 @@
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with debug
%{?_with_debug:%global _with_debug --enable-debug}
-# if you wish to compile an rpm with Facebook specfic extras...
-# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with fbextras
-%{?_with_fbextras:%global _with_fbextras --with-fbextras}
-
# if you wish to compile an rpm with cmocka unit testing...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with cmocka
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}
@@ -30,6 +35,10 @@
%global _without_rdma --disable-ibverbs
%endif
+# if you wish to compile an rpm with ASAN enabled...
+# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with asan
+%{?_with_asan:%global _with_asan --with-asan}
+
# if you wish to compile an rpm without epoll...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}
@@ -76,17 +85,17 @@
%global _without_tiering --disable-tiering
%endif
+# if you wish to compile an rpm with Facebook specific extras...
+# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with fbextras
+%{?_with_fbextras:%global _with_fbextras --with-fbextras}
+
##-----------------------------------------------------------------------------
## All %%global definitions should be placed here and keep them sorted
##
-%if ( 0%{?fedora} && 0%{?fedora} > 16 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
-%endif
-%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
-%global _with_firewalld --enable-firewalld
-%endif
+%global _without_firewalld --disable-firewalld
%if 0%{?_tmpfilesdir:1}
%global _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
@@ -103,13 +112,15 @@
%if ( 0%{?_with_systemd:1} )
%global _init_enable() /bin/systemctl enable %1.service ;
%global _init_disable() /bin/systemctl disable %1.service ;
-%global _init_restart() /bin/systemctl try-restart %1.service ;
+%global _init_restart() /bin/systemctl restart %1.service ;
%global _init_start() /bin/systemctl start %1.service ;
%global _init_stop() /bin/systemctl stop %1.service ;
%global _init_install() install -D -p -m 0644 %1 %{buildroot}%{_unitdir}/%2.service ;
# can't seem to make a generic macro that works
-%global _init_glusterd %{_unitdir}/glusterd.service
-%global _init_glusterfsd %{_unitdir}/glusterfsd.service
+%global _init_glusterd %{_unitdir}/glusterd-mgmt.service
+%global _init_glusterd_nfsd %{_unitdir}/glusterd-nfsd.service
+%global _init_glusterd_gfproxyd %{_unitdir}/glusterd-gfproxyd.service
+%global _init_glusterd_shd %{_unitdir}/glusterd-shd.service
%else
%global _init_enable() /sbin/chkconfig --add %1 ;
%global _init_disable() /sbin/chkconfig --del %1 ;
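Under systemd these wrappers expand to plain systemctl invocations; for example the scriptlet line

    %_init_restart glusterd-nfsd

expands to

    /bin/systemctl restart glusterd-nfsd.service ;

while the %else branch supplies the chkconfig/service equivalents on SysV hosts. Note the restart macro now does a hard restart instead of try-restart, so the daemon is started even if it was not previously running.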
@@ -137,21 +148,24 @@
%global _sharedstatedir /var/lib
%endif
+# Commenting this out; it doesn't actually work with FB CentOS 6. Frankly,
+# I've no idea what they are trying to accomplish here; these "useless"
+# provides don't bother me.
# We do not want to generate useless provides and requires for xlator
# .so files to be set for glusterfs packages.
# Filter all generated:
#
# TODO: RHEL5 does not have a convenient solution
-%if ( 0%{?rhel} == 6 )
+#%if ( 0%{?rhel} == 6 )
# filter_setup exists in RHEL6 only
-%filter_provides_in %{_libdir}/glusterfs/%{version}/
-%global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
-%filter_setup
-%else
-# modern rpm and current Fedora do not generate requires when the
-# provides are filtered
-%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
-%endif
+# %filter_provides_in %{_libdir}/glusterfs/%{version}/
+# %global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
+# %filter_setup
+#%else
+# # modern rpm and current Fedora do not generate requires when the
+# # provides are filtered
+# %global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
+#%endif
##-----------------------------------------------------------------------------
@@ -160,13 +174,13 @@
Summary: Distributed File System
%if ( 0%{_for_fedora_koji_builds} )
Name: glusterfs
-Version: 3.8.0
-Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
+Version: 3.8_fb
+Release: %{release}
Vendor: Fedora Project
%else
Name: @PACKAGE_NAME@
Version: @PACKAGE_VERSION@
-Release: 0.@PACKAGE_RELEASE@%{?dist}
+Release: %{release}
Vendor: Gluster Community
%endif
License: GPLv2 or LGPLv3+
@@ -200,10 +214,13 @@ BuildRequires: libxml2-devel openssl-devel
BuildRequires: libaio-devel libacl-devel
BuildRequires: python-devel
BuildRequires: python-ctypes
-%if ( 0%{?_with_fbextras:1} )
-BuildRequires: fb-libtirpc fb-libtirpc-devel
-BuildRequires: jemalloc jemalloc-devel
+%if ( 0%{?rhel} && 0%{?rhel} == 7 )
+BuildRequires: libtirpc libtirpc-devel
+%else
+BuildRequires: libfbtirpc libfbtirpc-devel
%endif
+BuildRequires: jemalloc jemalloc-devel
+BuildRequires: gperftools-devel gperftools-libs
BuildRequires: userspace-rcu-devel >= 0.7
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
BuildRequires: automake
@@ -226,7 +243,7 @@ BuildRequires: lvm2-devel
BuildRequires: libattr-devel
%endif
-%if (0%{?_with_firewalld:1})
+%if (0%{!?_without_firewalld:1})
BuildRequires: firewalld-filesystem
%endif
@@ -348,6 +365,9 @@ Requires: psmisc
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
+%if ( 0%{?_with_fbextras:1} )
+Requires: fb-glusterfs-clientstats >= 0.0.2
+%endif
Obsoletes: %{name}-client < %{version}-%{release}
Provides: %{name}-client = %{version}-%{release}
@@ -521,13 +541,15 @@ Requires: %{name}-cli%{?_isa} = %{version}-%{release}
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
# some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
Requires: %{name}-fuse%{?_isa} = %{version}-%{release}
-%if ( 0%{?_with_fbextras:1} )
-Requires: fb-libtirpc >= 0.2.5-1
-Requires: jemalloc >= 3.6.0-1
-%endif
# self-heal daemon, rebalance, nfs-server etc. are actually clients
Requires: %{name}-api%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
+%if ( 0%{?rhel} && 0%{?rhel} == 7 )
+Requires: libtirpc >= 0.2.4
+%else
+Requires: libfbtirpc >= 0.2.5-1
+%endif
+Requires: jemalloc >= 3.6.0-1
# psmisc for killall, lvm2 for snapshot, and nfs-utils and
# rpcbind/portmap for gnfs server
Requires: psmisc
@@ -543,7 +565,7 @@ Requires(preun): /sbin/service
Requires(preun): /sbin/chkconfig
Requires(postun): /sbin/service
%endif
-%if (0%{?_with_firewalld:1})
+%if (0%{!?_without_firewalld:1})
# we install firewalld rules, so we need to have the directory owned
Requires: firewalld-filesystem
%endif
@@ -586,6 +608,58 @@ is in user space and easily manageable.
This package provides the translators needed on any GlusterFS client.
+%package server-nodeps
+Summary: Clustered file-system server
+Group: System Environment/Daemons
+%if ( 0%{?rhel} && 0%{?rhel} == 7 )
+Requires: libtirpc >= 0.2.4
+%else
+Requires: libfbtirpc >= 0.2.5-1
+%endif
+Requires: jemalloc >= 3.6.0-1
+# psmisc for killall, lvm2 for snapshot, and nfs-utils and
+# rpcbind/portmap for gnfs server
+Requires: psmisc
+Requires: lvm2
+Requires: nfs-utils
+%if ( 0%{?_with_systemd:1} )
+Requires(post): systemd-units, systemd
+Requires(preun): systemd-units
+Requires(postun): systemd-units
+%else
+Requires(post): /sbin/chkconfig
+Requires(preun): /sbin/service
+Requires(preun): /sbin/chkconfig
+Requires(postun): /sbin/service
+%endif
+%if (0%{!?_without_firewalld:1})
+# we install firewalld rules, so we need to have the directory owned
+Requires: firewalld-filesystem
+%endif
+%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
+Requires: rpcbind
+%else
+Requires: portmap
+%endif
+%if ( 0%{?rhel} && 0%{?rhel} < 6 )
+Obsoletes: %{name}-geo-replication = %{version}-%{release}
+%endif
+%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
+Requires: python-argparse
+%endif
+Requires: pyxattr
+
+%description server-nodeps
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over Infiniband RDMA
+or TCP/IP interconnect into one large parallel network file
+system. GlusterFS is one of the most sophisticated file systems in
+terms of features and extensibility. It borrows a powerful concept
+called Translators from GNU Hurd kernel. Much of the code in GlusterFS
+is in user space and easily manageable.
+
+This package provides the glusterfs server daemon.
+
%prep
%setup -q -n %{name}-%{version}%{?prereltag}
@@ -601,6 +675,8 @@ export CFLAGS
%endif
%configure \
+ --with-jemalloc \
+ --enable-mempool \
%{?_with_cmocka} \
%{?_with_debug} \
%{?_with_tmpfilesdir} \
@@ -608,12 +684,14 @@ export CFLAGS
%{?_without_epoll} \
%{?_without_fusermount} \
%{?_without_georeplication} \
- %{?_with_firewalld} \
+ %{?_without_firewalld} \
%{?_without_ocf} \
%{?_without_rdma} \
%{?_without_syslog} \
%{?_without_tiering} \
- %{?_with_fbextras}
+ %{?_with_asan} \
+ %{?_with_fbextras} \
+ %{?_with_systemd:--with-systemd}
# fix hardening and remove rpath in shlibs
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
@@ -628,6 +706,7 @@ make %{?_smp_mflags}
make check
%install
+export DONT_STRIP=1
rm -rf %{buildroot}
make install DESTDIR=%{buildroot}
# Install include directory
@@ -736,6 +815,18 @@ find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glus
install -p -m 0744 -D extras/command-completion/gluster.bash \
%{buildroot}%{_sysconfdir}/bash_completion.d/gluster
+%if ( 0%{?_with_systemd:1} )
+%{__mkdir_p} %{buildroot}/usr/lib/systemd/system
+%{__install} -p -m 0755 extras/systemd/glusterd-mgmt.service \
+ %{buildroot}/usr/lib/systemd/system/glusterd-mgmt.service
+%{__install} -p -m 0755 extras/systemd/glusterd-nfsd.service \
+ %{buildroot}/usr/lib/systemd/system/glusterd-nfsd.service
+%{__install} -p -m 0755 extras/systemd/glusterd-shd.service \
+ %{buildroot}/usr/lib/systemd/system/glusterd-shd.service
+%{__install} -p -m 0755 extras/systemd/glusterd-gfproxyd.service \
+ %{buildroot}/usr/lib/systemd/system/glusterd-gfproxyd.service
+%endif
+
%clean
rm -rf %{buildroot}
@@ -772,6 +863,12 @@ exit 0
/sbin/ldconfig
%post server
+CONF_DIR="%{_sharedstatedir}/glusterd"
+if [ -d "$CONF_DIR" ]; then
+ BACKUP_FILE="/var/tmp/var-lib-glusterd-$(date +%%Y_%%m_%%d_%%H_%%M).tgz"
+ echo "Creating backup of $CONF_DIR @ $BACKUP_FILE..."
+ tar czf "$BACKUP_FILE" "$CONF_DIR" 2> /dev/null
+fi
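+# Illustrative restore command for the backup above (not run by this
+# scriptlet): tar xzf /var/tmp/var-lib-glusterd-<timestamp>.tgz -C /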
# Legacy server
%_init_enable glusterd
%if ( 0%{_for_fedora_koji_builds} )
@@ -816,21 +913,112 @@ if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
/sbin/ldconfig
fi
-%if (0%{?_with_firewalld:1})
+%if (0%{!?_without_firewalld:1})
%firewalld_reload
%endif
-%if ( 0%{?_with_fbextras:1} )
+pidof -c -o %PPID -x glusterd &> /dev/null
+if [ $? -eq 0 ]; then
+ kill -9 `pgrep -f gsyncd.py` &> /dev/null
+
+ timeout 30 killall --wait glusterd &> /dev/null
+ glusterd --xlator-option *.upgrade=on -N
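+ # ("--xlator-option *.upgrade=on -N" runs glusterd once in the foreground,
+ # -N meaning no daemonizing; it performs upgrade housekeeping such as
+ # regenerating volfiles, then exits.)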
+
+ #Cleaning leftover glusterd socket file which is created by glusterd in
+ #rpm_script_t context.
+ rm -rf /var/run/glusterd.socket
+
+ # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
+ # so start it again
+ %_init_start glusterd
+ sleep 3
+ echo "Restarting daemons..."
+ %if ( 0%{?_with_systemd:1} )
+ pkill -f @prefix@/sbin/glusterfsd.*
+ pkill -f @prefix@/sbin/glusterd.*
+ pkill -f @prefix@/sbin/glusterfs.*
+ /bin/systemctl daemon-reload
+ %_init_restart glusterd-nfsd
+ %_init_restart glusterd-shd
+ %_init_restart glusterd-gfproxyd
+ %_init_restart glusterd
+ %else
+ service glusterd restart
+ %endif
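+ # Hypothetical manual equivalent of the systemd branch above:
+ # systemctl daemon-reload
+ # systemctl restart glusterd-nfsd glusterd-shd glusterd-gfproxyd glusterd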
+
+else
+ glusterd --xlator-option *.upgrade=on -N
+
+ #Cleaning leftover glusterd socket file which is created by glusterd in
+ #rpm_script_t context.
+ rm -rf /var/run/glusterd.socket
+fi
if ! [ -f %{_sharedstatedir}/glusterd/glusterd.info ]; then
echo "UUID=$(/usr/bin/uuidgen)" >> %{_sharedstatedir}/glusterd/glusterd.info
fi
+exit 0
+
+%post server-nodeps
+CONF_DIR="%{_sharedstatedir}/glusterd"
+if [ -d "$CONF_DIR" ]; then
+ BACKUP_FILE="/var/tmp/var-lib-glusterd-$(date +%%Y_%%m_%%d_%%H_%%M).tgz"
+ echo "Creating backup of $CONF_DIR @ $BACKUP_FILE..."
+ tar czf "$BACKUP_FILE" "$CONF_DIR" 2> /dev/null
+fi
+# Legacy server
+%_init_enable glusterd
+%if ( 0%{_for_fedora_koji_builds} )
+%_init_enable glusterfsd
+%endif
+# ".cmd_log_history" is renamed to "cmd_history.log" in GlusterFS-3.7 .
+# While upgrading glusterfs-server package form GlusterFS version <= 3.6 to
+# GlusterFS version 3.7, ".cmd_log_history" should be renamed to
+# "cmd_history.log" to retain cli command history contents.
+if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then
+ mv %{_localstatedir}/log/glusterfs/.cmd_log_history \
+ %{_localstatedir}/log/glusterfs/cmd_history.log
+fi
+
+# Genuine Fedora (and EPEL) builds never put gluster files in /etc; if
+# there are any files in /etc from a prior gluster.org install, move them
+# to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib
+# in gluster.org RPMs.) Be careful to copy them on the off chance that
+# /etc and /var/lib are on separate file systems
+if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then
+ mkdir -p %{_sharedstatedir}/glusterd
+ cp -a /etc/glusterd %{_sharedstatedir}/glusterd
+ rm -rf /etc/glusterd
+ ln -sf %{_sharedstatedir}/glusterd /etc/glusterd
+fi
+
+# Rename old volfiles in an RPM-standard way. These aren't actually
+# considered package config files, so %%config doesn't work for them.
+if [ -d %{_sharedstatedir}/glusterd/vols ]; then
+ for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do
+ newfile=${file}.rpmsave
+ echo "warning: ${file} saved as ${newfile}"
+ cp ${file} ${newfile}
+ done
+fi
+
+# add marker translator
+# but first make certain that there are no old libs around to bite us
+# BZ 834847
+if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
+ rm -f /etc/ld.so.conf.d/glusterfs.conf
+ /sbin/ldconfig
+fi
+
+%if (0%{!?_without_firewalld:1})
+ %firewalld_reload
%endif
pidof -c -o %PPID -x glusterd &> /dev/null
if [ $? -eq 0 ]; then
kill -9 `pgrep -f gsyncd.py` &> /dev/null
- killall --wait glusterd &> /dev/null
+ timeout 30 killall --wait glusterd &> /dev/null
glusterd --xlator-option *.upgrade=on -N
#Cleaning leftover glusterd socket file which is created by glusterd in
@@ -840,6 +1028,21 @@ if [ $? -eq 0 ]; then
# glusterd _was_ running, we killed it, it exited after *.upgrade=on,
# so start it again
%_init_start glusterd
+ sleep 3
+ echo "Restarting daemons..."
+ %if ( 0%{?_with_systemd:1} )
+ pkill -f @prefix@/sbin/glusterfsd.*
+ pkill -f @prefix@/sbin/glusterd.*
+ pkill -f @prefix@/sbin/glusterfs.*
+ /bin/systemctl daemon-reload
+ %_init_restart glusterd-nfsd
+ %_init_restart glusterd-shd
+ %_init_restart glusterd-gfproxyd
+ %_init_restart glusterd
+ %else
+ service glusterd restart
+ %endif
+
else
glusterd --xlator-option *.upgrade=on -N
@@ -849,6 +1052,24 @@ else
fi
-exit 0
+if ! [ -f %{_sharedstatedir}/glusterd/glusterd.info ]; then
+ echo "UUID=$(/usr/bin/uuidgen)" >> %{_sharedstatedir}/glusterd/glusterd.info
+fi
+
+/sbin/ldconfig
+
+%if ( 0%{?rhel} == 5 )
+modprobe fuse
+%endif
+
+%if ( 0%{!?_without_georeplication:1} )
+if [ $1 -ge 1 ]; then
+ %_init_restart glusterd
+fi
+%endif
+
+exit 0
+
##-----------------------------------------------------------------------------
## All %%preun should be placed here and keep them sorted
##
@@ -871,6 +1092,25 @@ if [ $1 -ge 1 ]; then
fi
exit 0
+%preun server-nodeps
+if [ $1 -eq 0 ]; then
+ if [ -f %_init_glusterfsd ]; then
+ %_init_stop glusterfsd
+ fi
+ %_init_stop glusterd
+ if [ -f %_init_glusterfsd ]; then
+ %_init_disable glusterfsd
+ fi
+ %_init_disable glusterd
+fi
+if [ $1 -ge 1 ]; then
+ if [ -f %_init_glusterfsd ]; then
+ %_init_restart glusterfsd
+ fi
+ %_init_restart glusterd
+fi
+exit 0
+
##-----------------------------------------------------------------------------
## All %%postun should be placed here and keep them sorted
##
@@ -890,7 +1130,14 @@ exit 0
%postun server
/sbin/ldconfig
-%if (0%{?_with_firewalld:1})
+%if (0%{!?_without_firewalld:1})
+ %firewalld_reload
+%endif
+exit 0
+
+%postun server-nodeps
+/sbin/ldconfig
+%if (0%{!?_without_firewalld:1})
%firewalld_reload
%endif
exit 0
@@ -932,7 +1179,6 @@ exit 0
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
-%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
@@ -944,6 +1190,8 @@ exit 0
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/ganesha.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
%dir %{_localstatedir}/run/gluster
%if 0%{?_tmpfilesdir:1}
%{_tmpfilesdir}/gluster.conf
@@ -983,6 +1231,7 @@ exit 0
%files client-xlators
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/ganesha.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
@@ -1006,6 +1255,9 @@ exit 0
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
/sbin/mount.glusterfs
+%if ( 0%{?_with_fbextras:1} )
+/sbin/umount.fuse.glusterfs
+%endif
%if ( 0%{!?_without_fusermount:1} )
%{_bindir}/fusermount-glusterfs
%endif
@@ -1090,6 +1342,13 @@ exit 0
# init files
%_init_glusterd
+%if ( 0%{!?_with_systemd:1} )
+%_init_glusterd_functions
+%else
+%_init_glusterd_nfsd
+%_init_glusterd_shd
+%_init_glusterd_gfproxyd
+%endif
%if ( 0%{_for_fedora_koji_builds} )
%_init_glusterfsd
%endif
@@ -1196,17 +1455,246 @@ exit 0
%{_bindir}/glusterfind
%{_libexecdir}/glusterfs/peer_add_secret_pub
-%if ( 0%{?_with_firewalld:1} )
+%if ( 0%{!?_without_firewalld:1} )
%{_prefix}/lib/firewalld/services/glusterfs.xml
%endif
+%files server-nodeps
+%doc extras/clear_xattrs.sh
+# sysconf
+%config(noreplace) %{_sysconfdir}/glusterfs
+%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
+%if ( 0%{_for_fedora_koji_builds} )
+%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
+%endif
+
+# init files
+%_init_glusterd
+%if ( 0%{!?_with_systemd:1} )
+%_init_glusterd_functions
+%else
+%_init_glusterd_nfsd
+%_init_glusterd_shd
+%_init_glusterd_gfproxyd
+%endif
+%if ( 0%{_for_fedora_koji_builds} )
+%_init_glusterfsd
+%endif
+
+# binaries
+%{_sbindir}/glusterd
+%{_sbindir}/glfsheal
+# %%{_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
+# symlink. The binary itself (and symlink) are part of the glusterfs-fuse
+# package, because glusterfs-server depends on that anyway.
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
+%if ( 0%{!?_without_tiering:1} )
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
+%endif
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt*
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server*
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage*
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
+%if ( 0%{!?_without_tiering:1} )
+%{_libdir}/libgfdb.so.*
+%endif
+
+# snap_scheduler
+%{_sbindir}/snap_scheduler.py
+%{_sbindir}/gcron.py
+
+# /var/lib/glusterd, e.g. hookscripts, etc.
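+# (Note: the %%ghost entries below are owned by the package but not shipped
+# in the RPM payload; glusterd creates them at runtime.)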
+%ghost %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
+ %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/virt
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/pre
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
+ %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/post
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/pre
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/pre
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
+ %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
+%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
+%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
+%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/options
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/snaps
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/ss_brick
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/vols
+
+# Extra utility script
+%{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
+
+# Incrementalapi
+%{_libexecdir}/glusterfs/glusterfind
+%{_bindir}/glusterfind
+%{_libexecdir}/glusterfs/peer_add_secret_pub
+
+%if ( 0%{!?_without_firewalld:1} )
+%{_prefix}/lib/firewalld/services/glusterfs.xml
+%endif
+
+%{_libdir}/*.so
+# libgfapi files
+%{_libdir}/libgfapi.*
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so
+%{_sbindir}/gluster
+%{_mandir}/man8/gluster.8*
+%{_sysconfdir}/bash_completion.d/gluster
+
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
+
+# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
+%{_sbindir}/glusterfs
+%{_sbindir}/glusterfsd
+%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
+/sbin/mount.glusterfs
+%if ( 0%{?_with_fbextras:1} )
+/sbin/umount.fuse.glusterfs
+%endif
+%if ( 0%{!?_without_fusermount:1} )
+%{_bindir}/fusermount-glusterfs
+%endif
+%if ( 0%{_for_fedora_koji_builds} )
+%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
+%{_sysconfdir}/sysconfig/modules/glusterfs-fuse.modules
+%endif
+%endif
+
+%{_libdir}/*.so.*
+%if ( 0%{!?_without_tiering:1} )
+# libgfdb is only needed server-side
+%{_libdir}/libgfdb.*
+%endif
+
+%{_mandir}/man8/*gluster*.8*
+%exclude %{_mandir}/man8/gluster.8*
+%dir %{_localstatedir}/log/glusterfs
+%if ( 0%{!?_without_rdma:1} )
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
+%endif
+%dir %{_datadir}/glusterfs/scripts
+%{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
+%{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
+# xlators that are needed on the client- and on the server-side
+%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth
+%{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so
+%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
+%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
+%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
+%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 ) )
+# RHEL-5 based distributions ship a version of openssl that is too old
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/crypt.so
+%endif
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/ganesha.so
+%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
+%dir %{_localstatedir}/run/gluster
+%if 0%{?_tmpfilesdir:1}
+%{_tmpfilesdir}/gluster.conf
+%endif
+
+%if ( 0%{?_without_georeplication:1} )
+%exclude %{_datadir}/glusterfs/scripts/get-gfid.sh
+%exclude %{_datadir}/glusterfs/scripts/slave-upgrade.sh
+%exclude %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
+%exclude %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
+%exclude %{_datadir}/glusterfs/scripts/gsync-sync-gfid
+%exclude %{_datadir}/glusterfs/scripts/schedule_georep.py*
+%exclude %{_libexecdir}/glusterfs/gsyncd
+%exclude %{_libexecdir}/glusterfs/python/syncdaemon/*
+%exclude %{_libexecdir}/glusterfs/gverify.sh
+%exclude %{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
+%exclude %{_libexecdir}/glusterfs/peer_gsec_create
+%exclude %{_libexecdir}/glusterfs/peer_mountbroker
+%exclude %{_libexecdir}/glusterfs/gfind_missing_files
+%exclude %{_sbindir}/gfind_missing_files
+%endif
+
%changelog
* Fri Jan 6 2017 Niels de Vos <ndevos@redhat.com>
- use macro provided by firewalld-filesystem to reload firewalld
-* Thu Dec 19 2016 Jiffin Tony Thottan <jhottan@redhat.com>
-- remove S31ganesha-reset.sh from hooks (#1405951)
-
* Mon Aug 22 2016 Milind Changire <mchangir@redhat.com>
- Add psmisc as dependency for glusterfs-fuse for killall command (#1367665)
diff --git a/libglusterfs/src/compat.h b/libglusterfs/src/compat.h
index 771ed983d32..0b14d20a481 100644
--- a/libglusterfs/src/compat.h
+++ b/libglusterfs/src/compat.h
@@ -483,6 +483,7 @@ int gf_mkostemp (char *tmpl, int suffixlen, int flags);
#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
#endif
+#if 0
#ifndef IPV6_DEFAULT
#ifndef IXDR_GET_LONG
@@ -502,6 +503,7 @@ int gf_mkostemp (char *tmpl, int suffixlen, int flags);
#endif
#endif /* IPV6_DEFAULT */
+#endif
#if defined(__GNUC__) && !defined(RELAX_POISONING)
/* Use run API, see run.h */
diff --git a/makerelease38.sh b/makerelease38.sh
new file mode 100755
index 00000000000..aad16d26d7a
--- /dev/null
+++ b/makerelease38.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+# This script automates the steps required to "cut" a GlusterFS release
+# Before running this script it assumes:
+# - You've run all prove tests and they all pass, or have very good reasons for
+# ignoring any failures.
+# - You have a working GlusterFS repo
+# - You've installed any libraries which are required to build GlusterFS
+# - You are _authorized_ by the storage team to cut a release; e-mail storage@fb.com
+# if you are unsure.
+#
+
+PUBLISH_REPO=false
+TAG_RELEASE=true
+BUILD_FROM_TAG=false
+GFS_VER="3.8.15_fb"
+function usage {
+cat << EOF
+Usage $0 [-p|-t|-n] <release tag>,
+
+Where,
+ -p publish the RPMs to the site-packages repo.
+ -n suppress making git tag for this release (cannot be used with -p).
+ -t check out an existing release tag and build RPMs from it (add -p to publish)
+
+e.g. "$0 1" creates a release called v3.6.3_fb-1
+
+HINT: If you just want to create "test" RPMs, this isn't the script you are
+ looking for; use buildrpm38.sh for that. This script is intended for
+ releasing _production_ quality RPMs.
+
+EOF
+exit 1
+}
+
+(( $# == 0 )) && usage
+while getopts ":apnt" options; do
+ case $options in
+ a ) echo "Hey Mr. Old-skool, '-a' is deprecated; use '-p' to publish your RPMs."
+ echo "NB: unlike '-a', '-p' publishes RPMs *immediately*. Be sure that's what you want!"
+ exit 1
+ ;;
+ p ) PUBLISH_REPO=true;;
+ n ) TAG_RELEASE=false;;
+ t ) BUILD_FROM_TAG=true;;
+ \? ) usage;;
+ * ) usage;;
+ esac
+done
+
+if $PUBLISH_REPO && ! $TAG_RELEASE; then
+ echo "Cannot publish release without tagging."
+ exit 1
+fi
+
+if $BUILD_FROM_TAG; then
+ TAG_RELEASE=false
+fi
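+
+# Illustrative invocations (flags may be combined):
+# ./makerelease38.sh 2 # build RPMs and create tag v3.8.15_fb-2
+# ./makerelease38.sh -p 2 # build, tag, and publish immediately
+# ./makerelease38.sh -t -p 2 # rebuild from the existing tag and publish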
+
+RELEASE_TAG=${@:$OPTIND:1}
+
+echo -n "Checking if user is root..."
+if [ $USER == "root" ]; then
+ echo yes
+ echo "This script is not intended to be run by root, aborting!" && exit 1
+else
+ echo DONE
+fi
+
+echo -n "Checking if $USER is in storage group..."
+if ! getent group storage | grep -q $USER; then
+ echo "$USER not in storage group, aborting!" && exit 1
+else
+ echo DONE
+fi
+
+echo -n "Checking OS version..."
+REDHAT_MAJOR=$(/usr/lib/rpm/redhat/dist.sh --distnum)
+
+if [ "$REDHAT_MAJOR" = "6" -o "$REDHAT_MAJOR" = "7" ]; then
+ echo DONE
+else
+ echo "You are treading unknown ground with Centos $REDHAT_MAJOR! You are likely to be eaten by a grue!"
+ read -p "Press forward (y/n)? " yesno
+ if [ "$yesno" != "y" ]; then
+ exit 1
+ fi
+fi
+
+echo -n "Checking for uncommitted changes..."
+UNCOMMITTED_CHANGES=$(git status -s | grep -v "??")
+if [ ! -z "$UNCOMMITTED_CHANGES" ]; then
+ echo "FAILED"
+ echo "You have changes in your repo. Commit them or stash them before building."
+ exit 1;
+fi
+
+#echo "Updating repo..."
+#if ! ( git fetch && git rebase ); then
+# echo "Unable to update GIT repo, aborting!" && exit 1
+#fi
+#if ! BUILD_VERSION=$(grep AC_INIT.*glusterfs configure.ac | cut -d, -f2 | grep -Eo "[0-9A-Za-z._]+"); then
+# echo "Unable to find build version, aborting!" && exit 1
+#fi
+BUILD_VERSION=3.8.15_fb
+echo "Build version is $BUILD_VERSION..."
+echo "Release tag is $RELEASE_TAG..."
+GIT_TAG="v$BUILD_VERSION-$RELEASE_TAG"
+if $TAG_RELEASE || $BUILD_FROM_TAG; then
+ echo -n "Checking for conflicts for tag $GIT_TAG..."
+ if git tag | grep -qw $GIT_TAG; then
+ if ! $BUILD_FROM_TAG; then
+ echo "FAILED"
+ echo "Gluster release $GIT_TAG already exists, please try again, or pass -t to build from this tag!" && exit 1
+ else
+ if ! git checkout $GIT_TAG; then
+ echo "FAILED"
+ echo "Failed to checkout $GIT_TAG."
+ exit 1
+ fi
+ fi
+ fi
+ echo DONE
+fi
+echo "Building RPMs..."
+if ! ./buildrpm38.sh $RELEASE_TAG; then
+ echo "Failed to build RPMs, aborting!" && exit 1
+fi
+
+if $TAG_RELEASE; then
+ echo "Creating GIT tag for this build..."
+ if ! git tag -a $GIT_TAG -m "GlusterFS build $GIT_TAG"; then
+ echo "Unable to tag build, aborting!" && exit 1
+ fi
+fi
+
+if $PUBLISH_REPO; then
+ echo "Publishing RPMs..."
+ if ! svnyum -y publish site-packages/${REDHAT_MAJOR}/x86_64 ~/local/rpmbuild/RPMS/x86_64/glusterfs*${GFS_VER}-${RELEASE_TAG}.el${REDHAT_MAJOR}.x86_64.rpm; then
+ echo "ERROR: Unable to publish RPMs!"
+ echo "Removing GIT tag ($GIT_TAG) from GFS (local) git repo..."
+ git tag -d $GIT_TAG && echo "Removing tag $GIT_TAG, and aborting."
+ exit 1
+ fi
+fi
+
+if $TAG_RELEASE; then
+ echo "Pushing tag to remote repo..."
+ if ! git push origin $GIT_TAG; then
+ echo "Unable to push tag to repo, aborting!" && exit 1
+ fi
+fi
+
+echo "Successfully released GlusterFS $GIT_TAG!"
diff --git a/xlators/mount/fuse/utils/Makefile.am b/xlators/mount/fuse/utils/Makefile.am
index fdad27ad103..da646dbf9b0 100644
--- a/xlators/mount/fuse/utils/Makefile.am
+++ b/xlators/mount/fuse/utils/Makefile.am
@@ -2,6 +2,9 @@ utildir = @mountutildir@
if GF_LINUX_HOST_OS
util_SCRIPTS = mount.glusterfs
+if GF_FBEXTRAS
+util_SCRIPTS += umount.fuse.glusterfs
+endif
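+# umount.fuse.glusterfs is presumably generated from umount.fuse.glusterfs.in
+# by configure, the same way mount.glusterfs is produced from its .in template.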
else
util_SCRIPTS = mount_glusterfs
endif
diff --git a/xlators/mount/fuse/utils/umount.fuse.glusterfs.in b/xlators/mount/fuse/utils/umount.fuse.glusterfs.in
new file mode 100644
index 00000000000..6a94d3933ff
--- /dev/null
+++ b/xlators/mount/fuse/utils/umount.fuse.glusterfs.in
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+# If $1 is a mountpoint, return it; else if it is a mounted device,
+# return the mountpoint for the device; else return an error.
+#
+# The result is returned in the global $mountpoint.
+get_mountpoint() {
+ if mountpoint -q -- "$1"; then
+ mountpoint=$1
+ return 0
+ fi
+
+ mountpoint=$(grep "^$1" /proc/mounts | cut -d' ' -f2)
+
+ if [ -n "$mountpoint" ]; then
+ return 0
+ fi
+
+ return 1
+}
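+# Example (illustrative): "get_mountpoint /mnt/gvol" leaves /mnt/gvol in
+# $mountpoint; "get_mountpoint /dev/<device>" resolves via /proc/mounts.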
+
+for arg in "$@"; do
+ if get_mountpoint "$arg"; then
+ # Please keep in sync with mount script, which starts this process.
+ squashed_mountpoint=$(echo ${mountpoint} | sed s^/^_^g)
+ pkill -f "gfs_clientstats_scuba.*gfs_client${squashed_mountpoint}.log"
+ pkill -f "gfs_clientstats_ods.*_${squashed_mountpoint}.dump"
+ fi
+done
+
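+# "-i" makes umount skip the /sbin/umount.<fstype> helper, preventing this
+# script from re-invoking itself.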
+umount -i "$@"