summaryrefslogtreecommitdiffstats
path: root/extras/ganesha/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'extras/ganesha/scripts')
-rw-r--r--extras/ganesha/scripts/Makefile.am8
-rwxr-xr-xextras/ganesha/scripts/copy-export-ganesha.sh97
-rwxr-xr-xextras/ganesha/scripts/create-export-ganesha.sh55
-rwxr-xr-xextras/ganesha/scripts/dbus-send.sh74
-rw-r--r--extras/ganesha/scripts/ganesha-ha.sh437
-rwxr-xr-xextras/ganesha/scripts/generate-epoch.py6
6 files changed, 312 insertions, 365 deletions
diff --git a/extras/ganesha/scripts/Makefile.am b/extras/ganesha/scripts/Makefile.am
index c326fc2f136..7e345fd5f19 100644
--- a/extras/ganesha/scripts/Makefile.am
+++ b/extras/ganesha/scripts/Makefile.am
@@ -1,6 +1,6 @@
-EXTRA_DIST= ganesha-ha.sh dbus-send.sh create-export-ganesha.sh \
- generate-epoch.py copy-export-ganesha.sh
+EXTRA_DIST= create-export-ganesha.sh generate-epoch.py dbus-send.sh \
+ ganesha-ha.sh
scriptsdir = $(libexecdir)/ganesha
-scripts_SCRIPTS = create-export-ganesha.sh dbus-send.sh ganesha-ha.sh \
- generate-epoch.py copy-export-ganesha.sh
+scripts_SCRIPTS = create-export-ganesha.sh dbus-send.sh generate-epoch.py \
+ ganesha-ha.sh
diff --git a/extras/ganesha/scripts/copy-export-ganesha.sh b/extras/ganesha/scripts/copy-export-ganesha.sh
deleted file mode 100755
index 9c4afa02d68..00000000000
--- a/extras/ganesha/scripts/copy-export-ganesha.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-
-#This script is called by glusterd when in case of
-#reboot.An export file specific to a volume
-#is copied in GANESHA_DIR/exports from online node.
-
-# Try loading the config from any of the distro
-# specific configuration locations
-if [ -f /etc/sysconfig/ganesha ]
- then
- . /etc/sysconfig/ganesha
-fi
-if [ -f /etc/conf.d/ganesha ]
- then
- . /etc/conf.d/ganesha
-fi
-if [ -f /etc/default/ganesha ]
- then
- . /etc/default/ganesha
-fi
-
-GANESHA_DIR=${1%/}
-VOL=$2
-CONF=
-host=$(hostname -s)
-SECRET_PEM="/var/lib/glusterd/nfs/secret.pem"
-
-function check_cmd_status()
-{
- if [ "$1" != "0" ]
- then
- rm -rf $GANESHA_DIR/exports/export.$VOL.conf
- exit 1
- fi
-}
-
-
-if [ ! -d "$GANESHA_DIR/exports" ];
- then
- mkdir $GANESHA_DIR/exports
- check_cmd_status `echo $?`
-fi
-
-function find_rhel7_conf
-{
- while [[ $# > 0 ]]
- do
- key="$1"
- case $key in
- -f)
- CONFFILE="$2"
- ;;
- *)
- ;;
- esac
- shift
- done
-}
-
-if [ -z $CONFFILE ]; then
- find_rhel7_conf $OPTIONS
-
-fi
-CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}
-
-#remove the old export entry from NFS-Ganesha
-#if already exported
-dbus-send --type=method_call --print-reply --system \
- --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
- org.ganesha.nfsd.exportmgr.ShowExports \
- | grep -w -q "/$VOL"
-if [ $? -eq 0 ]; then
- removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\
- grep Export_Id | awk -F"[=,;]" '{print$2}' | tr -d '[[:space:]]'`
-
- dbus-send --print-reply --system --dest=org.ganesha.nfsd \
- /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.RemoveExport \
- uint16:$removed_id 2>&1
-fi
-
-ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
-IFS=$' '
-for server in ${ha_servers} ; do
- current_host=`echo $server | cut -d "." -f 1`
- if [ $host != $current_host ]
- then
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
- ${SECRET_PEM} $server:$GANESHA_DIR/exports/export.$VOL.conf \
- $GANESHA_DIR/exports/export.$VOL.conf
- break
- fi
-done
-
-if ! (cat $CONF | grep $VOL.conf\"$ )
-then
-echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
-fi
diff --git a/extras/ganesha/scripts/create-export-ganesha.sh b/extras/ganesha/scripts/create-export-ganesha.sh
index a1a35dba58a..3040e8138b0 100755
--- a/extras/ganesha/scripts/create-export-ganesha.sh
+++ b/extras/ganesha/scripts/create-export-ganesha.sh
@@ -21,14 +21,17 @@ if [ -f /etc/default/ganesha ]
fi
GANESHA_DIR=${1%/}
-VOL=$2
-CONF=
+OPTION=$2
+VOL=$3
+CONF=$GANESHA_DIR"/ganesha.conf"
+declare -i EXPORT_ID
function check_cmd_status()
{
if [ "$1" != "0" ]
then
rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
exit 1
fi
}
@@ -40,28 +43,6 @@ if [ ! -d "$GANESHA_DIR/exports" ];
check_cmd_status `echo $?`
fi
-function find_rhel7_conf
-{
- while [[ $# > 0 ]]
- do
- key="$1"
- case $key in
- -f)
- CONFFILE="$2"
- ;;
- *)
- ;;
- esac
- shift
- done
-}
-
-if [ -z $CONFFILE ]; then
- find_rhel7_conf $OPTIONS
-
-fi
-CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}
-
function write_conf()
{
echo -e "# WARNING : Using Gluster CLI will overwrite manual
@@ -83,11 +64,29 @@ echo " Pseudo=\"/$VOL\";"
echo ' Protocols = "3", "4" ;'
echo ' Transports = "UDP","TCP";'
echo ' SecType = "sys";'
+echo ' Security_Label = False;'
echo " }"
}
-
-write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf
-if ! (cat $CONF | grep $VOL.conf\"$ )
+if [ "$OPTION" = "on" ];
then
-echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
+ if ! (cat $CONF | grep $VOL.conf\"$ )
+ then
+ write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf
+ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
+ count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
+ if [ "$count" = "1" ] ; then
+ EXPORT_ID=2
+ else
+ EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+ check_cmd_status `echo $?`
+ EXPORT_ID=EXPORT_ID+1
+ sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \
+ $GANESHA_DIR/exports/export.$VOL.conf
+ check_cmd_status `echo $?`
+ fi
+ echo $EXPORT_ID > $GANESHA_DIR/.export_added
+ fi
+else
+ rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
fi
diff --git a/extras/ganesha/scripts/dbus-send.sh b/extras/ganesha/scripts/dbus-send.sh
index 14af095464d..9d613a0e7ad 100755
--- a/extras/ganesha/scripts/dbus-send.sh
+++ b/extras/ganesha/scripts/dbus-send.sh
@@ -15,71 +15,22 @@ if [ -f /etc/default/ganesha ]
. /etc/default/ganesha
fi
-declare -i EXPORT_ID
GANESHA_DIR=${1%/}
OPTION=$2
VOL=$3
-CONF=
-
-function find_rhel7_conf
-{
- while [[ $# > 0 ]]
- do
- key="$1"
- case $key in
- -f)
- CONFFILE="$2"
- break;
- ;;
- *)
- ;;
- esac
- shift
- done
-}
-
-if [ -z $CONFFILE ]
- then
- find_rhel7_conf $OPTIONS
-
-fi
-
-CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}
+CONF=$GANESHA_DIR"/ganesha.conf"
function check_cmd_status()
{
if [ "$1" != "0" ]
- then
- rm -rf $GANESHA_DIR/exports/export.$VOL.conf
- sed -i /$VOL.conf/d $CONF
- exit 1
+ then
+        logger "dynamic export failed on node :$(hostname -s)"
fi
}
#This function keeps track of export IDs and increments it with every new entry
function dynamic_export_add()
{
- count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
- if [ "$count" = "1" ] ;
- then
- EXPORT_ID=2
- else
- #if [ -s /var/lib/ganesha/export_removed ];
- # then
- # EXPORT_ID=`head -1 /var/lib/ganesha/export_removed`
- # sed -i -e "1d" /var/lib/ganesha/export_removed
- # else
-
- EXPORT_ID=`cat $GANESHA_DIR/.export_added`
- check_cmd_status `echo $?`
- EXPORT_ID=EXPORT_ID+1
- #fi
- fi
- echo $EXPORT_ID > $GANESHA_DIR/.export_added
- check_cmd_status `echo $?`
- sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \
-$GANESHA_DIR/exports/export.$VOL.conf
- check_cmd_status `echo $?`
dbus-send --system \
--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
org.ganesha.nfsd.exportmgr.AddExport string:$GANESHA_DIR/exports/export.$VOL.conf \
@@ -90,16 +41,22 @@ string:"EXPORT(Path=/$VOL)"
#This function removes an export dynamically(uses the export_id of the export)
function dynamic_export_remove()
{
- removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\
-grep Export_Id | awk -F"[=,;]" '{print$2}'| tr -d '[[:space:]]'`
- check_cmd_status `echo $?`
+    # Fetch all exports via the ShowExports command and search for the
+    # export entry based on its path, then read that entry's export id.
+    # There are two possibilities for the path: either the entire volume
+    # is exported or a subdir. Both cases are handled, but only the first
+    # entry in the list is removed, on the assumption that the entry
+    # exported via the CLI has the lowest export id value.
+ removed_id=$(dbus-send --type=method_call --print-reply --system \
+ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+ org.ganesha.nfsd.exportmgr.ShowExports | grep -B 1 -we \
+ "/"$VOL -e "/"$VOL"/" | grep uint16 | awk '{print $2}' \
+ | head -1)
+
dbus-send --print-reply --system \
--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id
check_cmd_status `echo $?`
- sed -i /$VOL.conf/d $CONF
- rm -rf $GANESHA_DIR/exports/export.$VOL.conf
-
}
if [ "$OPTION" = "on" ];
@@ -111,4 +68,3 @@ if [ "$OPTION" = "off" ];
then
dynamic_export_remove $@
fi
-
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index ac8c91f194e..9790a719e10 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -20,16 +20,26 @@
# ensure that the NFS GRACE DBUS signal is sent after the VIP moves to
# the new host.
+GANESHA_HA_SH=$(realpath $0)
HA_NUM_SERVERS=0
HA_SERVERS=""
-HA_CONFDIR="/etc/ganesha"
HA_VOL_NAME="gluster_shared_storage"
-HA_VOL_MNT="/var/run/gluster/shared_storage"
+HA_VOL_MNT="/run/gluster/shared_storage"
+HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha"
SERVICE_MAN="DISTRO_NOT_FOUND"
-RHEL6_PCS_CNAME_OPTION="--name"
+# rhel, fedora id, version
+ID=""
+VERSION_ID=""
+
+PCS9OR10_PCS_CNAME_OPTION=""
+PCS9OR10_PCS_CLONE_OPTION="clone"
SECRET_PEM="/var/lib/glusterd/nfs/secret.pem"
+# UNBLOCK RA uses shared_storage which may become unavailable
+# during any of the nodes reboot. Hence increase timeout value.
+PORTBLOCK_UNBLOCK_TIMEOUT="60s"
+
# Try loading the config from any of the distro
# specific configuration locations
if [ -f /etc/sysconfig/ganesha ]
@@ -64,9 +74,9 @@ function find_rhel7_conf
done
}
-if [ -z $CONFFILE ]
+if [ -z ${CONFFILE} ]
then
- find_rhel7_conf $OPTIONS
+ find_rhel7_conf ${OPTIONS}
fi
@@ -86,9 +96,9 @@ usage() {
determine_service_manager () {
- if [ -e "/usr/bin/systemctl" ];
+ if [ -e "/bin/systemctl" ];
then
- SERVICE_MAN="/usr/bin/systemctl"
+ SERVICE_MAN="/bin/systemctl"
elif [ -e "/sbin/invoke-rc.d" ];
then
SERVICE_MAN="/sbin/invoke-rc.d"
@@ -96,9 +106,9 @@ determine_service_manager () {
then
SERVICE_MAN="/sbin/service"
fi
- if [ "$SERVICE_MAN" == "DISTRO_NOT_FOUND" ]
+ if [[ "${SERVICE_MAN}X" == "DISTRO_NOT_FOUNDX" ]]
then
- echo "Service manager not recognized, exiting"
+ logger "Service manager not recognized, exiting"
exit 1
fi
}
@@ -107,13 +117,23 @@ manage_service ()
{
local action=${1}
local new_node=${2}
- if [ "$SERVICE_MAN" == "/usr/bin/systemctl" ]
+ local option=
+
+ if [[ "${action}" == "start" ]]; then
+ option="yes"
+ else
+ option="no"
+ fi
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${new_node} "${GANESHA_HA_SH} --setup-ganesha-conf-files $HA_CONFDIR $option"
+
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]
then
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} root@${new_node} "$SERVICE_MAN ${action} nfs-ganesha"
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} ${action} nfs-ganesha"
else
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} root@${new_node} "$SERVICE_MAN nfs-ganesha ${action}"
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} nfs-ganesha ${action}"
fi
}
@@ -125,7 +145,7 @@ check_cluster_exists()
if [ -e /var/run/corosync.pid ]; then
cluster_name=$(pcs status | grep "Cluster name:" | cut -d ' ' -f 3)
- if [ ${cluster_name} -a ${cluster_name} = ${name} ]; then
+ if [[ "${cluster_name}X" == "${name}X" ]]; then
logger "$name already exists, exiting"
exit 0
fi
@@ -140,7 +160,7 @@ determine_servers()
local tmp_ifs=${IFS}
local ha_servers=""
- if [ "X${cmd}X" != "XsetupX" -a "X${cmd}X" != "XstatusX" ]; then
+ if [ "${cmd}X" != "setupX" -a "${cmd}X" != "statusX" ]; then
ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
IFS=$' '
for server in ${ha_servers} ; do
@@ -160,6 +180,13 @@ determine_servers()
fi
}
+stop_ganesha_all()
+{
+ local serverlist=${1}
+ for node in ${serverlist} ; do
+ manage_service "stop" ${node}
+ done
+}
setup_cluster()
{
@@ -169,16 +196,23 @@ setup_cluster()
local unclean=""
local quorum_policy="stop"
-
logger "setting up cluster ${name} with the following ${servers}"
- pcs cluster auth ${servers}
- # pcs cluster setup --name ${name} ${servers}
- pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --transport udpu ${servers}
+ # pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} ${servers}
+ pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers}
if [ $? -ne 0 ]; then
- logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed"
+ logger "pcs cluster setup ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers} failed, shutting down ganesha and bailing out"
+        # setup failed: stop all ganesha processes and clean up symlinks in the cluster
+ stop_ganesha_all "${servers}"
exit 1;
fi
+
+ # pcs cluster auth ${servers}
+ pcs cluster auth
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster auth failed"
+ fi
+
pcs cluster start --all
if [ $? -ne 0 ]; then
logger "pcs cluster start failed"
@@ -194,7 +228,7 @@ setup_cluster()
done
unclean=$(pcs status | grep -u "UNCLEAN")
- while [[ "${unclean}X" = "UNCLEANX" ]]; do
+ while [[ "${unclean}X" == "UNCLEANX" ]]; do
sleep 1
unclean=$(pcs status | grep -u "UNCLEAN")
done
@@ -221,86 +255,38 @@ setup_finalize_ha()
local stopped=""
stopped=$(pcs status | grep -u "Stopped")
- while [[ "${stopped}X" = "StoppedX" ]]; do
+ while [[ "${stopped}X" == "StoppedX" ]]; do
sleep 1
stopped=$(pcs status | grep -u "Stopped")
done
}
-setup_copy_config()
-{
- local short_host=$(hostname -s)
- local tganesha_conf=$(mktemp -u)
-
- if [ -e ${SECRET_PEM} ]; then
- while [[ ${1} ]]; do
- current_host=`echo ${1} | cut -d "." -f 1`
- if [ ${short_host} != ${current_host} ]; then
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${HA_CONFDIR}/ganesha-ha.conf ${1}:${HA_CONFDIR}/
- if [ $? -ne 0 ]; then
- logger "warning: scp ganesha-ha.conf to ${1} failed"
- fi
- fi
- shift
- done
- else
- logger "warning: scp ganesha-ha.conf to ${1} failed"
- fi
-}
-
refresh_config ()
{
local short_host=$(hostname -s)
local VOL=${1}
local HA_CONFDIR=${2}
- local tganesha_vol_conf=$(mktemp)
local short_host=$(hostname -s)
- cp ${HA_CONFDIR}/exports/export.$VOL.conf \
-${tganesha_vol_conf}
+ local export_id=$(grep ^[[:space:]]*Export_Id $HA_CONFDIR/exports/export.$VOL.conf |\
+ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]')
+
if [ -e ${SECRET_PEM} ]; then
while [[ ${3} ]]; do
current_host=`echo ${3} | cut -d "." -f 1`
- if [ ${short_host} != ${current_host} ]; then
- removed_id=$(ssh -oPasswordAuthentication=no \
--oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
-"cat $HA_CONFDIR/exports/export.$VOL.conf |\
-grep Export_Id | awk -F\"[=,;]\" '{print \$2}' | tr -d '[[:space:]]'")
-
+ if [[ ${short_host} != ${current_host} ]]; then
output=$(ssh -oPasswordAuthentication=no \
-oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
"dbus-send --print-reply --system --dest=org.ganesha.nfsd \
-/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.RemoveExport \
-uint16:$removed_id 2>&1")
- ret=$?
- logger <<< "${output}"
- if [ ${ret} -ne 0 ]; then
- echo "Error: refresh-config failed on ${current_host}."
- exit 1
- fi
- sleep 1
- sed -i s/Export_Id.*/"Export_Id= $removed_id ;"/ \
- ${tganesha_vol_conf}
-
- scp -q -oPasswordAuthentication=no \
--oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${tganesha_vol_conf} \
-${current_host}:${HA_CONFDIR}/exports/export.$VOL.conf
-
- output=$(ssh -oPasswordAuthentication=no \
--oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
-"dbus-send --print-reply --system --dest=org.ganesha.nfsd \
-/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \
string:$HA_CONFDIR/exports/export.$VOL.conf \
-string:\"EXPORT(Path=/$removed_id)\" 2>&1")
+string:\"EXPORT(Export_Id=$export_id)\" 2>&1")
ret=$?
logger <<< "${output}"
if [ ${ret} -ne 0 ]; then
- echo "Error: refresh-config failed on ${current_host}."
- exit 1
+ echo "Refresh-config failed on ${current_host}. Please check logs on ${current_host}"
else
echo "Refresh-config completed on ${current_host}."
fi
@@ -314,66 +300,17 @@ string:\"EXPORT(Path=/$removed_id)\" 2>&1")
fi
# Run the same command on the localhost,
- removed_id=`cat $HA_CONFDIR/exports/export.$VOL.conf |\
-grep Export_Id | awk -F"[=,;]" '{print$2}' | tr -d '[[:space:]]'`
output=$(dbus-send --print-reply --system --dest=org.ganesha.nfsd \
-/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.RemoveExport \
-uint16:$removed_id 2>&1)
- ret=$?
- logger <<< "${output}"
- if [ ${ret} -ne 0 ]; then
- echo "Error: refresh-config failed on localhost."
- exit 1
- fi
- sleep 1
- output=$(dbus-send --print-reply --system --dest=org.ganesha.nfsd \
-/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \
string:$HA_CONFDIR/exports/export.$VOL.conf \
-string:"EXPORT(Path=/$removed_id)" 2>&1)
+string:"EXPORT(Export_Id=$export_id)" 2>&1)
ret=$?
logger <<< "${output}"
if [ ${ret} -ne 0 ] ; then
- echo "Error: refresh-config failed on localhost."
- exit 1
+ echo "Refresh-config failed on localhost."
else
echo "Success: refresh-config completed."
fi
- rm -f ${tganesha_vol_conf}
-
-}
-
-copy_export_config ()
-{
- local new_node=${1}
- local tganesha_conf=$(mktemp)
- local tganesha_exports=$(mktemp -d)
- local short_host=$(hostname -s)
- # avoid prompting for password, even with password-less scp
- # scp $host1:$file $host2:$file prompts for the password
- # Ideally all the existing nodes in the cluster should have same
- # copy of the configuration files. Maybe for sanity check, copy
- # the state from HA_VOL_SERVER?
- if [ "${HA_VOL_SERVER}" == $(hostname) ]
- then
- cp ${GANESHA_CONF} ${tganesha_conf}
- else
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${HA_VOL_SERVER}:${GANESHA_CONF} $short_host:${tganesha_conf}
- fi
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${tganesha_conf} ${new_node}:${GANESHA_CONF}
- rm -f ${tganesha_conf}
-
- if [ "${HA_VOL_SERVER}" == $(hostname) ]
- then
- cp -r ${HA_CONFDIR}/exports ${tganesha_exports}
- else
- scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${HA_VOL_SERVER}:${HA_CONFDIR}/exports/ $short_host:${tganesha_exports}
- fi
- scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${tganesha_exports}/exports ${new_node}:${HA_CONFDIR}/
- rm -rf ${tganesha_exports}
}
@@ -420,13 +357,10 @@ teardown_cluster()
cleanup_ganesha_config ()
{
- rm -rf ${HA_CONFDIR}/exports/*.conf
- rm -rf ${HA_CONFDIR}/.export_added
+ rm -f /etc/corosync/corosync.conf
rm -rf /etc/cluster/cluster.conf*
rm -rf /var/lib/pacemaker/cib/*
- rm -f /etc/corosync/corosync.conf
- sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' ${GANESHA_CONF}
- rm -rf ${HA_VOL_MNT}/nfs-ganesha
+ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $HA_CONFDIR/ganesha.conf
}
do_create_virt_ip_constraints()
@@ -437,17 +371,17 @@ do_create_virt_ip_constraints()
# first a constraint location rule that says the VIP must be where
# there's a ganesha.nfsd running
- pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 rule score=-INFINITY ganesha-active ne 1
+ pcs -f ${cibfile} constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1
if [ $? -ne 0 ]; then
- logger "warning: pcs constraint location ${primary}-cluster_ip-1 rule score=-INFINITY ganesha-active ne 1 failed"
+ logger "warning: pcs constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1 failed"
fi
# then a set of constraint location prefers to set the prefered order
# for where a VIP should move
while [[ ${1} ]]; do
- pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 prefers ${1}=${weight}
+ pcs -f ${cibfile} constraint location ${primary}-group prefers ${1}=${weight}
if [ $? -ne 0 ]; then
- logger "warning: pcs constraint location ${primary}-cluster_ip-1 prefers ${1}=${weight} failed"
+ logger "warning: pcs constraint location ${primary}-group prefers ${1}=${weight} failed"
fi
weight=$(expr ${weight} + 1000)
shift
@@ -457,9 +391,9 @@ do_create_virt_ip_constraints()
# on Fedora setting appears to be additive, so to get the desired
# value we adjust the weight
# weight=$(expr ${weight} - 100)
- pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 prefers ${primary}=${weight}
+ pcs -f ${cibfile} constraint location ${primary}-group prefers ${primary}=${weight}
if [ $? -ne 0 ]; then
- logger "warning: pcs constraint location ${primary}-cluster_ip-1 prefers ${primary}=${weight} failed"
+ logger "warning: pcs constraint location ${primary}-group prefers ${primary}=${weight} failed"
fi
}
@@ -475,7 +409,7 @@ wrap_create_virt_ip_constraints()
# the result is "node2 node3 node4"; for node2, "node3 node4 node1"
# and so on.
while [[ ${1} ]]; do
- if [ "${1}" = "${primary}" ]; then
+ if [[ ${1} == ${primary} ]]; then
shift
while [[ ${1} ]]; do
tail=${tail}" "${1}
@@ -506,15 +440,15 @@ setup_create_resources()
local cibfile=$(mktemp -u)
# fixup /var/lib/nfs
- logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone"
- pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone
+ logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}"
+ pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone failed"
+ logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
- pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone
+ pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION}
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone failed"
+ logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
# see comment in (/usr/lib/ocf/resource.d/heartbeat/ganesha_grace
@@ -522,9 +456,9 @@ setup_create_resources()
# ganesha-active crm_attribute
sleep 5
- pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone meta notify=true
+ pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} notify=true
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone failed"
+ logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1
@@ -554,9 +488,16 @@ setup_create_resources()
eval tmp_ipaddr=\$${clean_name}
ipaddr=${tmp_ipaddr//_/.}
- pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s
+ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=block ip=${ipaddr} --group ${1}-group
if [ $? -ne 0 ]; then
- logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s failed"
+ logger "warning pcs resource create ${1}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s failed"
fi
pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
@@ -564,6 +505,16 @@ setup_create_resources()
logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
fi
+ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_unblock failed"
+ fi
+
+
shift
done
@@ -602,9 +553,9 @@ teardown_resources()
fi
while [[ ${1} ]]; do
- pcs resource delete ${1}-cluster_ip-1
+ pcs resource delete ${1}-group
if [ $? -ne 0 ]; then
- logger "warning: pcs resource delete ${1}-cluster_ip-1 failed"
+ logger "warning: pcs resource delete ${1}-group failed"
fi
shift
done
@@ -627,9 +578,16 @@ recreate_resources()
eval tmp_ipaddr=\$${clean_name}
ipaddr=${tmp_ipaddr//_/.}
- pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s
+ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=block ip=${ipaddr} --group ${1}-group
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
if [ $? -ne 0 ]; then
- logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=10s failed"
+ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s failed"
fi
pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
@@ -637,6 +595,15 @@ recreate_resources()
logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
fi
+ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_unblock failed"
+ fi
+
shift
done
}
@@ -650,15 +617,32 @@ addnode_recreate_resources()
recreate_resources ${cibfile} ${HA_SERVERS}
- pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${add_vip} cidr_netmask=32 op monitor interval=15s
+ pcs -f ${cibfile} resource create ${add_node}-nfs_block ocf:heartbeat:portblock \
+ protocol=tcp portno=2049 action=block ip=${add_vip} --group ${add_node}-group
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${add_node}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+ ip=${add_vip} cidr_netmask=32 op monitor interval=15s --group ${add_node}-group \
+ --after ${add_node}-nfs_block
if [ $? -ne 0 ]; then
- logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${add_vip} cidr_netmask=32 op monitor interval=10s failed"
+ logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+ ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
fi
pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1
if [ $? -ne 0 ]; then
logger "warning: pcs constraint order nfs-grace-clone then ${add_node}-cluster_ip-1 failed"
fi
+ pcs -f ${cibfile} resource create ${add_node}-nfs_unblock ocf:heartbeat:portblock \
+ protocol=tcp portno=2049 action=unblock ip=${add_vip} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${add_node}-group --after \
+ ${add_node}-cluster_ip-1 op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start \
+ timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op monitor interval=10s \
+ timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${add_node}-nfs_unblock failed"
+ fi
}
@@ -667,9 +651,9 @@ clear_resources()
local cibfile=${1}; shift
while [[ ${1} ]]; do
- pcs -f ${cibfile} resource delete ${1}-cluster_ip-1
+ pcs -f ${cibfile} resource delete ${1}-group
if [ $? -ne 0 ]; then
- logger "warning: pcs -f ${cibfile} resource delete ${1}-cluster_ip-1"
+ logger "warning: pcs -f ${cibfile} resource delete ${1}-group"
fi
shift
@@ -746,7 +730,7 @@ deletenode_update_haconfig()
local clean_name=${name//[-.]/_}
ha_servers=$(echo ${HA_SERVERS} | sed -e "s/ /,/")
- sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${clean_name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf
+ sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf
}
@@ -769,7 +753,9 @@ setup_state_volume()
dirname=${1}${dname}
fi
-
+ if [ ! -d ${mnt}/nfs-ganesha/tickle_dir ]; then
+ mkdir ${mnt}/nfs-ganesha/tickle_dir
+ fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname} ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}
fi
@@ -781,9 +767,11 @@ setup_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
@@ -793,15 +781,17 @@ setup_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
fi
for server in ${HA_SERVERS} ; do
- if [ ${server} != ${dirname} ]; then
+ if [[ ${server} != ${dirname} ]]; then
ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
fi
@@ -812,6 +802,21 @@ setup_state_volume()
}
+enable_pacemaker()
+{
+ while [[ ${1} ]]; do
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]; then
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker"
+ else
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} pacemaker enable"
+ fi
+ shift
+ done
+}
+
+
addnode_state_volume()
{
local newnode=${1}; shift
@@ -840,9 +845,11 @@ addnode_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
@@ -852,15 +859,18 @@ addnode_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
fi
for server in ${HA_SERVERS} ; do
+
if [[ ${server} != ${dirname} ]]; then
ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
@@ -893,7 +903,7 @@ delnode_state_volume()
rm -rf ${mnt}/nfs-ganesha/${dirname}
for server in ${HA_SERVERS} ; do
- if [[ "${server}" != "${dirname}" ]]; then
+ if [[ ${server} != ${dirname} ]]; then
rm -f ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname}
rm -f ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname}
fi
@@ -909,8 +919,9 @@ status()
local index=1
local nodes
- # change tabs to spaces, strip leading spaces
- pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*//" > ${scratch}
+ # change tabs to spaces, strip leading spaces, including any
+ # new '*' at the beginning of a line introduced in pcs-0.10.x
+ pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*\*//" -e "s/^[ ]*//" > ${scratch}
nodes[0]=${1}; shift
@@ -925,7 +936,7 @@ status()
done
# print the nodes that are expected to be online
- grep -E "^Online:" ${scratch}
+ grep -E "Online:" ${scratch}
echo
@@ -934,12 +945,18 @@ status()
echo
- # check if the VIP RAs are on the expected nodes
+ # check if the VIP and port block/unblock RAs are on the expected nodes
for n in ${nodes[*]}; do
+ grep -E -x "${n}-nfs_block \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+ result=$?
+ ((healthy+=${result}))
grep -E -x "${n}-cluster_ip-1 \(ocf::heartbeat:IPaddr\): Started ${n}" > /dev/null 2>&1 ${scratch}
result=$?
((healthy+=${result}))
+ grep -E -x "${n}-nfs_unblock \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+ result=$?
+ ((healthy+=${result}))
done
grep -E "\):\ Stopped|FAILED" > /dev/null 2>&1 ${scratch}
@@ -956,6 +973,43 @@ status()
rm -f ${scratch}
}
create_ganesha_conf_file()
{
    # $1 == "yes": replace the local ganesha.conf with a symlink to the
    # copy kept in the shared storage (HA_CONFDIR), so every node sees
    # a single consistent configuration and edits stay in one place.
    # Otherwise: restore a standalone local copy, stripping the
    # cluster-specific %include lines.
    #
    # NOTE(review): the original used "rm -rf" on what should always be
    # a regular file or symlink; "rm -f" removes the same targets
    # without the risk of recursively deleting a directory by accident.
    if [[ "${1}" == "yes" ]]; then
        if [[ -e "${GANESHA_CONF}" ]]; then
            rm -f -- "${GANESHA_CONF}"
        fi
        ln -s "${HA_CONFDIR}/ganesha.conf" "${GANESHA_CONF}"
    else
        # Restoring previous file
        rm -f -- "${GANESHA_CONF}"
        cp "${HA_CONFDIR}/ganesha.conf" "${GANESHA_CONF}"
        # Drop the per-volume export includes added by the HA setup.
        sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' "${GANESHA_CONF}"
    fi
}
+
set_quorum_policy()
{
    # Tell pacemaker what to do when quorum is lost. A cluster with
    # fewer than three nodes can never retain quorum after a failure,
    # so quorum is ignored there; otherwise resources are stopped.
    local node_count="${1}"
    local policy="stop"

    if (( node_count < 3 )); then
        policy="ignore"
    fi

    if ! pcs property set no-quorum-policy="${policy}"; then
        logger "warning: pcs property set no-quorum-policy=${policy} failed"
    fi
}
main()
{
@@ -965,6 +1019,18 @@ main()
usage
exit 0
fi
+
+ if (selinuxenabled) ;then
+ semanage boolean -m gluster_use_execmem --on
+ fi
+
+ local osid=""
+
+ osid=$(grep ^ID= /etc/os-release)
+ eval $(echo ${osid} | grep -F ID=)
+ osid=$(grep ^VERSION_ID= /etc/os-release)
+ eval $(echo ${osid} | grep -F VERSION_ID=)
+
HA_CONFDIR=${1%/}; shift
local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
local node=""
@@ -985,7 +1051,19 @@ main()
determine_servers "setup"
- if [ "X${HA_NUM_SERVERS}X" != "X1X" ]; then
+ # Fedora 29+ and rhel/centos 8 has PCS-0.10.x
+ # default is pcs-0.10.x options but check for
+ # rhel/centos 7 (pcs-0.9.x) and adjust accordingly
+ if [[ ! ${ID} =~ {rhel,centos} ]]; then
+ if [[ ${VERSION_ID} == 7.* ]]; then
+ PCS9OR10_PCS_CNAME_OPTION="--name"
+ PCS9OR10_PCS_CLONE_OPTION="--clone"
+ fi
+ fi
+
+ if [[ "${HA_NUM_SERVERS}X" != "1X" ]]; then
+
+ determine_service_manager
setup_cluster ${HA_NAME} ${HA_NUM_SERVERS} "${HA_SERVERS}"
@@ -995,7 +1073,7 @@ main()
setup_state_volume ${HA_SERVERS}
- setup_copy_config ${HA_SERVERS}
+ enable_pacemaker ${HA_SERVERS}
else
@@ -1025,8 +1103,6 @@ main()
logger "adding ${node} with ${vip} to ${HA_NAME}"
- copy_export_config ${node} ${HA_CONFDIR}
-
determine_service_manager
manage_service "start" ${node}
@@ -1045,7 +1121,7 @@ main()
        # newly added node to the file so that the resources specific
# to this node is correctly recreated in the future.
clean_node=${node//[-.]/_}
- echo "VIP_$clean_node=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
+ echo "VIP_${node}=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
NEW_NODES="$HA_CLUSTER_NODES,${node}"
@@ -1054,7 +1130,11 @@ $HA_CONFDIR/ganesha-ha.conf
addnode_state_volume ${node}
- setup_copy_config ${HA_SERVERS}
+ # addnode_create_resources() already appended ${node} to
+ # HA_SERVERS, so only need to increment HA_NUM_SERVERS
+ # and set quorum policy
+ HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} + 1)
+ set_quorum_policy ${HA_NUM_SERVERS}
;;
delete | --delete)
@@ -1073,13 +1153,14 @@ $HA_CONFDIR/ganesha-ha.conf
deletenode_update_haconfig ${node}
- setup_copy_config ${HA_SERVERS}
-
delnode_state_volume ${node}
determine_service_manager
manage_service "stop" ${node}
+
+ HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} - 1)
+ set_quorum_policy ${HA_NUM_SERVERS}
;;
status | --status)
@@ -1096,6 +1177,11 @@ $HA_CONFDIR/ganesha-ha.conf
refresh_config ${VOL} ${HA_CONFDIR} ${HA_SERVERS}
;;
+ setup-ganesha-conf-files | --setup-ganesha-conf-files)
+
+ create_ganesha_conf_file ${1}
+ ;;
+
*)
# setup and teardown are not intended to be used by a
# casual user
@@ -1104,7 +1190,10 @@ $HA_CONFDIR/ganesha-ha.conf
;;
esac
+
+ if (selinuxenabled) ;then
+ semanage boolean -m gluster_use_execmem --off
+ fi
}
main $*
-
diff --git a/extras/ganesha/scripts/generate-epoch.py b/extras/ganesha/scripts/generate-epoch.py
index 5db5e56b480..77af014bab9 100755
--- a/extras/ganesha/scripts/generate-epoch.py
+++ b/extras/ganesha/scripts/generate-epoch.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
@@ -36,13 +36,13 @@ def epoch_uuid():
uuid_bin = binascii.unhexlify(glusterd_uuid.replace("-",""))
- epoch_uuid = int(uuid_bin.encode('hex'), 32) & 0xFFFF0000
+ epoch_uuid = int(binascii.hexlify(uuid_bin), 32) & 0xFFFF0000
return epoch_uuid
# Construct epoch as follows -
# first 32-bit contains the now() time
# rest 32-bit value contains the local glusterd node uuid
epoch = (epoch_now() | epoch_uuid())
-print str(epoch)
+print((str(epoch)))
exit(0)