From 885ecce6e2df6464b388f42c91211ed31e17654d Mon Sep 17 00:00:00 2001
From: Soumya Koduri
Date: Fri, 9 Dec 2016 12:18:28 +0530
Subject: common-ha: Create portblock RA as part of add/delete-node

When a node is added to or deleted from an existing nfs-ganesha
cluster, the portblock RA for that node needs to be created or
cleaned up as well. This patch addresses that.

In addition, the quorum-policy has to be adjusted as the number of
nodes in the cluster increases or decreases.

Change-Id: I31a896715b9b7fc931009723d1570bf7aa4da9b6
BUG: 1403130
Signed-off-by: Soumya Koduri
Reviewed-on: http://review.gluster.org/16089
NetBSD-regression: NetBSD Build System
Smoke: Gluster Build System
CentOS-regression: Gluster Build System
Reviewed-by: jiffin tony Thottan
Reviewed-by: Kaleb KEITHLEY
---
 extras/ganesha/scripts/ganesha-ha.sh | 69 +++++++++++++++++++++++++++++++-----
 1 file changed, 61 insertions(+), 8 deletions(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 64e52662886..5fa8bf5363b 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -558,9 +558,16 @@ recreate_resources()
         eval tmp_ipaddr=\$${clean_name}
         ipaddr=${tmp_ipaddr//_/.}
 
-        pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s
+        pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+        portno=2049 action=block ip=${ipaddr} --group ${1}-group
         if [ $? -ne 0 ]; then
-            logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=10s failed"
+            logger "warning pcs resource create ${1}-nfs_block failed"
+        fi
+        pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+        cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
+        if [ $? -ne 0 ]; then
+            logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+            cidr_netmask=32 op monitor interval=15s failed"
         fi
 
         pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
@@ -568,6 +575,15 @@ recreate_resources()
             logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
         fi
 
+        pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+        portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+        tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+        op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+        op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+        if [ $? -ne 0 ]; then
+            logger "warning pcs resource create ${1}-nfs_unblock failed"
+        fi
+
         shift
     done
 }
@@ -581,15 +597,32 @@ addnode_recreate_resources()
 
     recreate_resources ${cibfile} ${HA_SERVERS}
 
-    pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${add_vip} cidr_netmask=32 op monitor interval=15s
+    pcs -f ${cibfile} resource create ${add_node}-nfs_block ocf:heartbeat:portblock \
+    protocol=tcp portno=2049 action=block ip=${ipaddr} --group ${add_node}-group
     if [ $? -ne 0 ]; then
-        logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${add_vip} cidr_netmask=32 op monitor interval=10s failed"
+        logger "warning pcs resource create ${add_node}-nfs_block failed"
+    fi
+    pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+    ip=${ipaddr} cidr_netmask=32 op monitor interval=15s --group ${add_node}-group \
+    --after ${add_node}-nfs_block
+    if [ $? -ne 0 ]; then
+        logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+        ip=${ipaddr} cidr_netmask=32 op monitor interval=15s failed"
     fi
 
     pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1
     if [ $? -ne 0 ]; then
         logger "warning: pcs constraint order nfs-grace-clone then ${add_node}-cluster_ip-1 failed"
     fi
+
+    pcs -f ${cibfile} resource create ${add_node}-nfs_unblock ocf:heartbeat:portblock \
+    protocol=tcp portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+    tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${add_node}-group --after \
+    ${add_node}-cluster_ip-1 op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start \
+    timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op monitor interval=10s \
+    timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+    if [ $? -ne 0 ]; then
+        logger "warning pcs resource create ${add_node}-nfs_unblock failed"
+    fi
 }
@@ -598,9 +631,9 @@ clear_resources()
     local cibfile=${1}; shift
 
     while [[ ${1} ]]; do
-        pcs -f ${cibfile} resource delete ${1}-cluster_ip-1
+        pcs -f ${cibfile} resource delete ${1}-group
         if [ $? -ne 0 ]; then
-            logger "warning: pcs -f ${cibfile} resource delete ${1}-cluster_ip-1"
+            logger "warning: pcs -f ${cibfile} resource delete ${1}-group"
         fi
 
         shift
@@ -677,7 +710,7 @@ deletenode_update_haconfig()
     local clean_name=${name//[-.]/_}
 
     ha_servers=$(echo ${HA_SERVERS} | sed -e "s/ /,/")
-    sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${clean_name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf
+    sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf
 }
@@ -831,6 +864,20 @@ create_ganesha_conf_file()
     fi
 }
 
+set_quorum_policy()
+{
+    local quorum_policy="stop"
+    local num_servers=${1}
+
+    if [ ${num_servers} -lt 3 ]; then
+        quorum_policy="ignore"
+    fi
+    pcs property set no-quorum-policy=${quorum_policy}
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs property set no-quorum-policy=${quorum_policy} failed"
+    fi
+}
+
 main()
 {
@@ -915,13 +962,16 @@ main()
         # newly added node to the file so that the resources specfic
         # to this node is correctly recreated in the future.
         clean_node=${node//[-.]/_}
-        echo "VIP_$clean_node=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
+        echo "VIP_${node}=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
 
         NEW_NODES="$HA_CLUSTER_NODES,${node}"
 
         sed -i s/HA_CLUSTER_NODES.*/"HA_CLUSTER_NODES=\"$NEW_NODES\""/ \
 $HA_CONFDIR/ganesha-ha.conf
 
         HA_SERVERS="${HA_SERVERS} ${node}"
+
+        HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} + 1)
+        set_quorum_policy ${HA_NUM_SERVERS}
         ;;
 
     delete | --delete)
@@ -945,6 +995,9 @@ main()
 $HA_CONFDIR/ganesha-ha.conf
         determine_service_manager
         manage_service "stop" ${node}
+
+        HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} - 1)
+        set_quorum_policy ${HA_NUM_SERVERS}
         ;;
 
     status | --status)
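
For anyone exercising this change on a test cluster, the commands below are a sketch
and not part of the patch: the node name "server3" is hypothetical, and the syntax
assumes the pcs 0.9-era CLI used throughout this script (newer pcs releases replace
"pcs resource show" with "pcs resource config").

    # After an add-node run, the VIP of each node should sit in a
    # three-member group ordered:
    #   <node>-nfs_block -> <node>-cluster_ip-1 -> <node>-nfs_unblock
    pcs resource show server3-group

    # set_quorum_policy keys off the server count: fewer than three
    # nodes gives no-quorum-policy=ignore, three or more gives stop.
    pcs property list | grep no-quorum-policy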