From e0ef957c34e4f49afc486dc8f02c8b703206be40 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan
Date: Mon, 18 Apr 2016 21:34:32 +0530
Subject: glusterd-ganesha : copy ganesha export configuration files during
 reboot

glusterd creates the export conf file for ganesha using a hook script
during volume start, and via ganesha_manage_export() for the volume set
command. However, this routine is not invoked in the glusterd restart
scenario.

Consider the following case: in a three-node cluster, a volume gets
exported via ganesha while one of the nodes is offline (glusterd is not
running). When the node comes back online, that volume is not exported
on that node because of the above-mentioned issue.

Also, I have removed unused variables from glusterd_handle_ganesha_op().

For this patch to work, the pcs cluster should be running on that node.

Upstream reference
>Change-Id: I5b2312c2f3cef962b1f795b9f16c8f0a27f08ee5
>BUG: 1330097
>Signed-off-by: Jiffin Tony Thottan
>Reviewed-on: http://review.gluster.org/14063
>Smoke: Gluster Build System
>NetBSD-regression: NetBSD Build System
>CentOS-regression: Gluster Build System
>Reviewed-by: soumya k
>Reviewed-by: Atin Mukherjee
>(cherry picked from commit f71e2fa49af185779b9f43e146effd122d4e9da0)

Change-Id: I5b2312c2f3cef962b1f795b9f16c8f0a27f08ee5
BUG: 1336801
Signed-off-by: Jiffin Tony Thottan
Reviewed-on: http://review.gluster.org/14397
Smoke: Gluster Build System
Reviewed-by: Kaleb KEITHLEY
Tested-by: Kaleb KEITHLEY
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
---
 xlators/mgmt/glusterd/src/glusterd-utils.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 5c28ec942ca..313ae89f3e6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4060,6 +4060,9 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
         glusterd_volinfo_t *old_volinfo = NULL;
         glusterd_volinfo_t *new_volinfo = NULL;
         glusterd_svc_t *svc = NULL;
+        gf_boolean_t newexportvalue;
+        gf_boolean_t oldexportvalue;
+        char *value = NULL;
 
         GF_ASSERT (peer_data);
 
@@ -4080,6 +4083,8 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
 
         ret = glusterd_volinfo_find (new_volinfo->volname, &old_volinfo);
         if (0 == ret) {
+                oldexportvalue = glusterd_check_ganesha_export (old_volinfo);
+
                 /* Ref count the old_volinfo such that deleting it doesn't crash
                  * if its been already in use by other thread
                  */
@@ -4106,6 +4111,31 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
                 }
         }
 
+        ret = glusterd_volinfo_get (new_volinfo, "ganesha.enable", &value);
+        if (ret)
+                goto out;
+        ret = gf_string2boolean (value, &newexportvalue);
+        if (ret)
+                goto out;
+
+        /*
+         * If both the new and the old export values are off, there is no
+         * point in calling ganesha_manage_export().
+         */
+        if (!((newexportvalue == oldexportvalue) &&
+            newexportvalue == _gf_false)) {
+                ret = ganesha_manage_export (new_volinfo->volname, value,
+                                             NULL, _gf_true);
+                if (ret) {
+                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                GD_MSG_NFS_GNS_OP_HANDLE_FAIL,
+                                "Returning from ganesha_manage_export with"
+                                " ret: %d for volume %s ganesha.enable %s",
+                                ret, new_volinfo->volname,
+                                value);
+                        goto out;
+                }
+        }
         ret = glusterd_store_volinfo (new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
         if (ret) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,
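
Illustrative note (not part of the patch): the last hunk only re-runs
ganesha_manage_export() when the export state is not "off" on both sides of
the volinfo import. The standalone C sketch below models that decision under
simplified assumptions; parse_bool() and need_reexport() are hypothetical
helpers standing in for gf_string2boolean() and the inline check added to
glusterd_import_friend_volume(), and the "on"/"off" handling is a guess at
the accepted option values. It is not glusterd code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for gf_string2boolean(): parses an "on"/"off" style
 * option value into a bool.  Returns 0 on success, -1 on a bad value.
 */
static int
parse_bool (const char *value, bool *out)
{
        if (strcmp (value, "on") == 0 || strcmp (value, "true") == 0)
                *out = true;
        else if (strcmp (value, "off") == 0 || strcmp (value, "false") == 0)
                *out = false;
        else
                return -1;
        return 0;
}

/*
 * Returns true when the ganesha export should be refreshed after importing
 * a friend's volinfo, i.e. in every case except "the volume was not exported
 * before the import and is still not exported after it".
 */
static bool
need_reexport (bool old_exported, const char *new_value)
{
        bool new_exported = false;

        if (parse_bool (new_value, &new_exported) != 0)
                return false;           /* unparsable value: nothing to do */

        return !((old_exported == new_exported) && new_exported == false);
}

int
main (void)
{
        /* volume was exported elsewhere while this node's glusterd was down */
        printf ("%d\n", need_reexport (false, "on"));   /* prints 1 */
        /* export is off on both sides of the import: skip the call */
        printf ("%d\n", need_reexport (false, "off"));  /* prints 0 */
        return 0;
}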