summaryrefslogtreecommitdiffstats
path: root/xlators/mgmt/glusterd/src/glusterd-handler.c
diff options
context:
space:
mode:
authorKaushal M <kaushal@redhat.com>2012-12-12 10:39:31 +0530
committerAnand Avati <avati@redhat.com>2012-12-13 14:11:45 -0800
commitef95b1ec0981e5f9859f5308e15ae33608bd6c29 (patch)
tree2ca8c1d2410a0894ff4c0dce2c290ca15c61a843 /xlators/mgmt/glusterd/src/glusterd-handler.c
parent524dd0ef4c76d6432a8ff793fdb9322b9265aade (diff)
glusterd: fix peer probe cli response on error
A peer probe with peers with differing op-versions or with unresolvable addresses would not reply to the cli with the error. This regression was caused by some changes introduced into glusterd_peer_rpc_notify(), which caused the cli reply path not to be executed. Change-Id: I0392b61e0a1dc22fa074529bdba0e357f938d013 BUG: 885591 Signed-off-by: Kaushal M <kaushal@redhat.com> Reviewed-on: http://review.gluster.org/4293 Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com> Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Anand Avati <avati@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handler.c42
1 file changed, 19 insertions, 23 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ff2c8e71c..cecf27197 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3199,14 +3199,17 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
quorum_action = _gf_true;
peerinfo->quorum_action = _gf_false;
}
- peerinfo->connected = 0;
+
+ // Remove peer if it is not a friend and connection/handshake
+ // fails, and notify cli. Happens only during probe.
+ if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT)
+ glusterd_friend_remove_notify (peerctx);
/*
local glusterd (thinks that it) is the owner of the cluster
lock and 'fails' the operation on the first disconnect from
a peer.
*/
-
if (peerinfo->connected) {
glusterd_get_lock_owner (&owner);
if (!uuid_compare (MY_UUID, owner)) {
@@ -3216,28 +3219,21 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
gf_log (this->name, GF_LOG_ERROR,
"Unable to enqueue cluster "
"unlock event");
- break;
- }
-
- peer_uuid = GF_CALLOC (1, sizeof (*peer_uuid),
- gf_common_mt_char);
- if (!peer_uuid) {
- ret = -1;
- break;
- }
-
- uuid_copy (*peer_uuid, peerinfo->uuid);
- ret = glusterd_op_sm_inject_event
- (GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP, peer_uuid);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR, "Unable"
- " to enque local lock flush event.");
-
- //Inject friend disconnected here
- if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
- glusterd_friend_remove_notify (peerctx);
+ } else {
+ peer_uuid = GF_CALLOC (1, sizeof (*peer_uuid),
+ gf_common_mt_char);
+ if (!peer_uuid) {
+ ret = -1;
+ break;
+ }
+
+ uuid_copy (*peer_uuid, peerinfo->uuid);
+ ret = glusterd_op_sm_inject_event
+ (GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP, peer_uuid);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "Unable"
+ " to enque local lock flush event.");
}
-
}
peerinfo->connected = 0;