author    | Krishnan Parthasarathi <kparthas@redhat.com> | 2013-04-22 12:27:07 +0530
committer | Vijay Bellur <vbellur@redhat.com>            | 2013-04-30 04:23:22 -0700
commit    | 3b1ecc6a7fd961c709e82862fd4760b223365863 (patch)
tree      | 95f9b3ab0e7bf0ae820b7137f3d0006511e75999 /xlators/mgmt/glusterd/src/glusterd-handler.c
parent    | f75be775a9b191eb74f6cb4c161d9af36f2fdc97 (diff)
glusterd: Removed 'proactive' failing of volume op
Volume operations were being failed 'proactively' on the first disconnect
of a peer that was participating in the transaction.
The reason behind having this kludgey code in the first place was to
'abort' an ongoing volume operation as soon as the first disconnect was
seen. But the rpc callbacks themselves are capable of injecting the
appropriate state machine events, which set things in motion for an
eventual abort of the transaction.
Change-Id: Iad7cb2bd076f22d89a793dfcd08c2d208b39c4be
BUG: 847214
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/4869
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
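
The commit message above argues that the RPC reply callbacks, rather than the peer
disconnect handler, should drive the op state machine toward an eventual abort. Below
is a minimal, self-contained C sketch of that pattern. The names here (op_event_t,
op_sm_inject_event, commit_op_cbk) and the one-slot event queue are illustrative
stand-ins for glusterd's glusterd_op_sm_inject_event()/GD_OP_EVENT_* machinery, not
the project's actual API.

/* Toy model: a failed RPC reply only enqueues an event; the state machine,
 * not the disconnect handler, later winds the transaction down.
 * All names are illustrative, not glusterd's real symbols. */
#include <stdio.h>

typedef enum {
        OP_EVENT_NONE = 0,
        OP_EVENT_RCVD_RJT,      /* a peer's reply failed or was rejected  */
        OP_EVENT_START_UNLOCK,  /* begin releasing the cluster-wide lock  */
} op_event_t;

/* A one-slot "queue": the state machine picks the event up later. */
static op_event_t pending_event = OP_EVENT_NONE;

static int
op_sm_inject_event (op_event_t event)
{
        pending_event = event;
        return 0;
}

/* Stand-in for an RPC reply callback: on error it only injects an event
 * and returns; it does not fail the transaction itself. */
static int
commit_op_cbk (int rpc_status)
{
        if (rpc_status != 0)
                return op_sm_inject_event (OP_EVENT_RCVD_RJT);
        return 0;
}

/* Stand-in for the op state machine loop: reacts to queued events and
 * performs the actual unlock/abort. */
static void
op_sm_run (void)
{
        if (pending_event == OP_EVENT_RCVD_RJT) {
                printf ("op sm: peer failed, starting unlock/abort\n");
                op_sm_inject_event (OP_EVENT_START_UNLOCK);
        }
        if (pending_event == OP_EVENT_START_UNLOCK)
                printf ("op sm: transaction aborted, lock released\n");
}

int
main (void)
{
        commit_op_cbk (-1);   /* a peer went down mid-transaction        */
        op_sm_run ();         /* the state machine handles the fallout   */
        return 0;
}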
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handler.c | 56
1 file changed, 3 insertions, 53 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index faba30221fd..e21b67e6d11 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3423,7 +3423,6 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
         int ret = 0;
         glusterd_peerinfo_t *peerinfo = NULL;
         glusterd_peerctx_t *peerctx = NULL;
-        uuid_t *peer_uuid = NULL;
         gf_boolean_t quorum_action = _gf_false;
 
         peerctx = mydata;
@@ -3459,64 +3458,15 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                         peerinfo->quorum_action = _gf_false;
                 }
 
-                // Remove peer if it is not a friend and connection/handshake
-                // fails, and notify cli. Happens only during probe.
+                /* Remove peer if it is not a friend and connection/handshake
+                 * fails, and notify cli. Happens only during probe.
+                 */
                 if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
                         glusterd_friend_remove_notify (peerctx);
                         goto out;
                 }
 
-                /*
-                  local glusterd (thinks that it) is the owner of the cluster
-                  lock and 'fails' the operation on the first disconnect from
-                  a peer.
-                */
-                if (peerinfo->connected) {
-                        /*TODO: The following is needed till all volume
-                         * operations are synctaskized.
-                         * */
-                        if (is_origin_glusterd ()) {
-                                switch (glusterd_op_get_op ()) {
-                                case GD_OP_START_VOLUME:
-                                case GD_OP_ADD_BRICK:
-                                case GD_OP_REMOVE_BRICK:
-                                case GD_OP_STATUS_VOLUME:
-                                        break;
-
-                                default:
-                                        ret = glusterd_op_sm_inject_event
-                                              (GD_OP_EVENT_START_UNLOCK, NULL);
-                                        if (ret)
-                                                gf_log (this->name,
-                                                        GF_LOG_ERROR,
-                                                        "Unable to enqueue "
-                                                        "cluster unlock event");
-
-                                        break;
-                                }
-
-                        } else {
-                                peer_uuid = GF_CALLOC (1, sizeof (*peer_uuid),
-                                                       gf_common_mt_char);
-
-                                if (peer_uuid) {
-                                        uuid_copy (*peer_uuid, peerinfo->uuid);
-                                        ret = glusterd_op_sm_inject_event
-                                              (GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP,
-                                               peer_uuid);
-                                        if (ret)
-                                                gf_log (this->name,
-                                                        GF_LOG_ERROR,
-                                                        "Unable to enqueue "
-                                                        "local lock flush "
-                                                        "event.");
-                                }
-                        }
-
-                }
-
                 peerinfo->connected = 0;
-                //default_notify (this, GF_EVENT_CHILD_DOWN, NULL);
                 break;
         }
         default: