| author | Krishnan Parthasarathi <kparthas@redhat.com> | 2012-10-23 08:52:55 +0530 |
|---|---|---|
| committer | Anand Avati <avati@redhat.com> | 2013-02-03 11:51:03 -0800 |
| commit | e2e12006a2156da24c1374fc5b544447d2914d52 (patch) | |
| tree | 02d2c71bae2ccd4439e06e8892eba170a699a49a /xlators | |
| parent | 454c6c0fde1f0788c4a1a7506c434a9b7d822e85 (diff) | |
glusterd: Removed start-unlock event injection in 'synctask' codepath
Change-Id: I87e02c95d0b650dab7f9ee86c96b2e09ada50109
BUG: 862834
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/4118
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Amar Tumballi <amarts@redhat.com>
Reviewed-by: Anand Avati <avati@redhat.com>
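
The diff below gates the start-unlock injection on the op recorded by the synctask path: for ops that have already been synctask-ized (GD_OP_START_VOLUME, GD_OP_ADD_BRICK, GD_OP_REMOVE_BRICK), the disconnect handler no longer injects GD_OP_EVENT_START_UNLOCK. The accessors glusterd_op_set_op()/glusterd_op_get_op()/glusterd_op_clear_op() are called by the patch but not defined in it, so the standalone sketch below is only a minimal model of that pattern; the `_model` names and enum values are illustrative assumptions, not glusterd's actual implementation.

```c
/* Minimal model of a global "current op" store plus the disconnect-time
 * decision the patch makes. All _model names are hypothetical stand-ins. */
#include <stdio.h>

typedef enum {
        GD_OP_NONE = 0,
        GD_OP_START_VOLUME,
        GD_OP_ADD_BRICK,
        GD_OP_REMOVE_BRICK,
} op_model_t;

/* Safe without extra locking only because, per the patch's comment,
 * the store happens while the cluster lock is held. */
static op_model_t current_op = GD_OP_NONE;

static void
op_set_op_model (op_model_t op)
{
        current_op = op;
}

static op_model_t
op_get_op_model (void)
{
        return current_op;
}

static void
op_clear_op_model (void)
{
        current_op = GD_OP_NONE;
}

int
main (void)
{
        op_set_op_model (GD_OP_ADD_BRICK);

        /* Mirrors the switch added to glusterd_peer_rpc_notify(): a
         * synctask-ized op in flight means no START_UNLOCK injection. */
        switch (op_get_op_model ()) {
        case GD_OP_START_VOLUME:
        case GD_OP_ADD_BRICK:
        case GD_OP_REMOVE_BRICK:
                printf ("synctask-ized op in flight: skip START_UNLOCK\n");
                break;
        default:
                printf ("inject GD_OP_EVENT_START_UNLOCK\n");
                break;
        }

        op_clear_op_model ();
        return 0;
}
```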
Diffstat (limited to 'xlators')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handler.c | 62 |
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-syncop.c | 6 |
2 files changed, 46 insertions(+), 22 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ec717ac2ebf..1979f7e2aa4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3183,7 +3183,6 @@ glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
                 new_event->peerinfo = peerinfo;
                 ret = glusterd_friend_sm_inject_event (new_event);
-                glusterd_friend_sm ();
 
         } else {
                 gf_log ("glusterd", GF_LOG_ERROR,
@@ -3205,7 +3204,6 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
         int                   ret = 0;
         glusterd_peerinfo_t  *peerinfo = NULL;
         glusterd_peerctx_t   *peerctx = NULL;
-        uuid_t                owner = {0,};
         uuid_t               *peer_uuid = NULL;
         gf_boolean_t          quorum_action = _gf_false;
@@ -3244,8 +3242,10 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                 // Remove peer if it is not a friend and connection/handshake
                 // fails, and notify cli. Happens only during probe.
-                if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT)
+                if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
                         glusterd_friend_remove_notify (peerctx);
+                        goto out;
+                }
 
                 /* local glusterd (thinks that it) is the owner of the cluster
@@ -3253,29 +3253,46 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
            a peer.
         */
         if (peerinfo->connected) {
-                glusterd_get_lock_owner (&owner);
-                if (!uuid_compare (MY_UUID, owner)) {
-                        ret = glusterd_op_sm_inject_event
-                              (GD_OP_EVENT_START_UNLOCK, NULL);
-                        if (ret)
-                                gf_log (this->name, GF_LOG_ERROR,
-                                        "Unable to enqueue cluster "
-                                        "unlock event");
-                } else {
-                        peer_uuid = GF_CALLOC (1, sizeof (*peer_uuid),
-                                               gf_common_mt_char);
-                        if (!peer_uuid) {
-                                ret = -1;
+                /*TODO: The following is needed till all volume
+                 * operations are synctaskized.
+                 * */
+                if (is_origin_glusterd ()) {
+                        switch (glusterd_op_get_op ()) {
+                        case GD_OP_START_VOLUME:
+                        case GD_OP_ADD_BRICK:
+                        case GD_OP_REMOVE_BRICK:
+                                break;
+
+                        default:
+                                ret = glusterd_op_sm_inject_event
+                                      (GD_OP_EVENT_START_UNLOCK, NULL);
+                                if (ret)
+                                        gf_log (this->name,
+                                                GF_LOG_ERROR,
+                                                "Unable to enqueue "
+                                                "cluster unlock event");
+                                break;
                         }
-                        uuid_copy (*peer_uuid, peerinfo->uuid);
-                        ret = glusterd_op_sm_inject_event
-                              (GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP, peer_uuid);
-                        if (ret)
-                                gf_log (this->name, GF_LOG_ERROR, "Unable"
-                                        " to enque local lock flush event.");
+                } else {
+                        peer_uuid = GF_CALLOC (1, sizeof (*peer_uuid),
+                                               gf_common_mt_char);
+
+                        if (peer_uuid) {
+                                uuid_copy (*peer_uuid, peerinfo->uuid);
+                                ret = glusterd_op_sm_inject_event
+                                      (GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP,
+                                       peer_uuid);
+                                if (ret)
+                                        gf_log (this->name,
+                                                GF_LOG_ERROR,
+                                                "Unable to enqueue "
+                                                "local lock flush "
+                                                "event.");
+                        }
                 }
+
         }
 
         peerinfo->connected = 0;
@@ -3289,6 +3306,7 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                 break;
         }
 
+out:
         glusterd_friend_sm ();
         glusterd_op_sm ();
         if (quorum_action)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 4a8961cf429..2b1c88c3cc3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -517,6 +517,11 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         /* successful lock in local node */
         local_locked = _gf_true;
 
+        /* storing op globally to access in synctask code paths
+         * This is still acceptable, as we are performing this under
+         * the 'cluster' lock*/
+
+        glusterd_op_set_op (op);
         INIT_LIST_HEAD (&conf->xaction_peers);
         list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
                 if (!peerinfo->connected)
@@ -628,6 +633,7 @@ out:
         /* Local node should be the one to be locked first,
            unlocked last to prevent races */
+        glusterd_op_clear_op (op);
         glusterd_unlock (MY_UUID);
 }
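
The glusterd-syncop.c hunks bracket the transaction: the op is published to the global store only inside the cluster-lock critical section and cleared again before the local unlock. A minimal standalone sketch of that ordering, using a pthread mutex as a hypothetical stand-in for glusterd's glusterd_lock()/glusterd_unlock() (which take a peer UUID and more context); everything here is illustrative, not the patch's actual code:

```c
/* Sketch of the set-under-lock / clear-before-unlock bracketing that
 * gd_sync_task_begin() adopts in this patch. Stand-in names only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cluster_lock = PTHREAD_MUTEX_INITIALIZER;
static int             current_op   = 0;   /* 0 == no op in flight */

static int
gd_sync_task_begin_model (int op)
{
        pthread_mutex_lock (&cluster_lock);     /* glusterd_lock (MY_UUID) */

        /* Publish the op while the lock is held; the patch's comment
         * argues this is what makes the global store acceptable. */
        current_op = op;

        printf ("op %d staged and committed across peers\n", current_op);

        current_op = 0;                         /* glusterd_op_clear_op () */
        pthread_mutex_unlock (&cluster_lock);   /* glusterd_unlock (MY_UUID) */
        return 0;
}

int
main (void)
{
        return gd_sync_task_begin_model (1);
}
```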