From 78b0b59285b03af65c10a1fd976836bc5f53c167 Mon Sep 17 00:00:00 2001
From: Avra Sengupta
Date: Sun, 15 Sep 2013 17:55:31 +0530
Subject: glusterd: Adding transaction checks for cluster unlock.

While a gluster command holding the lock is in execution,
any other gluster command which tries to run will fail to
acquire the lock. As a result, command#2 will follow the
cleanup code flow, which also includes unlocking the held
locks. As both the commands are run from the same node,
command#2 will end up releasing the locks held by command#1
even before command#1 reaches completion.

Now we call the unlock routine in the code path only if the
cluster has been locked during the same transaction.

Signed-off-by: Avra Sengupta

Change-Id: I7b7aa4d4c7e565e982b75b8ed1e550fca528c834
BUG: 1008172
Signed-off-by: Avra Sengupta
Reviewed-on: http://review.gluster.org/5937
Tested-by: Gluster Build System
Reviewed-by: Krishnan Parthasarathi
Reviewed-by: Anand Avati
---
 xlators/mgmt/glusterd/src/glusterd-syncop.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index a694cae84d1..a854e053090 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1018,7 +1018,7 @@ out:
 int
 gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
                     rpcsvc_request_t *req, dict_t *op_ctx, char *op_errstr,
-                    int npeers)
+                    int npeers, gf_boolean_t is_locked)
 {
         glusterd_peerinfo_t *peerinfo = NULL;
         glusterd_peerinfo_t *tmp = NULL;
@@ -1033,6 +1033,11 @@ gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
                 goto out;
         }
 
+        /* If the lock has not been held during this
+         * transaction, do not send unlock requests */
+        if (!is_locked)
+                goto out;
+
         this = THIS;
         synctask_barrier_init((&args));
         peer_cnt = 0;
@@ -1056,7 +1061,8 @@ gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
 out:
         glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
         glusterd_op_clear_op (op);
-        glusterd_unlock (MY_UUID);
+        if (is_locked)
+                glusterd_unlock (MY_UUID);
 
         return 0;
 }
@@ -1153,6 +1159,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         int32_t                     tmp_op = 0;
         char                        *op_errstr = NULL;
         xlator_t                    *this = NULL;
+        gf_boolean_t                is_locked = _gf_false;
 
         this = THIS;
         GF_ASSERT (this);
@@ -1175,6 +1182,8 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                 goto out;
         }
 
+        is_locked = _gf_true;
+
         /* storing op globally to access in synctask code paths
          * This is still acceptable, as we are performing this under
          * the 'cluster' lock*/
@@ -1212,7 +1221,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         ret = 0;
 out:
         (void) gd_unlock_op_phase (&conf->xaction_peers, op, ret, req,
-                                   op_ctx, op_errstr, npeers);
+                                   op_ctx, op_errstr, npeers, is_locked);
 
         if (req_dict)
                 dict_unref (req_dict);
--
cgit
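
Below is a minimal standalone C sketch of the guard this patch introduces. It is not glusterd code; try_lock, unlock, run_command and cluster_locked are illustrative stand-ins. Each transaction records in a local is_locked flag whether it actually acquired the cluster lock, and the cleanup path releases the lock only when that flag is set, so a command that failed to acquire the lock cannot release one held by another in-flight command.

/*
 * Minimal standalone sketch (not glusterd code) of the pattern this patch
 * introduces: only the transaction that actually acquired the cluster lock
 * may release it.  All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

static bool cluster_locked = false;     /* stands in for the cluster lock */

static int
try_lock (void)
{
        if (cluster_locked)
                return -1;              /* a second command fails to acquire */
        cluster_locked = true;
        return 0;
}

static void
unlock (void)
{
        cluster_locked = false;
}

static int
run_command (const char *name)
{
        bool is_locked = false;         /* mirrors gf_boolean_t is_locked */
        int  ret       = -1;

        if (try_lock () != 0) {
                printf ("%s: lock busy, aborting\n", name);
                goto out;               /* cleanup path, lock NOT held */
        }
        is_locked = true;

        printf ("%s: doing work under the cluster lock\n", name);
        ret = 0;

out:
        /* The guard keeps a failed command from releasing a lock that
         * another in-flight command still holds. */
        if (is_locked)
                unlock ();
        return ret;
}

int
main (void)
{
        try_lock ();                    /* command#1 holds the lock ...         */
        run_command ("command#2");      /* ... command#2 fails, must not unlock */
        printf ("lock still held by command#1: %s\n",
                cluster_locked ? "yes" : "no");
        unlock ();                      /* command#1 completes and unlocks */
        return 0;
}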