From 44428343529bec83fab0e3519396471fc8f651b4 Mon Sep 17 00:00:00 2001
From: Avra Sengupta
Date: Sat, 1 Mar 2014 09:04:16 +0530
Subject: glusterd/mgmt_v3 locks: Generalization of the volume wide locks.

Renaming volume locks as mgmt_v3 locks

Change-Id: I2a324e2b8e1772d7b165fe96ce8ba5b902c2ed9a
Signed-off-by: Avra Sengupta
Reviewed-on: http://review.gluster.org/7187
Reviewed-by: Rajesh Joseph
Tested-by: Rajesh Joseph
---
 xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c | 114 +++++++++++-----------
 1 file changed, 59 insertions(+), 55 deletions(-)

(limited to 'xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c')

diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index f4ecc486f..72181e963 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -27,10 +27,10 @@ glusterd_mgmt_v3_null (rpcsvc_request_t *req)
 }

 static int
-glusterd_mgmt_v3_vol_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
 {

-        gd1_mgmt_v3_vol_lock_rsp     rsp = {{0},};
+        gd1_mgmt_v3_lock_rsp         rsp = {{0},};
         int                          ret = -1;

         GF_ASSERT (req);
@@ -42,18 +42,18 @@ glusterd_mgmt_v3_vol_lock_send_resp (rpcsvc_request_t *req, int32_t status)
         glusterd_get_uuid (&rsp.uuid);

         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+                                     (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);

         gf_log (THIS->name, GF_LOG_DEBUG,
-                "Responded to volume lock, ret: %d", ret);
+                "Responded to mgmt_v3 lock, ret: %d", ret);

         return ret;
 }

 static int
-glusterd_syctasked_volume_lock (rpcsvc_request_t *req,
-                                gd1_mgmt_v3_vol_lock_req *lock_req,
-                                glusterd_op_lock_ctx_t *ctx)
+glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
+                                  gd1_mgmt_v3_lock_req *lock_req,
+                                  glusterd_op_lock_ctx_t *ctx)
 {
         int32_t                         ret      = -1;
         int32_t                         volcount = -1;
@@ -74,31 +74,31 @@ glusterd_syctasked_volume_lock (rpcsvc_request_t *req,
                                 "Failed to get volname");
                         goto out;
                 }
-                ret = glusterd_volume_lock (volname, ctx->uuid);
+                ret = glusterd_mgmt_v3_lock (volname, ctx->uuid);
                 if (ret)
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to acquire local lock for %s", volname);
         } else {
-                /* Trying to acquire volume locks on multiple volumes */
-                ret = glusterd_multiple_volumes_lock (ctx->dict, ctx->uuid);
+                /* Trying to acquire multiple mgmt_v3 locks */
+                ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid);
                 if (ret)
                         gf_log ("", GF_LOG_ERROR,
-                                "Failed to acquire volume locks for %s",
+                                "Failed to acquire mgmt_v3 locks for %s",
                                 uuid_utoa (ctx->uuid));
         }

 out:
-        ret = glusterd_mgmt_v3_vol_lock_send_resp (req, ret);
+        ret = glusterd_mgmt_v3_lock_send_resp (req, ret);

         gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
         return ret;
 }

 static int
-glusterd_op_state_machine_volume_lock (rpcsvc_request_t *req,
-                                       gd1_mgmt_v3_vol_lock_req *lock_req,
-                                       glusterd_op_lock_ctx_t *ctx)
+glusterd_op_state_machine_mgmt_v3_lock (rpcsvc_request_t *req,
+                                        gd1_mgmt_v3_lock_req *lock_req,
+                                        glusterd_op_lock_ctx_t *ctx)
 {
         int32_t                         ret      = -1;
         xlator_t                       *this     = NULL;

@@ -135,9 +135,9 @@ out:
 }

 static int
-glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_lock_fn (rpcsvc_request_t *req)
 {
-        gd1_mgmt_v3_vol_lock_req        lock_req    = {{0},};
+        gd1_mgmt_v3_lock_req            lock_req    = {{0},};
         int32_t                         ret         = -1;
         glusterd_peerinfo_t            *peerinfo    = NULL;
         glusterd_op_lock_ctx_t         *ctx         = NULL;
@@ -149,7 +149,7 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
         GF_ASSERT (req);

         ret = xdr_to_generic (req->msg[0], &lock_req,
-                              (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_req);
+                              (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
         if (ret < 0) {
                 gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
                         "request received from peer");
@@ -157,7 +157,7 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
                 goto out;
         }

-        gf_log (this->name, GF_LOG_DEBUG, "Received volume lock req "
+        gf_log (this->name, GF_LOG_DEBUG, "Received mgmt_v3 lock req "
                 "from uuid: %s", uuid_utoa (lock_req.uuid));

         if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
@@ -191,11 +191,13 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
                 goto out;
         }

-        is_synctasked = dict_get_str_boolean (ctx->dict, "is_synctasked", _gf_false);
+        is_synctasked = dict_get_str_boolean (ctx->dict,
+                                              "is_synctasked", _gf_false);
         if (is_synctasked)
-                ret = glusterd_syctasked_volume_lock (req, &lock_req, ctx);
+                ret = glusterd_synctasked_mgmt_v3_lock (req, &lock_req, ctx);
         else
-                ret = glusterd_op_state_machine_volume_lock (req, &lock_req, ctx);
+                ret = glusterd_op_state_machine_mgmt_v3_lock (req, &lock_req,
+                                                              ctx);

 out:

@@ -707,10 +709,10 @@ out:
 }

 static int
-glusterd_mgmt_v3_vol_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
 {

-        gd1_mgmt_v3_vol_unlock_rsp     rsp = {{0},};
+        gd1_mgmt_v3_unlock_rsp         rsp = {{0},};
         int                            ret = -1;

         GF_ASSERT (req);
@@ -722,18 +724,18 @@ glusterd_mgmt_v3_vol_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
         glusterd_get_uuid (&rsp.uuid);

         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+                                     (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);

         gf_log (THIS->name, GF_LOG_DEBUG,
-                "Responded to volume unlock, ret: %d", ret);
+                "Responded to mgmt_v3 unlock, ret: %d", ret);

         return ret;
 }

 static int
-glusterd_syctasked_volume_unlock (rpcsvc_request_t *req,
-                                  gd1_mgmt_v3_vol_unlock_req *unlock_req,
-                                  glusterd_op_lock_ctx_t *ctx)
+glusterd_synctasked_mgmt_v3_unlock (rpcsvc_request_t *req,
+                                    gd1_mgmt_v3_unlock_req *unlock_req,
+                                    glusterd_op_lock_ctx_t *ctx)
 {
         int32_t                         ret      = -1;
         int32_t                         volcount = -1;
@@ -753,21 +755,21 @@ glusterd_syctasked_volume_unlock (rpcsvc_request_t *req,
                                 "Failed to get volname");
                         goto out;
                 }
-                ret = glusterd_volume_unlock (volname, ctx->uuid);
+                ret = glusterd_mgmt_v3_unlock (volname, ctx->uuid);
                 if (ret)
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to release lock for %s", volname);
         } else {
-                /* Trying to release volume locks on multiple volumes */
-                ret = glusterd_multiple_volumes_unlock (ctx->dict, ctx->uuid);
+                /* Trying to release multiple mgmt_v3 locks */
+                ret = glusterd_multiple_mgmt_v3_unlock (ctx->dict, ctx->uuid);
                 if (ret)
                         gf_log ("", GF_LOG_ERROR,
-                                "Failed to release volume locks for %s",
+                                "Failed to release mgmt_v3 locks for %s",
                                 uuid_utoa(ctx->uuid));
         }

 out:
-        ret = glusterd_mgmt_v3_vol_unlock_send_resp (req, ret);
+        ret = glusterd_mgmt_v3_unlock_send_resp (req, ret);

         gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
         return ret;
@@ -775,9 +777,9 @@ out:


 static int
-glusterd_op_state_machine_volume_unlock (rpcsvc_request_t *req,
-                                         gd1_mgmt_v3_vol_unlock_req *lock_req,
-                                         glusterd_op_lock_ctx_t *ctx)
+glusterd_op_state_machine_mgmt_v3_unlock (rpcsvc_request_t *req,
+                                          gd1_mgmt_v3_unlock_req *lock_req,
+                                          glusterd_op_lock_ctx_t *ctx)
 {
         int32_t                         ret      = -1;
         xlator_t                       *this     = NULL;
@@ -800,9 +802,9 @@ glusterd_op_state_machine_volume_unlock (rpcsvc_request_t *req,
 }

 static int
-glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_unlock_fn (rpcsvc_request_t *req)
 {
-        gd1_mgmt_v3_vol_unlock_req      lock_req    = {{0},};
+        gd1_mgmt_v3_unlock_req          lock_req    = {{0},};
         int32_t                         ret         = -1;
         glusterd_op_lock_ctx_t         *ctx         = NULL;
         glusterd_peerinfo_t            *peerinfo    = NULL;
@@ -814,7 +816,7 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
         GF_ASSERT (req);

         ret = xdr_to_generic (req->msg[0], &lock_req,
-                              (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_req);
+                              (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
         if (ret < 0) {
                 gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
                         "request received from peer");
@@ -822,7 +824,7 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
                 goto out;
         }

-        gf_log (this->name, GF_LOG_DEBUG, "Received volume unlock req "
+        gf_log (this->name, GF_LOG_DEBUG, "Received mgmt_v3 unlock req "
                 "from uuid: %s", uuid_utoa (lock_req.uuid));

         if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
@@ -856,11 +858,13 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
                 goto out;
         }

-        is_synctasked = dict_get_str_boolean (ctx->dict, "is_synctasked", _gf_false);
+        is_synctasked = dict_get_str_boolean (ctx->dict,
+                                              "is_synctasked", _gf_false);
         if (is_synctasked)
-                ret = glusterd_syctasked_volume_unlock (req, &lock_req, ctx);
+                ret = glusterd_synctasked_mgmt_v3_unlock (req, &lock_req, ctx);
         else
-                ret = glusterd_op_state_machine_volume_unlock (req, &lock_req, ctx);
+                ret = glusterd_op_state_machine_mgmt_v3_unlock (req, &lock_req,
+                                                                ctx);

 out:

@@ -876,10 +880,10 @@ out:
 }

 int
-glusterd_handle_volume_lock (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_lock (rpcsvc_request_t *req)
 {
         return glusterd_big_locked_handler (req,
-                                            glusterd_handle_volume_lock_fn);
+                                            glusterd_handle_mgmt_v3_lock_fn);
 }

 static int
@@ -911,20 +915,20 @@ glusterd_handle_post_validate (rpcsvc_request_t *req)
 }

 int
-glusterd_handle_volume_unlock (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_unlock (rpcsvc_request_t *req)
 {
         return glusterd_big_locked_handler (req,
-                                            glusterd_handle_volume_unlock_fn);
+                                            glusterd_handle_mgmt_v3_unlock_fn);
 }

 rpcsvc_actor_t gd_svc_mgmt_v3_actors[] = {
-        [GLUSTERD_MGMT_V3_NULL]          = { "NULL",       GLUSTERD_MGMT_V3_NULL,          glusterd_mgmt_v3_null,          NULL, 0, DRC_NA},
-        [GLUSTERD_MGMT_V3_VOLUME_LOCK]   = { "VOL_LOCK",   GLUSTERD_MGMT_V3_VOLUME_LOCK,   glusterd_handle_volume_lock,    NULL, 0, DRC_NA},
-        [GLUSTERD_MGMT_V3_PRE_VALIDATE]  = { "PRE_VAL",    GLUSTERD_MGMT_V3_PRE_VALIDATE,  glusterd_handle_pre_validate,   NULL, 0, DRC_NA},
-        [GLUSTERD_MGMT_V3_BRICK_OP]      = { "BRCK_OP",    GLUSTERD_MGMT_V3_BRICK_OP,      glusterd_handle_brick_op,       NULL, 0, DRC_NA},
-        [GLUSTERD_MGMT_V3_COMMIT]        = { "COMMIT",     GLUSTERD_MGMT_V3_COMMIT,        glusterd_handle_commit,         NULL, 0, DRC_NA},
-        [GLUSTERD_MGMT_V3_POST_VALIDATE] = { "POST_VAL",   GLUSTERD_MGMT_V3_POST_VALIDATE, glusterd_handle_post_validate,  NULL, 0, DRC_NA},
-        [GLUSTERD_MGMT_V3_VOLUME_UNLOCK] = { "VOL_UNLOCK", GLUSTERD_MGMT_V3_VOLUME_UNLOCK, glusterd_handle_volume_unlock,  NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_NULL]          = { "NULL",           GLUSTERD_MGMT_V3_NULL,          glusterd_mgmt_v3_null,          NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_LOCK]          = { "MGMT_V3_LOCK",   GLUSTERD_MGMT_V3_LOCK,          glusterd_handle_mgmt_v3_lock,   NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_PRE_VALIDATE]  = { "PRE_VAL",        GLUSTERD_MGMT_V3_PRE_VALIDATE,  glusterd_handle_pre_validate,   NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_BRICK_OP]      = { "BRCK_OP",        GLUSTERD_MGMT_V3_BRICK_OP,      glusterd_handle_brick_op,       NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_COMMIT]        = { "COMMIT",         GLUSTERD_MGMT_V3_COMMIT,        glusterd_handle_commit,         NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_POST_VALIDATE] = { "POST_VAL",       GLUSTERD_MGMT_V3_POST_VALIDATE, glusterd_handle_post_validate,  NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_UNLOCK]        = { "MGMT_V3_UNLOCK", GLUSTERD_MGMT_V3_UNLOCK,        glusterd_handle_mgmt_v3_unlock, NULL, 0, DRC_NA},
 };

 struct rpcsvc_program gd_svc_mgmt_v3_prog = {
--
cgit