author     Avra Sengupta <asengupt@redhat.com>    2014-02-11 02:22:32 +0000
committer  Vijay Bellur <vbellur@redhat.com>      2014-02-14 07:05:30 -0800
commit     53779e4458c17a3978675585e8099c97c8c2b3a2 (patch)
tree       a01ecbac5ddedc438008c57550a91ea481121b81 /xlators
parent     a78dfebb7343671b0a3a0af8b46951894a3cf7a4 (diff)
glusterd/Vol-Locks : Moving globals into glusterd priv and code refactoring
Moved the globals (the vol_lock and txn_opinfo dicts, and
global_txn_id) into the glusterd private structure (glusterd_conf_t).
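
For reference, the moved state now lives in glusterd_conf_t and is
reached through the xlator's private pointer. This sketch is condensed
from the diff below; the elided members (marked "...") are unrelated:

    /* glusterd.h: new glusterd_conf_t members replacing the old
     * file-scoped globals */
    typedef struct {
            ...
            dict_t  *vol_lock;            /* Dict for saving vol locks */
            dict_t  *glusterd_txn_opinfo; /* Dict for saving transaction
                                           * opinfos */
            uuid_t   global_txn_id;       /* To be used in heterogeneous
                                           * cluster with no transaction
                                           * ids */
            ...
    } glusterd_conf_t;

    /* Access pattern now repeated in the handlers and callbacks */
    xlator_t        *this = THIS;
    glusterd_conf_t *priv = this->private;

    GF_ASSERT (priv);
    txn_id = &priv->global_txn_id;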
Moved glusterd_op_send_cli_response() out of gd_unlock_op_phase(),
as gd_unlock_op_phase() and glusterd_clear_txn_opinfo() should only
be called if the txn id has been generated successfully, whereas the
CLI response should be sent irrespective of that.
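
In code terms, gd_unlock_op_phase() now takes int *op_ret and only
folds its own failure into it, while the caller sends the CLI response
on every exit path. A condensed view of the new exit path in
gd_sync_task_begin() (glusterd-syncop.c), abridged from the diff below:

    out:
            op_ret = ret;
            if (txn_id) {
                    /* Unlock and clear the opinfo only when a txn id
                     * was actually generated */
                    (void) gd_unlock_op_phase (conf, op, &op_ret, req,
                                               op_ctx, op_errstr, npeers,
                                               volname, is_acquired,
                                               *txn_id);
                    ret = glusterd_clear_txn_opinfo (txn_id);
            }

            /* The CLI response is sent irrespective of the txn id */
            glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx,
                                           op_errstr);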
Changed log levels from ERROR to WARNING for some volume lock
messages that are expected in normal operation and do not indicate
errors.
Added logs for better transparency of transaction ids.
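
The new debug logs tag each get/set/clear of transaction opinfo with
its id, and glusterd_generate_txn_id() now falls back to the fixed
global id on clusters still below op-version 4, so peers without
transaction-id support stay in step. Condensed from the
glusterd-op-sm.c hunks below:

    if (priv->op_version < GD_OP_VERSION_4)
            uuid_copy (**txn_id, priv->global_txn_id);
    else
            uuid_generate (**txn_id);
    ...
    gf_log ("", GF_LOG_DEBUG,
            "Successfully got opinfo for transaction ID : %s",
            uuid_utoa (*txn_id));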
Change-Id: Ifac9b23aa9f1648c9ae252cfd3ac50bb2ed46728
BUG: 1011470
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/6976
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'xlators')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c         32
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c           82
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.h            5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c          110
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.h            4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c    6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c         62
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c          85
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                  4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                  7
10 files changed, 295 insertions, 102 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 797141decd1..891c1a97da5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -56,7 +56,6 @@
 #endif
 
 extern glusterd_op_info_t opinfo;
-extern uuid_t global_txn_id;
 
 int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
                                 rpc_clnt_event_t event,
@@ -623,7 +622,6 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
                 gf_log (this->name, GF_LOG_ERROR,
                         "Failed to generate transaction id");
                 goto out;
-
         }
 
         /* Save the MY_UUID as the originator_uuid. This originator_uuid
@@ -684,7 +682,7 @@ local_locking_done:
         /* If no volname is given as a part of the command, locks will
          * not be held, hence sending stage event.
          */
-        if (volname)
+        if (volname || (priv->op_version < GD_OP_VERSION_4))
                 event_type = GD_OP_EVENT_START_LOCK;
         else {
                 txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
@@ -743,13 +741,18 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
         glusterd_op_t                   op = GD_OP_EVENT_LOCK;
         glusterd_peerinfo_t            *peerinfo = NULL;
         glusterd_op_info_t              txn_op_info = {{0},};
-        uuid_t                         *txn_id = &global_txn_id;
+        glusterd_conf_t                *priv = NULL;
+        uuid_t                         *txn_id = NULL;
         xlator_t                       *this = NULL;
 
         this = THIS;
         GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
         GF_ASSERT (req);
 
+        txn_id = &priv->global_txn_id;
+
         ret = xdr_to_generic (req->msg[0], &lock_req,
                               (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
         if (ret < 0) {
@@ -1057,14 +1060,19 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
         gd1_mgmt_stage_op_req           op_req = {{0},};
         glusterd_peerinfo_t            *peerinfo = NULL;
         xlator_t                       *this = NULL;
-        uuid_t                         *txn_id = &global_txn_id;
+        uuid_t                         *txn_id = NULL;
         glusterd_op_info_t              txn_op_info = {{0},};
         glusterd_op_sm_state_info_t     state = {0,};
+        glusterd_conf_t                *priv = NULL;
 
         this = THIS;
         GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
         GF_ASSERT (req);
 
+        txn_id = &priv->global_txn_id;
+
         ret = xdr_to_generic (req->msg[0], &op_req,
                               (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
         if (ret < 0) {
@@ -1141,12 +1149,17 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
         gd1_mgmt_commit_op_req          op_req = {{0},};
         glusterd_peerinfo_t            *peerinfo = NULL;
         xlator_t                       *this = NULL;
-        uuid_t                         *txn_id = &global_txn_id;
+        uuid_t                         *txn_id = NULL;
+        glusterd_conf_t                *priv = NULL;
 
         this = THIS;
         GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
         GF_ASSERT (req);
 
+        txn_id = &priv->global_txn_id;
+
         ret = xdr_to_generic (req->msg[0], &op_req,
                               (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
         if (ret < 0) {
@@ -2285,12 +2298,17 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
         glusterd_op_lock_ctx_t         *ctx = NULL;
         glusterd_peerinfo_t            *peerinfo = NULL;
         xlator_t                       *this = NULL;
-        uuid_t                         *txn_id = &global_txn_id;
+        uuid_t                         *txn_id = NULL;
+        glusterd_conf_t                *priv = NULL;
 
         this = THIS;
         GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
         GF_ASSERT (req);
 
+        txn_id = &priv->global_txn_id;
+
         ret = xdr_to_generic (req->msg[0], &unlock_req,
                               (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
         if (ret < 0) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index 68c6d74264b..9e8bbc21b81 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -26,17 +26,22 @@
 #include <signal.h>
 
-static dict_t *vol_lock;
-
 /* Initialize the global vol-lock list(dict) when
  * glusterd is spawned */
 int32_t
 glusterd_vol_lock_init ()
 {
-        int32_t ret = -1;
+        int32_t            ret = -1;
+        xlator_t          *this = NULL;
+        glusterd_conf_t   *priv = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
-        vol_lock = dict_new ();
-        if (!vol_lock)
+        priv->vol_lock = dict_new ();
+        if (!priv->vol_lock)
                 goto out;
 
         ret = 0;
@@ -49,16 +54,31 @@ out:
 void
 glusterd_vol_lock_fini ()
 {
-        if (vol_lock)
-                dict_unref (vol_lock);
+        xlator_t          *this = NULL;
+        glusterd_conf_t   *priv = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        if (priv->vol_lock)
+                dict_unref (priv->vol_lock);
 }
 
 int32_t
 glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid)
 {
-        int32_t        ret = -1;
-        vol_lock_obj  *lock_obj = NULL;
-        uuid_t         no_owner = {0,};
+        int32_t                   ret = -1;
+        glusterd_vol_lock_obj    *lock_obj = NULL;
+        glusterd_conf_t          *priv = NULL;
+        uuid_t                    no_owner = {0,};
+        xlator_t                 *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
         if (!volname || !uuid) {
                 gf_log ("", GF_LOG_ERROR, "volname or uuid is null.");
@@ -66,7 +86,7 @@ glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid)
                 goto out;
         }
 
-        ret = dict_get_bin (vol_lock, volname, (void **) &lock_obj);
+        ret = dict_get_bin (priv->vol_lock, volname, (void **) &lock_obj);
         if (!ret)
                 uuid_copy (*uuid, lock_obj->lock_owner);
         else
@@ -81,9 +101,16 @@ out:
 int32_t
 glusterd_volume_lock (char *volname, uuid_t uuid)
 {
-        int32_t        ret = -1;
-        vol_lock_obj  *lock_obj = NULL;
-        uuid_t         owner = {0};
+        int32_t                   ret = -1;
+        glusterd_vol_lock_obj    *lock_obj = NULL;
+        glusterd_conf_t          *priv = NULL;
+        uuid_t                    owner = {0};
+        xlator_t                 *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
         if (!volname) {
                 gf_log ("", GF_LOG_ERROR, "volname is null.");
@@ -93,21 +120,22 @@ glusterd_volume_lock (char *volname, uuid_t uuid)
 
         ret = glusterd_get_vol_lock_owner (volname, &owner);
         if (ret) {
-                gf_log ("", GF_LOG_DEBUG, "Unable to get volume lock owner");
+                gf_log ("", GF_LOG_WARNING,
+                        "Unable to get volume lock owner");
                 goto out;
         }
 
         /* If the lock has already been held for the given volume
          * we fail */
         if (!uuid_is_null (owner)) {
-                gf_log ("", GF_LOG_ERROR, "Unable to acquire lock. "
+                gf_log ("", GF_LOG_WARNING, "Unable to acquire lock. "
                         "Lock for %s held by %s", volname,
                         uuid_utoa (owner));
                 ret = -1;
                 goto out;
         }
 
-        lock_obj = GF_CALLOC (1, sizeof(vol_lock_obj),
+        lock_obj = GF_CALLOC (1, sizeof(glusterd_vol_lock_obj),
                               gf_common_mt_vol_lock_obj_t);
         if (!lock_obj) {
                 ret = -1;
@@ -116,7 +144,8 @@ glusterd_volume_lock (char *volname, uuid_t uuid)
 
         uuid_copy (lock_obj->lock_owner, uuid);
 
-        ret = dict_set_bin (vol_lock, volname, lock_obj, sizeof(vol_lock_obj));
+        ret = dict_set_bin (priv->vol_lock, volname, lock_obj,
+                            sizeof(glusterd_vol_lock_obj));
         if (ret) {
                 gf_log ("", GF_LOG_ERROR, "Unable to set lock owner "
                         "in volume lock");
@@ -137,8 +166,15 @@ out:
 int32_t
 glusterd_volume_unlock (char *volname, uuid_t uuid)
 {
-        int32_t   ret = -1;
-        uuid_t    owner = {0};
+        int32_t            ret = -1;
+        glusterd_conf_t   *priv = NULL;
+        uuid_t             owner = {0};
+        xlator_t          *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
         if (!volname) {
                 gf_log ("", GF_LOG_ERROR, "volname is null.");
@@ -151,21 +187,21 @@ glusterd_volume_unlock (char *volname, uuid_t uuid)
                 goto out;
 
         if (uuid_is_null (owner)) {
-                gf_log ("", GF_LOG_ERROR, "Lock for %s not held", volname);
+                gf_log ("", GF_LOG_WARNING, "Lock for %s not held", volname);
                 ret = -1;
                 goto out;
         }
 
         ret = uuid_compare (uuid, owner);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "Lock owner mismatch. "
+                gf_log (THIS->name, GF_LOG_WARNING, "Lock owner mismatch. "
                         "Lock for %s held by %s", volname,
                         uuid_utoa (owner));
                 goto out;
         }
 
         /* Removing the volume lock from the global list */
-        dict_del (vol_lock, volname);
+        dict_del (priv->vol_lock, volname);
 
         gf_log ("", GF_LOG_DEBUG, "Lock for %s successfully released",
                 volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index 2a8cc20ed25..6e3f56f9cf9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -15,10 +15,9 @@
 #include "config.h"
 #endif
 
-struct volume_lock_object_ {
+typedef struct glusterd_volume_lock_object_ {
         uuid_t lock_owner;
-};
-typedef struct volume_lock_object_ vol_lock_obj;
+} glusterd_vol_lock_obj;
 
 int32_t
 glusterd_vol_lock_init ();
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 83c91a52d1d..385f14e9e7a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -68,29 +68,27 @@ static struct list_head gd_op_sm_queue;
 pthread_mutex_t       gd_op_sm_lock;
 glusterd_op_info_t    opinfo = {{0},};
-uuid_t global_txn_id = {0}; /* To be used in
-                             * heterogeneous
-                             * cluster with no
-                             * transaction ids */
-
-static dict_t *txn_opinfo;
-
-struct glusterd_txn_opinfo_object_ {
-        glusterd_op_info_t    opinfo;
-};
-typedef struct glusterd_txn_opinfo_object_ glusterd_txn_opinfo_obj;
 
 int32_t
 glusterd_txn_opinfo_dict_init ()
 {
-        int32_t ret = -1;
+        int32_t            ret = -1;
+        xlator_t          *this = NULL;
+        glusterd_conf_t   *priv = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
-        txn_opinfo = dict_new ();
-        if (!txn_opinfo) {
+        priv->glusterd_txn_opinfo = dict_new ();
+        if (!priv->glusterd_txn_opinfo) {
                 ret = -1;
                 goto out;
         }
 
+        memset (priv->global_txn_id, '\0', sizeof(uuid_t));
+
         ret = 0;
 out:
         return ret;
@@ -99,8 +97,16 @@ out:
 void
 glusterd_txn_opinfo_dict_fini ()
 {
-        if (txn_opinfo)
-                dict_destroy (txn_opinfo);
+        xlator_t          *this = NULL;
+        glusterd_conf_t   *priv = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        if (priv->glusterd_txn_opinfo)
+                dict_unref (priv->glusterd_txn_opinfo);
 }
 
 void
@@ -118,7 +124,10 @@ glusterd_txn_opinfo_init (glusterd_op_info_t *opinfo,
         if (op)
                 opinfo->op = *op;
 
-        opinfo->op_ctx = dict_ref(op_ctx);
+        if (op_ctx)
+                opinfo->op_ctx = dict_ref(op_ctx);
+        else
+                opinfo->op_ctx = NULL;
 
         if (req)
                 opinfo->req = req;
@@ -130,14 +139,23 @@ int32_t
 glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id)
 {
         int32_t            ret = -1;
+        glusterd_conf_t   *priv = NULL;
+        xlator_t          *this = NULL;
 
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
         GF_ASSERT (dict);
 
         *txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
         if (!*txn_id)
                 goto out;
 
-        uuid_generate (**txn_id);
+        if (priv->op_version < GD_OP_VERSION_4)
+                uuid_copy (**txn_id, priv->global_txn_id);
+        else
+                uuid_generate (**txn_id);
 
         ret = dict_set_bin (dict, "transaction_id",
                             *txn_id, sizeof (uuid_t));
@@ -150,8 +168,10 @@ glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id)
         gf_log ("", GF_LOG_DEBUG,
                 "Transaction_id = %s", uuid_utoa (**txn_id));
out:
-        if (ret && *txn_id)
+        if (ret && *txn_id) {
                 GF_FREE (*txn_id);
+                *txn_id = NULL;
+        }
 
         return ret;
 }
@@ -161,6 +181,13 @@ glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
 {
         int32_t                    ret = -1;
         glusterd_txn_opinfo_obj   *opinfo_obj = NULL;
+        glusterd_conf_t           *priv = NULL;
+        xlator_t                  *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
         if (!txn_id || !opinfo) {
                 gf_log ("", GF_LOG_ERROR,
@@ -169,16 +196,23 @@ glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
                 goto out;
         }
 
-        ret = dict_get_bin(txn_opinfo, uuid_utoa (*txn_id),
+        ret = dict_get_bin(priv->glusterd_txn_opinfo,
+                           uuid_utoa (*txn_id),
                            (void **) &opinfo_obj);
         if (ret) {
                 gf_log ("", GF_LOG_ERROR,
-                        "Unable to get transaction opinfo");
+                        "Unable to get transaction opinfo "
+                        "for transaction ID : %s",
+                        uuid_utoa (*txn_id));
                 goto out;
         }
 
         (*opinfo) = opinfo_obj->opinfo;
 
+        gf_log ("", GF_LOG_DEBUG,
+                "Successfully got opinfo for transaction ID : %s",
+                uuid_utoa (*txn_id));
+
         ret = 0;
out:
         gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
@@ -190,6 +224,13 @@ glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
 {
         int32_t                    ret = -1;
         glusterd_txn_opinfo_obj   *opinfo_obj = NULL;
+        glusterd_conf_t           *priv = NULL;
+        xlator_t                  *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
         if (!txn_id) {
                 gf_log ("", GF_LOG_ERROR, "Empty transaction id received.");
@@ -197,7 +238,8 @@ glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
                 goto out;
         }
 
-        ret = dict_get_bin(txn_opinfo, uuid_utoa (*txn_id),
+        ret = dict_get_bin(priv->glusterd_txn_opinfo,
+                           uuid_utoa (*txn_id),
                            (void **) &opinfo_obj);
         if (ret) {
                 opinfo_obj = GF_CALLOC (1, sizeof(glusterd_txn_opinfo_obj),
@@ -207,7 +249,8 @@ glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
                         goto out;
                 }
 
-                ret = dict_set_bin(txn_opinfo, uuid_utoa (*txn_id), opinfo_obj,
+                ret = dict_set_bin(priv->glusterd_txn_opinfo,
+                                   uuid_utoa (*txn_id), opinfo_obj,
                                    sizeof(glusterd_txn_opinfo_obj));
                 if (ret) {
                         gf_log ("", GF_LOG_ERROR,
@@ -219,6 +262,9 @@ glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
 
         opinfo_obj->opinfo = (*opinfo);
 
+        gf_log ("", GF_LOG_DEBUG,
+                "Successfully set opinfo for transaction ID : %s",
+                uuid_utoa (*txn_id));
         ret = 0;
out:
         if (ret)
@@ -234,6 +280,13 @@ glusterd_clear_txn_opinfo (uuid_t *txn_id)
 {
         int32_t               ret = -1;
         glusterd_op_info_t    txn_op_info = {{0},};
+        glusterd_conf_t      *priv = NULL;
+        xlator_t             *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
         if (!txn_id) {
                 gf_log ("", GF_LOG_ERROR, "Empty transaction id received.");
@@ -247,9 +300,14 @@ glusterd_clear_txn_opinfo (uuid_t *txn_id)
                 goto out;
         }
 
-        dict_unref (txn_op_info.op_ctx);
+        if (txn_op_info.op_ctx)
+                dict_unref (txn_op_info.op_ctx);
 
-        dict_del(txn_opinfo, uuid_utoa (*txn_id));
+        dict_del(priv->glusterd_txn_opinfo, uuid_utoa (*txn_id));
+
+        gf_log ("", GF_LOG_DEBUG,
+                "Successfully cleared opinfo for transaction ID : %s",
+                uuid_utoa (*txn_id));
 
         ret = 0;
out:
@@ -4293,7 +4351,7 @@ glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
         }
 
         ret = dict_set_bin (rsp_dict, "transaction_id",
-                            txn_id, sizeof(uuid_t *));
+                            txn_id, sizeof(uuid_t));
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR,
                         "Failed to set transaction id.");
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 53d4e2ff4b1..4a73b08f43a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -172,6 +172,10 @@ typedef struct gsync_status_param {
         glusterd_volinfo_t *volinfo;
 }gsync_status_param_t;
 
+typedef struct glusterd_txn_opinfo_object_ {
+        glusterd_op_info_t    opinfo;
+} glusterd_txn_opinfo_obj;
+
 typedef enum cli_cmd_type_ {
         PER_REPLICA,
         ALL_REPLICA,
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index f26108f89c2..9685cb37461 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -1895,15 +1895,17 @@ glusterd_do_replace_brick (void *data)
         glusterd_brickinfo_t   *src_brickinfo = NULL;
         glusterd_brickinfo_t   *dst_brickinfo = NULL;
         glusterd_conf_t        *priv = NULL;
-        uuid_t                 *txn_id = &global_txn_id;
+        uuid_t                 *txn_id = NULL;
         int                     ret = 0;
 
         dict = data;
 
         GF_ASSERT (THIS);
-
         priv = THIS->private;
+        GF_ASSERT (priv);
+
+        txn_id = &priv->global_txn_id;
 
         if (priv->timer) {
                 gf_timer_call_cancel (THIS->ctx, priv->timer);
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 9af26cfab07..18f37c190fe 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -575,12 +575,17 @@ __glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
         glusterd_op_sm_event_type_t    event_type = GD_OP_EVENT_NONE;
         glusterd_peerinfo_t           *peerinfo = NULL;
         xlator_t                      *this = NULL;
-        uuid_t                        *txn_id = &global_txn_id;
+        uuid_t                        *txn_id = NULL;
+        glusterd_conf_t               *priv = NULL;
 
         this = THIS;
         GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
         GF_ASSERT (req);
 
+        txn_id = &priv->global_txn_id;
+
         if (-1 == req->rpc_status) {
                 rsp.op_ret   = -1;
                 rsp.op_errno = EINVAL;
@@ -811,12 +816,17 @@ __glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
         glusterd_op_sm_event_type_t    event_type = GD_OP_EVENT_NONE;
         glusterd_peerinfo_t           *peerinfo = NULL;
         xlator_t                      *this = NULL;
-        uuid_t                        *txn_id = &global_txn_id;
+        uuid_t                        *txn_id = NULL;
+        glusterd_conf_t               *priv = NULL;
 
         this = THIS;
         GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
         GF_ASSERT (req);
 
+        txn_id = &priv->global_txn_id;
+
         if (-1 == req->rpc_status) {
                 rsp.op_ret = -1;
                 rsp.op_errno = EINVAL;
@@ -1403,6 +1413,7 @@ glusterd_vol_lock (call_frame_t *frame, xlator_t *this,
         glusterd_conf_t        *priv = NULL;
         call_frame_t           *dummy_frame = NULL;
         dict_t                 *dict = NULL;
+        uuid_t                 *txn_id = NULL;
 
         if (!this)
                 goto out;
@@ -1429,9 +1440,23 @@ glusterd_vol_lock (call_frame_t *frame, xlator_t *this,
                 goto out;
         }
 
+        ret = dict_get_bin (dict, "transaction_id",
+                            (void **)&txn_id);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to get transaction id.");
+                goto out;
+        } else {
+                gf_log (this->name, GF_LOG_DEBUG,
+                        "Transaction_id = %s", uuid_utoa (*txn_id));
+                uuid_copy (req.txn_id, *txn_id);
+        }
+
         dummy_frame = create_frame (this, this->ctx->pool);
-        if (!dummy_frame)
+        if (!dummy_frame) {
+                ret = -1;
                 goto out;
+        }
 
         ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
                                        peerinfo->mgmt_v3,
@@ -1453,6 +1478,7 @@ glusterd_vol_unlock (call_frame_t *frame, xlator_t *this,
         glusterd_conf_t        *priv = NULL;
         call_frame_t           *dummy_frame = NULL;
         dict_t                 *dict = NULL;
+        uuid_t                 *txn_id = NULL;
 
         if (!this)
                 goto out;
@@ -1479,9 +1505,23 @@ glusterd_vol_unlock (call_frame_t *frame, xlator_t *this,
                 goto out;
         }
 
+        ret = dict_get_bin (dict, "transaction_id",
+                            (void **)&txn_id);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to get transaction id.");
+                goto out;
+        } else {
+                gf_log (this->name, GF_LOG_DEBUG,
+                        "Transaction_id = %s", uuid_utoa (*txn_id));
+                uuid_copy (req.txn_id, *txn_id);
+        }
+
         dummy_frame = create_frame (this, this->ctx->pool);
-        if (!dummy_frame)
+        if (!dummy_frame) {
+                ret = -1;
                 goto out;
+        }
 
         ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
                                        peerinfo->mgmt_v3,
@@ -1657,12 +1697,16 @@ __glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
         glusterd_req_ctx_t        *req_ctx = NULL;
         glusterd_pending_node_t   *node = NULL;
         xlator_t                  *this = NULL;
-        uuid_t                    *txn_id = &global_txn_id;
+        uuid_t                    *txn_id = NULL;
+        glusterd_conf_t           *priv = NULL;
 
         this = THIS;
         GF_ASSERT (this);
-
+        priv = this->private;
+        GF_ASSERT (priv);
         GF_ASSERT (req);
+
+        txn_id = &priv->global_txn_id;
 
         frame = myframe;
         req_ctx = frame->local;
@@ -1760,7 +1804,7 @@ glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
 
 int32_t
 glusterd_brick_op (call_frame_t *frame, xlator_t *this,
-                  void *data)
+                   void *data)
 {
 
         gd1_mgmt_brick_op_req           *req = NULL;
@@ -1773,7 +1817,7 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
         glusterd_req_ctx_t              *req_ctx = NULL;
         struct rpc_clnt                 *rpc = NULL;
         dict_t                          *op_ctx = NULL;
-        uuid_t                          *txn_id = &global_txn_id;
+        uuid_t                          *txn_id = NULL;
 
         if (!this) {
                 ret = -1;
@@ -1782,6 +1826,8 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
         priv = this->private;
         GF_ASSERT (priv);
 
+        txn_id = &priv->global_txn_id;
+
         req_ctx = data;
         GF_ASSERT (req_ctx);
         INIT_LIST_HEAD (&opinfo.pending_bricks);
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 5eb5e9f3899..578bce897df 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1313,7 +1313,7 @@ out:
 }
 
 int
-gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
+gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
                     rpcsvc_request_t *req, dict_t *op_ctx, char *op_errstr,
                     int npeers, char *volname, gf_boolean_t is_acquired,
                     uuid_t txn_id)
@@ -1336,8 +1336,10 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
         /* If the lock has not been held during this
          * transaction, do not send unlock requests */
-        if (!is_acquired)
+        if (!is_acquired) {
+                ret = 0;
                 goto out;
+        }
 
         this = THIS;
         synctask_barrier_init((&args));
@@ -1376,7 +1378,11 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
         }
 
out:
-        glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
+        /* If unlock failed, and op_ret was previously set
+         * priority is given to the op_ret. If op_ret was
+         * not set, and unlock failed, then set op_ret */
+        if (!*op_ret)
+                *op_ret = ret;
 
         if (is_acquired) {
                 /* Based on the op-version,
@@ -1397,6 +1403,9 @@ out:
                 }
         }
 
+        if (!*op_ret)
+                *op_ret = ret;
+
         return 0;
 }
 
@@ -1486,6 +1495,7 @@ void
 gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
 {
         int                         ret = -1;
+        int                         op_ret = -1;
         int                         npeers = 0;
         dict_t                     *req_dict = NULL;
         glusterd_conf_t            *conf = NULL;
@@ -1504,6 +1514,14 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         conf = this->private;
         GF_ASSERT (conf);
 
+        ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, &tmp_op);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to get volume "
+                        "operation");
+                goto out;
+        }
+        op = tmp_op;
+
         /* Generate a transaction-id for this operation and
          * save it in the dict */
         ret = glusterd_generate_txn_id (op_ctx, &txn_id);
@@ -1511,9 +1529,20 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                 gf_log (this->name, GF_LOG_ERROR,
                         "Failed to generate transaction id");
                 goto out;
-
         }
 
+        /* Save opinfo for this transaction with the transaction id */
+        glusterd_txn_opinfo_init (&txn_opinfo, NULL, &op, NULL, NULL);
+        ret = glusterd_set_txn_opinfo (txn_id, &txn_opinfo);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Unable to set transaction's opinfo");
+
+        gf_log (this->name, GF_LOG_DEBUG,
+                "Transaction ID : %s", uuid_utoa (*txn_id));
+
+        opinfo = txn_opinfo;
+
         /* Save the MY_UUID as the originator_uuid */
         ret = glusterd_set_originator_uuid (op_ctx);
         if (ret) {
@@ -1522,15 +1551,6 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                 goto out;
         }
 
-        ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, &tmp_op);
-        if (ret) {
-                gf_log (this->name, GF_LOG_ERROR, "Failed to get volume "
-                        "operation");
-                goto out;
-        }
-
-        op = tmp_op;
-
         /* Based on the op_version, acquire a cluster or volume lock */
         if (conf->op_version < GD_OP_VERSION_4) {
                 ret = glusterd_lock (MY_UUID);
@@ -1576,26 +1596,20 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
 
local_locking_done:
 
-        /* Save opinfo for this transaction with the transaction id */
-        glusterd_txn_opinfo_init (&txn_opinfo, NULL, &op, NULL, NULL);
-        ret = glusterd_set_txn_opinfo (txn_id, &txn_opinfo);
-        if (ret)
-                gf_log (this->name, GF_LOG_ERROR,
-                        "Unable to set transaction's opinfo");
-
-        opinfo = txn_opinfo;
-
         INIT_LIST_HEAD (&conf->xaction_peers);
 
         npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
 
         /* If no volname is given as a part of the command, locks will
          * not be held */
-        if (volname) {
+        if (volname || (conf->op_version < GD_OP_VERSION_4)) {
                 ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr, npeers,
                                         *txn_id);
-                if (ret)
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Locking Peers Failed.");
                         goto out;
+                }
         }
 
         ret = glusterd_op_build_payload (&req_dict, &op_errstr, op_ctx);
@@ -1623,14 +1637,23 @@ local_locking_done:
         ret = 0;
out:
-        (void) gd_unlock_op_phase (conf, op, ret, req, op_ctx, op_errstr,
-                                   npeers, volname, is_acquired, *txn_id);
+        op_ret = ret;
+        if (txn_id) {
+                (void) gd_unlock_op_phase (conf, op, &op_ret, req,
+                                           op_ctx, op_errstr,
+                                           npeers, volname,
+                                           is_acquired, *txn_id);
+
+                /* Clearing the transaction opinfo */
+                ret = glusterd_clear_txn_opinfo (txn_id);
+                if (ret)
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to clear transaction's "
+                                "opinfo for transaction ID : %s",
+                                uuid_utoa (*txn_id));
+        }
 
-        /* Clearing the transaction opinfo */
-        ret = glusterd_clear_txn_opinfo (txn_id);
-        if (ret)
-                gf_log (this->name, GF_LOG_ERROR,
-                        "Unable to clear transaction's opinfo");
+        glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
 
         if (volname)
                 GF_FREE (volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 831cb82b28d..a5f6945450e 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -1314,8 +1314,6 @@ init (xlator_t *this)
         glusterd_friend_sm_init ();
         glusterd_op_sm_init ();
         glusterd_opinfo_init ();
-        glusterd_vol_lock_init ();
-        glusterd_txn_opinfo_dict_init ();
         ret = glusterd_sm_tr_log_init (&conf->op_sm_log,
                                        glusterd_op_sm_state_name_get,
                                        glusterd_op_sm_event_name_get,
@@ -1344,6 +1342,8 @@ init (xlator_t *this)
         }
 
         this->private = conf;
+        glusterd_vol_lock_init ();
+        glusterd_txn_opinfo_dict_init ();
         (void) glusterd_nodesvc_set_online_status ("glustershd", _gf_false);
 
         GLUSTERD_GET_HOOKS_DIR (hooks_dir, GLUSTERD_HOOK_VER, conf);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index e8035c7c619..df53327cb2a 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -142,6 +142,13 @@ typedef struct {
         gf_timer_t                *timer;
         glusterd_sm_tr_log_t       op_sm_log;
         struct rpc_clnt_program   *gfs_mgmt;
+        dict_t                    *vol_lock; /* Dict for saving vol locks */
+        dict_t                    *glusterd_txn_opinfo; /* Dict for saving
+                                                         * transaction opinfos */
+        uuid_t                     global_txn_id; /* To be used in
+                                                   * heterogeneous
+                                                   * cluster with no
+                                                   * transaction ids */
         struct list_head           mount_specs;
         gf_boolean_t               valgrind;