author     Atin Mukherjee <amukherj@redhat.com>       2014-10-27 12:12:03 +0530
committer  Raghavendra Bhat <raghavendra@redhat.com>  2015-03-03 23:31:08 -0800
commit     b646678334f4fab78883ecc1b993ec0cb1b49aba (patch)
tree       206f29f1c5372732e7e2ed2116f8f35cc9c2c19b
parent     a1d9f01b28267fc333aebc49cb81ee69dc2c24f8 (diff)
glusterd : release cluster wide locks in op-sm during failures
glusterd's op-sm infrastructure has a loophole in handling error cases in the
locking/unlocking phases, which can leave stale locks behind and block any
further transaction from going through.

This patch still does not handle every possible unlocking error case, as the
framework has neither a retry mechanism nor a lock timeout. For example, if
unlocking fails on one of the peers, the cluster-wide lock is not released and
no further transaction can be made until the originator node, or the node
where unlocking failed, is restarted.

The following test cases were executed (with the help of gdb) after applying
this patch:

* RPC times out in lock cbk
* Decoding of RPC response in lock cbk fails
* RPC response is received from unknown peer in lock cbk
* Setting peerinfo in dictionary fails while sending lock request for the
  first peer in the list
* Setting peerinfo in dictionary fails while sending lock request for other
  peers
* Lock RPC could not be sent for peers

For all of the above test cases the success criterion is that no stale locks
are left behind.

Patch link: http://review.gluster.org/9012

Change-Id: Ia1550341c31005c7850ee1b2697161c9ca04b01a
BUG: 1179136
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/9012
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.org/9393
Reviewed-by: Raghavendra Bhat <raghavendra@redhat.com>
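Editor's note: the core of the fix is visible in the glusterd-op-sm.c hunks
below: a failure while sending lock requests now aborts the lock phase and
injects GD_OP_EVENT_RCVD_RJT, and each peer records a locked flag so the
unlock phase only touches peers that actually hold the lock. The standalone
C program below is only a simplified model of that control-flow change;
peer_t, send_lock and send_unlock are made-up stand-ins, not glusterd APIs.

#include <stdio.h>
#include <stdbool.h>

typedef struct {
        const char *hostname;
        bool        locked;   /* mirrors the peerinfo->locked flag added by the patch */
} peer_t;

/* Stand-in RPC helpers: the lock request to the second peer fails. */
static int send_lock (peer_t *p)   { return (p == NULL) ? -1 : 0; }
static int send_unlock (peer_t *p) { (void) p; return 0; }

int
main (void)
{
        peer_t peers[] = { { "peer1", false }, { "peer2", false } };
        const int npeers = (int) (sizeof (peers) / sizeof (peers[0]));
        int ret = 0;

        /* Lock phase: bail out on the first failure instead of 'continue',
         * which is the change made in glusterd_op_ac_send_lock(). */
        for (int i = 0; i < npeers; i++) {
                ret = send_lock ((i == 1) ? NULL : &peers[i]);
                if (ret) {
                        fprintf (stderr, "locking failed for %s\n",
                                 peers[i].hostname);
                        break;
                }
                peers[i].locked = true;   /* mark the peer as locked */
        }

        /* On failure the patch injects GD_OP_EVENT_RCVD_RJT so the state
         * machine runs the unlock phase; the unlock phase now skips peers
         * that never got the lock, so no stale lock is left behind. */
        if (ret) {
                for (int i = 0; i < npeers; i++) {
                        if (!peers[i].locked)
                                continue;
                        if (send_unlock (&peers[i]) == 0)
                                peers[i].locked = false;
                }
        }

        for (int i = 0; i < npeers; i++)
                printf ("%s locked=%d\n", peers[i].hostname, peers[i].locked);
        return ret ? 1 : 0;
}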
-rw-r--r--  cli/src/cli-rpc-ops.c                           20
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-messages.h   11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c      67
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.h       2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c   172
5 files changed, 198 insertions, 74 deletions
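Editor's note: besides the error-path changes, the glusterd-rpc-ops.c hunks
below change glusterd_mgmt_v3_lock_peers()/glusterd_mgmt_v3_unlock_peers() to
carry the transaction id to their callbacks through frame->cookie instead of
using a throwaway dummy frame. The standalone C program below only illustrates
that hand-off pattern with made-up types (txn_uuid_t, frame_t); it is not the
GlusterFS call-frame API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned char txn_uuid_t[16];       /* stand-in for uuid_t */
typedef struct { void *cookie; } frame_t;   /* stand-in for call_frame_t */

static int
submit_lock_request (frame_t *frame, const txn_uuid_t txn_id)
{
        txn_uuid_t *copy = calloc (1, sizeof (*copy));
        if (!copy)
                return -1;
        memcpy (copy, txn_id, sizeof (*copy));
        frame->cookie = copy;               /* the callback owns this copy */
        /* ... the real code then hands the frame to the RPC layer ... */
        return 0;
}

static void
lock_cbk (frame_t *frame)
{
        txn_uuid_t *txn_id = frame->cookie;
        frame->cookie = NULL;
        /* ... inject GD_OP_EVENT_RCVD_ACC/RJT for this transaction ... */
        printf ("callback handled txn id starting with byte %u\n",
                (unsigned) (*txn_id)[0]);
        free (txn_id);                      /* release the heap copy */
}

int
main (void)
{
        frame_t frame = { NULL };
        txn_uuid_t txn = { 42 };

        if (submit_lock_request (&frame, txn) == 0)
                lock_cbk (&frame);
        return 0;
}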
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index f194a76efb4..1af1ca52af1 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1508,14 +1508,18 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
if (rsp.op_ret && strcmp (rsp.op_errstr, "")) {
snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
} else {
- if (!rsp.op_ret) {
+ if (!rsp.op_ret) {
+ /* append errstr in the cli msg for successful
+ * case since unlock failures can be highlighted
+ * event though rebalance command was successful
+ */
snprintf (msg, sizeof (msg),
"Initiated rebalance on volume %s."
"\nExecute \"gluster volume rebalance"
" <volume-name> status\" to check"
- " status.\nID: %s", volname,
- task_id_str);
- } else {
+ " status.\nID: %s\n%s", volname,
+ task_id_str, rsp.op_errstr);
+ } else {
snprintf (msg, sizeof (msg),
"Starting rebalance on volume %s has "
"been unsuccessful.", volname);
@@ -1535,13 +1539,17 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
volname);
goto done;
} else {
+ /* append errstr in the cli msg for successful case
+ * since unlock failures can be highlighted event though
+ * rebalance command was successful */
snprintf (msg, sizeof (msg),
"rebalance process may be in the middle of a "
"file migration.\nThe process will be fully "
"stopped once the migration of the file is "
"complete.\nPlease check rebalance process "
"for completion before doing any further "
- "brick related tasks on the volume.");
+ "brick related tasks on the volume.\n%s",
+ rsp.op_errstr);
}
}
if (cmd == GF_DEFRAG_CMD_STATUS) {
@@ -1554,6 +1562,8 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Failed to get the status of "
"rebalance process");
goto done;
+ } else {
+ snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
}
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index 604743ef5e7..b4f8585097a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -45,7 +45,7 @@
*/
#define GLUSTERD_COMP_BASE GLFS_MSGID_GLUSTERD
-#define GLFS_NUM_MESSAGES 19
+#define GLFS_NUM_MESSAGES 20
#define GLFS_MSGID_END (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
/* Messaged with message IDs */
#define glfs_msg_start_x GLFS_COMP_BASE, "Invalid: Start of messages"
@@ -206,6 +206,15 @@
* failure to set default options
*/
#define GD_MSG_FAIL_DEFAULT_OPT_SET (GLUSTERD_COMP_BASE + 19)
+
+/*!
+ * @messageid 106020
+ * @diagnosis Failed to release cluster wide lock for one of the peer
+ * @recommendedaction Restart the glusterd service on the node where the command
+ * was issued
+ */
+#define GD_MSG_CLUSTER_UNLOCK_FAILED (GLUSTERD_COMP_BASE + 20)
+
/*------------*/
#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 45fcebffea1..a7a612c8152 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2834,6 +2834,20 @@ glusterd_op_ac_none (glusterd_op_sm_event_t *event, void *ctx)
}
static int
+glusterd_op_sm_locking_failed (uuid_t *txn_id)
+{
+ int ret = -1;
+
+ opinfo.op_ret = -1;
+ opinfo.op_errstr = gf_strdup ("locking failed for one of the peer.");
+
+ /* Inject a reject event such that unlocking gets triggered right away*/
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, txn_id, NULL);
+
+ return ret;
+}
+
+static int
glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
{
int ret = 0;
@@ -2870,8 +2884,10 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
"peer %s",
gd_op_list[opinfo.op],
peerinfo->hostname);
- continue;
+ goto out;
}
+ /* Mark the peer as locked*/
+ peerinfo->locked = _gf_true;
pending_count++;
}
} else {
@@ -2899,8 +2915,10 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
gd_op_list[opinfo.op],
peerinfo->hostname);
dict_unref (dict);
- continue;
+ goto out;
}
+ /* Mark the peer as locked*/
+ peerinfo->locked = _gf_true;
pending_count++;
}
}
@@ -2911,6 +2929,9 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
out:
+ if (ret)
+ ret = glusterd_op_sm_locking_failed (&event->txn_id);
+
gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
return ret;
}
@@ -2933,12 +2954,12 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
GF_ASSERT (peerinfo);
- if (!peerinfo->connected || !peerinfo->mgmt)
+ if (!peerinfo->connected || !peerinfo->mgmt ||
+ !peerinfo->locked)
continue;
if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
-
/* Based on the op_version,
* release the cluster or mgmt_v3 lock */
if (priv->op_version < GD_OP_VERSION_3_6_0) {
@@ -2947,15 +2968,19 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = proc->fn (NULL, this, peerinfo);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING,
- "Failed to send unlock request "
- "for operation 'Volume %s' to "
- "peer %s",
+ opinfo.op_errstr = gf_strdup
+ ("Unlocking failed for one of "
+ "the peer.");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Unlocking failed for operation"
+ " volume %s on peer %s",
gd_op_list[opinfo.op],
peerinfo->hostname);
continue;
}
pending_count++;
+ peerinfo->locked = _gf_false;
}
} else {
dict = glusterd_op_get_ctx ();
@@ -2967,24 +2992,35 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to set peerinfo");
+ opinfo.op_errstr = gf_strdup
+ ("Unlocking failed for one of the "
+ "peer.");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Unlocking failed for operation"
+ " volume %s on peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
dict_unref (dict);
- goto out;
+ continue;
}
ret = proc->fn (NULL, this, dict);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING,
- "Failed to send volume unlock "
- "request for operation "
- "'Volume %s' to peer %s",
+ opinfo.op_errstr = gf_strdup
+ ("Unlocking failed for one of the "
+ "peer.");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Unlocking failed for operation"
+ " volume %s on peer %s",
gd_op_list[opinfo.op],
peerinfo->hostname);
dict_unref (dict);
continue;
}
pending_count++;
+ peerinfo->locked = _gf_false;
}
}
}
@@ -2993,7 +3029,6 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
if (!opinfo.pending_count)
ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
-out:
gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 229ee469598..88fe9ef4c04 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -311,4 +311,6 @@ glusterd_clear_txn_opinfo (uuid_t *txn_id);
int32_t
glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id);
+void
+glusterd_set_opinfo (char *errstr, int32_t op_errno, int32_t op_ret);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 19b66ac06d8..ec2d850094a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -26,6 +26,7 @@
#include "protocol-common.h"
#include "glusterd-utils.h"
#include "common-utils.h"
+#include "glusterd-messages.h"
#include <sys/uio.h>
@@ -656,6 +657,7 @@ __glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
xlator_t *this = NULL;
uuid_t *txn_id = NULL;
glusterd_conf_t *priv = NULL;
+ char *err_str = NULL;
this = THIS;
GF_ASSERT (this);
@@ -666,21 +668,26 @@ __glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
txn_id = &priv->global_txn_id;
if (-1 == req->rpc_status) {
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
+ gf_log (this->name, GF_LOG_ERROR, "Lock response is not "
+ "received from one of the peer");
+ err_str = "Lock response is not received from one of the peer";
+ glusterd_set_opinfo (err_str, ENETRESET, -1);
+ event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
- "response received from peer");
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode "
+ "cluster lock response received from peer");
+ err_str = "Failed to decode cluster lock response received from"
+ " peer";
+ glusterd_set_opinfo (err_str, EINVAL, -1);
+ event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
-out:
op_ret = rsp.op_ret;
gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
@@ -689,9 +696,12 @@ out:
peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
if (peerinfo == NULL) {
- ret = -1;
- gf_log (this->name, GF_LOG_CRITICAL, "Lock response received "
- "from unknown peer: %s", uuid_utoa (rsp.uuid));
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "cluster lock response received from unknown peer: %s."
+ "Ignoring response", uuid_utoa (rsp.uuid));
+ err_str = "cluster lock response received from unknown peer";
+ goto out;
+
}
if (op_ret) {
@@ -704,6 +714,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
+out:
ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
@@ -723,9 +734,17 @@ glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
__glusterd_cluster_lock_cbk);
}
+void
+glusterd_set_opinfo (char *errstr, int32_t op_errno, int32_t op_ret)
+{
+ opinfo.op_errstr = gf_strdup (errstr);
+ opinfo.op_errno = op_errno;
+ opinfo.op_ret = op_ret;
+}
+
static int32_t
glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
gd1_mgmt_v3_lock_rsp rsp = {{0},};
int ret = -1;
@@ -733,26 +752,36 @@ glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ call_frame_t *frame = NULL;
uuid_t *txn_id = NULL;
+ char *err_str = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (req);
+ frame = myframe;
+ txn_id = frame->cookie;
+ frame->cookie = NULL;
+
if (-1 == req->rpc_status) {
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
+ gf_log (this->name, GF_LOG_ERROR, "Lock response is not "
+ "received from one of the peer");
+ err_str = "Lock response is not received from one of the peer";
+ glusterd_set_opinfo (err_str, ENETRESET, -1);
+ event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to decode mgmt_v3 lock "
- "response received from peer");
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode "
+ "mgmt_v3 lock response received from peer");
+ err_str = "Failed to decode mgmt_v3 lock response received from"
+ " peer";
+ glusterd_set_opinfo (err_str, EINVAL, -1);
+ event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
@@ -766,7 +795,6 @@ glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
if (peerinfo == NULL) {
- ret = -1;
gf_log (this->name, GF_LOG_CRITICAL,
"mgmt_v3 lock response received "
"from unknown peer: %s. Ignoring response",
@@ -784,15 +812,15 @@ glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
event_type = GD_OP_EVENT_RCVD_ACC;
}
+out:
ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
-
if (!ret) {
glusterd_friend_sm ();
glusterd_op_sm ();
}
-out:
- GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
+ GF_FREE (frame->cookie);
+ GLUSTERD_STACK_DESTROY (frame);
return ret;
}
@@ -814,26 +842,39 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ call_frame_t *frame = NULL;
uuid_t *txn_id = NULL;
+ char *err_str = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (req);
+ frame = myframe;
+ txn_id = frame->cookie;
+ frame->cookie = NULL;
+
if (-1 == req->rpc_status) {
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
+ err_str = "Unlock response not received from one of the peer.";
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "UnLock response is not received from one of the peer");
+ glusterd_set_opinfo (err_str, 0, 0);
+ event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to decode mgmt_v3 unlock "
- "response received from peer");
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Failed to decode mgmt_v3 unlock response received from"
+ "peer");
+ err_str = "Failed to decode mgmt_v3 unlock response received "
+ "from peer";
+ glusterd_set_opinfo (err_str, 0, 0);
+ event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
@@ -848,8 +889,8 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
if (peerinfo == NULL) {
- ret = -1;
- gf_log (this->name, GF_LOG_CRITICAL,
+ gf_msg (this->name, GF_LOG_CRITICAL, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
"mgmt_v3 unlock response received "
"from unknown peer: %s. Ignoring response",
uuid_utoa (rsp.uuid));
@@ -866,6 +907,7 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
event_type = GD_OP_EVENT_RCVD_ACC;
}
+out:
ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
@@ -873,8 +915,8 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
glusterd_op_sm ();
}
-out:
- GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
+ GF_FREE (frame->cookie);
+ GLUSTERD_STACK_DESTROY (frame);
return ret;
}
@@ -898,6 +940,7 @@ __glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
xlator_t *this = NULL;
uuid_t *txn_id = NULL;
glusterd_conf_t *priv = NULL;
+ char *err_str = NULL;
this = THIS;
GF_ASSERT (this);
@@ -908,21 +951,28 @@ __glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
txn_id = &priv->global_txn_id;
if (-1 == req->rpc_status) {
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
+ err_str = "Unlock response not received from one of the peer.";
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "UnLock response is not received from one of the peer");
+ glusterd_set_opinfo (err_str, 0, 0);
+ event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
- "response received from peer");
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Failed to decode unlock response received from peer");
+ err_str = "Failed to decode cluster unlock response received "
+ "from peer";
+ glusterd_set_opinfo (err_str, 0, 0);
+ event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
-out:
op_ret = rsp.op_ret;
gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
@@ -931,8 +981,11 @@ out:
peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
if (peerinfo == NULL) {
- gf_log (this->name, GF_LOG_CRITICAL, "Unlock response received "
- "from unknown peer %s", uuid_utoa (rsp.uuid));
+ gf_msg (this->name, GF_LOG_CRITICAL, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Unlock response received from unknown peer %s",
+ uuid_utoa (rsp.uuid));
+ goto out;
}
if (op_ret) {
@@ -942,6 +995,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
+out:
ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
@@ -1516,7 +1570,6 @@ glusterd_mgmt_v3_lock_peers (call_frame_t *frame, xlator_t *this,
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
- call_frame_t *dummy_frame = NULL;
dict_t *dict = NULL;
uuid_t *txn_id = NULL;
@@ -1558,13 +1611,21 @@ glusterd_mgmt_v3_lock_peers (call_frame_t *frame, xlator_t *this,
uuid_copy (req.txn_id, *txn_id);
}
- dummy_frame = create_frame (this, this->ctx->pool);
- if (!dummy_frame) {
+ if (!frame)
+ frame = create_frame (this, this->ctx->pool);
+
+ if (!frame) {
ret = -1;
goto out;
}
+ frame->cookie = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!frame->cookie) {
+ ret = -1;
+ goto out;
+ }
+ uuid_copy (frame->cookie, req.txn_id);
- ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
+ ret = glusterd_submit_request (peerinfo->rpc, &req, frame,
peerinfo->mgmt_v3,
GLUSTERD_MGMT_V3_LOCK, NULL,
this, glusterd_mgmt_v3_lock_peers_cbk,
@@ -1582,7 +1643,6 @@ glusterd_mgmt_v3_unlock_peers (call_frame_t *frame, xlator_t *this,
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
- call_frame_t *dummy_frame = NULL;
dict_t *dict = NULL;
uuid_t *txn_id = NULL;
@@ -1624,13 +1684,21 @@ glusterd_mgmt_v3_unlock_peers (call_frame_t *frame, xlator_t *this,
uuid_copy (req.txn_id, *txn_id);
}
- dummy_frame = create_frame (this, this->ctx->pool);
- if (!dummy_frame) {
+ if (!frame)
+ frame = create_frame (this, this->ctx->pool);
+
+ if (!frame) {
ret = -1;
goto out;
}
+ frame->cookie = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!frame->cookie) {
+ ret = -1;
+ goto out;
+ }
+ uuid_copy (frame->cookie, req.txn_id);
- ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
+ ret = glusterd_submit_request (peerinfo->rpc, &req, frame,
peerinfo->mgmt_v3,
GLUSTERD_MGMT_V3_UNLOCK, NULL,
this, glusterd_mgmt_v3_unlock_peers_cbk,