path: root/xlators/mgmt/glusterd/src/glusterd-syncop.c
author    Sanju Rakonde <srakonde@redhat.com>    2018-11-28 16:13:58 +0530
committer Atin Mukherjee <amukherj@redhat.com>   2018-12-03 17:03:57 +0000
commit    2bb0e89e4bb113a93c6e786446a140cd99261af8 (patch)
tree      4e3ad012d934fb471b60bbd3b18fd61f4fc4c8cf /xlators/mgmt/glusterd/src/glusterd-syncop.c
parent    220722b426f4014abdca0c719b2ca4e3aefeecc4 (diff)
glusterd: perform rcu_read_lock/unlock() under cleanup_lock mutex
Problem: glusterd should not try to acquire locks on any resources when it has already received a SIGTERM and cleanup has started. Otherwise we might hit a segfault, since the thread going through the cleanup path will be freeing up the resources while some other thread might be trying to acquire locks on the freed resources.

Solution: perform rcu_read_lock/unlock() under the cleanup_lock mutex.

fixes: bz#1654270
Change-Id: I87a97cfe4f272f74f246d688660934638911ce54
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
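This file's diff only swaps the call sites; the macro definitions themselves land elsewhere in the series (presumably in glusterd's headers) and are not shown here. A minimal sketch of what the pair plausibly looks like, assuming the macros serialize entry to and exit from the RCU read side behind the process-wide ctx->cleanup_lock:

/* Sketch only -- not the verbatim definitions from this series.
 * THIS->ctx and cleanup_lock follow existing glusterfs conventions:
 * THIS resolves to the current xlator, and ctx->cleanup_lock is the
 * mutex the SIGTERM cleanup path holds while freeing resources. */
#define RCU_READ_LOCK                                                          \
    do {                                                                       \
        pthread_mutex_lock(&(THIS->ctx)->cleanup_lock);                        \
        rcu_read_lock();                                                       \
        pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);                      \
    } while (0)

#define RCU_READ_UNLOCK                                                        \
    do {                                                                       \
        pthread_mutex_lock(&(THIS->ctx)->cleanup_lock);                        \
        rcu_read_unlock();                                                     \
        pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);                      \
    } while (0)

The effect is that a thread which has begun cleanup (and therefore owns cleanup_lock) blocks any new rcu_read_lock() from being taken, so no reader can enter a read-side critical section against structures that are about to be freed.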
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-syncop.c')
-rw-r--r--    xlators/mgmt/glusterd/src/glusterd-syncop.c    40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 0bf03358ffd..bdb4b137d6b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -52,13 +52,13 @@ gd_collate_errors(struct syncargs *args, int op_ret, int op_errno,
args->op_ret = op_ret;
args->op_errno = op_errno;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(peerid, NULL);
if (peerinfo)
peer_str = gf_strdup(peerinfo->hostname);
else
peer_str = gf_strdup(uuid_utoa(uuid));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (op_errstr && strcmp(op_errstr, "")) {
len = snprintf(err_str, sizeof(err_str) - 1, "Error: %s",
@@ -560,7 +560,7 @@ _gd_syncop_mgmt_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_uuid_copy(args->uuid, rsp.uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(*peerid, NULL);
if (peerinfo) {
/* Set peer as locked, so we unlock only the locked peers */
@@ -573,7 +573,7 @@ _gd_syncop_mgmt_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
"ID %s",
uuid_utoa(*peerid));
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -661,7 +661,7 @@ _gd_syncop_mgmt_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_uuid_copy(args->uuid, rsp.uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(*peerid, NULL);
if (peerinfo) {
peerinfo->locked = _gf_false;
@@ -672,7 +672,7 @@ _gd_syncop_mgmt_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
"ID %s",
uuid_utoa(*peerid));
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -770,9 +770,9 @@ _gd_syncop_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
ret = -1;
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1072,9 +1072,9 @@ _gd_syncop_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == 0);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
ret = -1;
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1185,7 +1185,7 @@ gd_lock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1209,7 +1209,7 @@ gd_lock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
peer_uuid, txn_id);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1315,7 +1315,7 @@ stage_done:
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1334,7 +1334,7 @@ stage_done:
req_dict, op_ctx);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1441,7 +1441,7 @@ commit_done:
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1460,7 +1460,7 @@ commit_done:
req_dict, op_ctx);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1520,7 +1520,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt = 0;
if (cluster_lock) {
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before
@@ -1541,7 +1541,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
} else {
ret = dict_get_int32(op_ctx, "hold_global_locks", &global);
if (!ret && global)
@@ -1549,7 +1549,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
else
type = "vol";
if (volname || global) {
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were
@@ -1568,7 +1568,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
tmp_uuid, txn_id);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
}
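For the other half of the race described in the commit message, the teardown side is expected to hold the same mutex while it frees shared state. An illustrative sketch (the function name and teardown steps are hypothetical, not part of this patch):

/* Hypothetical illustration of the cleanup side. Once this thread
 * owns cleanup_lock, RCU_READ_LOCK above can no longer complete, so
 * no RPC callback can look up a peerinfo that is being freed. */
static void
glusterd_teardown_sketch(glusterfs_ctx_t *ctx)
{
    pthread_mutex_lock(&ctx->cleanup_lock);
    {
        /* free the peer list, dictionaries, and other shared state */
    }
    /* the process exits while still holding the lock, so late
     * callbacks park in RCU_READ_LOCK instead of touching freed
     * memory */
}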