author | Kaushal M <kaushal@redhat.com> | 2015-01-06 18:23:41 +0530
---|---|---
committer | Krishnan Parthasarathi <kparthas@redhat.com> | 2015-03-03 23:50:22 -0800
commit | 673ba2659cebe22ee30c43f9fb080f330150f55e (patch) |
tree | f91e83be5cfae7a08febfda420b33e05ed0b964f /xlators/mgmt/glusterd/src/glusterd-handler.c |
parent | ef061b67f1b80c147c1959b896f7c9bdff01af96 (diff) |
glusterd: Replace libglusterfs lists with liburcu lists
This patch replaces usage of the libglusterfs lists data structures and
API in glusterd with the lists data structures and API from liburcu. The
liburcu data structures and APIs are a drop-in replacement for the
libglusterfs lists.
All usages have been changed to keep the code consistent and free from
confusion.
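
To illustrate the kind of one-to-one substitution this involves, here is a
minimal, self-contained sketch (not taken from the patch; the demo_peer
struct and its field names are invented, and only the cds_list_* names come
from liburcu's urcu/list.h):

```c
/* Illustrative sketch of the libglusterfs -> liburcu list mapping.
 * demo_peer, peer_list and peers are made-up names for this example. */
#include <stdio.h>
#include <urcu/list.h>   /* struct cds_list_head and the cds_list_* macros */

struct demo_peer {
        int                  id;
        struct cds_list_head peer_list;   /* was: struct list_head */
};

int
main (void)
{
        struct cds_list_head peers;
        struct demo_peer     p1 = { .id = 1 };
        struct demo_peer     p2 = { .id = 2 };
        struct demo_peer    *peer = NULL;

        CDS_INIT_LIST_HEAD (&peers);                  /* was: INIT_LIST_HEAD */
        cds_list_add_tail (&p1.peer_list, &peers);    /* was: list_add_tail */
        cds_list_add_tail (&p2.peer_list, &peers);

        if (!cds_list_empty (&peers))                 /* was: list_empty */
                cds_list_for_each_entry (peer, &peers, peer_list)
                        printf ("peer %d\n", peer->id);

        return 0;
}
```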
NOTE: glusterd_conf_t->xprt_list still uses the libglusterfs data
structures and API, as it holds rpc_transport_t objects, which are not a
part of glusterd and are not being changed in this patch.
This change was developed on the git branch at [1]. This commit is a
combination of the following commits on the development branch.
6dac576 Replace libglusterfs lists with liburcu lists
a51b5ab Fix compilation issues
d98a06f Fix merge issues
a5d918e Remove merge remnant
1cca113 More style cleanup
1917be3 Address review comments on 9624/1
8d10f13 Use cds_lists for glusterd_svc_t
524ad5d Add rculist header in glusterd-conn-helper.c
646f294 glusterd: add list_add_order API honouring rcu
[1]: https://github.com/kshlm/glusterfs/tree/urcu
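
For context on the list_add_order commit in the list above, an ordered insert
that stays safe for concurrent RCU readers might look roughly like the sketch
below; the function name, compare callback, and locking assumption are
hypothetical, and only cds_list_for_each() and cds_list_add_rcu() are real
liburcu API.

```c
#include <urcu/rculist.h>   /* cds_list_add_rcu() and the plain list macros */

/* Hypothetical ordered insert honouring RCU readers.  The caller is assumed
 * to hold the writer-side lock that serialises updates to the list. */
static void
demo_list_add_order (struct cds_list_head *newp, struct cds_list_head *head,
                     int (*compare) (struct cds_list_head *,
                                     struct cds_list_head *))
{
        struct cds_list_head *pos = NULL;

        /* Find the first entry that should come after 'newp'. */
        cds_list_for_each (pos, head) {
                if (compare (newp, pos) <= 0)
                        break;
        }

        /* cds_list_add_rcu() inserts after the given node and publishes the
         * new element with the barriers RCU readers require.  Inserting
         * after pos->prev places 'newp' before 'pos'; when the loop ran to
         * completion (or the list was empty), pos == head and the element
         * is appended at the tail. */
        cds_list_add_rcu (newp, pos->prev);
}
```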
Change-Id: Ic613c5b6e496a677b9d3de15fc042a0492109fb0
BUG: 1191030
Signed-off-by: Kaushal M <kaushal@redhat.com>
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/9624
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-by: Gaurav Kumar Garg <ggarg@redhat.com>
Reviewed-by: Anand Nekkunti <anekkunt@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handler.c | 45 |
1 file changed, 23 insertions, 22 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 8ba16b7a804..eaa05969656 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -465,7 +465,7 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
         }
 #endif
 
-        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+        cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                 char brick[1024] = {0,};
                 char brick_uuid[64] = {0,};
                 snprintf (key, 256, "volume%d.brick%d", count, i);
@@ -609,7 +609,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
 
 local_locking_done:
 
-        INIT_LIST_HEAD (&priv->xaction_peers);
+        CDS_INIT_LIST_HEAD (&priv->xaction_peers);
 
         npeers = gd_build_peers_list (&priv->peers, &priv->xaction_peers, op);
 
@@ -1162,8 +1162,8 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
                  * detached. It's not a problem if a volume contains none or all
                  * of its bricks on the peer being detached
                  */
-                list_for_each_entry_safe (volinfo, tmp, &priv->volumes,
-                                          vol_list) {
+                cds_list_for_each_entry_safe (volinfo, tmp, &priv->volumes,
+                                              vol_list) {
                         ret = glusterd_friend_contains_vol_bricks (volinfo,
                                                                    uuid);
                         if (ret == 1) {
@@ -1379,7 +1379,7 @@ __glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
          */
         ret = -1;
         // Do not allow peer reset if there are any volumes in the cluster
-        if (!list_empty (&priv->volumes)) {
+        if (!cds_list_empty (&priv->volumes)) {
                 snprintf (msg_str, sizeof (msg_str), "volumes are already "
                           "present in the cluster. Resetting uuid is not "
                           "allowed");
@@ -1388,7 +1388,7 @@
         }
 
         // Do not allow peer reset if trusted storage pool is already formed
-        if (!list_empty (&priv->peers)) {
+        if (!cds_list_empty (&priv->peers)) {
                 snprintf (msg_str, sizeof (msg_str),"trusted storage pool "
                           "has been already formed. Please detach this peer "
                           "from the pool and reset its uuid.");
@@ -1550,7 +1550,7 @@ __glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
         if (!dict)
                 goto out;
 
-        list_for_each_entry (volinfo, &priv->volumes, vol_list) {
+        cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
                 memset (key, 0, sizeof (key));
                 snprintf (key, sizeof (key), "volume%d", count);
                 ret = dict_set_str (dict, key, volinfo->volname);
@@ -2550,7 +2550,7 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
                 goto out;
         }
         peerinfo = glusterd_peerinfo_find (probe_req.uuid, remote_hostname);
-        if ((peerinfo == NULL) && (!list_empty (&conf->peers))) {
+        if ((peerinfo == NULL) && (!cds_list_empty (&conf->peers))) {
                 rsp.op_ret = -1;
                 rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
         } else if (peerinfo == NULL) {
@@ -3079,7 +3079,7 @@ glusterd_friend_add (const char *hoststr, int port,
          * invalid peer name). That would mean we're adding something that had
          * just been free, and we're likely to crash later.
          */
-        list_add_tail (&(*friend)->uuid_list, &conf->peers);
+        cds_list_add_tail (&(*friend)->uuid_list, &conf->peers);
 
         //restore needs to first create the list of peers, then create rpcs
         //to keep track of quorum in race-free manner. In restore for each peer
@@ -3132,7 +3132,7 @@ glusterd_friend_add_from_peerinfo (glusterd_peerinfo_t *friend,
          * invalid peer name). That would mean we're adding something that had
          * just been free, and we're likely to crash later.
          */
-        list_add_tail (&friend->uuid_list, &conf->peers);
+        cds_list_add_tail (&friend->uuid_list, &conf->peers);
 
         //restore needs to first create the list of peers, then create rpcs
         //to keep track of quorum in race-free manner. In restore for each peer
@@ -3590,8 +3590,8 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
                 gf_log ("", GF_LOG_WARNING, "Out of Memory");
                 goto out;
         }
-        if (!list_empty (&priv->peers)) {
-                list_for_each_entry (entry, &priv->peers, uuid_list) {
+        if (!cds_list_empty (&priv->peers)) {
+                cds_list_for_each_entry (entry, &priv->peers, uuid_list) {
                         count++;
                         ret = gd_add_peer_detail_to_dict (entry,
                                                           friends, count);
@@ -3665,13 +3665,13 @@ glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
                 goto out;
         }
 
-        if (list_empty (&priv->volumes)) {
+        if (cds_list_empty (&priv->volumes)) {
                 ret = 0;
                 goto respond;
         }
 
         if (flags == GF_CLI_GET_VOLUME_ALL) {
-                list_for_each_entry (entry, &priv->volumes, vol_list) {
+                cds_list_for_each_entry (entry, &priv->volumes, vol_list) {
                         ret = glusterd_add_volume_detail_to_dict (entry,
                                                                   volumes, count);
                         if (ret)
@@ -3686,17 +3686,17 @@ glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
 
                 if (ret) {
                         if (priv->volumes.next) {
-                                entry = list_entry (priv->volumes.next,
-                                                    typeof (*entry),
-                                                    vol_list);
+                                entry = cds_list_entry (priv->volumes.next,
+                                                        typeof (*entry),
+                                                        vol_list);
                         }
                 } else {
                         ret = glusterd_volinfo_find (volname, &entry);
                         if (ret)
                                 goto respond;
-                        entry = list_entry (entry->vol_list.next,
-                                            typeof (*entry),
-                                            vol_list);
+                        entry = cds_list_entry (entry->vol_list.next,
+                                                typeof (*entry),
+                                                vol_list);
                 }
 
                 if (&entry->vol_list == &priv->volumes) {
@@ -4508,8 +4508,9 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                             !uuid_compare (peerinfo->uuid, uuid))
                                 glusterd_unlock (peerinfo->uuid);
                 } else {
-                        list_for_each_entry (volinfo, &conf->volumes,
-                                             vol_list) {
+                        cds_list_for_each_entry (volinfo,
+                                                 &conf->volumes,
+                                                 vol_list) {
                                 ret = glusterd_mgmt_v3_unlock (volinfo->volname,
                                                                peerinfo->uuid,
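
For readers unfamiliar with the liburcu list API, here is a minimal sketch of
the safe-traversal-with-removal pattern seen in the deprobe hunk above; the
demo_vol type and the drop_matching_volumes helper are invented for
illustration, and only the cds_list_* calls are liburcu API.

```c
#include <stdlib.h>
#include <urcu/list.h>

/* Invented stand-in for a glusterd volinfo-style element. */
struct demo_vol {
        int                  id;
        struct cds_list_head vol_list;
};

static void
drop_matching_volumes (struct cds_list_head *volumes, int victim_id)
{
        struct demo_vol *vol = NULL;
        struct demo_vol *tmp = NULL;

        /* The _safe variant keeps a lookahead cursor (tmp) so the current
         * entry can be unlinked and freed without breaking the walk. */
        cds_list_for_each_entry_safe (vol, tmp, volumes, vol_list) {
                if (vol->id == victim_id) {
                        cds_list_del (&vol->vol_list);
                        free (vol);
                }
        }
}
```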