diff options
author | Kaushal M <kaushal@redhat.com> | 2015-01-08 19:24:59 +0530 |
---|---|---|
committer | Krishnan Parthasarathi <kparthas@redhat.com> | 2015-03-16 02:19:14 -0700 |
commit | c7785f78420c94220954eef538ed4698713ebcdb (patch) | |
tree | b10ad0468f21835121262463f517cad58614d49a /xlators/mgmt/glusterd/src/glusterd-peer-utils.c | |
parent | 7d8be3613f7384f5118f26e194fe7c64ea69d11c (diff) |
glusterd: Protect the peer list and peerinfos with RCU.
The peer list and the peerinfo objects are now protected using RCU.
Design patterns described in Paul McKenney's RCU dissertation [1]
(sections 5 and 6) have been used to convert existing non-RCU protected
code to RCU protected code.
Currently, we are only targeting guaranteeing the existence of the
peerinfo objects, i.e., we are only looking to protect deletes, not all
updaters. We chose this, as protecting all updates is a much more
complex task.
The steps used to accomplish this are,
1. Remove all long lived direct references to peerinfo objects (apart
from the peerinfo list). This includes references in glusterd_peerctx_t
(RPC), glusterd_friend_sm_event_t (friend state machine) and others.
This way no one has a reference to deleted peerinfo object.
2. Replace the direct references with indirect references, i.e., use
peer uuid and peer hostname as indirect references to the peerinfo
object. Any reader or updater now uses the indirect references to get to
the actual peerinfo object, using glusterd_peerinfo_find. Cases where a
peerinfo cannot be found are handled gracefully.
3. The readers get and use the peerinfo object only within a RCU read
critical section. This prevents the object from being deleted/freed when
in actual use.
4. The deletion of a peerinfo object is done in an ordered manner
(glusterd_peerinfo_destroy). The object is first removed from the
peerinfo list using an atomic list remove, but the list head is not
reset to allow existing list readers to complete correctly. We wait for
readers to complete, before resetting the list head. This removes the
object from the list completely. After this no new readers can get a
reference to the object, and it can be freed.
This change was developed on the git branch at [2]. This commit is a
combination of the following commits on the development branch.
d7999b9 Protect the glusterd_conf_t->peers_list with RCU.
0da85c4 Synchronize before INITing peerinfo list head after removing
from list.
32ec28a Add missing rcu_read_unlock
8fed0b8 Correctly exit read critical section once peer is found.
63db857 Free peerctx only on rpc destruction
56eff26 Cleanup style issues
e5f38b0 Indirection for events and friend_sm
3c84ac4 In __glusterd_probe_cbk goto unlock only if peer already
exists
141d855 Address review comments on 9695/1
aaeefed Protection during peer updates
6eda33d Revert "Synchronize before INITing peerinfo list head after
removing from list."
f69db96 Remove unneeded line
b43d2ec Address review comments on 9695/4
7781921 Address review comments on 9695/5
eb6467b Add some missing semi-colons
328a47f Remove synchronize_rcu from
glusterd_friend_sm_transition_state
186e429 Run part of glusterd_friend_remove in critical section
55c0a2e Fix gluster (peer status/ pool list) with no peers
93f8dcf Use call_rcu to free peerinfo
c36178c Introduce composite struct, gd_rcu_head
[1]: http://www.rdrop.com/~paulmck/RCU/RCUdissertation.2004.07.14e1.pdf
[2]: https://github.com/kshlm/glusterfs/tree/urcu
Change-Id: Ic1480e59c86d41d25a6a3d159aa3e11fbb3cbc7b
BUG: 1191030
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.org/9695
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-by: Anand Nekkunti <anekkunt@redhat.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Krishnan Parthasarathi <kparthas@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-peer-utils.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-peer-utils.c | 151 |
1 files changed, 94 insertions, 57 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c index 3a145264b79..49fab4cb8b9 100644 --- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c @@ -12,44 +12,21 @@ #include "glusterd-store.h" #include "common-utils.h" -int32_t -glusterd_peerinfo_cleanup (glusterd_peerinfo_t *peerinfo) -{ - GF_ASSERT (peerinfo); - glusterd_peerctx_t *peerctx = NULL; - gf_boolean_t quorum_action = _gf_false; - glusterd_conf_t *priv = THIS->private; - - if (peerinfo->quorum_contrib != QUORUM_NONE) - quorum_action = _gf_true; - if (peerinfo->rpc) { - peerctx = peerinfo->rpc->mydata; - peerinfo->rpc->mydata = NULL; - peerinfo->rpc = glusterd_rpc_clnt_unref (priv, peerinfo->rpc); - peerinfo->rpc = NULL; - if (peerctx) { - GF_FREE (peerctx->errstr); - GF_FREE (peerctx); - } - } - glusterd_peerinfo_destroy (peerinfo); - - if (quorum_action) - glusterd_do_quorum_action (); - return 0; -} - -int32_t -glusterd_peerinfo_destroy (glusterd_peerinfo_t *peerinfo) +void +glusterd_peerinfo_destroy (struct rcu_head *head) { - int32_t ret = -1; + int32_t ret = -1; + glusterd_peerinfo_t *peerinfo = NULL; glusterd_peer_hostname_t *hostname = NULL; - glusterd_peer_hostname_t *tmp = NULL; + glusterd_peer_hostname_t *tmp = NULL; - if (!peerinfo) - goto out; + /* This works as rcu_head is the first member of gd_rcu_head */ + peerinfo = caa_container_of (head, glusterd_peerinfo_t, head); + + /* Set THIS to the saved this. 
Needed by some functions below */ + THIS = peerinfo->head.this; - cds_list_del_init (&peerinfo->uuid_list); + CDS_INIT_LIST_HEAD (&peerinfo->uuid_list); ret = glusterd_store_delete_peerinfo (peerinfo); if (ret) { @@ -65,13 +42,44 @@ glusterd_peerinfo_destroy (glusterd_peerinfo_t *peerinfo) } glusterd_sm_tr_log_delete (&peerinfo->sm_log); + pthread_mutex_destroy (&peerinfo->delete_lock); GF_FREE (peerinfo); + peerinfo = NULL; - ret = 0; + return; +} -out: - return ret; +int32_t +glusterd_peerinfo_cleanup (glusterd_peerinfo_t *peerinfo) +{ + GF_ASSERT (peerinfo); + glusterd_peerctx_t *peerctx = NULL; + gf_boolean_t quorum_action = _gf_false; + glusterd_conf_t *priv = THIS->private; + + if (pthread_mutex_trylock (&peerinfo->delete_lock)) { + /* Someone else is already deleting the peer, so give up */ + return 0; + } + + uatomic_set (&peerinfo->deleting, _gf_true); + + if (peerinfo->quorum_contrib != QUORUM_NONE) + quorum_action = _gf_true; + if (peerinfo->rpc) { + peerinfo->rpc = glusterd_rpc_clnt_unref (priv, peerinfo->rpc); + peerinfo->rpc = NULL; + } + + cds_list_del_rcu (&peerinfo->uuid_list); + /* Saving THIS, as it is needed by the callback function */ + peerinfo->head.this = THIS; + call_rcu (&peerinfo->head.head, glusterd_peerinfo_destroy); + + if (quorum_action) + glusterd_do_quorum_action (); + return 0; } /* glusterd_peerinfo_find_by_hostname searches for a peer which matches the @@ -166,6 +174,7 @@ glusterd_peerinfo_find_by_uuid (uuid_t uuid) { glusterd_conf_t *priv = NULL; glusterd_peerinfo_t *entry = NULL; + glusterd_peerinfo_t *found = NULL; xlator_t *this = NULL; this = THIS; @@ -178,19 +187,23 @@ glusterd_peerinfo_find_by_uuid (uuid_t uuid) if (uuid_is_null (uuid)) return NULL; - cds_list_for_each_entry (entry, &priv->peers, uuid_list) { + rcu_read_lock (); + cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) { if (!uuid_compare (entry->uuid, uuid)) { gf_log (this->name, GF_LOG_DEBUG, "Friend found... 
state: %s", glusterd_friend_sm_state_name_get (entry->state.state)); - return entry; + found = entry; /* Probably should be rcu_dereferenced */ + break; } } + rcu_read_unlock (); - gf_log (this->name, GF_LOG_DEBUG, "Friend with uuid: %s, not found", - uuid_utoa (uuid)); - return NULL; + if (!found) + gf_log (this->name, GF_LOG_DEBUG, + "Friend with uuid: %s, not found", uuid_utoa (uuid)); + return found; } /* glusterd_peerinfo_find will search for a peer matching either @uuid or @@ -282,6 +295,8 @@ glusterd_peerinfo_new (glusterd_friend_sm_state_t state, uuid_t *uuid, if (new_peer->state.state == GD_FRIEND_STATE_BEFRIENDED) new_peer->quorum_contrib = QUORUM_WAITING; new_peer->port = port; + + pthread_mutex_init (&new_peer->delete_lock, NULL); out: if (ret && new_peer) { glusterd_peerinfo_cleanup (new_peer); @@ -303,7 +318,8 @@ glusterd_chk_peers_connected_befriended (uuid_t skip_uuid) priv= THIS->private; GF_ASSERT (priv); - cds_list_for_each_entry (peerinfo, &priv->peers, uuid_list) { + rcu_read_lock (); + cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) { if (!uuid_is_null (skip_uuid) && !uuid_compare (skip_uuid, peerinfo->uuid)) @@ -315,6 +331,8 @@ glusterd_chk_peers_connected_befriended (uuid_t skip_uuid) break; } } + rcu_read_unlock (); + gf_log (THIS->name, GF_LOG_DEBUG, "Returning %s", (ret?"TRUE":"FALSE")); return ret; @@ -336,14 +354,16 @@ glusterd_uuid_to_hostname (uuid_t uuid) if (!uuid_compare (MY_UUID, uuid)) { hostname = gf_strdup ("localhost"); } + rcu_read_lock (); if (!cds_list_empty (&priv->peers)) { - cds_list_for_each_entry (entry, &priv->peers, uuid_list) { + cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) { if (!uuid_compare (entry->uuid, uuid)) { hostname = gf_strdup (entry->hostname); break; } } } + rcu_read_unlock (); return hostname; } @@ -373,7 +393,8 @@ glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo, if (!uuid_compare (brickinfo->uuid, MY_UUID)) continue; - cds_list_for_each_entry (peerinfo, peers, 
uuid_list) { + rcu_read_lock (); + cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) { if (uuid_compare (peerinfo->uuid, brickinfo->uuid)) continue; @@ -385,9 +406,11 @@ glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo, *down_peerstr = gf_strdup (peerinfo->hostname); gf_log ("", GF_LOG_DEBUG, "Peer %s is down. ", peerinfo->hostname); + rcu_read_unlock (); goto out; } } + rcu_read_unlock (); } ret = _gf_true; @@ -479,7 +502,7 @@ gd_add_address_to_peer (glusterd_peerinfo_t *peerinfo, const char *address) if (ret) goto out; - cds_list_add_tail (&hostname->hostname_list, &peerinfo->hostnames); + cds_list_add_tail_rcu (&hostname->hostname_list, &peerinfo->hostnames); ret = 0; out: @@ -584,6 +607,7 @@ gd_peerinfo_find_from_hostname (const char *hoststr) xlator_t *this = NULL; glusterd_conf_t *priv = NULL; glusterd_peerinfo_t *peer = NULL; + glusterd_peerinfo_t *found = NULL; glusterd_peer_hostname_t *tmphost = NULL; this = THIS; @@ -593,19 +617,24 @@ gd_peerinfo_find_from_hostname (const char *hoststr) GF_VALIDATE_OR_GOTO (this->name, (hoststr != NULL), out); - cds_list_for_each_entry (peer, &priv->peers, uuid_list) { - cds_list_for_each_entry (tmphost, &peer->hostnames, - hostname_list) { + rcu_read_lock (); + cds_list_for_each_entry_rcu (peer, &priv->peers, uuid_list) { + cds_list_for_each_entry_rcu (tmphost, &peer->hostnames, + hostname_list) { if (!strncasecmp (tmphost->hostname, hoststr, 1024)) { gf_log (this->name, GF_LOG_DEBUG, "Friend %s found.. 
state: %d", tmphost->hostname, peer->state.state); - return peer; + found = peer; /* Probably needs to be + dereferenced*/ + goto unlock; } } } +unlock: + rcu_read_unlock (); out: - return NULL; + return found; } /* gd_peerinfo_find_from_addrinfo iterates over all the addresses saved for each @@ -624,6 +653,7 @@ gd_peerinfo_find_from_addrinfo (const struct addrinfo *addr) xlator_t *this = NULL; glusterd_conf_t *conf = NULL; glusterd_peerinfo_t *peer = NULL; + glusterd_peerinfo_t *found = NULL; glusterd_peer_hostname_t *address = NULL; int ret = 0; struct addrinfo *paddr = NULL; @@ -636,9 +666,10 @@ gd_peerinfo_find_from_addrinfo (const struct addrinfo *addr) GF_VALIDATE_OR_GOTO (this->name, (addr != NULL), out); - cds_list_for_each_entry (peer, &conf->peers, uuid_list) { - cds_list_for_each_entry (address, &peer->hostnames, - hostname_list) { + rcu_read_lock (); + cds_list_for_each_entry_rcu (peer, &conf->peers, uuid_list) { + cds_list_for_each_entry_rcu (address, &peer->hostnames, + hostname_list) { /* TODO: Cache the resolved addrinfos to improve * performance */ @@ -658,14 +689,20 @@ gd_peerinfo_find_from_addrinfo (const struct addrinfo *addr) for (tmp = paddr; tmp != NULL; tmp = tmp->ai_next) { if (gf_compare_sockaddr (addr->ai_addr, tmp->ai_addr)) { - freeaddrinfo (paddr); - return peer; + found = peer; /* (de)referenced? */ + break; } } + + freeaddrinfo (paddr); + if (found) + goto unlock; } } +unlock: + rcu_read_unlock (); out: - return NULL; + return found; } /* gd_update_peerinfo_from_dict will update the hostnames for @peerinfo from |