diff options
author | Krishnan Parthasarathi <kparthas@redhat.com> | 2013-04-15 15:56:57 +0530 |
---|---|---|
committer | Vijay Bellur <vbellur@redhat.com> | 2013-04-17 05:48:50 -0700 |
commit | 92729add67e2e7b8c7589c2dfab0bde071a7faf2 (patch) | |
tree | 4e8c2d48de34938a71abd320402e98e1588aee30 /xlators | |
parent | 47c118e22d9d6fb6662fe96841ed4fe3089739b5 (diff) |
glusterd: big lock - a coarse-grained lock to prevent races (tag: v3.4.0alpha3)
There are primarily three lists that are part of the glusterd process
and are accessed concurrently — namely, priv->volumes, priv->peers
and volinfo->bricks_list.
Big-lock approach
-----------------
WHAT IS IT?
Big lock is a coarse-grained lock which protects all three
lists, mentioned above, from racy access.
HOW DOES IT WORK?
At any given point in time, glusterd's thread(s) are in execution
_iff_ there is a preceding, inbound network event. Of course, the
sigwaiter thread and timer thread are exceptions.
A network event is an external trigger to glusterd, via the epoll
thread, in the form of POLLIN and POLLERR.
As long as we take the big lock at all such entry points and yield
it when we are done, we are guaranteed that all the network events
accessing the global lists are serialised.
This amounts to holding the big lock at
- all the handlers of all the actors in glusterd. (POLLIN)
- all the cbks in glusterd. (POLLIN)
- rpc_notify (DISCONNECT event), if we access/modify
one of the three lists. (POLLERR)
In the case of synctask'ized volume operations, we must remember that,
if we hold the big lock for the entire duration of the handler,
we may block other non-synctask rpc actors from executing.
For eg, volume-start would block in PMAP SIGNIN, if done incorrectly.
To prevent this, we need to yield the big lock, when we yield the
synctask, and reacquire on waking up of the synctask.
BUG: 948686
Change-Id: I429832f1fed67bcac0813403d58346558a403ce9
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/4835
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'xlators')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 15 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-geo-rep.c | 10 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handler.c | 281 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handshake.c | 63 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-pmap.c | 43 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-quota.c | 8 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-rebalance.c | 25 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-replace-brick.c | 11 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-rpc-ops.c | 106 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-sm.c | 19 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-syncop.c | 24 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.c | 40 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.h | 2 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 57 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd.c | 48 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd.h | 11 |
16 files changed, 657 insertions, 106 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c index 1c1ad1bf89b..1d927044ef5 100644 --- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c @@ -335,7 +335,7 @@ out: /* Handler functions */ int -glusterd_handle_add_brick (rpcsvc_request_t *req) +__glusterd_handle_add_brick (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -536,9 +536,14 @@ out: return ret; } +int +glusterd_handle_add_brick (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_add_brick); +} int -glusterd_handle_remove_brick (rpcsvc_request_t *req) +__glusterd_handle_remove_brick (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -842,6 +847,12 @@ out: return ret; } +int +glusterd_handle_remove_brick (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_remove_brick); +} /* op-sm */ diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c index 758ab1a14af..b90dbb60a28 100644 --- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c +++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c @@ -36,7 +36,7 @@ static char *gsync_reserved_opts[] = { }; int -glusterd_handle_gsync_set (rpcsvc_request_t *req) +__glusterd_handle_gsync_set (rpcsvc_request_t *req) { int32_t ret = 0; dict_t *dict = NULL; @@ -154,6 +154,12 @@ out: } +int +glusterd_handle_gsync_set (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_gsync_set); +} + /***** * * glusterd_urltransform* internal API @@ -1342,7 +1348,9 @@ glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave, runner_add_arg (&runner, op_name); if (op_value) runner_add_arg (&runner, op_value); + synclock_unlock (&priv->big_lock); ret = runner_run (&runner); + synclock_lock (&priv->big_lock); if (ret) { gf_log ("", GF_LOG_WARNING, "gsyncd failed to " "%s %s 
option for %s %s peers", diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c index d5bad0f2751..1b39fb3ebbc 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handler.c +++ b/xlators/mgmt/glusterd/src/glusterd-handler.c @@ -54,6 +54,30 @@ #include <lvm2app.h> #endif +int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, + void *data, rpc_clnt_notify_t notify_fn) +{ + glusterd_conf_t *priv = THIS->private; + int ret = -1; + synclock_lock (&priv->big_lock); + ret = notify_fn (rpc, mydata, event, data); + synclock_unlock (&priv->big_lock); + return ret; +} + +int glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn) +{ + glusterd_conf_t *priv = THIS->private; + int ret = -1; + + synclock_lock (&priv->big_lock); + ret = actor_fn (req); + synclock_unlock (&priv->big_lock); + + return ret; +} + static int glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid, char *hostname, int port, @@ -500,7 +524,7 @@ out: } int -glusterd_handle_cluster_lock (rpcsvc_request_t *req) +__glusterd_handle_cluster_lock (rpcsvc_request_t *req) { gd1_mgmt_cluster_lock_req lock_req = {{0},}; int32_t ret = -1; @@ -554,6 +578,13 @@ out: } int +glusterd_handle_cluster_lock (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cluster_lock); +} + +int glusterd_req_ctx_create (rpcsvc_request_t *rpc_req, glusterd_op_t op, uuid_t uuid, char *buf_val, size_t buf_len, @@ -604,7 +635,7 @@ out: } int -glusterd_handle_stage_op (rpcsvc_request_t *req) +__glusterd_handle_stage_op (rpcsvc_request_t *req) { int32_t ret = -1; glusterd_req_ctx_t *req_ctx = NULL; @@ -649,7 +680,14 @@ glusterd_handle_stage_op (rpcsvc_request_t *req) } int -glusterd_handle_commit_op (rpcsvc_request_t *req) +glusterd_handle_stage_op (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_stage_op); +} + + +int +__glusterd_handle_commit_op 
(rpcsvc_request_t *req) { int32_t ret = -1; glusterd_req_ctx_t *req_ctx = NULL; @@ -700,7 +738,13 @@ out: } int -glusterd_handle_cli_probe (rpcsvc_request_t *req) +glusterd_handle_commit_op (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_commit_op); +} + +int +__glusterd_handle_cli_probe (rpcsvc_request_t *req) { int32_t ret = -1; gf1_cli_probe_req cli_req = {0,}; @@ -788,7 +832,13 @@ out: } int -glusterd_handle_cli_deprobe (rpcsvc_request_t *req) +glusterd_handle_cli_probe (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_cli_probe); +} + +int +__glusterd_handle_cli_deprobe (rpcsvc_request_t *req) { int32_t ret = -1; gf1_cli_deprobe_req cli_req = {0,}; @@ -877,7 +927,13 @@ out: } int -glusterd_handle_cli_list_friends (rpcsvc_request_t *req) +glusterd_handle_cli_deprobe (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_cli_deprobe); +} + +int +__glusterd_handle_cli_list_friends (rpcsvc_request_t *req) { int32_t ret = -1; gf1_cli_peer_list_req cli_req = {0,}; @@ -925,7 +981,14 @@ out: } int -glusterd_handle_cli_get_volume (rpcsvc_request_t *req) +glusterd_handle_cli_list_friends (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_list_friends); +} + +int +__glusterd_handle_cli_get_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -978,9 +1041,16 @@ out: return ret; } +int +glusterd_handle_cli_get_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_get_volume); +} + #ifdef HAVE_BD_XLATOR int -glusterd_handle_cli_bd_op (rpcsvc_request_t *req) +__glusterd_handle_cli_bd_op (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = { {0,} }; @@ -1044,10 +1114,16 @@ out: return ret; } + +int +glusterd_handle_cli_bd_op (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_cli_bd_op); +} #endif int 
-glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req) +__glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req) { int ret = -1; dict_t *dict = NULL; @@ -1144,7 +1220,14 @@ out: } int -glusterd_handle_cli_list_volume (rpcsvc_request_t *req) +glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_uuid_reset); +} + +int +__glusterd_handle_cli_list_volume (rpcsvc_request_t *req) { int ret = -1; dict_t *dict = NULL; @@ -1203,6 +1286,13 @@ out: return ret; } +int +glusterd_handle_cli_list_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_list_volume); +} + int32_t glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx, char *err_str, size_t err_len) @@ -1215,7 +1305,7 @@ glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx, } int -glusterd_handle_reset_volume (rpcsvc_request_t *req) +__glusterd_handle_reset_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -1276,9 +1366,15 @@ out: return ret; } +int +glusterd_handle_reset_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_reset_volume); +} int -glusterd_handle_set_volume (rpcsvc_request_t *req) +__glusterd_handle_set_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -1376,7 +1472,13 @@ out: } int -glusterd_handle_sync_volume (rpcsvc_request_t *req) +glusterd_handle_set_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_set_volume); +} + +int +__glusterd_handle_sync_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -1465,6 +1567,12 @@ out: } int +glusterd_handle_sync_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_sync_volume); +} + +int glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret, char *op_errstr, dict_t *dict) { @@ 
-1491,7 +1599,7 @@ glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret, } int -glusterd_handle_fsm_log (rpcsvc_request_t *req) +__glusterd_handle_fsm_log (rpcsvc_request_t *req) { int32_t ret = -1; gf1_cli_fsm_log_req cli_req = {0,}; @@ -1548,6 +1656,12 @@ out: } int +glusterd_handle_fsm_log (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_fsm_log); +} + +int glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status) { @@ -1586,7 +1700,7 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status) } int -glusterd_handle_cluster_unlock (rpcsvc_request_t *req) +__glusterd_handle_cluster_unlock (rpcsvc_request_t *req) { gd1_mgmt_cluster_unlock_req unlock_req = {{0}, }; int32_t ret = -1; @@ -1638,6 +1752,13 @@ out: } int +glusterd_handle_cluster_unlock (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cluster_unlock); +} + +int glusterd_op_stage_send_resp (rpcsvc_request_t *req, int32_t op, int32_t status, char *op_errstr, dict_t *rsp_dict) @@ -1718,7 +1839,7 @@ out: } int -glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) +__glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_req friend_req = {{0},}; @@ -1757,7 +1878,14 @@ out: } int -glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) +glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_incoming_friend_req); +} + +int +__glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_req friend_req = {{0},}; @@ -1795,6 +1923,14 @@ out: } int +glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_incoming_unfriend_req); + +} + +int glusterd_handle_friend_update_delete (dict_t *dict) { char *hostname = NULL; @@ -1840,7 +1976,7 @@ out: } int -glusterd_handle_friend_update 
(rpcsvc_request_t *req) +__glusterd_handle_friend_update (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_update friend_req = {{0},}; @@ -1977,7 +2113,14 @@ out: } int -glusterd_handle_probe_query (rpcsvc_request_t *req) +glusterd_handle_friend_update (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_friend_update); +} + +int +__glusterd_handle_probe_query (rpcsvc_request_t *req) { int32_t ret = -1; xlator_t *this = NULL; @@ -2071,8 +2214,13 @@ out: return ret; } +int glusterd_handle_probe_query (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_probe_query); +} + int -glusterd_handle_cli_profile_volume (rpcsvc_request_t *req) +__glusterd_handle_cli_profile_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -2140,7 +2288,14 @@ out: } int -glusterd_handle_getwd (rpcsvc_request_t *req) +glusterd_handle_cli_profile_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_profile_volume); +} + +int +__glusterd_handle_getwd (rpcsvc_request_t *req) { int32_t ret = -1; gf1_cli_getwd_rsp rsp = {0,}; @@ -2165,9 +2320,14 @@ glusterd_handle_getwd (rpcsvc_request_t *req) return ret; } +int +glusterd_handle_getwd (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_getwd); +} int -glusterd_handle_mount (rpcsvc_request_t *req) +__glusterd_handle_mount (rpcsvc_request_t *req) { gf1_cli_mount_req mnt_req = {0,}; gf1_cli_mount_rsp rsp = {0,}; @@ -2230,7 +2390,13 @@ glusterd_handle_mount (rpcsvc_request_t *req) } int -glusterd_handle_umount (rpcsvc_request_t *req) +glusterd_handle_mount (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_mount); +} + +int +__glusterd_handle_umount (rpcsvc_request_t *req) { gf1_cli_umount_req umnt_req = {0,}; gf1_cli_umount_rsp rsp = {0,}; @@ -2243,9 +2409,11 @@ glusterd_handle_umount (rpcsvc_request_t *req) 
gf_boolean_t dir_ok = _gf_false; char *pdir = NULL; char *t = NULL; + glusterd_conf_t *priv = NULL; GF_ASSERT (req); GF_ASSERT (this); + priv = this->private; ret = xdr_to_generic (req->msg[0], &umnt_req, (xdrproc_t)xdr_gf1_cli_umount_req); @@ -2288,7 +2456,9 @@ glusterd_handle_umount (rpcsvc_request_t *req) runner_add_args (&runner, "umount", umnt_req.path, NULL); if (umnt_req.lazy) runner_add_arg (&runner, "-l"); + synclock_unlock (&priv->big_lock); rsp.op_ret = runner_run (&runner); + synclock_lock (&priv->big_lock); if (rsp.op_ret == 0) { if (realpath (umnt_req.path, mntp)) rmdir (mntp); @@ -2317,6 +2487,12 @@ glusterd_handle_umount (rpcsvc_request_t *req) } int +glusterd_handle_umount (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_umount); +} + +int glusterd_friend_remove (uuid_t uuid, char *hostname) { int ret = 0; @@ -2521,7 +2697,9 @@ glusterd_friend_add (const char *hoststr, int port, if (!restore) { ret = glusterd_store_peerinfo (*friend); if (ret == 0) { + synclock_unlock (&conf->big_lock); ret = glusterd_friend_rpc_create (this, *friend, args); + synclock_lock (&conf->big_lock); } else { gf_log (this->name, GF_LOG_ERROR, @@ -2926,7 +3104,7 @@ out: } int -glusterd_handle_status_volume (rpcsvc_request_t *req) +__glusterd_handle_status_volume (rpcsvc_request_t *req) { int32_t ret = -1; uint32_t cmd = 0; @@ -2998,7 +3176,14 @@ out: } int -glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req) +glusterd_handle_status_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_status_volume); +} + +int +__glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -3067,9 +3252,15 @@ out: } int -glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, - rpc_clnt_event_t event, - void *data) +glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + 
__glusterd_handle_cli_clearlocks_volume); +} + +int +__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) { xlator_t *this = NULL; glusterd_conf_t *conf = NULL; @@ -3108,9 +3299,16 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, } int -glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata, - rpc_clnt_event_t event, - void *data) +glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + return glusterd_big_locked_notify (rpc, mydata, event, data, + __glusterd_brick_rpc_notify); +} + +int +__glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) { xlator_t *this = NULL; glusterd_conf_t *conf = NULL; @@ -3149,6 +3347,14 @@ glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata, } int +glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + return glusterd_big_locked_notify (rpc, mydata, event, data, + __glusterd_nodesvc_rpc_notify); +} + +int glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx) { int ret = -1; @@ -3190,9 +3396,8 @@ out: } int -glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata, - rpc_clnt_event_t event, - void *data) +__glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) { xlator_t *this = NULL; glusterd_conf_t *conf = NULL; @@ -3311,6 +3516,14 @@ out: } int +glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + return glusterd_big_locked_notify (rpc, mydata, event, data, + __glusterd_peer_rpc_notify); +} + +int glusterd_null (rpcsvc_request_t *req) { diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c index 8b964702669..c41a9459870 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handshake.c +++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c @@ 
-109,7 +109,7 @@ out: } int -server_getspec (rpcsvc_request_t *req) +__server_getspec (rpcsvc_request_t *req) { int32_t ret = -1; int32_t op_errno = 0; @@ -289,8 +289,14 @@ fail: return 0; } +int +server_getspec (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __server_getspec); +} + int32_t -server_event_notify (rpcsvc_request_t *req) +__server_event_notify (rpcsvc_request_t *req) { int32_t ret = -1; int32_t op_errno = 0; @@ -350,6 +356,12 @@ fail: return 0; } +int32_t +server_event_notify (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __server_event_notify); +} + int gd_validate_cluster_op_version (xlator_t *this, int cluster_op_version, char *peerid) @@ -383,7 +395,7 @@ out: } int -glusterd_mgmt_hndsk_versions (rpcsvc_request_t *req) +__glusterd_mgmt_hndsk_versions (rpcsvc_request_t *req) { dict_t *dict = NULL; xlator_t *this = NULL; @@ -459,7 +471,14 @@ out: } int -glusterd_mgmt_hndsk_versions_ack (rpcsvc_request_t *req) +glusterd_mgmt_hndsk_versions (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_mgmt_hndsk_versions); +} + +int +__glusterd_mgmt_hndsk_versions_ack (rpcsvc_request_t *req) { dict_t *clnt_dict = NULL; xlator_t *this = NULL; @@ -527,6 +546,12 @@ out: return ret; } +int +glusterd_mgmt_hndsk_versions_ack (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_mgmt_hndsk_versions_ack); +} rpcsvc_actor_t gluster_handshake_actors[] = { [GF_HNDSK_NULL] = {"NULL", GF_HNDSK_NULL, NULL, NULL, 0}, @@ -699,7 +724,7 @@ out: } int -glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov, +__glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; @@ -778,7 +803,15 @@ out: } int -glusterd_mgmt_hndsk_version_cbk (struct rpc_req *req, struct iovec *iov, +glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return 
glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_mgmt_hndsk_version_ack_cbk); +} + +int +__glusterd_mgmt_hndsk_version_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; @@ -883,6 +916,14 @@ out: } int +glusterd_mgmt_hndsk_version_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_mgmt_hndsk_version_cbk); +} + +int glusterd_mgmt_handshake (xlator_t *this, glusterd_peerctx_t *peerctx) { call_frame_t *frame = NULL; @@ -985,7 +1026,7 @@ out: } int -glusterd_peer_dump_version_cbk (struct rpc_req *req, struct iovec *iov, +__glusterd_peer_dump_version_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; @@ -1092,6 +1133,14 @@ out: int +glusterd_peer_dump_version_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_peer_dump_version_cbk); +} + +int glusterd_peer_dump_version (xlator_t *this, struct rpc_clnt *rpc, glusterd_peerctx_t *peerctx) { diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c index cb29462c9b7..aab6744a418 100644 --- a/xlators/mgmt/glusterd/src/glusterd-pmap.c +++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c @@ -280,7 +280,7 @@ out: } int -gluster_pmap_portbybrick (rpcsvc_request_t *req) +__gluster_pmap_portbybrick (rpcsvc_request_t *req) { pmap_port_by_brick_req args = {0,}; pmap_port_by_brick_rsp rsp = {0,}; @@ -314,7 +314,14 @@ fail: int -gluster_pmap_brickbyport (rpcsvc_request_t *req) +gluster_pmap_portbybrick (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __gluster_pmap_portbybrick); +} + + +int +__gluster_pmap_brickbyport (rpcsvc_request_t *req) { pmap_brick_by_port_req args = {0,}; pmap_brick_by_port_rsp rsp = {0,}; @@ -340,6 +347,14 @@ fail: return 0; } + +int +gluster_pmap_brickbyport (rpcsvc_request_t 
*req) +{ + return glusterd_big_locked_handler (req, __gluster_pmap_brickbyport); +} + + static int glusterd_brick_update_signin (glusterd_brickinfo_t *brickinfo, gf_boolean_t value) @@ -350,7 +365,7 @@ glusterd_brick_update_signin (glusterd_brickinfo_t *brickinfo, } int -gluster_pmap_signup (rpcsvc_request_t *req) +__gluster_pmap_signup (rpcsvc_request_t *req) { pmap_signup_req args = {0,}; pmap_signup_rsp rsp = {0,}; @@ -376,7 +391,13 @@ fail: } int -gluster_pmap_signin (rpcsvc_request_t *req) +gluster_pmap_signup (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __gluster_pmap_signup); +} + +int +__gluster_pmap_signin (rpcsvc_request_t *req) { pmap_signin_req args = {0,}; pmap_signin_rsp rsp = {0,}; @@ -407,9 +428,15 @@ fail: } +int +gluster_pmap_signin (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __gluster_pmap_signin); +} + int -gluster_pmap_signout (rpcsvc_request_t *req) +__gluster_pmap_signout (rpcsvc_request_t *req) { pmap_signout_req args = {0,}; pmap_signout_rsp rsp = {0,}; @@ -440,6 +467,12 @@ fail: return 0; } +int +gluster_pmap_signout (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __gluster_pmap_signout); +} + rpcsvc_actor_t gluster_pmap_actors[] = { [GF_PMAP_NULL] = {"NULL", GF_PMAP_NULL, NULL, NULL, 0}, [GF_PMAP_PORTBYBRICK] = {"PORTBYBRICK", GF_PMAP_PORTBYBRICK, diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c index 28ab16d22a7..e299fecaae9 100644 --- a/xlators/mgmt/glusterd/src/glusterd-quota.c +++ b/xlators/mgmt/glusterd/src/glusterd-quota.c @@ -25,7 +25,7 @@ #include <sys/wait.h> int -glusterd_handle_quota (rpcsvc_request_t *req) +__glusterd_handle_quota (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -115,6 +115,12 @@ out: return ret; } +int +glusterd_handle_quota (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_quota); +} + int32_t 
glusterd_check_if_quota_trans_enabled (glusterd_volinfo_t *volinfo) { diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c index 271de6111c8..52f845b2972 100644 --- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c +++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c @@ -70,9 +70,10 @@ out: return ret; } + int32_t -glusterd_defrag_notify (struct rpc_clnt *rpc, void *mydata, - rpc_clnt_event_t event, void *data) +__glusterd_defrag_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) { glusterd_volinfo_t *volinfo = NULL; glusterd_defrag_info_t *defrag = NULL; @@ -160,6 +161,14 @@ glusterd_defrag_notify (struct rpc_clnt *rpc, void *mydata, return ret; } +int32_t +glusterd_defrag_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + return glusterd_big_locked_notify (rpc, mydata, event, + data, __glusterd_defrag_notify); +} + int glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr, size_t len, int cmd, defrag_cbk_fn_t cbk, @@ -274,8 +283,10 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr, goto out; } + synclock_unlock (&priv->big_lock); ret = glusterd_rpc_create (&defrag->rpc, options, glusterd_defrag_notify, volinfo); + synclock_lock (&priv->big_lock); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "RPC create failed"); goto out; @@ -326,8 +337,10 @@ glusterd_rebalance_rpc_create (glusterd_volinfo_t *volinfo, goto out; } + synclock_unlock (&priv->big_lock); ret = glusterd_rpc_create (&defrag->rpc, options, glusterd_defrag_notify, volinfo); + synclock_lock (&priv->big_lock); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "RPC create failed"); goto out; @@ -376,7 +389,7 @@ out: } int -glusterd_handle_defrag_volume (rpcsvc_request_t *req) +__glusterd_handle_defrag_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -461,6 +474,12 @@ out: return 0; } +int 
+glusterd_handle_defrag_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_defrag_volume); +} + int glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr) diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c index ec69b363763..928d2ee2190 100644 --- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c +++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c @@ -70,7 +70,7 @@ out: } int -glusterd_handle_replace_brick (rpcsvc_request_t *req) +__glusterd_handle_replace_brick (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -171,6 +171,13 @@ out: return ret; } +int +glusterd_handle_replace_brick (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_replace_brick); +} + static int glusterd_get_rb_dst_brickinfo (glusterd_volinfo_t *volinfo, glusterd_brickinfo_t **brickinfo) @@ -748,7 +755,7 @@ rb_spawn_dst_brick (glusterd_volinfo_t *volinfo, if (volinfo->memory_accounting) runner_add_arg (&runner, "--mem-accounting"); - ret = runner_run (&runner); + ret = runner_run_nowait (&runner); if (ret) { pmap_registry_remove (THIS, 0, brickinfo->path, GF_PMAP_PORT_BRICKSERVER, NULL); diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c index c0de195bde1..b9e53733335 100644 --- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c @@ -185,7 +185,21 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, } int -glusterd_probe_cbk (struct rpc_req *req, struct iovec *iov, +glusterd_big_locked_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe, fop_cbk_fn_t fn) +{ + glusterd_conf_t *priv = THIS->private; + int ret = -1; + + synclock_lock (&priv->big_lock); + ret = fn (req, iov, count, myframe); + synclock_unlock (&priv->big_lock); + + return ret; +} + +int +__glusterd_probe_cbk (struct 
rpc_req *req, struct iovec *iov, int count, void *myframe) { gd1_mgmt_probe_rsp rsp = {{0},}; @@ -287,7 +301,16 @@ out: } int -glusterd_friend_add_cbk (struct rpc_req * req, struct iovec *iov, +glusterd_probe_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_probe_cbk); +} + + +int +__glusterd_friend_add_cbk (struct rpc_req * req, struct iovec *iov, int count, void *myframe) { gd1_mgmt_friend_rsp rsp = {{0},}; @@ -380,7 +403,15 @@ out: } int -glusterd_friend_remove_cbk (struct rpc_req * req, struct iovec *iov, +glusterd_friend_add_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_friend_add_cbk); +} + +int +__glusterd_friend_remove_cbk (struct rpc_req * req, struct iovec *iov, int count, void *myframe) { gd1_mgmt_friend_rsp rsp = {{0},}; @@ -473,8 +504,16 @@ respond: return ret; } +int +glusterd_friend_remove_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_friend_remove_cbk); +} + int32_t -glusterd_friend_update_cbk (struct rpc_req *req, struct iovec *iov, +__glusterd_friend_update_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; @@ -506,8 +545,16 @@ out: return ret; } +int +glusterd_friend_update_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_friend_update_cbk); +} + int32_t -glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov, +__glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gd1_mgmt_cluster_lock_rsp rsp = {{0},}; @@ -572,7 +619,15 @@ out: } int32_t -glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov, +glusterd_cluster_lock_cbk (struct rpc_req *req, 
struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_cluster_lock_cbk); +} + +int32_t +__glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gd1_mgmt_cluster_lock_rsp rsp = {{0},}; @@ -634,7 +689,15 @@ out: } int32_t -glusterd_stage_op_cbk (struct rpc_req *req, struct iovec *iov, +glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_cluster_unlock_cbk); +} + +int32_t +__glusterd_stage_op_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gd1_mgmt_stage_op_rsp rsp = {{0},}; @@ -753,7 +816,15 @@ out: } int32_t -glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov, +glusterd_stage_op_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_stage_op_cbk); +} + +int32_t +__glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gd1_mgmt_commit_op_rsp rsp = {{0},}; @@ -911,7 +982,13 @@ out: return ret; } - +int32_t +glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_commit_op_cbk); +} int32_t glusterd_rpc_probe (call_frame_t *frame, xlator_t *this, @@ -1276,7 +1353,7 @@ out: } int32_t -glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov, +__glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gd1_mgmt_brick_op_rsp rsp = {0}; @@ -1378,6 +1455,14 @@ out: } int32_t +glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + return glusterd_big_locked_cbk (req, iov, count, myframe, + __glusterd_brick_op_cbk); +} + +int32_t glusterd_brick_op (call_frame_t *frame, xlator_t *this, void *data) 
{ @@ -1542,3 +1627,4 @@ struct rpc_clnt_program gd_peer_prog = { .numproc = GLUSTERD_FRIEND_MAXVALUE, }; + diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c index 5a38fdfecb5..a82ca2e176f 100644 --- a/xlators/mgmt/glusterd/src/glusterd-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-sm.c @@ -1075,8 +1075,25 @@ glusterd_friend_sm () ret = 0; out: - if (quorum_action) + if (quorum_action) { + /* When glusterd is restarted, it needs to wait until the 'friends' view + * of the volumes settle, before it starts any of the internal daemons. + * + * Every friend that was part of the cluster, would send its + * cluster-view, 'our' way. For every friend, who belongs to + * a partition which has a different cluster-view from our + * partition, we may update our cluster-view. For subsequent + * friends from that partition would agree with us, if the first + * friend wasn't rejected. For every first friend, whom we agreed with, + * we would need to start internal daemons/bricks belonging to the + * new volumes. + * glusterd_spawn_daemons calls functions that are idempotent. ie, + * the functions spawn process(es) only if they are not started yet. 
+ * + * */ + glusterd_spawn_daemons ((void*) _gf_false); glusterd_do_quorum_action (); + } return ret; } diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c index 4430b41a8b8..5730dc48a41 100644 --- a/xlators/mgmt/glusterd/src/glusterd-syncop.c +++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c @@ -18,6 +18,16 @@ #include "glusterd-op-sm.h" #include "glusterd-utils.h" +static inline void +gd_synctask_barrier_wait (struct syncargs *args, int count) +{ + glusterd_conf_t *conf = THIS->private; + + synclock_unlock (&conf->big_lock); + synctask_barrier_wait (args, count); + synclock_lock (&conf->big_lock); +} + static void gd_collate_errors (struct syncargs *args, int op_ret, int op_errno, char *op_errstr) @@ -663,7 +673,7 @@ gd_lock_op_phase (struct list_head *peers, glusterd_op_t op, dict_t *op_ctx, gd_syncop_mgmt_lock (peerinfo->rpc, &args, MY_UUID, peer_uuid); peer_cnt++; } - synctask_barrier_wait((&args), peer_cnt); + gd_synctask_barrier_wait((&args), peer_cnt); ret = args.op_ret; if (ret) { gf_asprintf (op_errstr, "Another transaction could be " @@ -741,7 +751,7 @@ stage_done: op, req_dict, op_ctx); peer_cnt++; } - synctask_barrier_wait((&args), peer_cnt); + gd_synctask_barrier_wait((&args), peer_cnt); ret = args.op_ret; if (dict_get_str (op_ctx, "errstr", &errstr) == 0) *op_errstr = gf_strdup (errstr); @@ -816,7 +826,7 @@ commit_done: op, req_dict, op_ctx); peer_cnt++; } - synctask_barrier_wait((&args), peer_cnt); + gd_synctask_barrier_wait((&args), peer_cnt); ret = args.op_ret; if (dict_get_str (op_ctx, "errstr", &errstr) == 0) *op_errstr = gf_strdup (errstr); @@ -853,7 +863,7 @@ gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret, list_del_init (&peerinfo->op_peers_list); peer_cnt++; } - synctask_barrier_wait((&args), peer_cnt); + gd_synctask_barrier_wait((&args), peer_cnt); ret = args.op_ret; if (ret) { gf_log (this->name, GF_LOG_ERROR, "Failed to unlock " @@ -889,8 +899,10 @@ 
gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, char **op int ret = -1; rpc_clnt_t *rpc = NULL; dict_t *rsp_dict = NULL; + glusterd_conf_t *conf = NULL; this = THIS; + conf = this->private; rsp_dict = dict_new (); if (!rsp_dict) { ret = -1; @@ -930,8 +942,12 @@ gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, char **op "due to rpc failure."); goto out; } + /*This is to ensure that the brick_op_cbk is able to take + * the big lock*/ + synclock_unlock (&conf->big_lock); ret = gd_syncop_mgmt_brick_op (rpc, pending_node, op, req_dict, op_ctx, op_errstr); + synclock_lock (&conf->big_lock); if (ret) goto out; diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c index 83342afb5c7..2153a43df97 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-utils.c @@ -23,6 +23,7 @@ #include "timer.h" #include "defaults.h" #include "compat.h" +#include "syncop.h" #include "run.h" #include "compat-errno.h" #include "statedump.h" @@ -1207,6 +1208,7 @@ glusterd_brick_connect (glusterd_volinfo_t *volinfo, char socketpath[PATH_MAX] = {0}; dict_t *options = NULL; struct rpc_clnt *rpc = NULL; + glusterd_conf_t *priv = THIS->private; GF_ASSERT (volinfo); GF_ASSERT (brickinfo); @@ -1225,9 +1227,11 @@ glusterd_brick_connect (glusterd_volinfo_t *volinfo, socketpath, 600); if (ret) goto out; + synclock_unlock (&priv->big_lock); ret = glusterd_rpc_create (&rpc, options, glusterd_brick_rpc_notify, brickinfo); + synclock_lock (&priv->big_lock); if (ret) goto out; brickinfo->rpc = rpc; @@ -1392,10 +1396,14 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo, runner_add_arg (&runner, "--mem-accounting"); runner_log (&runner, "", GF_LOG_DEBUG, "Starting GlusterFS"); - if (wait) + if (wait) { + synclock_unlock (&priv->big_lock); ret = runner_run (&runner); - else + synclock_lock (&priv->big_lock); + + } else { ret = runner_run_nowait (&runner); + } if (ret) 
goto out; @@ -2386,6 +2394,19 @@ out: return in; } +int +glusterd_spawn_daemons (void *opaque) +{ + glusterd_conf_t *conf = THIS->private; + gf_boolean_t start_bricks = (long) opaque; + + if (start_bricks) + glusterd_restart_bricks (conf); + glusterd_restart_gsyncds (conf); + glusterd_restart_rebalance (conf); + return 0; +} + void glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, gf_boolean_t meets_quorum) @@ -3382,6 +3403,7 @@ glusterd_nodesvc_connect (char *server, char *socketpath) { int ret = 0; dict_t *options = NULL; struct rpc_clnt *rpc = NULL; + glusterd_conf_t *priv = THIS->private; rpc = glusterd_nodesvc_get_rpc (server); @@ -3395,9 +3417,11 @@ glusterd_nodesvc_connect (char *server, char *socketpath) { socketpath, 600); if (ret) goto out; + synclock_unlock (&priv->big_lock); ret = glusterd_rpc_create (&rpc, options, glusterd_nodesvc_rpc_notify, server); + synclock_lock (&priv->big_lock); if (ret) goto out; (void) glusterd_nodesvc_set_rpc (server, rpc); @@ -4041,13 +4065,8 @@ glusterd_restart_bricks (glusterd_conf_t *conf) if (volinfo->status != GLUSTERD_STATUS_STARTED) continue; start_nodesvcs = _gf_true; - if (glusterd_is_volume_in_server_quorum (volinfo)) { - //these bricks will be restarted once the quorum is met - continue; - } - list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { - glusterd_brick_start (volinfo, brickinfo, _gf_true); + glusterd_brick_start (volinfo, brickinfo, _gf_false); } } @@ -5554,7 +5573,9 @@ glusterd_start_gsync (glusterd_volinfo_t *master_vol, char *slave, runner_argprintf (&runner, ":%s", master_vol->volname); runner_add_args (&runner, slave, "--config-set", "session-owner", uuid_str, NULL); + synclock_unlock (&priv->big_lock); ret = runner_run (&runner); + synclock_lock (&priv->big_lock); if (ret == -1) { errcode = -1; goto out; @@ -5565,7 +5586,9 @@ glusterd_start_gsync (glusterd_volinfo_t *master_vol, char *slave, runner_argprintf (&runner, "%s/"GSYNC_CONF, priv->workdir); 
runner_argprintf (&runner, ":%s", master_vol->volname); runner_add_arg (&runner, slave); + synclock_unlock (&priv->big_lock); ret = runner_run (&runner); + synclock_lock (&priv->big_lock); if (ret == -1) { gf_asprintf (op_errstr, GEOREP" start failed for %s %s", master_vol->volname, slave); @@ -6031,7 +6054,6 @@ glusterd_restart_rebalance (glusterd_conf_t *conf) return ret; } - void glusterd_volinfo_reset_defrag_stats (glusterd_volinfo_t *volinfo) { diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h index 6084f5714b3..ac920a655ce 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.h +++ b/xlators/mgmt/glusterd/src/glusterd-utils.h @@ -357,6 +357,8 @@ glusterd_delete_brick (glusterd_volinfo_t* volinfo, int32_t glusterd_delete_all_bricks (glusterd_volinfo_t* volinfo); int +glusterd_spawn_daemons (void *opaque); +int glusterd_restart_gsyncds (glusterd_conf_t *conf); int glusterd_start_gsync (glusterd_volinfo_t *master_vol, char *slave, diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c index fb1832f0d47..bc656a091b2 100644 --- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c @@ -31,7 +31,7 @@ glusterd_op_stop_volume_args_get (dict, volname, flags) int -glusterd_handle_create_volume (rpcsvc_request_t *req) +__glusterd_handle_create_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -187,7 +187,14 @@ out: } int -glusterd_handle_cli_start_volume (rpcsvc_request_t *req) +glusterd_handle_create_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_create_volume); +} + +int +__glusterd_handle_cli_start_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -252,9 +259,15 @@ out: return ret; } +int +glusterd_handle_cli_start_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + 
__glusterd_handle_cli_start_volume); +} int -glusterd_handle_cli_stop_volume (rpcsvc_request_t *req) +__glusterd_handle_cli_stop_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -322,7 +335,14 @@ out: } int -glusterd_handle_cli_delete_volume (rpcsvc_request_t *req) +glusterd_handle_cli_stop_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_stop_volume); +} + +int +__glusterd_handle_cli_delete_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,},}; @@ -392,7 +412,14 @@ out: } int -glusterd_handle_cli_heal_volume (rpcsvc_request_t *req) +glusterd_handle_cli_delete_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_delete_volume); +} + +int +__glusterd_handle_cli_heal_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -480,7 +507,14 @@ out: } int -glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req) +glusterd_handle_cli_heal_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_heal_volume); +} + +int +__glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -561,6 +595,13 @@ out: return ret; } +int +glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_statedump_volume); +} + #ifdef HAVE_BD_XLATOR int glusterd_is_valid_vg (const char *name) @@ -1844,7 +1885,9 @@ glusterd_clearlocks_unmount (glusterd_volinfo_t *volinfo, char *mntpt) runner_add_args (&runner, "/bin/umount", "-f", NULL); runner_argprintf (&runner, "%s", mntpt); + synclock_unlock (&priv->big_lock); ret = runner_run (&runner); + synclock_lock (&priv->big_lock); if (ret) { ret = 0; gf_log ("", GF_LOG_DEBUG, @@ -1916,7 +1959,9 @@ glusterd_clearlocks_mount (glusterd_volinfo_t *volinfo, char **xl_opts, } runner_argprintf (&runner, 
"%s", mntpt); + synclock_unlock (&priv->big_lock); ret = runner_run (&runner); + synclock_lock (&priv->big_lock); if (ret) { gf_log (THIS->name, GF_LOG_DEBUG, "Could not start glusterfs"); diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c index c2d8d70e3f7..e3f671f086d 100644 --- a/xlators/mgmt/glusterd/src/glusterd.c +++ b/xlators/mgmt/glusterd/src/glusterd.c @@ -868,27 +868,31 @@ _install_mount_spec (dict_t *opts, char *key, data_t *value, void *data) return -1; } + static int -glusterd_default_synctask_cbk (int ret, call_frame_t *frame, void *opaque) +gd_default_synctask_cbk (int ret, call_frame_t *frame, void *opaque) { - return ret; + glusterd_conf_t *priv = THIS->private; + synclock_unlock (&priv->big_lock); + return ret; } -static int -glusterd_launch_synctask (xlator_t *this, synctask_fn_t fn) +static void +glusterd_launch_synctask (synctask_fn_t fn, void *opaque) { - glusterd_conf_t *priv = NULL; - int ret = -1; - - priv = this->private; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + int ret = -1; - ret = synctask_new (this->ctx->env, fn, - glusterd_default_synctask_cbk, NULL, priv); + this = THIS; + priv = this->private; - if (ret) - gf_log (this->name, GF_LOG_CRITICAL, "Failed to create synctask" - "for starting process"); - return ret; + synclock_lock (&priv->big_lock); + ret = synctask_new (this->ctx->env, fn, gd_default_synctask_cbk, NULL, + opaque); + if (ret) + gf_log (this->name, GF_LOG_CRITICAL, "Failed to spawn bricks" + " and other volume related services"); } /* @@ -1083,6 +1087,7 @@ init (xlator_t *this) conf->gfs_mgmt = &gd_brick_prog; strncpy (conf->workdir, workdir, PATH_MAX); + synclock_init (&conf->big_lock); pthread_mutex_init (&conf->xprt_lock, NULL); INIT_LIST_HEAD (&conf->xprt_list); @@ -1147,6 +1152,14 @@ init (xlator_t *this) if (ret < 0) goto out; + /* If there are no 'friends', this would be the best time to + * spawn process/bricks that may need (re)starting since last + * time 
(this) glusterd was up.*/ + + if (list_empty (&conf->peers)) { + glusterd_launch_synctask (glusterd_spawn_daemons, + (void*) _gf_true); + } ret = glusterd_options_init (this); if (ret < 0) goto out; @@ -1155,13 +1168,6 @@ init (xlator_t *this) if (ret) goto out; - glusterd_launch_synctask (this, - (synctask_fn_t) glusterd_restart_bricks); - glusterd_launch_synctask (this, - (synctask_fn_t) glusterd_restart_gsyncds); - glusterd_launch_synctask (this, - (synctask_fn_t) glusterd_restart_rebalance); - ret = glusterd_hooks_spawn_worker (this); if (ret) goto out; diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h index 41b74a684dd..a4f2d23ac2c 100644 --- a/xlators/mgmt/glusterd/src/glusterd.h +++ b/xlators/mgmt/glusterd/src/glusterd.h @@ -152,6 +152,7 @@ typedef struct { xlator_t *xl; /* Should be set to 'THIS' before creating thread */ gf_boolean_t pending_quorum_action; dict_t *opts; + synclock_t big_lock; } glusterd_conf_t; @@ -423,6 +424,16 @@ __glusterd_uuid() return &priv->uuid[0]; } +int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, + void *data, rpc_clnt_notify_t notify_fn); + +int +glusterd_big_locked_cbk (struct rpc_req *req, struct iovec *iov, + int count, void *myframe, fop_cbk_fn_t fn); + +int glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn); + int32_t glusterd_brick_from_brickinfo (glusterd_brickinfo_t *brickinfo, char **new_brick); |