author     Mohammed Rafi KC <rkavunga@redhat.com>    2019-02-25 10:05:32 +0530
committer  Amar Tumballi <amarts@redhat.com>         2019-04-01 03:44:23 +0000
commit     bc3694d7cfc868a2ed6344ea123faf19fce28d13 (patch)
tree       51764aa4445462081273444d5ff2499b1e5375f7 /xlators/protocol/client
parent     92ae26ae8039847e38c738ef98835a14be9d4296 (diff)
mgmt/shd: Implement multiplexing in self heal daemon
Problem:
The shd daemon is per node, which means it creates a single graph
with all volumes on it. While this is great for utilizing
resources, it is not so good in terms of performance and manageability,
because self-heal daemons do not have the capability to automatically
reconfigure their graphs. So each time a configuration change
happens to a volume (replicate/disperse), we need to restart
shd to bring the change into the graph.
Because of this, all ongoing heals for all other volumes have to be
stopped in the middle and restarted all over again.
Solution:
This change makes shd a per-volume daemon, so that a graph
is generated for each volume.
When we want to start/reconfigure shd for a volume, we first search
for an existing shd process running on the node. If there is none, we
start a new process. If a shd daemon is already running, we simply
detach the volume's old graph and reattach the updated graph for that
volume (see the sketch after the example graphs below). This does not
touch any ongoing operations for other volumes on the shd daemon.
Example of an shd graph when it is per volume
                graph
       -----------------------
       |    debug-iostat     |
       -----------------------
         /         |        \
        /          |         \
  ---------    ---------    ---------
  | AFR-1 |    | AFR-2 |    | AFR-3 |
  ---------    ---------    ---------
A running shd daemon with 3 volumes will look like this:
                graph
       -----------------------
       |    debug-iostat     |
       -----------------------
         /         |        \
        /          |         \
 ------------  ------------  ------------
 | volume-1 |  | volume-2 |  | volume-3 |
 ------------  ------------  ------------
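
The attach-or-spawn decision described in the solution can be sketched in a
few lines of C. This is a minimal, hypothetical model only: shd_proc_t,
shd_find_graph, shd_attach_graph, shd_detach_graph and
shd_start_or_reconfigure are illustrative stand-ins, not the functions this
patch actually introduces.

/* Hypothetical sketch: one multiplexed shd process per node, with
 * per-volume graphs attached and detached independently. */
#include <stdio.h>
#include <string.h>

#define MAX_VOLS 8

typedef struct shd_proc {
    int pid;                 /* 0 means no shd is running on this node */
    int nvols;               /* number of attached volume graphs       */
    char vols[MAX_VOLS][64]; /* names of the attached volumes          */
} shd_proc_t;

static shd_proc_t shd;       /* the single per-node shd process        */

static int
shd_find_graph(const char *volname)
{
    for (int i = 0; i < shd.nvols; i++)
        if (strcmp(shd.vols[i], volname) == 0)
            return i;
    return -1;
}

static void
shd_detach_graph(const char *volname)
{
    int i = shd_find_graph(volname);
    if (i < 0)
        return;
    /* drop the stale graph; heals of other volumes keep running */
    memmove(shd.vols[i], shd.vols[shd.nvols - 1], sizeof(shd.vols[i]));
    shd.nvols--;
    printf("detached graph of %s from shd pid %d\n", volname, shd.pid);
}

static void
shd_attach_graph(const char *volname)
{
    if (shd.nvols == MAX_VOLS)
        return;
    snprintf(shd.vols[shd.nvols++], sizeof(shd.vols[0]), "%s", volname);
    printf("attached graph of %s to shd pid %d\n", volname, shd.pid);
}

/* start/reconfigure shd for one volume, as the commit message describes */
static void
shd_start_or_reconfigure(const char *volname)
{
    if (shd.pid == 0) {
        shd.pid = 1234;            /* pretend we forked a new shd here */
        printf("spawned new shd, pid %d\n", shd.pid);
    } else {
        shd_detach_graph(volname); /* detach old graph if volume known */
    }
    shd_attach_graph(volname);     /* (re)attach the regenerated graph */
}

int
main(void)
{
    shd_start_or_reconfigure("volume-1"); /* first volume: spawn + attach */
    shd_start_or_reconfigure("volume-2"); /* reuse daemon: attach only    */
    shd_start_or_reconfigure("volume-1"); /* reconfigure: detach+reattach */
    return 0;
}

The key property the sketch models is that reconfiguring volume-1 never
interrupts the graph, and therefore the ongoing heals, of volume-2 or
volume-3.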
Change-Id: Idcb2698be3eeb95beaac47125565c93370afbd99
fixes: bz#1659708
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Diffstat (limited to 'xlators/protocol/client')
-rw-r--r--   xlators/protocol/client/src/client.c   31
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
index 5aa3d9418f6..1e9f47297ee 100644
--- a/xlators/protocol/client/src/client.c
+++ b/xlators/protocol/client/src/client.c
@@ -46,7 +46,6 @@ client_fini_complete(xlator_t *this)
     GF_VALIDATE_OR_GOTO(this->name, this->private, out);
 
     clnt_conf_t *conf = this->private;
-
     if (!conf->destroy)
         return 0;
 
@@ -69,6 +68,11 @@ client_notify_dispatch_uniq(xlator_t *this, int32_t event, void *data, ...)
         return 0;
 
     return client_notify_dispatch(this, event, data);
+
+    /* Please avoid any code that access xlator object here
+     * Because for a child down event, once we do the signal
+     * we will start cleanup.
+     */
 }
 
 int
@@ -105,6 +109,11 @@ client_notify_dispatch(xlator_t *this, int32_t event, void *data, ...)
     }
     pthread_mutex_unlock(&ctx->notify_lock);
 
+    /* Please avoid any code that access xlator object here
+     * Because for a child down event, once we do the signal
+     * we will start cleanup.
+     */
+
     return ret;
 }
 
@@ -2272,6 +2281,7 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
 {
     xlator_t *this = NULL;
     clnt_conf_t *conf = NULL;
+    gf_boolean_t is_parent_down = _gf_false;
     int ret = 0;
 
     this = mydata;
@@ -2333,6 +2343,19 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
                 if (conf->portmap_err_logged)
                     conf->disconnect_err_logged = 1;
             }
+            /*
+             * Once we complete the child down notification,
+             * There is a chance that the graph might get freed,
+             * So it is not safe to access any xlator contens
+             * So here we are checking whether the parent is down
+             * or not.
+             */
+            pthread_mutex_lock(&conf->lock);
+            {
+                is_parent_down = conf->parent_down;
+            }
+            pthread_mutex_unlock(&conf->lock);
+
             /* If the CHILD_DOWN event goes to parent xlator
                multiple times, the logic of parent xlator notify
                may get screwed up.. (eg. CHILD_MODIFIED event in
@@ -2340,6 +2363,12 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
                to parent are genuine */
             ret = client_notify_dispatch_uniq(this, GF_EVENT_CHILD_DOWN,
                                               NULL);
+            if (is_parent_down) {
+                /* If parent is down, then there should not be any
+                 * operation after a child down.
+                 */
+                goto out;
+            }
             if (ret)
                 gf_msg(this->name, GF_LOG_INFO, 0,
                        PC_MSG_CHILD_DOWN_NOTIFY_FAILED,
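
The is_parent_down guard above follows a general rule: snapshot whatever you
need from the xlator's state before dispatching an event that can trigger
graph cleanup, because `this` and `conf` may be freed once the CHILD_DOWN
dispatch completes. Below is a condensed, self-contained C sketch of that
pattern; the types and notify_child_down() helper are stand-ins for
illustration, not the real GlusterFS ones.

/* Stand-alone sketch of the use-after-free guard in client_rpc_notify();
 * notify_child_down() stands in for client_notify_dispatch_uniq(). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    bool parent_down;
} conf_t;

static int
notify_child_down(void)
{
    /* after this returns, graph cleanup may already have freed
     * the xlator and its conf */
    puts("dispatched CHILD_DOWN");
    return 0;
}

static void
handle_disconnect(conf_t *conf)
{
    bool is_parent_down;
    int ret;

    /* snapshot the flag while conf is still guaranteed to be alive */
    pthread_mutex_lock(&conf->lock);
    is_parent_down = conf->parent_down;
    pthread_mutex_unlock(&conf->lock);

    ret = notify_child_down();

    if (is_parent_down)
        return; /* graph may be gone: touch nothing, log nothing */

    if (ret)
        fprintf(stderr, "CHILD_DOWN notify failed\n");
}

int
main(void)
{
    conf_t conf = {PTHREAD_MUTEX_INITIALIZER, true};
    handle_disconnect(&conf);
    return 0;
}

Checking conf->parent_down only after the dispatch would itself risk a
use-after-free; skipping all post-dispatch work instead is exactly what the
goto out in the last hunk achieves.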