author     Sanju Rakonde <srakonde@redhat.com>   2018-04-06 01:53:45 +0530
committer  Amar Tumballi <amarts@redhat.com>     2018-05-07 15:31:59 +0000
commit     4da244caccd38a77de5428b6954f565219ef0719 (patch)
tree       43311ffe448d78206aa4f68b1be07d2c38ac4bc3 /glusterfsd/src
parent     23c1385b5f6f6103e820d15ecfe1df31940fdb45 (diff)
glusterd: handling brick termination in brick-mux
Problem: There is a race between the glusterfs_handle_terminate() response sent to glusterd from the last brick of the process and the socket disconnect event that occurs after the brick process is killed.

Solution: When it is the last brick of the brick process, glusterd kills the process directly instead of sending GLUSTERD_BRICK_TERMINATE to it, the same as is done in the non-brick-multiplexing case.

A test case is added for https://bugzilla.redhat.com/show_bug.cgi?id=1549996

Change-Id: If94958cd7649ea48d09d6af7803a0f9437a85503
fixes: bz#1545048
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
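A minimal, self-contained sketch of the glusterd-side decision described above. The type brick_info_t, its is_last field, and the helpers stop_brick() and send_brick_terminate_rpc() are placeholders for illustration only; the actual glusterd change is not part of this diff, which is limited to glusterfsd/src.

#include <signal.h>
#include <sys/types.h>

/* Placeholder brick descriptor; glusterd keeps this state in its own
 * structures. */
typedef struct {
        pid_t pid;      /* pid of the (possibly multiplexed) brick process */
        int   is_last;  /* nonzero if this is the last brick in that process */
} brick_info_t;

/* Placeholder for sending the GLUSTERD_BRICK_TERMINATE brick-op, which asks a
 * multiplexed brick process to detach a single brick. */
static int
send_brick_terminate_rpc (brick_info_t *brick)
{
        (void) brick;
        return 0;
}

static int
stop_brick (brick_info_t *brick)
{
        if (brick->is_last) {
                /* Last brick of the process: kill the process directly, as in
                 * the non-multiplexed case, instead of sending
                 * GLUSTERD_BRICK_TERMINATE and racing with the socket
                 * disconnect that follows the process exit. */
                return kill (brick->pid, SIGTERM);
        }

        /* Other bricks are still attached: ask the brick process to detach
         * only this one. */
        return send_brick_terminate_rpc (brick);
}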
Diffstat (limited to 'glusterfsd/src')
-rw-r--r--   glusterfsd/src/glusterfsd-mgmt.c   24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index db961c86304..e30d9287575 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -174,7 +174,6 @@ glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret)
ret = dict_allocate_and_serialize (dict, &rsp.output.output_val,
&rsp.output.output_len);
-
if (ret == 0)
ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
@@ -188,15 +187,15 @@ glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret)
int
glusterfs_handle_terminate (rpcsvc_request_t *req)
{
- gd1_mgmt_brick_op_req xlator_req = {0,};
+ gd1_mgmt_brick_op_req xlator_req = {0,};
ssize_t ret;
- glusterfs_ctx_t *ctx = NULL;
- xlator_t *top = NULL;
- xlator_t *victim = NULL;
- xlator_t *tvictim = NULL;
- xlator_list_t **trav_p = NULL;
- gf_boolean_t lockflag = _gf_false;
- gf_boolean_t last_brick = _gf_false;
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *top = NULL;
+ xlator_t *victim = NULL;
+ xlator_t *tvictim = NULL;
+ xlator_list_t **trav_p = NULL;
+ gf_boolean_t lockflag = _gf_false;
+ gf_boolean_t still_bricks_attached = _gf_false;
ret = xdr_to_generic (req->msg[0], &xlator_req,
(xdrproc_t)xdr_gd1_mgmt_brick_op_req);
@@ -240,15 +239,16 @@ glusterfs_handle_terminate (rpcsvc_request_t *req)
glusterfs_terminate_response_send (req, 0);
for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
tvictim = (*trav_p)->xlator;
- if (!tvictim->cleanup_starting && !strcmp (tvictim->name, xlator_req.name)) {
+ if (!tvictim->cleanup_starting &&
+ !strcmp (tvictim->name, xlator_req.name)) {
continue;
}
if (!tvictim->cleanup_starting) {
- last_brick = _gf_true;
+ still_bricks_attached = _gf_true;
break;
}
}
- if (!last_brick) {
+ if (!still_bricks_attached) {
gf_log (THIS->name, GF_LOG_INFO,
"terminating after loss of last child %s",
xlator_req.name);
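Reading the loop above: the first check skips the brick named in the terminate request (and any child whose cleanup has already started), while the second check fires when some other child has not yet started cleanup, i.e. other bricks are still attached to the process. The rename from last_brick to still_bricks_attached matches that meaning, since the process is only torn down when no other attached brick remains. A commented restatement of the scan, using the same identifiers as the function above:

        still_bricks_attached = _gf_false;
        for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
                tvictim = (*trav_p)->xlator;
                /* Skip the brick being detached and anything already in
                 * cleanup. */
                if (!tvictim->cleanup_starting &&
                    !strcmp (tvictim->name, xlator_req.name))
                        continue;
                /* Another live child remains, so the process stays up. */
                if (!tvictim->cleanup_starting) {
                        still_bricks_attached = _gf_true;
                        break;
                }
        }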