| author | Sanju Rakonde <srakonde@redhat.com> | 2018-03-29 10:48:32 +0000 |
|---|---|---|
| committer | Atin Mukherjee <amukherj@redhat.com> | 2018-03-29 14:58:27 +0000 |
| commit | 3f9851db49ca6ac7a969817964a6ad216b10fd6f (patch) | |
| tree | 3cb12c5ad1a0501d3b2133c2d37dde5065b1c43e | /glusterfsd/src |
| parent | c87bd439ef12adc70dc580e75304121c3cd38e9a (diff) | |
Revert "glusterd: handling brick termination in brick-mux"
This reverts commit a60fc2ddc03134fb23c5ed5c0bcb195e1649416b.
This commit was causing multiple tests to time out when brick
multiplexing is enabled. Further debugging found that even though the
volume stop transaction is converted into mgmt_v3, allowing the remote
nodes to process the command through the synctask framework, there are
other callers of glusterd_brick_stop () which are not synctask based
(see the sketch below).
Change-Id: I7aee687abc6bfeaa70c7447031f55ed4ccd64693
updates: bz#1545048
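The synctask distinction referred to above can be pictured with a small, self-contained C sketch. This is not GlusterFS code: stop_brick_blocking and synctask_worker are hypothetical stand-ins, assuming only that the synctask framework runs blocking work on dedicated worker threads while non-synctask callers run on the event/notification thread. A blocking wait is harmless on the former and stalls request processing on the latter, which matches the test timeouts described in the message.

/*
 * Standalone illustration (NOT GlusterFS code) of the problem described
 * above. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stands in for waiting on a brick process to terminate. */
static void
stop_brick_blocking (const char *brick)
{
        printf ("stopping %s (blocks for 3 seconds)...\n", brick);
        sleep (3);                       /* e.g. a waitpid() on the brick */
        printf ("%s stopped\n", brick);
}

/* Analogue of a synctask: the blocking wait runs off the event thread. */
static void *
synctask_worker (void *arg)
{
        stop_brick_blocking ((const char *) arg);
        return NULL;
}

int
main (void)
{
        pthread_t worker;

        /* Safe path: hand the blocking stop to a worker thread, the way a
         * volume stop converted to mgmt_v3/synctask would. The "event
         * thread" (main, here) keeps servicing requests meanwhile. */
        pthread_create (&worker, NULL, synctask_worker, "brick-A");
        for (int i = 0; i < 3; i++) {
                printf ("event thread: still servicing requests\n");
                sleep (1);
        }
        pthread_join (&worker, NULL);

        /* Unsafe path: a non-synctask caller invoking the same blocking
         * stop directly on the event thread freezes request processing
         * for the whole wait -- the behaviour behind the timeouts. */
        stop_brick_blocking ("brick-B");

        return 0;
}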
Diffstat (limited to 'glusterfsd/src')
| -rw-r--r-- | glusterfsd/src/gf_attach.c | 41 |
| -rw-r--r-- | glusterfsd/src/glusterfsd-mgmt.c | 33 |

2 files changed, 14 insertions, 60 deletions
diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
index 0eb4868263b..3f248292ddf 100644
--- a/glusterfsd/src/gf_attach.c
+++ b/glusterfsd/src/gf_attach.c
@@ -11,9 +11,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <signal.h>
 
 //#include "config.h"
 #include "glusterfs.h"
@@ -26,7 +23,6 @@
 
 int done = 0;
 int rpc_status;
-glfs_t *fs;
 
 struct rpc_clnt_procedure gf_attach_actors[GLUSTERD_BRICK_MAXVALUE] = {
         [GLUSTERD_BRICK_NULL] = {"NULL", NULL },
@@ -75,43 +71,11 @@ my_notify (struct rpc_clnt *rpc, void *mydata,
 }
 
 int32_t
-my_callback (struct rpc_req *req, struct iovec *iov, int count, void *v_frame)
+my_callback (struct rpc_req *req, struct iovec *iov, int count, void *frame)
 {
-        gd1_mgmt_brick_op_rsp rsp;
-        dict_t *dict = NULL;
-        pid_t pid = -1;
-        int ret = -1;
-        xlator_t *this = NULL;
-
-        this = fs->ctx->master;
-        memset (&rsp, 0, sizeof (rsp));
-
-        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
-
-        if (ret < 0) {
-                fprintf (stderr, "xdr decoding failed\n");
-                goto out;
-        }
-        GF_PROTOCOL_DICT_UNSERIALIZE (this, dict,
-                                      (rsp.output.output_val),
-                                      (rsp.output.output_len),
-                                      ret, rsp.op_errno, out);
-        if (dict) {
-                if (dict_get_int32 (dict, "last_brick_terminated", &pid) == 0) {
-                        int status = 0;
-
-                        gf_log ("gf_attach", GF_LOG_INFO, "Killing %d", pid);
-                        kill (pid, SIGTERM);
-                        waitpid (pid, &status, 0);
-                }
-                dict_unref (dict);
-        }
-
         rpc_status = req->rpc_status;
         done = 1;
-        ret = 0;
-out:
-        return ret;
+        return 0;
 }
 
 /* copied from gd_syncop_submit_request */
@@ -206,6 +170,7 @@ usage (char *prog)
 int
 main (int argc, char *argv[])
 {
+        glfs_t *fs;
         struct rpc_clnt *rpc;
         dict_t *options;
         int ret;
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index c4df275077f..d2b39494e51 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -159,31 +159,21 @@ out:
 }
 
 int
-glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret,
-                                   gf_boolean_t last_brick)
+glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret)
 {
         gd1_mgmt_brick_op_rsp rsp = {0,};
         dict_t *dict = NULL;
-        int ret = -1;
+        int ret = 0;
 
         rsp.op_ret = op_ret;
         rsp.op_errno = 0;
         rsp.op_errstr = "";
         dict = dict_new ();
 
-        if (dict) {
-                /* Setting the last_brick_terminated key in dictionary is
-                 * required to for standalone gf_attach utility to work.
-                 * gf_attach utility will receive this dictionary and kill
-                 * the process.
-                 */
-                if (last_brick) {
-                        ret = dict_set_int32 (dict, "last_brick_terminated",
-                                              getpid());
-                }
+        if (dict)
                 ret = dict_allocate_and_serialize (dict, &rsp.output.output_val,
                                                    &rsp.output.output_len);
-        }
+
         if (ret == 0)
                 ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
@@ -272,7 +262,6 @@ glusterfs_handle_terminate (rpcsvc_request_t *req)
         xlator_t *victim = NULL;
         xlator_list_t **trav_p = NULL;
         gf_boolean_t lockflag = _gf_false;
-        gf_boolean_t last_brick = _gf_false;
 
         ret = xdr_to_generic (req->msg[0], &xlator_req,
                               (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
@@ -305,16 +294,17 @@ glusterfs_handle_terminate (rpcsvc_request_t *req)
                  * make sure it's down and if it's already down that's
                  * good enough.
                  */
-                glusterfs_terminate_response_send (req, 0, last_brick);
+                glusterfs_terminate_response_send (req, 0);
                 goto err;
         }
+        glusterfs_terminate_response_send (req, 0);
         if ((trav_p == &top->children) && !(*trav_p)->next) {
-                last_brick = _gf_true;
-                glusterfs_terminate_response_send (req, 0, last_brick);
-                gf_log (THIS->name, GF_LOG_INFO, "This is last brick of process."
-                        "glusterD will kill the process and takes care of "
-                        "removal of entries from port map register");
+                gf_log (THIS->name, GF_LOG_INFO,
+                        "terminating after loss of last child %s",
+                        xlator_req.name);
+                rpc_clnt_mgmt_pmap_signout (glusterfsd_ctx, xlator_req.name);
+                kill (getpid(), SIGTERM);
         } else {
                 /*
                  * This is terribly unsafe without quiescing or shutting
@@ -323,7 +313,6 @@ glusterfs_handle_terminate (rpcsvc_request_t *req)
                  *
                  * TBD: finish implementing this "detach" code properly
                  */
-                glusterfs_terminate_response_send (req, 0, last_brick);
                 UNLOCK (&ctx->volfile_lock);
                 lockflag = _gf_true;
                 gf_log (THIS->name, GF_LOG_INFO, "detaching not-only"