| author | hari gowtham <hgowtham@redhat.com> | 2018-04-11 17:38:26 +0530 |
|---|---|---|
| committer | Atin Mukherjee <amukherj@redhat.com> | 2018-04-19 02:54:50 +0000 |
| commit | be26b0da2f1a7fe336400de6a1c016716983bd38 (patch) | |
| tree | 573d0289d2556cbf99085e7888197bea2b07ee23 /glusterfsd | |
| parent | 054cecc30676017f83a18847734d9fe0fcb8ea72 (diff) | |
glusterd: volume inode/fd status broken with brick mux
Problem:
The values for inode/fd status were populated from the ctx
received from the server xlator.
Without brickmux, every brick of a volume ran in its own
process, so searching for that volume's server xlator and
populating the status from it worked.
With brickmux, a number of bricks can be confined to a single
process. These bricks can be from different volumes too (if
we use the max-bricks-per-process option).
If they are from different volumes, using the server xlator
to populate the status causes problems.
Fix:
Use the brick xlator to validate and populate the inode/fd status.
Signed-off-by: hari gowtham <hgowtham@redhat.com>
Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd
fixes: bz#1566067
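
A condensed sketch of the change (assembled from the diff below, not a compilable unit; the declarations, error handling, and enclosing handler are assumed from glusterfs_handle_brick_status):

```c
/* Before: the server xlator was located by deriving its name from the
 * volume name.  With brick multiplexing, one process can host bricks
 * of several volumes, so "<volname>-server" no longer identifies the
 * brick whose inode/fd status was requested. */
ret = gf_asprintf (&xname, "%s-server", volname);
xlator = xlator_search_by_name (active->first, xname);

/* After: glusterd sends the brick name in the request dict, and the
 * brick xlator is resolved directly under the (possibly shared)
 * server xlator, so the status is populated from the right brick. */
ret = dict_get_str (dict, "brick-name", &brickname);
server_xl = active->first;
brick_xl = get_xlator_by_name (server_xl, brickname);
```

The clients dump still goes through the server xlator, which is why priv_to_dict gains the brick name as an extra argument, while inode_to_dict and fd_to_dict are now called on the brick xlator itself.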
Diffstat (limited to 'glusterfsd')
-rw-r--r-- | glusterfsd/src/glusterfsd-mgmt.c | 34 |
1 file changed, 15 insertions, 19 deletions
```diff
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 43f7f6cf043..bce8d5cc276 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -1119,14 +1119,14 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
         glusterfs_ctx_t         *ctx = NULL;
         glusterfs_graph_t       *active = NULL;
         xlator_t                *this = NULL;
-        xlator_t                *any = NULL;
-        xlator_t                *xlator = NULL;
+        xlator_t                *server_xl = NULL;
+        xlator_t                *brick_xl = NULL;
         dict_t                  *dict = NULL;
         dict_t                  *output = NULL;
-        char                    *volname = NULL;
         char                    *xname = NULL;
         uint32_t                 cmd = 0;
         char                    *msg = NULL;
+        char                    *brickname = NULL;
 
         GF_ASSERT (req);
         this = THIS;
@@ -1154,32 +1154,26 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
                 goto out;
         }
 
-        ret = dict_get_str (dict, "volname", &volname);
+        ret = dict_get_str (dict, "brick-name", &brickname);
         if (ret) {
-                gf_log (this->name, GF_LOG_ERROR, "Couldn't get volname");
+                gf_log (this->name, GF_LOG_ERROR, "Couldn't get brickname from"
+                        " dict");
                 goto out;
         }
 
         ctx = glusterfsd_ctx;
         GF_ASSERT (ctx);
         active = ctx->active;
-        any = active->first;
+        server_xl = active->first;
 
-        ret = gf_asprintf (&xname, "%s-server", volname);
-        if (-1 == ret) {
-                gf_log (this->name, GF_LOG_ERROR, "Out of memory");
-                goto out;
-        }
-
-        xlator = xlator_search_by_name (any, xname);
-        if (!xlator) {
+        brick_xl = get_xlator_by_name (server_xl, brickname);
+        if (!brick_xl) {
                 gf_log (this->name, GF_LOG_ERROR, "xlator %s is not loaded",
                         xname);
                 ret = -1;
                 goto out;
         }
-
         output = dict_new ();
         switch (cmd & GF_CLI_STATUS_MASK) {
         case GF_CLI_STATUS_MEM:
@@ -1190,15 +1184,17 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
 
         case GF_CLI_STATUS_CLIENTS:
         case GF_CLI_STATUS_CLIENT_LIST:
-                ret = xlator->dumpops->priv_to_dict (xlator, output);
+                ret = server_xl->dumpops->priv_to_dict (server_xl,
+                                                        output, brickname);
                 break;
 
         case GF_CLI_STATUS_INODE:
-                ret = xlator->dumpops->inode_to_dict (xlator, output);
+                ret = server_xl->dumpops->inode_to_dict (brick_xl,
+                                                         output);
                 break;
 
         case GF_CLI_STATUS_FD:
-                ret = xlator->dumpops->fd_to_dict (xlator, output);
+                ret = server_xl->dumpops->fd_to_dict (brick_xl, output);
                 break;
 
         case GF_CLI_STATUS_CALLPOOL:
@@ -1374,7 +1370,7 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
                                 "Error setting volname to dict");
                         goto out;
                 }
-                ret = node->dumpops->priv_to_dict (node, output);
+                ret = node->dumpops->priv_to_dict (node, output, NULL);
                 break;
 
         case GF_CLI_STATUS_INODE:
```
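
For context, a hypothetical caller-side sketch (not part of this patch): the handler now reads the brick name from the "brick-name" key instead of the volume name from "volname", so the management side would build the request dict roughly as below, using the existing libglusterfs dict helpers; the helper function and the brick path value are illustrative only.

```c
/* Hypothetical helper on the glusterd side; dict_new/dict_set_str/
 * dict_unref are the existing libglusterfs dict calls, everything
 * else here is made up for illustration. */
static dict_t *
build_brick_status_req (char *brickpath)
{
        dict_t *dict = dict_new ();

        if (!dict)
                return NULL;

        /* The brick's own name now identifies the target, even when
         * bricks of different volumes share one process. */
        if (dict_set_str (dict, "brick-name", brickpath)) {
                dict_unref (dict);
                return NULL;
        }

        /* The status command and the remaining keys are omitted. */
        return dict;
}
```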