author    | hari gowtham <hgowtham@redhat.com> | 2018-04-11 17:38:26 +0530
committer | Hari Gowtham <hgowtham@redhat.com> | 2018-09-18 12:24:52 +0530
commit    | ca5adfb65b08841714431e97751a0c0c63a4bbdf (patch)
tree      | 2df2190b06e02f38e17a549f10495bd4e938540c /libglusterfs/src/client_t.c
parent    | fe5b6bc8522b3539a97765b243ad37ef227c05b6 (diff)
glusterd: volume inode/fd status broken with brick mux
backport of: https://review.gluster.org/#/c/19846/6
Problem:
The values for the inode/fd status were populated from the ctx
received from the server xlator.
Without brick mux, every brick process hosted only a single brick
from a single volume, so searching for the server xlator and
populating the status from it worked.
With brick mux, a number of bricks can be confined to a single
process. These bricks can also be from different volumes (if
the max-bricks-per-process option is used).
If they are from different volumes, using the server xlator
to populate the status causes problems.
Fix:
Use the brick to validate and populate the inode/fd status.
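To make the problem and the fix concrete, here is a minimal standalone sketch
in plain C. It is not GlusterFS code: the names (toy_client, client_table,
dump_fd_status_for_brick, the brick strings) are invented for illustration.
It only models the idea that one brick-multiplexed process holds clients bound
to bricks of different volumes, so a status dump must filter by the queried
brick's name, mirroring the strcmp (client->bound_xl->name, this->name) check
added in this patch.

/* Hypothetical sketch, not GlusterFS code. */
#include <stdio.h>
#include <string.h>

struct toy_client {
        const char *conn_id;     /* connection identifier                   */
        const char *bound_brick; /* brick (bound_xl->name) it is bound to   */
        int         open_fds;    /* pretend fd count for that client        */
};

/* One multiplexed brick process serving bricks of two different volumes. */
static struct toy_client client_table[] = {
        { "conn0", "vol1-brick0", 3 },
        { "conn1", "vol2-brick0", 5 },
        { "conn2", "vol1-brick0", 1 },
};

/* Broken behaviour: dump every client in the process, mixing volumes. */
static void dump_fd_status_all (void)
{
        size_t i;
        for (i = 0; i < sizeof (client_table) / sizeof (client_table[0]); i++)
                printf ("  %s -> %d fds (brick %s)\n",
                        client_table[i].conn_id, client_table[i].open_fds,
                        client_table[i].bound_brick);
}

/* Fixed behaviour: only dump clients bound to the brick being queried. */
static void dump_fd_status_for_brick (const char *brick)
{
        size_t i;
        for (i = 0; i < sizeof (client_table) / sizeof (client_table[0]); i++)
                if (strcmp (client_table[i].bound_brick, brick) == 0)
                        printf ("  %s -> %d fds\n",
                                client_table[i].conn_id,
                                client_table[i].open_fds);
}

int main (void)
{
        printf ("unfiltered dump (wrong under brick mux):\n");
        dump_fd_status_all ();
        printf ("dump for vol1-brick0 only (what the fix does):\n");
        dump_fd_status_for_brick ("vol1-brick0");
        return 0;
}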
>Signed-off-by: hari gowtham <hgowtham@redhat.com>
>Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd
>fixes: bz#1566067
Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd
BUG: 1569336
fixes: bz#1569336
Signed-off-by: hari gowtham <hgowtham@redhat.com>
Diffstat (limited to 'libglusterfs/src/client_t.c')
-rw-r--r-- | libglusterfs/src/client_t.c | 54
1 file changed, 31 insertions, 23 deletions
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
index eda1c465827..38f1b2edf62 100644
--- a/libglusterfs/src/client_t.c
+++ b/libglusterfs/src/client_t.c
@@ -743,10 +743,13 @@ gf_client_dump_fdtables_to_dict (xlator_t *this, dict_t *dict)
                             clienttable->cliententries[count].next_free)
                                 continue;
                         client = clienttable->cliententries[count].client;
-                        memset(key, 0, sizeof key);
-                        snprintf (key, sizeof key, "conn%d", count++);
-                        fdtable_dump_to_dict (client->server_ctx.fdtable,
-                                              key, dict);
+                        if (!strcmp (client->bound_xl->name, this->name)) {
+                                memset(key, 0, sizeof (key));
+                                snprintf (key, sizeof (key), "conn%d", count++);
+                                fdtable_dump_to_dict (client->server_ctx.
+                                                      fdtable,
+                                                      key, dict);
+                        }
                 }
         }
         UNLOCK(&clienttable->lock);
@@ -859,25 +862,30 @@ gf_client_dump_inodes_to_dict (xlator_t *this, dict_t *dict)
                             clienttable->cliententries[count].next_free)
                                 continue;
                         client = clienttable->cliententries[count].client;
-                        memset(key, 0, sizeof key);
-                        if (client->bound_xl && client->bound_xl->itable) {
-                                /* Presently every brick contains only
-                                 * one bound_xl for all connections.
-                                 * This will lead to duplicating of
-                                 * the inode lists, if listing is
-                                 * done for every connection. This
-                                 * simple check prevents duplication
-                                 * in the present case. If need arises
-                                 * the check can be improved.
-                                 */
-                                if (client->bound_xl == prev_bound_xl)
-                                        continue;
-                                prev_bound_xl = client->bound_xl;
-
-                                memset (key, 0, sizeof (key));
-                                snprintf (key, sizeof (key), "conn%d", count);
-                                inode_table_dump_to_dict (client->bound_xl->itable,
-                                                          key, dict);
+                        if (!strcmp (client->bound_xl->name, this->name)) {
+                                memset(key, 0, sizeof (key));
+                                if (client->bound_xl && client->bound_xl->
+                                    itable) {
+                                        /* Presently every brick contains only
+                                         * one bound_xl for all connections.
+                                         * This will lead to duplicating of
+                                         * the inode lists, if listing is
+                                         * done for every connection. This
+                                         * simple check prevents duplication
+                                         * in the present case. If need arises
+                                         * the check can be improved.
+                                         */
+                                        if (client->bound_xl == prev_bound_xl)
+                                                continue;
+                                        prev_bound_xl = client->bound_xl;
+
+                                        memset (key, 0, sizeof (key));
+                                        snprintf (key, sizeof (key), "conn%d",
+                                                  count);
+                                        inode_table_dump_to_dict (client->
+                                                                  bound_xl->itable,
+                                                                  key, dict);
+                                }
                         }
                 }
         }