author      Gaurav Kumar Garg <ggarg@redhat.com>            2015-07-14 14:01:14 +0530
committer   Krishnan Parthasarathi <kparthas@redhat.com>    2015-07-14 11:39:16 -0700
commit      1d50088062f893424f8e63ae8f7786a97494f21b (patch)
tree        251f52333ac7daeb2a3c723f1fa60fbaf3e67550
parent      796062f8d8bec2d6fff5ddac4f529814ec982dfb (diff)
glusterd: Pass NULL in glusterd_svc_manager in glusterd_restart_bricks
On restarting glusterd, the quota daemon is not started when more than one
volume is configured and quota is enabled only on the second volume.
This happens because glusterd restarts all bricks on startup, and during
the brick restart it starts the per-node daemons by passing the volinfo of
the first volume. Passing a specific volinfo to glusterd_svcs_manager makes
the daemon managers act on that single volume's configuration, which is
incorrect for per-node daemons.
The fix is to pass NULL as volinfo while restarting bricks (a sketch of the
reasoning follows the review trailers below).
Change-Id: I2602002a8ba7762fc1eb08123e79fbcf568ecab4
BUG: 1242875
Signed-off-by: Gaurav Kumar Garg <ggarg@redhat.com>
Reviewed-on: http://review.gluster.org/11658
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
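
The following is a minimal, self-contained C sketch of the reasoning above, not the glusterd implementation: the struct layout and the helper name quotad_should_run are illustrative assumptions. It shows why a per-node daemon manager keyed to a single volume's volinfo misses quota enabled on another volume, whereas passing NULL and scanning all volumes does not.

/* Illustrative sketch only -- NOT glusterd code. The types and helper
 * below are hypothetical stand-ins for the real daemon-manager logic. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct volinfo {
        const char     *volname;
        bool            quota_enabled;
        struct volinfo *next;
};

/* Hypothetical per-node quota-daemon decision: if a specific volume is
 * passed, only that volume's flag is consulted (the buggy behaviour);
 * if NULL is passed, every configured volume is scanned, which is what
 * a node-wide daemon such as quotad actually needs. */
static bool
quotad_should_run (struct volinfo *vol, struct volinfo *all_volumes)
{
        if (vol != NULL)
                return vol->quota_enabled;

        for (struct volinfo *v = all_volumes; v != NULL; v = v->next)
                if (v->quota_enabled)
                        return true;
        return false;
}

int
main (void)
{
        /* Two volumes: quota disabled on the first, enabled on the
         * second, mirroring the scenario from the bug report. */
        struct volinfo v1 = { "patchy1", true,  NULL };
        struct volinfo v0 = { "patchy",  false, &v1 };

        /* Keyed to the first volume only: quotad is (wrongly) skipped. */
        printf ("volinfo of first volume -> quotad runs: %d\n",
                quotad_should_run (&v0, &v0));

        /* Keyed to NULL, i.e. all volumes: quotad is started. */
        printf ("NULL volinfo            -> quotad runs: %d\n",
                quotad_should_run (NULL, &v0));

        return 0;
}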
-rw-r--r--   tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t   38
-rw-r--r--   tests/volume.rc                                                3
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-utils.c                     2
3 files changed, 42 insertions(+), 1 deletion(-)
diff --git a/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
new file mode 100644
index 00000000000..c229d4371b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Lets create volume V0 and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Lets create volume V1 and start the volume
+TEST $CLI volume create $V1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V1
+
+## Enable quota on 2nd volume
+TEST $CLI volume quota $V1 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+## Killing all gluster process
+pkill gluster;
+
+## there should not be any quota daemon running after killing quota process
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_quotad_count
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Quotad daemon should start on restarting the glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+cleanup;
diff --git a/tests/volume.rc b/tests/volume.rc
index 4bbaf108cd1..4217c284345
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -543,6 +543,9 @@ function get_scrubd_count {
         ps auxww | grep glusterfs | grep scrub.pid | grep -v grep | wc -l
 }
 
+function get_quotad_count {
+        ps auxww | grep glusterfs | grep quotad.pid | grep -v grep | wc -l
+}
 
 function drop_cache() {
         case $OSTYPE in
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index f225dce0599..501e9de7169
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4484,7 +4484,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
                         continue;
                 if (start_svcs == _gf_false) {
                         start_svcs = _gf_true;
-                        glusterd_svcs_manager (volinfo);
+                        glusterd_svcs_manager (NULL);
                 }
                 gf_msg_debug (this->name, 0, "starting the volume %s",
                               volinfo->volname);