summaryrefslogtreecommitdiffstats
path: root/tests/bugs
diff options
context:
space:
mode:
author  Anand Avati <avati@redhat.com>  2013-02-21 18:49:43 -0800
committer  Vijay Bellur <vbellur@redhat.com>  2013-02-26 09:07:13 -0800
commit  89ea4583161382de7e56007b3dee3359e2a41b98 (patch)
tree  1fa75c53e0fe24eeb1b55847031def3a6f03b468 /tests/bugs
parent  5e6dfce0b0d55d96b5bdad6a693fdb2826c20b92 (diff)
tests/cluster.rc: support for virtual multi-server glusterd tests
Since http://review.gluster.org/4556 glusterd is capable of running many instances of itself on a single system. This patch exploits that feature and enhances the regression test framework to expose handy primitives so that test cases may be written to test glusterd in a cluster. Usage: 1. Include "$(dirname)/../cluster.rc" to get access to the extensions 2. Call launch_cluster $N where $N is the count of virtual servers Calling launch_cluster, starts $N glusterds which bind to $N different IPs and dynamically defines these primitives: - Variables $H1 .. $Hn assigned to hostnames of each "server". - Variables $CLI_1 .. $CLI_n assigned as commands to run CLI commands on the corresponding N'th server. - Variables $B1 .. $Bn assigned to the backend directories on each "server". - Function kill_glusterd, which accepts a parameter - index number of glusterd to be killed. - Variables $glusterd_1 .. $glusterd_n assigned to the command lines to restart the corresponding glusterd, if it was previously killed. The current set of primitives and functions were implemented with the goal of satisfying ./tests/bugs/bug-913555.t. The API will be made richer as we add more cluster test cases Change-Id: Ieb13ed9f4a72ac0321db0ca0844c7b294145bb32 BUG: 913555 Signed-off-by: Anand Avati <avati@redhat.com> Reviewed-on: http://review.gluster.org/4566 Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com> Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
Diffstat (limited to 'tests/bugs')
-rwxr-xr-x  tests/bugs/bug-913555.t  64
1 file changed, 26 insertions(+), 38 deletions(-)
diff --git a/tests/bugs/bug-913555.t b/tests/bugs/bug-913555.t
index 0e08bd377ae..f58d7bd6dd6 100755
--- a/tests/bugs/bug-913555.t
+++ b/tests/bugs/bug-913555.t
@@ -4,16 +4,8 @@
. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc
+. $(dirname $0)/../cluster.rc
-function vglusterd {
- wd=$1/wd-$2
- cp -r /var/lib/glusterd $wd
- rm -rf $wd/peers/* $wd/vols/*
- echo -n "UUID=$(uuidgen)\noperating-version=1\n" > $wd/glusterd.info
- opt1="management.transport.socket.bind-address=127.0.0.$2"
- opt2="management.working-directory=$wd"
- glusterd --xlator-option $opt1 --xlator-option $opt2
-}
function check_fs {
df $1 &> /dev/null
@@ -21,46 +13,42 @@ function check_fs {
}
function check_peers {
- $VCLI peer status | grep 'Peer in Cluster (Connected)' | wc -l
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
-cleanup;
+function glusterfsd_count {
+ pidof glusterfsd | wc -w;
+}
-topwd=$(mktemp -d)
-trap "rm -rf $topwd" EXIT
+cleanup;
-vglusterd $topwd 100
-VCLI="$CLI --remote-host=127.0.0.100"
-vglusterd $topwd 101
-TEST $VCLI peer probe 127.0.0.101
-vglusterd $topwd 102
-TEST $VCLI peer probe 127.0.0.102
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
EXPECT_WITHIN 20 2 check_peers
-create_cmd="$VCLI volume create $V0"
-for i in $(seq 100 102); do
- mkdir -p $B0/$V0$i
- create_cmd="$create_cmd 127.0.0.$i:$B0/$V0$i"
-done
-
-TEST $create_cmd
-TEST $VCLI volume set $V0 cluster.server-quorum-type server
-TEST $VCLI volume start $V0
-TEST glusterfs --volfile-server=127.0.0.100 --volfile-id=$V0 $M0
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
# Kill one pseudo-node, make sure the others survive and volume stays up.
-kill -9 $(ps -ef | grep gluster | grep 127.0.0.102 | awk '{print $2}')
-EXPECT_WITHIN 20 1 check_peers
-fs_status=$(check_fs $M0)
-nnodes=$(pidof glusterfsd | wc -w)
-TEST [ "$fs_status" = 0 -a "$nnodes" = 2 ]
+TEST kill_node 3;
+EXPECT_WITHIN 20 1 check_peers;
+EXPECT 0 check_fs $M0;
+EXPECT 2 glusterfsd_count;
# Kill another pseudo-node, make sure the last one dies and volume goes down.
-kill -9 $(ps -ef | grep gluster | grep 127.0.0.101 | awk '{print $2}')
+TEST kill_node 2;
EXPECT_WITHIN 20 0 check_peers
-fs_status=$(check_fs $M0)
-nnodes=$(pidof glusterfsd | wc -w)
-TEST [ "$fs_status" = 1 -a "$nnodes" = 0 ]
+EXPECT 1 check_fs $M0;
+EXPECT 0 glusterfsd_count; # the two glusterfsds of the other two glusterds
+ # must be dead
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN 20 3 glusterfsd_count; # restore quorum, all ok
+EXPECT_WITHIN 5 0 check_fs $M0;
cleanup