From 586e1cabf850211b9e6856fcb8c42c696de6d484 Mon Sep 17 00:00:00 2001
From: Avra Sengupta
Date: Tue, 4 Mar 2014 00:45:48 +0000
Subject: glusterd/mgmt_v3 locks.

Testcase for mgmt_v3 locks. Ported the upstream volume-locks.t
as mgmt_v3-locks.t.

Change-Id: Id4824716cde4ac54efbf1c1dd9a5f530b0324e39
Signed-off-by: Avra Sengupta
Reviewed-on: http://review.gluster.org/7190
Reviewed-by: Rajesh Joseph
Tested-by: Rajesh Joseph
---
 tests/basic/mgmt_v3-locks.t | 121 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 121 insertions(+)
 create mode 100755 tests/basic/mgmt_v3-locks.t

diff --git a/tests/basic/mgmt_v3-locks.t b/tests/basic/mgmt_v3-locks.t
new file mode 100755
index 000000000..22ca27b9f
--- /dev/null
+++ b/tests/basic/mgmt_v3-locks.t
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+
+function check_peers {
+        $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function volume_count {
+        local cli=$1;
+        if [ "$cli" -eq 1 ] ; then
+                $CLI_1 volume info | grep 'Volume Name' | wc -l;
+        else
+                $CLI_2 volume info | grep 'Volume Name' | wc -l;
+        fi
+}
+
+function volinfo_field()
+{
+        local vol=$1;
+        local field=$2;
+
+        $CLI_1 volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+function two_diff_vols_create {
+        # Both volume creates should be successful
+        $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 &
+        PID_1=$!
+
+        $CLI_2 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1 $H3:$B3/$V1 &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function two_diff_vols_start {
+        # Both volume starts should be successful
+        $CLI_1 volume start $V0 &
+        PID_1=$!
+
+        $CLI_2 volume start $V1 &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function two_diff_vols_stop_force {
+        # Force the stops, so that even if a rebalance started by the
+        # earlier remove-brick operations is still in progress, the
+        # stops can still go ahead.
+        # Both volume stops should be successful.
+        $CLI_1 volume stop $V0 force &
+        PID_1=$!
+
+        $CLI_2 volume stop $V1 force &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function same_vol_remove_brick {
+
+        # Running two commands against the same volume at the same time
+        # can result in two successes, two failures, or one success and
+        # one failure, all of which are valid. The only thing that
+        # shouldn't happen is a glusterd crash.
+
+        local vol=$1
+        local brick=$2
+        $CLI_1 volume remove-brick $vol $brick start &
+        $CLI_2 volume remove-brick $vol $brick start
+}
+
+cleanup;
+
+TEST launch_cluster 3;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+
+EXPECT_WITHIN 20 2 check_peers
+
+two_diff_vols_create
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Created' volinfo_field $V1 'Status';
+
+two_diff_vols_start
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+same_vol_remove_brick $V0 $H2:$B2/$V0
+# Check that glusterd did not crash after remove-brick was run on the
+# same volume from both nodes.
+EXPECT_WITHIN 20 2 check_peers
+
+same_vol_remove_brick $V1 $H2:$B2/$V1
+# Check that glusterd did not crash after remove-brick was run on the
+# same volume from both nodes.
+EXPECT_WITHIN 20 2 check_peers
+
+$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_1 volume set $V1 diagnostics.client-log-level DEBUG
+kill_glusterd 3
+$CLI_1 volume status $V0
+$CLI_2 volume status $V1
+$CLI_1 peer status
+EXPECT_WITHIN 20 1 check_peers
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+TEST $glusterd_3
+$CLI_1 volume status $V0
+$CLI_2 volume status $V1
+$CLI_1 peer status
+#EXPECT_WITHIN 20 2 check_peers
+#EXPECT 'Started' volinfo_field $V0 'Status';
+#EXPECT 'Started' volinfo_field $V1 'Status';
+#two_diff_vols_stop_force
+#EXPECT_WITHIN 20 2 check_peers
+cleanup;
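
For reference, a minimal sketch of how this test can be exercised locally, assuming a built GlusterFS source tree and root privileges (launch_cluster in cluster.rc spawns several glusterd instances); the checkout path below is hypothetical, and since the harness in include.rc emits TAP output, prove is one way to drive a single .t file:

    # Run just this test from the top of the source tree (path and sudo
    # usage are assumptions; adjust to the local environment).
    cd ~/glusterfs
    sudo prove -vf tests/basic/mgmt_v3-locks.t

    # Or run the whole regression suite, which picks up tests/basic/*.t,
    # including this new file.
    sudo ./run-tests.sh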