Diffstat (limited to 'tests/basic')
-rw-r--r--  tests/basic/afr/granular-esh/add-brick.t                              |   2
-rw-r--r--  tests/basic/afr/granular-esh/cli.t                                    | 142
-rw-r--r--  tests/basic/afr/granular-esh/conservative-merge.t                     |   4
-rw-r--r--  tests/basic/afr/granular-esh/granular-esh.t                           |   2
-rw-r--r--  tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t |  76
-rw-r--r--  tests/basic/afr/granular-esh/replace-brick.t                          |   2
-rw-r--r--  tests/basic/afr/split-brain-favorite-child-policy.t                   |  18
-rw-r--r--  tests/basic/afr/split-brain-healing.t                                 |   3
8 files changed, 240 insertions(+), 9 deletions(-)
diff --git a/tests/basic/afr/granular-esh/add-brick.t b/tests/basic/afr/granular-esh/add-brick.t
index f3125d7fe7d..270cf1d32a6 100644
--- a/tests/basic/afr/granular-esh/add-brick.t
+++ b/tests/basic/afr/granular-esh/add-brick.t
@@ -14,7 +14,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 TEST $CLI volume set $V0 cluster.metadata-self-heal off
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
 
diff --git a/tests/basic/afr/granular-esh/cli.t b/tests/basic/afr/granular-esh/cli.t
new file mode 100644
index 00000000000..a655180a095
--- /dev/null
+++ b/tests/basic/afr/granular-esh/cli.t
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+# Test that enabling the option should work on a newly created volume
+TEST $CLI volume set $V0 cluster.granular-entry-heal on
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+
+#########################
+##### DISPERSE TEST #####
+#########################
+# Execute the same command on a disperse volume and make sure it fails.
+TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+TEST $CLI volume start $V1
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+TEST ! $CLI volume heal $V1 granular-entry-heal disable
+
+#######################
+###### TIER TEST ######
+#######################
+# Execute the same command on a disperse + replicate tiered volume and make
+# sure the option is set on the replicate leg of the volume
+TEST $CLI volume attach-tier $V1 replica 2 $H0:$B0/${V1}{3,4}
+TEST $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume heal $V1 granular-entry-heal disable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+# Kill a disperse brick and make heal be pending on the volume.
+TEST kill_brick $V1 $H0 $B0/${V1}0
+
+# Now make sure that one offline brick in disperse does not affect enabling the
+# option on the volume.
+TEST $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume heal $V1 granular-entry-heal disable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+# Now kill a replicate brick.
+TEST kill_brick $V1 $H0 $B0/${V1}3
+# Now make sure that one offline brick in replicate causes the command to be
+# failed.
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+######################
+### REPLICATE TEST ###
+######################
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+# Test that the volume-set way of enabling the option is disallowed
+TEST ! $CLI volume set $V0 granular-entry-heal on
+# Test that the volume-heal way of enabling the option is allowed
+TEST $CLI volume heal $V0 granular-entry-heal enable
+# Volume-reset of the option should be allowed
+TEST $CLI volume reset $V0 granular-entry-heal
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+EXPECT "enable" volume_option $V0 cluster.granular-entry-heal
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Disabling the option should work even when one or more bricks are down
+TEST $CLI volume heal $V0 granular-entry-heal disable
+# When a brick is down, 'enable' attempt should be failed
+TEST ! $CLI volume heal $V0 granular-entry-heal enable
+
+# Restart the killed brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# When all bricks are up, it should be possible to enable the option
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+# Kill brick-0 again
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+        echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+        TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+        TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Perform a volume-reset-all-options operation
+TEST $CLI volume reset $V0
+# Ensure that granular entry heal is also disabled
+EXPECT "no" volume_get_field $V0 cluster.granular-entry-heal
+EXPECT "on" volume_get_field $V0 cluster.entry-self-heal
+
+cleanup
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1399038
diff --git a/tests/basic/afr/granular-esh/conservative-merge.t b/tests/basic/afr/granular-esh/conservative-merge.t
index b566a0ea4d3..b170e47e0cb 100644
--- a/tests/basic/afr/granular-esh/conservative-merge.t
+++ b/tests/basic/afr/granular-esh/conservative-merge.t
@@ -11,13 +11,13 @@ TESTS_EXPECTED_IN_LOOP=4
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
 TEST $CLI volume set $V0 self-heal-daemon off
 TEST $CLI volume set $V0 data-self-heal off
 TEST $CLI volume set $V0 metadata-self-heal off
 TEST $CLI volume set $V0 entry-self-heal off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
-TEST $CLI volume start $V0
 TEST $GFS --volfile-id=$V0 -s $H0 $M0
 
 TEST mkdir $M0/dir
diff --git a/tests/basic/afr/granular-esh/granular-esh.t b/tests/basic/afr/granular-esh/granular-esh.t
index ee53878e004..de0e8f4290b 100644
--- a/tests/basic/afr/granular-esh/granular-esh.t
+++ b/tests/basic/afr/granular-esh/granular-esh.t
@@ -16,7 +16,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 TEST $CLI volume set $V0 cluster.metadata-self-heal off
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
 
diff --git a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
new file mode 100644
index 00000000000..1b5421bf4b6
--- /dev/null
+++ b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+        echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+        TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+# Now disable granular-entry-heal
+TEST $CLI volume heal $V0 granular-entry-heal disable
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# Now the indices created are granular but the heal is going to be of the
+# normal kind. We test to make sure that heal still completes fine and that
+# the stale granular indices are going to be deleted
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+        TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+cleanup
diff --git a/tests/basic/afr/granular-esh/replace-brick.t b/tests/basic/afr/granular-esh/replace-brick.t
index aaa54da2a2c..639ed81b95c 100644
--- a/tests/basic/afr/granular-esh/replace-brick.t
+++ b/tests/basic/afr/granular-esh/replace-brick.t
@@ -12,7 +12,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 TEST $CLI volume set $V0 cluster.metadata-self-heal off
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
 
diff --git a/tests/basic/afr/split-brain-favorite-child-policy.t b/tests/basic/afr/split-brain-favorite-child-policy.t
index 7a14852685c..3df8e718bf0 100644
--- a/tests/basic/afr/split-brain-favorite-child-policy.t
+++ b/tests/basic/afr/split-brain-favorite-child-policy.t
@@ -42,8 +42,15 @@ TEST $CLI volume heal $V0
 cat $M0/file > /dev/null
 EXPECT "1" echo $?
 
-#We know that the first brick has latest ctime.
-LATEST_CTIME_MD5=$(md5sum $B0/${V0}0/file | cut -d\  -f1)
+# Umount to prevent further FOPS on the file, then find the brick with latest ctime.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+ctime1=`stat -c "%.Z" $B0/${V0}0/file`
+ctime2=`stat -c "%.Z" $B0/${V0}1/file`
+if (( $(echo "$ctime1 > $ctime2" | bc -l) )); then
+        LATEST_CTIME_MD5=$(md5sum $B0/${V0}0/file | cut -d\  -f1)
+else
+        LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\  -f1)
+fi
 TEST $CLI volume set $V0 cluster.favorite-child-policy ctime
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
@@ -51,10 +58,13 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
 EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\  -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\  -f1)
+TEST [ "$LATEST_CTIME_MD5" == "$B0_MD5" ]
+TEST [ "$LATEST_CTIME_MD5" == "$B1_MD5" ]
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
 cat $M0/file > /dev/null
 EXPECT "0" echo $?
-HEALED_MD5=$(md5sum $B0/${V0}1/file | cut -d\  -f1)
-TEST [ "$LATEST_CTIME_MD5" == "$HEALED_MD5" ]
 
 ############ Healing using favorite-child-policy = mtime #################
 TEST $CLI volume set $V0 cluster.favorite-child-policy none
diff --git a/tests/basic/afr/split-brain-healing.t b/tests/basic/afr/split-brain-healing.t
index 302a3e6144b..c66bb5d44df 100644
--- a/tests/basic/afr/split-brain-healing.t
+++ b/tests/basic/afr/split-brain-healing.t
@@ -31,6 +31,9 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
 TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume start $V0
 TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
 