diff options
author | Amar Tumballi <amar@kadalu.io> | 2020-08-18 14:08:20 +0530 |
---|---|---|
committer | Xavi Hernandez <xhernandez@redhat.com> | 2020-08-20 08:01:07 +0000 |
commit | 097db13c11390174c5b9f11aa0fd87eca1735871 (patch) | |
tree | 06469032e199cef35dacfdc53972fd934e7e9437 /tests/basic | |
parent | f9b5074394e3d2f3b6728aab97230ba620879426 (diff) |
tests: provide an option to mark tests as 'flaky'
* also add some time gaps in other tests to check whether they then behave properly
* create a directory 'tests/000/', which can host any tests that are flaky.
* move all the tests mentioned in the issue to above directory.
* as the above dir gets tested first, all flaky tests will be reported quickly.
* change `run-tests.sh` to continue tests even if flaky tests fail.
Reference: gluster/project-infrastructure#72
Updates: #1000
Change-Id: Ifdafa38d083ebd80f7ae3cbbc9aa3b68b6d21d0e
Signed-off-by: Amar Tumballi <amar@kadalu.io>
Diffstat (limited to 'tests/basic')
-rw-r--r-- | tests/basic/afr/split-brain-favorite-child-policy.t | 203 | ||||
-rw-r--r-- | tests/basic/changelog/changelog-snapshot.t | 60 | ||||
-rw-r--r-- | tests/basic/distribute/rebal-all-nodes-migrate.t | 144 | ||||
-rwxr-xr-x | tests/basic/ec/ec-quorum-count-partial-failure.t | 50 | ||||
-rwxr-xr-x | tests/basic/mount-nfs-auth.t | 342 |
5 files changed, 0 insertions, 799 deletions
diff --git a/tests/basic/afr/split-brain-favorite-child-policy.t b/tests/basic/afr/split-brain-favorite-child-policy.t deleted file mode 100644 index c268c125610..00000000000 --- a/tests/basic/afr/split-brain-favorite-child-policy.t +++ /dev/null @@ -1,203 +0,0 @@ -#!/bin/bash - -#Test the split-brain resolution CLI commands. -. $(dirname $0)/../../include.rc -. $(dirname $0)/../../volume.rc - -cleanup; - -TEST glusterd -TEST pidof glusterd - -#Create replica 2 volume -TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} -TEST $CLI volume set $V0 performance.write-behind off -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST $CLI volume set $V0 cluster.entry-self-heal off -TEST $CLI volume set $V0 cluster.data-self-heal off -TEST $CLI volume set $V0 cluster.metadata-self-heal off -TEST $CLI volume set $V0 cluster.heal-timeout 5 -TEST $CLI volume start $V0 -TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 -TEST touch $M0/file - -############ Healing using favorite-child-policy = ctime ################# -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024 -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 -TEST kill_brick $V0 $H0 $B0/${V0}1 -TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024 - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 -TEST $CLI volume set $V0 cluster.self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST $CLI volume heal $V0 - -#file still in split-brain -cat $M0/file > /dev/null -EXPECT "1" echo $? 
- -# Umount to prevent further FOPS on the file, then find the brick with latest ctime. -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 -ctime1=`stat -c "%.Z" $B0/${V0}0/file` -ctime2=`stat -c "%.Z" $B0/${V0}1/file` -if (( $(echo "$ctime1 > $ctime2" | bc -l) )); then - LATEST_CTIME_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1) -else - LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1) -fi -TEST $CLI volume set $V0 cluster.favorite-child-policy ctime -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST $CLI volume heal $V0 -EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 -B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1) -B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1) -TEST [ "$LATEST_CTIME_MD5" == "$B0_MD5" ] -TEST [ "$LATEST_CTIME_MD5" == "$B1_MD5" ] -TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 -cat $M0/file > /dev/null -EXPECT "0" echo $? 
- -############ Healing using favorite-child-policy = mtime ################# -TEST $CLI volume set $V0 cluster.favorite-child-policy none -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST kill_brick $V0 $H0 $B0/${V0}1 -TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024 -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024 - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 -TEST $CLI volume set $V0 cluster.self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST $CLI volume heal $V0 - -#file still in split-brain -cat $M0/file > /dev/null -EXPECT "1" echo $? - -#We know that the second brick has latest mtime. -LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1) -TEST $CLI volume set $V0 cluster.favorite-child-policy mtime -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST $CLI volume heal $V0 -EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 -cat $M0/file > /dev/null -EXPECT "0" echo $? 
-HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1) -TEST [ "$LATEST_CTIME_MD5" == "$HEALED_MD5" ] - -############ Healing using favorite-child-policy = size ################# -TEST $CLI volume set $V0 cluster.favorite-child-policy none -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST kill_brick $V0 $H0 $B0/${V0}1 -TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024 -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10240 - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 -TEST $CLI volume set $V0 cluster.self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST $CLI volume heal $V0 - -#file still in split-brain -cat $M0/file > /dev/null -EXPECT "1" echo $? - -#We know that the second brick has the bigger size file. -BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1) -TEST $CLI volume set $V0 cluster.favorite-child-policy size -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -TEST $CLI volume heal $V0 -EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 -cat $M0/file > /dev/null -EXPECT "0" echo $? 
-HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1) -TEST [ "$BIGGER_FILE_MD5" == "$HEALED_MD5" ] - -############ Healing using favorite-child-policy = majority on replica-3 ################# - -#Convert volume to replica-3 -TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 -TEST $CLI volume heal $V0 -EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 - -TEST $CLI volume set $V0 cluster.quorum-type none -TEST $CLI volume set $V0 cluster.favorite-child-policy none -TEST $CLI volume set $V0 cluster.self-heal-daemon off -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024 -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 -TEST kill_brick $V0 $H0 $B0/${V0}1 -TEST kill_brick $V0 $H0 $B0/${V0}2 -TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10240 - -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 -TEST $CLI volume set $V0 cluster.self-heal-daemon on -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 -TEST $CLI volume 
heal $V0 - -#file still in split-brain -cat $M0/file > /dev/null -EXPECT "1" echo $? - -#We know that the second and third bricks agree with each other. Pick any one of them. -MAJORITY_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1) -TEST $CLI volume set $V0 cluster.favorite-child-policy majority -TEST $CLI volume start $V0 force -EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 -TEST $CLI volume heal $V0 -EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 -cat $M0/file > /dev/null -EXPECT "0" echo $? -HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1) -TEST [ "$MAJORITY_MD5" == "$HEALED_MD5" ] - -TEST force_umount $M0 -cleanup diff --git a/tests/basic/changelog/changelog-snapshot.t b/tests/basic/changelog/changelog-snapshot.t deleted file mode 100644 index 7742db48cdd..00000000000 --- a/tests/basic/changelog/changelog-snapshot.t +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../../include.rc -. 
$(dirname $0)/../../snapshot.rc - -cleanup; -ROLLOVER_TIME=3 - -TEST verify_lvm_version; -TEST glusterd; -TEST pidof glusterd; - -TEST setup_lvm 1 - -TEST $CLI volume create $V0 $H0:$L1 -BRICK_LOG=$(echo "$L1" | tr / - | sed 's/^-//g') -TEST $CLI volume start $V0 - -#Enable changelog -TEST $CLI volume set $V0 changelog.changelog on -TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME -TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; - -#Create snapshot -S1="${V0}-snap1" - -mkdir $M0/RENAME -mkdir $M0/LINK -mkdir $M0/UNLINK -mkdir $M0/RMDIR -mkdir $M0/SYMLINK - -for i in {1..400} ; do touch $M0/RENAME/file$i; done -for i in {1..400} ; do touch $M0/LINK/file$i; done -for i in {1..400} ; do touch $M0/UNLINK/file$i; done -for i in {1..400} ; do mkdir $M0/RMDIR/dir$i; done -for i in {1..400} ; do touch $M0/SYMLINK/file$i; done - -#Write I/O in background -for i in {1..400} ; do touch $M0/file$i 2>/dev/null; done & -for i in {1..400} ; do mknod $M0/mknod-file$i p 2>/dev/null; done & -for i in {1..400} ; do mkdir $M0/dir$i 2>/dev/null; done & 2>/dev/null -for i in {1..400} ; do mv $M0/RENAME/file$i $M0/RENAME/rn-file$i 2>/dev/null; done & -for i in {1..400} ; do ln $M0/LINK/file$i $M0/LINK/ln-file$i 2>/dev/null; done & -for i in {1..400} ; do rm -f $M0/UNLINK/file$i 2>/dev/null; done & -for i in {1..400} ; do rmdir $M0/RMDIR/dir$i 2>/dev/null; done & -for i in {1..400} ; do ln -s $M0/SYMLINK/file$i $M0/SYMLINK/sym-file$i 2>/dev/null; done & - -sleep 1 -TEST $CLI snapshot create $S1 $V0 no-timestamp -TEST snapshot_exists 0 $S1 - -TEST grep '"Enabled changelog barrier"' /var/log/glusterfs/bricks/$BRICK_LOG.log -TEST grep '"Disabled changelog barrier"' /var/log/glusterfs/bricks/$BRICK_LOG.log - -TEST glusterfs -s $H0 --volfile-id=/snaps/$S1/$V0 $M1 - -#Clean up -TEST $CLI volume stop $V0 force -cleanup; diff --git a/tests/basic/distribute/rebal-all-nodes-migrate.t b/tests/basic/distribute/rebal-all-nodes-migrate.t deleted file mode 100644 index 
acc4ffefecc..00000000000 --- a/tests/basic/distribute/rebal-all-nodes-migrate.t +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../../include.rc -. $(dirname $0)/../../cluster.rc -. $(dirname $0)/../../dht.rc - - -# Check if every single rebalance process migrated some files - -function cluster_rebal_all_nodes_migrated_files { - val=0 - a=$($CLI_1 volume rebalance $V0 status | grep "completed" | awk '{print $2}'); -# echo $a - b=($a) - for i in "${b[@]}" - do -# echo "$i"; - if [ "$i" -eq "0" ]; then - echo "false"; - val=1; - fi - done - echo $val -} - -cleanup - -TEST launch_cluster 3; -TEST $CLI_1 peer probe $H2; -TEST $CLI_1 peer probe $H3; -EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count - - -#Start with a pure distribute volume (multiple bricks on the same node) -TEST $CLI_1 volume create $V0 $H1:$B1/dist1 $H1:$B1/dist2 $H2:$B2/dist3 $H2:$B2/dist4 - -TEST $CLI_1 volume start $V0 -$CLI_1 volume info $V0 - -#TEST $CLI_1 volume set $V0 client-log-level DEBUG - -## Mount FUSE -TEST glusterfs -s $H1 --volfile-id $V0 $M0; - -TEST mkdir $M0/dir1 2>/dev/null; -TEST touch $M0/dir1/file-{1..500} - -## Add-brick and run rebalance to force file migration -TEST $CLI_1 volume add-brick $V0 $H1:$B1/dist5 $H2:$B2/dist6 - -#Start a rebalance -TEST $CLI_1 volume rebalance $V0 start force - -#volume rebalance status should work -#TEST $CLI_1 volume rebalance $V0 status -#$CLI_1 volume rebalance $V0 status - -EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed -EXPECT "0" cluster_rebal_all_nodes_migrated_files -$CLI_1 volume rebalance $V0 status - - -TEST umount -f $M0 -TEST $CLI_1 volume stop $V0 -TEST $CLI_1 volume delete $V0 - - -############################################################## - -# Next, a dist-rep volume -TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/drep1 $H2:$B2/drep1 $H1:$B1/drep2 $H2:$B2/drep2 - -TEST $CLI_1 volume start $V0 -$CLI_1 volume info $V0 - -#TEST $CLI_1 volume set $V0 client-log-level DEBUG - -## Mount FUSE -TEST 
glusterfs -s $H1 --volfile-id $V0 $M0; - -TEST mkdir $M0/dir1 2>/dev/null; -TEST touch $M0/dir1/file-{1..500} - -## Add-brick and run rebalance to force file migration -TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/drep3 $H2:$B2/drep3 - -#Start a rebalance -TEST $CLI_1 volume rebalance $V0 start force - -#volume rebalance status should work -#TEST $CLI_1 volume rebalance $V0 status -#$CLI_1 volume rebalance $V0 status - -EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed -#EXPECT "0" cluster_rebal_all_nodes_migrated_files -$CLI_1 volume rebalance $V0 status - - -TEST umount -f $M0 -TEST $CLI_1 volume stop $V0 -TEST $CLI_1 volume delete $V0 - -############################################################## - -# Next, a disperse volume -TEST $CLI_1 volume create $V0 disperse 3 $H1:$B1/ec1 $H2:$B1/ec2 $H3:$B1/ec3 force - -TEST $CLI_1 volume start $V0 -$CLI_1 volume info $V0 - -#TEST $CLI_1 volume set $V0 client-log-level DEBUG - -## Mount FUSE -TEST glusterfs -s $H1 --volfile-id $V0 $M0; - -TEST mkdir $M0/dir1 2>/dev/null; -TEST touch $M0/dir1/file-{1..500} - -## Add-brick and run rebalance to force file migration -TEST $CLI_1 volume add-brick $V0 $H1:$B2/ec4 $H2:$B2/ec5 $H3:$B2/ec6 - -#Start a rebalance -TEST $CLI_1 volume rebalance $V0 start force - -#volume rebalance status should work -#TEST $CLI_1 volume rebalance $V0 status -#$CLI_1 volume rebalance $V0 status - -EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed - -# this will not work unless EC is changed to return all node-uuids -# comment this out once that patch is ready -#EXPECT "0" cluster_rebal_all_nodes_migrated_files -$CLI_1 volume rebalance $V0 status - - -TEST umount -f $M0 -TEST $CLI_1 volume stop $V0 -TEST $CLI_1 volume delete $V0 - -############################################################## - -cleanup -#G_TESTDEF_TEST_STATUS_NETBSD7=1501388 diff --git a/tests/basic/ec/ec-quorum-count-partial-failure.t b/tests/basic/ec/ec-quorum-count-partial-failure.t deleted 
file mode 100755 index 79f5825ae10..00000000000 --- a/tests/basic/ec/ec-quorum-count-partial-failure.t +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../../include.rc -. $(dirname $0)/../../volume.rc - -#This test checks that partial failure of fop results in main fop failure only -cleanup; - -TEST glusterd -TEST pidof glusterd -TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5} -TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5} -TEST $CLI volume set $V0 performance.flush-behind off -TEST $CLI volume start $V0 -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=/$V0 $M0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 - -TEST dd if=/dev/urandom of=$M0/a bs=12347 count=1 -TEST dd if=/dev/urandom of=$M0/b bs=12347 count=1 -TEST cp $M0/b $M0/c -TEST fallocate -p -l 101 $M0/c -TEST $CLI volume stop $V0 -TEST $CLI volume set $V0 debug.delay-gen posix; -TEST $CLI volume set $V0 delay-gen.delay-duration 10000000; -TEST $CLI volume set $V0 delay-gen.enable WRITE; -TEST $CLI volume set $V0 delay-gen.delay-percentage 100 -TEST $CLI volume set $V0 disperse.quorum-count 6 -TEST $CLI volume start $V0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 -cksum=$(dd if=$M0/a bs=12345 count=1 | md5sum | awk '{print $1}') -truncate -s 12345 $M0/a & #While write is waiting for 5 seconds, introduce failure -fallocate -p -l 101 $M0/b & -sleep 1 -TEST kill_brick $V0 $H0 $B0/${V0}0 -TEST wait -TEST $CLI volume start $V0 force -EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0} -EXPECT "12345" stat --format=%s $M0/a -TEST kill_brick $V0 $H0 $B0/${V0}1 -TEST kill_brick $V0 $H0 $B0/${V0}2 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0; -TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0 -EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0 -cksum_after_heal=$(dd if=$M0/a | md5sum | awk '{print $1}') -TEST [[ $cksum == $cksum_after_heal ]] -cksum=$(dd 
if=$M0/c | md5sum | awk '{print $1}') -cksum_after_heal=$(dd if=$M0/b | md5sum | awk '{print $1}') -TEST [[ $cksum == $cksum_after_heal ]] - -cleanup; diff --git a/tests/basic/mount-nfs-auth.t b/tests/basic/mount-nfs-auth.t deleted file mode 100755 index 3d4a9cff00b..00000000000 --- a/tests/basic/mount-nfs-auth.t +++ /dev/null @@ -1,342 +0,0 @@ -#!/bin/bash - -. $(dirname $0)/../include.rc -. $(dirname $0)/../nfs.rc - -#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST - -# Our mount timeout must be as long as the time for a regular configuration -# change to be acted upon *plus* AUTH_REFRESH_TIMEOUT, not one replacing the -# other. Otherwise this process races vs. the one making the change we're -# trying to test, which leads to spurious failures. -MY_MOUNT_TIMEOUT=$((CONFIG_UPDATE_TIMEOUT+AUTH_REFRESH_INTERVAL)) - -cleanup; -## Check whether glusterd is running -TEST glusterd -TEST pidof glusterd -TEST $CLI volume info - -H0IP=$(ip addr show |grep -w inet |grep -v 127.0.0.1|awk '{ print $2 }'| cut -d "/" -f 1) -H0IP6=$(host $HOSTNAME | grep IPv6 | awk '{print $NF}') - -# Export variables for allow & deny -EXPORT_ALLOW="/$V0 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)" -EXPORT_ALLOW_SLASH="/$V0/ $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)" -EXPORT_DENY="/$V0 1.2.3.4(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)" - -# Netgroup variables for allow & deny -NETGROUP_ALLOW="ngtop ng1000\nng1000 ng999\nng999 ng1\nng1 ng2\nng2 ($H0,,)" -NETGROUP_DENY="ngtop ng1000\nng1000 ng999\nng999 ng1\nng1 ng2\nng2 (1.2.3.4,,)" - -V0L1="$V0/L1" -V0L2="$V0L1/L2" -V0L3="$V0L2/L3" - -# Other variations for allow & deny -EXPORT_ALLOW_RO="/$V0 $H0(sec=sys,ro,anonuid=0) @ngtop(sec=sys,ro,anonuid=0)" -EXPORT_ALLOW_L1="/$V0L1 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)" -EXPORT_WILDCARD="/$V0 *(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)" - -function build_dirs () { - mkdir -p $B0/b{0,1,2}/L1/L2/L3 -} - -function export_allow_this_host_ipv6 () { - printf 
"$EXPORT_ALLOW6\n" > "$GLUSTERD_WORKDIR"/nfs/exports -} - -function export_allow_this_host () { - printf "$EXPORT_ALLOW\n" > ${NFSDIR}/exports -} - -function export_allow_this_host_with_slash () { - printf "$EXPORT_ALLOW_SLASH\n" > ${NFSDIR}/exports -} - -function export_deny_this_host () { - printf "$EXPORT_DENY\n" > ${NFSDIR}/exports -} - -function export_allow_this_host_l1 () { - printf "$EXPORT_ALLOW_L1\n" >> ${NFSDIR}/exports -} - -function export_allow_wildcard () { - printf "$EXPORT_WILDCARD\n" > ${NFSDIR}/exports -} - -function export_allow_this_host_ro () { - printf "$EXPORT_ALLOW_RO\n" > ${NFSDIR}/exports -} - -function netgroup_allow_this_host () { - printf "$NETGROUP_ALLOW\n" > ${NFSDIR}/netgroups -} - -function netgroup_deny_this_host () { - printf "$NETGROUP_DENY\n" > ${NFSDIR}/netgroups -} - -function create_vol () { - $CLI vol create $V0 $H0:$B0/b0 -} - -function setup_cluster() { - build_dirs # Build directories - export_allow_this_host # Allow this host in the exports file - netgroup_allow_this_host # Allow this host in the netgroups file - - glusterd - create_vol # Create the volume -} - -function check_mount_success { - mount_nfs $H0:/$1 $N0 nolock - if [ $? -eq 0 ]; then - echo "Y" - else - echo "N" - fi -} - -function check_mount_failure { - mount_nfs $H0:/$1 $N0 nolock - if [ $? -ne 0 ]; then - echo "Y" - else - local timeout=$UMOUNT_TIMEOUT - while ! umount_nfs $N0 && [$timeout -ne 0] ; do - timeout=$(( $timeout - 1 )) - sleep 1 - done - fi -} - -function small_write () { - dd if=/dev/zero of=$N0/test-small-write count=1 bs=1k 2>&1 - if [ $? -ne 0 ]; then - echo "N" - else - echo "Y" - fi -} - -function bg_write () { - dd if=/dev/zero of=$N0/test-bg-write count=1 bs=1k & - BG_WRITE_PID=$! 
-} - -function big_write() { - dd if=/dev/zero of=$N0/test-big-write count=500 bs=1024k -} - -function create () { - touch $N0/create-test -} - -function stat_nfs () { - ls $N0/ -} - -# Restarts the NFS server -function restart_nfs () { - local NFS_PID=$(cat $GLUSTERD_PIDFILEDIR/nfs/nfs.pid) - - # kill the NFS-server if it is running - while ps -q ${NFS_PID} 2>&1 > /dev/null; do - kill ${NFS_PID} - sleep 0.5 - done - - # start-force starts the NFS-server again - $CLI vol start patchy force -} - -setup_cluster - -# run preliminary tests -TEST $CLI vol set $V0 nfs.disable off -TEST $CLI vol start $V0 - -# Get NFS state directory -NFSDIR=$( $CLI volume get patchy nfs.mount-rmtab | \ - awk '/^nfs.mount-rmtab/{print $2}' | \ - xargs dirname ) - -## Wait for volume to register with rpc.mountd -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available - -## NFS server starts with auth disabled -## Do some tests to verify that. - -EXPECT "Y" check_mount_success $V0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Disallow host -TEST export_deny_this_host -TEST netgroup_deny_this_host - -## Technically deauthorized this host, but since auth is disabled we should be -## able to do mounts, writes, etc. 
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 -EXPECT "Y" small_write -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Reauthorize this host -export_allow_this_host -netgroup_allow_this_host - -## Restart NFS with auth enabled -$CLI vol stop $V0 -TEST $CLI vol set $V0 nfs.exports-auth-enable on -$CLI vol start $V0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available - -## Mount NFS -EXPECT "Y" check_mount_success $V0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Mount NFS using the IPv6 export -export_allow_this_host_ipv6 -EXPECT "Y" check_mount_success $V0 - -## Disallow host -TEST export_deny_this_host -TEST netgroup_deny_this_host - -## Writes should not be allowed, host is not authorized -EXPECT_WITHIN $AUTH_REFRESH_INTERVAL "N" small_write - -## Unmount so we can test mount -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Subsequent ounts should not be allowed, host is not authorized -EXPECT "Y" check_mount_failure $V0 - -## Reauthorize host -TEST export_allow_this_host -TEST netgroup_allow_this_host - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Allow host in netgroups but not in exports, host should be allowed -TEST export_deny_this_host -TEST netgroup_allow_this_host - -# wait for the mount authentication to rebuild -sleep $[$AUTH_REFRESH_INTERVAL + 1] - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 -EXPECT "Y" small_write -TEST big_write -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Allow host in exports but not in netgroups, host should be allowed -TEST export_allow_this_host -TEST netgroup_deny_this_host - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Finally, reauth the host in export and netgroup, test mount & write -TEST export_allow_this_host_l1 -TEST netgroup_allow_this_host - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success 
$V0L1 -EXPECT "Y" small_write - -## Failover test: Restarting NFS and then doing a write should pass -bg_write -TEST restart_nfs -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available - -TEST wait $BG_WRITE_PID -EXPECT "Y" small_write -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Test deep mounts -EXPECT "Y" check_mount_success $V0L1 -EXPECT "Y" small_write -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -TEST export_allow_this_host_ro -TEST netgroup_deny_this_host - -## Restart the nfs server to avoid spurious failure(BZ1256352) -restart_nfs -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 -EXPECT "N" small_write # Writes should not be allowed -TEST ! create # Create should not be allowed -TEST stat_nfs # Stat should be allowed -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -TEST export_deny_this_host -TEST netgroup_deny_this_host -TEST export_allow_this_host_l1 # Allow this host at L1 - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_failure $V0 #V0 shouldnt be allowed -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0L1 #V0L1 should be -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Test wildcard hosts -TEST export_allow_wildcard - -# the $MY_MOUNT_TIMEOUT might not be long enough? 
restart should do -restart_nfs -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 -EXPECT_WITHIN $AUTH_REFRESH_INTERVAL "Y" small_write -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Test if path is parsed correctly -## by mounting host:vol/ instead of host:vol -EXPECT "Y" check_mount_success $V0/ -EXPECT "Y" small_write -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -TEST export_allow_this_host_with_slash - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 -EXPECT "Y" small_write -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -EXPECT "Y" check_mount_success $V0/ -EXPECT "Y" small_write -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - - -## Turn off exports authentication -$CLI vol stop $V0 -TEST $CLI vol set $V0 nfs.exports-auth-enable off -$CLI vol start $V0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available - -TEST export_deny_this_host # Deny the host -TEST netgroup_deny_this_host - -EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 # Do a mount & test -EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0 - -## Turn back on the exports authentication -$CLI vol stop $V0 -TEST $CLI vol set $V0 nfs.exports-auth-enable on -$CLI vol start $V0 -EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available - -## Do a simple test to set the refresh time to 20 seconds -TEST $CLI vol set $V0 nfs.auth-refresh-interval-sec 20 - -## Do a simple test to see if the volume option exists -TEST $CLI vol set $V0 nfs.auth-cache-ttl-sec 400 - -## Finish up -TEST $CLI volume stop $V0 -TEST $CLI volume delete $V0; -TEST ! $CLI volume info $V0; - -cleanup |