Diffstat (limited to 'tests')
-rw-r--r--  tests/basic/afr/arbiter.t                                             |  8
-rw-r--r--  tests/basic/stats-dump.t                                              | 43
-rw-r--r--  tests/bugs/cli/bug-1030580.t                                          |  5
-rw-r--r--  tests/bugs/cli/bug-1047416.t                                          |  5
-rw-r--r--  tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t  |  6
5 files changed, 64 insertions(+), 3 deletions(-)
diff --git a/tests/basic/afr/arbiter.t b/tests/basic/afr/arbiter.t
index 84d2ccece51..cecbc605541 100644
--- a/tests/basic/afr/arbiter.t
+++ b/tests/basic/afr/arbiter.t
@@ -9,6 +9,7 @@ TEST glusterd;
 TEST pidof glusterd
 
 # Non arbiter replica 3 volumes should not have arbiter-count option enabled.
+TEST mkdir -p $B0/${V0}{0,1,2}
 TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
 TEST $CLI volume start $V0
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
@@ -17,7 +18,14 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 TEST $CLI volume stop $V0
 TEST $CLI volume delete $V0
 
+# Make sure we clean up *all the way* so we don't get "brick X is already part
+# of a volume" errors.
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
 # Create and mount a replica 3 arbiter volume.
+TEST mkdir -p $B0/${V0}{0,1,2}
 TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
 TEST $CLI volume set $V0 performance.write-behind off
 TEST $CLI volume set $V0 cluster.self-heal-daemon off
diff --git a/tests/basic/stats-dump.t b/tests/basic/stats-dump.t
new file mode 100644
index 00000000000..0a680e44e55
--- /dev/null
+++ b/tests/basic/stats-dump.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 diagnostics.latency-measurement on
+TEST $CLI volume set $V0 diagnostics.count-fop-hits on
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 1
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume start $V0
+sleep 1
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST mount_nfs $H0:/$V0 $N0 nolock,soft,intr
+
+for i in {1..10}; do
+    dd if=/dev/zero of=$M0/fuse_testfile$i bs=4k count=100
+done
+
+for i in {1..10}; do
+    dd if=/dev/zero of=$N0/nfs_testfile$i bs=4k count=100
+done
+sleep 2
+
+# Verify we have non-zero write counts from the bricks, gNFSd
+# and the FUSE mount.
+BRICK_OUTPUT="$(grep 'aggr.fop.write.count": "0"' ${GLUSTERD_WORKDIR}/stats/glusterfsd__d_backends_patchy?.dump)"
+BRICK_RET="$?"
+NFSD_OUTPUT="$(grep 'aggr.fop.write.count": "0"' ${GLUSTERD_WORKDIR}/stats/glusterfs_nfsd.dump)"
+NFSD_RET="$?"
+FUSE_OUTPUT="$(grep 'aggr.fop.write.count": "0"' ${GLUSTERD_WORKDIR}/stats/glusterfs_patchy.dump)"
+FUSE_RET="$?"
+
+TEST [ 0 -ne "$BRICK_RET" ]
+TEST [ 0 -ne "$NFSD_RET" ]
+TEST [ 0 -ne "$FUSE_RET" ]
+
+cleanup;
diff --git a/tests/bugs/cli/bug-1030580.t b/tests/bugs/cli/bug-1030580.t
index a907950e73f..ac8b1d8f6db 100644
--- a/tests/bugs/cli/bug-1030580.t
+++ b/tests/bugs/cli/bug-1030580.t
@@ -12,10 +12,15 @@ function write_to_file {
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+# Increase the JSON stats dump interval so that it doesn't interfere with the test.
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 3600
 TEST $CLI volume start $V0
 TEST $CLI volume profile $V0 start
 TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+# Clear the profile info gathered up to this point.
+TEST $CLI volume profile $V0 info clear
+
 # Verify 'volume profile info' prints both cumulative and incremental stats
 write_to_file &
 wait
diff --git a/tests/bugs/cli/bug-1047416.t b/tests/bugs/cli/bug-1047416.t
index 6e1b0a48467..864301034c9 100644
--- a/tests/bugs/cli/bug-1047416.t
+++ b/tests/bugs/cli/bug-1047416.t
@@ -12,10 +12,15 @@ function write_to_file {
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+# Increase the JSON stats dump interval so that it doesn't interfere with the test.
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 3600
 TEST $CLI volume start $V0
 TEST $CLI volume profile $V0 start
 TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+# Clear the profile info gathered up to this point.
+TEST $CLI volume profile $V0 info clear
+
 # Verify 'volume profile info' prints both cumulative and incremental stats
 write_to_file &
 wait
diff --git a/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t b/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t
index 6697c263ac1..c5a285eb775 100644
--- a/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t
+++ b/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t
@@ -21,16 +21,16 @@ TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
 # in the USS world
 gluster snapshot config activate-on-create enable
 for i in {1..10}; do $CLI snapshot create snap$i $V0 no-timestamp; done
-EXPECT 10 uss_count_snap_displayed $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 10 uss_count_snap_displayed $M0
 
 # snapshots should not be displayed after deactivation
 for i in {1..10}; do $CLI snapshot deactivate snap$i --mode=script; done
-EXPECT 0 uss_count_snap_displayed $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 uss_count_snap_displayed $M0
 
 # activate all the snapshots and check if all the activated snapshots
 # are displayed again
 for i in {1..10}; do $CLI snapshot activate snap$i --mode=script; done
-EXPECT 10 uss_count_snap_displayed $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 10 uss_count_snap_displayed $M0
 
 cleanup;
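Note on the arbiter.t change above: glusterd records a volume-id extended attribute on each brick directory, which is what produces the "brick X is already part of a volume" error when a brick path is reused without a full cleanup. A rough illustration of the mechanism (the brick path below is hypothetical; the xattr name is the one GlusterFS uses for this marker):

    # Inspect the marker glusterd leaves on a brick directory (path illustrative).
    getfattr -n trusted.glusterfs.volume-id -e hex /d/backends/patchy0
    # 'cleanup' removes the brick directories outright, so the fresh
    # 'mkdir -p' + 'volume create' cycle starts from an unmarked path.
    rm -rf /d/backends/patchy0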
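Note on the checks in stats-dump.t above: each *_RET variable captures the exit status of the grep inside the preceding command substitution (an assignment's exit status is that of the substituted command), and grep exits non-zero when nothing matches. So TEST [ 0 -ne "$BRICK_RET" ] passes exactly when no zero write counter appears in the dump. A minimal sketch of the idiom, with a hypothetical dump path:

    # grep fails (exit 1) when no dump line reports a zero write count.
    OUTPUT="$(grep 'aggr.fop.write.count": "0"' /var/lib/glusterd/stats/example.dump)"
    RET="$?"                 # exit status of the grep above, not of the assignment
    [ 0 -ne "$RET" ]         # true => every write counter in the dump was non-zero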
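Note on the snapshot test above: replacing EXPECT with EXPECT_WITHIN makes the check retry until $PROCESS_UP_TIMEOUT elapses instead of sampling once, which avoids races while the snapshot daemon catches up. A simplified sketch of that polling behavior (the real helper lives in tests/include.rc; this is an assumed approximation, not its actual implementation):

    # Re-run a check until it returns the expected value or the timeout expires.
    expect_within () {
        local timeout=$1 expected=$2; shift 2
        local end=$((SECONDS + timeout))
        while [ $SECONDS -lt $end ]; do
            [ "$("$@")" = "$expected" ] && return 0
            sleep 1
        done
        return 1
    }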