Diffstat (limited to 'tests')
-rwxr-xr-x  tests/basic/accept-v6v4.t  148
-rw-r--r--  tests/basic/afr/gfid-unsplit-shd.t  98
-rw-r--r--  tests/basic/afr/gfid-unsplit-type-mismatch.t  86
-rw-r--r--  tests/basic/afr/gfid-unsplit.t  120
-rw-r--r--  tests/basic/afr/metadata-self-heal.t  1
-rw-r--r--  tests/basic/afr/self-heal.t  15
-rw-r--r--  tests/basic/afr/shd-autofix-nogfid.t  68
-rw-r--r--  tests/basic/afr/shd-force-inspect.t  61
-rw-r--r--  tests/basic/afr/shd-pgfid-heal.t  81
-rwxr-xr-x  tests/basic/bd.t  1
-rw-r--r--  tests/basic/cache.t  69
-rwxr-xr-x  tests/basic/dht-min-free-space.t  69
-rw-r--r--  tests/basic/ec/ec-common  2
-rw-r--r--  tests/basic/ec/self-heal.t  2
-rw-r--r--  tests/basic/exports_parsing.t  15
-rw-r--r--  tests/basic/fop-sampling.t  78
-rwxr-xr-x  tests/basic/fops-sanity-gfproxy.t  32
-rw-r--r--  tests/basic/gfid-access.t  1
-rw-r--r--  tests/basic/gfproxy.t  74
-rw-r--r--  tests/basic/glusterd/volfile_server_switch.t  3
-rw-r--r--  tests/basic/halo-failover-disabled.t  77
-rw-r--r--  tests/basic/halo-failover-enabled.t  85
-rw-r--r--  tests/basic/halo-hybrid.t  70
-rw-r--r--  tests/basic/halo.t  51
-rwxr-xr-x  tests/basic/mount-nfs-auth.t  107
-rw-r--r--  tests/basic/pgfid-feat.t  1
-rwxr-xr-x  tests/basic/quota-anon-fd-nfs.t  1
-rwxr-xr-x  tests/basic/quota.t  1
-rw-r--r-- [-rwxr-xr-x]  tests/basic/rpc-coverage.t  1
-rw-r--r--  tests/basic/stats-dump.t  5
-rw-r--r--  tests/basic/uss.t  2
-rw-r--r--  tests/basic/write-behind.t  53
-rw-r--r--  tests/bugs/distribute/bug-1099890.t  2
-rwxr-xr-x  tests/bugs/distribute/bug-1161311.t  10
-rwxr-xr-x  tests/bugs/fb4482137.t  65
-rw-r--r--  tests/bugs/fb8149516.t  40
-rw-r--r--  tests/bugs/fuse/bug-858488-min-free-disk.t  1
-rw-r--r--  tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t  22
-rwxr-xr-x  tests/bugs/glusterd/bug-859927.t  8
-rwxr-xr-x  tests/bugs/nfs/bug-1166862.t  4
-rwxr-xr-x  tests/bugs/nfs/bug-904065.t  4
-rw-r--r--  tests/bugs/quota/bug-1292020.t  7
-rwxr-xr-x  tests/bugs/replicate/bug-859581.t  2
-rw-r--r--  tests/cluster.rc  9
-rw-r--r--  tests/configfiles/exports-v6  1
-rw-r--r--  tests/env.rc.in  3
-rwxr-xr-x  tests/features/brick-min-free-space.t  121
-rw-r--r--  tests/features/lock_revocation.t  52
-rw-r--r--  tests/halo.rc  52
-rw-r--r--  tests/include.rc  19
-rw-r--r--  tests/nfs.rc  2
-rw-r--r--  tests/volume.rc  7
52 files changed, 1864 insertions, 45 deletions
diff --git a/tests/basic/accept-v6v4.t b/tests/basic/accept-v6v4.t
new file mode 100755
index 00000000000..ce3a1bae7f9
--- /dev/null
+++ b/tests/basic/accept-v6v4.t
@@ -0,0 +1,148 @@
+#!/bin/bash
+
+. $(dirname $0)/../nfs.rc
+
+#
+# This test ensures that GlusterFS provides its NFS, MOUNT and management
+# daemons over both IPv4 and IPv6. It probes the service ports on both the
+# IPv4 & IPv6 addresses and performs a mount to verify that mount & nfs work.
+#
+
+IPV4_SUPPORT=false
+IPV6_SUPPORT=false
+
+host $HOSTNAME | grep -q "has address" && IPV4_SUPPORT=true
+host $HOSTNAME | grep -q "has IPv6 address" && IPV6_SUPPORT=true
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+mkdir -p $B0/b{0,1,2}
+
+# restart rpcbind to clear any stale service registrations
+service rpcbind restart
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI vol create $V0 replica 3 $H0:$B0/b0 $H0:$B0/b1 $H0:$B0/b2
+
+TEST $CLI vol set $V0 cluster.self-heal-daemon off
+TEST $CLI vol set $V0 nfs.disable off
+TEST $CLI vol set $V0 cluster.choose-local off
+TEST $CLI vol start $V0
+
+MOUNTD_PORT=38465
+MGMTD_PORT=24007
+NFSD_PORT=2049
+
+function check_ip_port {
+ ip=$1
+ port=$2
+ type=$3
+
+ nc_flags=""
+ if [ "$type" == "v6" ] && [ "$ip" == "NONE" ]; then
+ echo "Y"
+ return
+ else
+ nc_flags="-6"
+ fi
+
+ if [ "$type" == "v4" ] && [ "$ip" == "NONE" ]; then
+ echo "Y"
+ return
+ fi
+
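+    # Probe the address/port with bash's built-in /dev/tcp pseudo-device;
+    # if the connect succeeds, the service is reachable on that address.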
+ if exec 3<>/dev/tcp/$ip/$port; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function check_nfs {
+ ip=$1
+ type=$2
+
+ if [ "$ip" == "NONE" ]; then
+ echo "Y"
+ return
+ fi
+
+ if [ "$type" == "v6" ]; then
+ addr="[$ip]"
+ else
+ addr="$ip"
+ fi
+
+ if mount_nfs $addr:/$V0 $N0; then
+ umount_nfs $N0
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+if [ ! $IPV4_SUPPORT ] && [ ! $IPV6_SUPPORT ]; then
+ exit 1
+fi
+
+# Get the V4 & V6 addresses of this host
+if $IPV4_SUPPORT; then
+ V4=$(host $HOSTNAME | head -n1 | awk -F ' ' '{print $4}')
+else
+ V4="NONE"
+fi
+
+if $IPV6_SUPPORT; then
+ V6=$(host $HOSTNAME | tail -n1 | awk -F ' ' '{print $5}')
+else
+ V6="NONE"
+fi
+
+# First check the management daemon
+EXPECT "Y" check_ip_port $V6 $MGMTD_PORT "v6"
+EXPECT "Y" check_ip_port $V4 $MGMTD_PORT "v4"
+
+# Give the MOUNT/NFS Daemon some time to start up
+sleep 4
+
+EXPECT "Y" check_ip_port $V4 $MOUNTD_PORT "v6"
+EXPECT "Y" check_ip_port $V6 $MOUNTD_PORT "v4"
+
+EXPECT "Y" check_ip_port $V4 $NFSD_PORT "v6"
+EXPECT "Y" check_ip_port $V6 $NFSD_PORT "v4"
+
+# Mount the file system
+EXPECT "Y" check_nfs $V6 "v6"
+EXPECT "Y" check_nfs $V4 "v4"
+
+# Test an rpcbind crash
+pkill -9 rpcbind && service rpcbind start
+sleep 15
+
+# Test that the port re-registered
+rpcinfo=$(rpcinfo -s | grep nfs | grep -v nfs_acl)
+
+function check_rpcinfo {
+ support=$1
+ type=$2
+
+ if [ ! $support ]; then
+ echo "Y"
+ return
+ fi
+
+ if [ "$type" == "v6" ]; then
+ echo $(echo $rpcinfo | grep tcp6 && echo "Y" || echo "N")
+ else
+ echo $(echo $rpcinfo | grep tcp && echo "Y" || echo "N")
+ fi
+}
+
+EXPECT "Y" check_rpcinfo $IPV4_SUPPORT "v4"
+EXPECT "Y" check_rpcinfo $IPV6_SUPPORT "v6"
+
+cleanup;
diff --git a/tests/basic/afr/gfid-unsplit-shd.t b/tests/basic/afr/gfid-unsplit-shd.t
new file mode 100644
index 00000000000..77da5243724
--- /dev/null
+++ b/tests/basic/afr/gfid-unsplit-shd.t
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by majority on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority off
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+#EST $CLI volume set $V0 cluster.favorite-child-by-size off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+# Part I: FUSE Test
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+mkdir foo
+dd if=/dev/urandom of=foo/splitfile bs=128k count=5 2>/dev/null
+
+MD5=$(md5sum foo/splitfile | cut -d\ -f1)
+
+sleep 1
+cd ~
+
+GFID_PARENT_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_PARENT_FORMATTED=$(echo "$GFID_PARENT_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo/splitfile 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_FORMATTED=$(echo "$GFID_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
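+# For reference: trusted.gfid is exposed as 0x<32 hex digits>; the awk above
+# re-inserts the dashes to form the canonical UUID, and the brick keeps a
+# hard link to the file at .glusterfs/<first 2 hex>/<next 2 hex>/<uuid>.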
+
+# Create a split-brain by downing a brick, and flipping the
+# gfid on the down brick, then bring the brick back up.
+
+# For good measure, kill the first brick so the inode cache is wiped; we don't
+# want any funny business
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume start $V0 force
+pkill -f gluster/glustershd
+
+rm -f $GFID_LINK_B1
+TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/foo/splitfile
+sleep 1
+TEST touch $B0/${V0}1/foo/splitfile
+
+mkdir -p $B0/${V0}1/.glusterfs/fd/55
+ln $B0/${V0}1/foo/splitfile $B0/${V0}1/.glusterfs/fd/55/fd551a5c-fddd-4c1a-a4d0-96ef09ef5c08
+cd ~
+
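+# Seed brick 3's xattrop index with the file and its parent directory so the
+# self-heal daemon will pick them up on its next index crawl.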
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_FORMATTED
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+sleep 5
+
+EXPECT_WITHIN 60 "0" get_pending_heal_count $V0
+
+TEST stat $B0/${V0}1/foo/splitfile
+
+cd $M0
+
+# Tickle the file to trigger the gfid unsplit
+TEST stat foo/splitfile
+sleep 1
+
+# Verify the file is readable
+TEST dd if=foo/splitfile of=/dev/null 2>/dev/null
+
+# Verify entry healing happened on the back-end regardless of the
+# gfid-splitbrain state of the directory.
+TEST stat $B0/${V0}1/foo/splitfile
+
+# Verify the MD5 signature of the file
+HEALED_MD5=$(md5sum foo/splitfile | cut -d\ -f1)
+TEST [ "$MD5" == "$HEALED_MD5" ]
+
+# Verify the file can be removed
+TEST rm -f foo/splitfile
+cd ~
+
+cleanup
diff --git a/tests/basic/afr/gfid-unsplit-type-mismatch.t b/tests/basic/afr/gfid-unsplit-type-mismatch.t
new file mode 100644
index 00000000000..9e205021a0d
--- /dev/null
+++ b/tests/basic/afr/gfid-unsplit-type-mismatch.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Set up a cluster with 3 replicas, with the mtime favorite-child policy on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+pkill -f gluster/glustershd
+
+# Part I: FUSE Test
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+dd if=/dev/urandom of=splitfile bs=128k count=5 2>/dev/null
+
+MD5=$(md5sum splitfile | cut -d\ -f1)
+
+# Create a split-brain by downing a brick, and flipping the
+# gfid on the down brick, then bring the brick back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+rm -rf $GFID_DIR_B1
+rm -fv $B0/${V0}1/splitfile
+
+# Now really screw the file up by changing its type to a directory
+# instead of a file: the so-called "type mismatch" situation. Our test
+# should prove we can un-mangle this situation using the same strategy.
+mkdir $B0/${V0}1/splitfile
+touch -t 199011011510 $B0/${V0}1/splitfile
+TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
+cd ~
+
+touch $M0/newfile
+
+# Synthetically force a conservative merge of the directory. We want
+# to ensure that conservative merges happen in spite of GFID mismatches;
+# since we can handle them, there's no sense in skipping these. In fact,
+# if we stop them it will block GFID split-brain resolution.
+setfattr -n trusted.afr.patchy-client-1 -v 0x000000000000000000000002 $B0/${V0}1
+setfattr -n trusted.afr.patchy-client-2 -v 0x000000000000000000000002 $B0/${V0}1
+
+# Restart the down brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+sleep 5
+cd $M0
+
+# Tickle the file to trigger the gfid unsplit
+TEST stat splitfile
+sleep 1
+
+# Verify the file is readable
+TEST dd if=splitfile of=/dev/null 2>/dev/null
+# Verify entry healing happened on the back-end regardless of the
+# gfid-splitbrain state of the directory.
+TEST stat $B0/${V0}1/splitfile
+
+# Verify the MD5 signature of the file
+HEALED_MD5=$(md5sum splitfile | cut -d\ -f1)
+TEST [ "$MD5" == "$HEALED_MD5" ]
+
+# Verify the file can be removed
+TEST rm -f splitfile
+cd ~
+
+cleanup
diff --git a/tests/basic/afr/gfid-unsplit.t b/tests/basic/afr/gfid-unsplit.t
new file mode 100644
index 00000000000..0b883ab658f
--- /dev/null
+++ b/tests/basic/afr/gfid-unsplit.t
@@ -0,0 +1,120 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Set up a cluster with 3 replicas, with the mtime favorite-child policy on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 nfs.disable off
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+# Part I: FUSE Test
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 2>/dev/null
+
+MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+
+# Create a split-brain by downing a brick, and flipping the
+# gfid on the down brick, then bring the brick back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+rm -rf $GFID_DIR_B1
+mkdir -p $B0/${V0}1/.glusterfs/fd/55
+ln $B0/${V0}1/splitfile $B0/${V0}1/.glusterfs/fd/55/fd551a5c-fddd-4c1a-a4d0-96ef09ef5c08
+TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
+
+GFID_DIR_B3="$B0/${V0}3/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}3/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+#EST rm -f $B0/${V0}3/splitfile
+#m -rf $GFID_DIR_B3
+
+touch $M0/newfile
+
+# Synthetically force a conservative merge of the directory. We want
+# to ensure that conservative merges happen in spite of GFID mismatches;
+# since we can handle them, there's no sense in skipping these. In fact,
+# if we stop them it will block GFID split-brain resolution.
+setfattr -n trusted.afr.patchy-client-1 -v 0x000000000000000000000002 $B0/${V0}1
+setfattr -n trusted.afr.patchy-client-2 -v 0x000000000000000000000002 $B0/${V0}1
+
+# Restart the down brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+sleep 5
+
+# Tickle the file to trigger the gfid unsplit
+TEST stat $M0/splitfile
+sleep 1
+
+# Verify the file is readable
+TEST dd if=$M0/splitfile of=/dev/null 2>/dev/null
+
+# Verify entry healing happened on the back-end regardless of the
+# gfid-splitbrain state of the directory.
+TEST stat $B0/${V0}1/splitfile
+
+# Verify the MD5 signature of the file
+HEALED_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+TEST [ "$MD5" == "$HEALED_MD5" ]
+
+# Verify the file can be removed
+TEST rm -f $M0/splitfile
+
+# Part II: NFS test
+TEST mount_nfs $H0:/$V0 $N0 nolock
+#EST mount -t nfs -o nolock,noatime,noacl,soft,intr $H0:/$V0 $N0;
+
+dd if=/dev/urandom of=$N0/splitfile bs=128k count=5 2>/dev/null
+
+MD5=$(md5sum $N0/splitfile | cut -d\ -f1)
+
+# Create a split-brain by downing a brick, and flipping the
+# gfid on the down brick, then bring the brick back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+rm -rf $GFID_DIR_B1
+TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
+
+GFID_DIR_B3="$B0/${V0}3/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}3/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+#EST rm -f $B0/${V0}3/splitfile
+#m -rf $GFID_DIR_B3
+
+# Restart the down brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+sleep 5
+
+# Tickle the file to trigger the gfid unsplit
+TEST stat $N0/splitfile
+sleep 1
+
+# Verify the file is readable
+TEST dd if=$N0/splitfile of=/dev/null 2>/dev/null
+
+# Verify the MD5 signature of the file
+HEALED_MD5=$(md5sum $N0/splitfile | cut -d\ -f1)
+TEST [ "$MD5" == "$HEALED_MD5" ]
+
+# Verify the file can be removed
+TEST rm -f $N0/splitfile
+
+cleanup
diff --git a/tests/basic/afr/metadata-self-heal.t b/tests/basic/afr/metadata-self-heal.t
index b88c16a93e1..45bae7bdbfc 100644
--- a/tests/basic/afr/metadata-self-heal.t
+++ b/tests/basic/afr/metadata-self-heal.t
@@ -50,6 +50,7 @@ function print_pending_heals {
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
cd $M0
diff --git a/tests/basic/afr/self-heal.t b/tests/basic/afr/self-heal.t
index e1ac17c2d79..f2af52d9773 100644
--- a/tests/basic/afr/self-heal.t
+++ b/tests/basic/afr/self-heal.t
@@ -194,13 +194,22 @@ TEST rm -rf $M0/*
#7. Link/symlink heal
+# Make links (especially symlinks) with relative paths instead of absolute
+# paths, because absolute paths pointing from the brick to the mountpoint have
+# caused problems.
+make_link () {
+ mountpoint=$1; shift
+ # Do this in a subshell so we don't change "cd -" for the parent.
+ (cd $mountpoint; ln $*)
+}
+
#Test
TEST touch $M0/file
-TEST ln $M0/file $M0/link_to_file
+TEST make_link $M0 file link_to_file
TEST kill_brick $V0 $H0 $B0/brick0
TEST rm -f $M0/link_to_file
-TEST ln -s $M0/file $M0/link_to_file
-TEST ln $M0/file $M0/hard_link_to_file
+TEST make_link $M0 file -s link_to_file
+TEST make_link $M0 file hard_link_to_file
TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
diff --git a/tests/basic/afr/shd-autofix-nogfid.t b/tests/basic/afr/shd-autofix-nogfid.t
new file mode 100644
index 00000000000..7c9026dce62
--- /dev/null
+++ b/tests/basic/afr/shd-autofix-nogfid.t
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by majority on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 cluster.quorum-type auto
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+# Kill the SHD while we setup the test
+pkill -f gluster/glustershd
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+mkdir $M0/foo
+dd if=/dev/urandom of=$M0/foo/testfile bs=128k count=5 2>/dev/null
+MD5=$(md5sum $M0/foo/testfile | cut -d\ -f1)
+
+mkdir $B0/${V0}1/foo
+
+# Kick off the SHD and wait 30 seconds for healing to take place
+TEST gluster vol start $V0 force
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+
+# Verify the file was healed back to brick 1
+TEST stat $B0/${V0}1/foo/testfile
+
+# Part II: Test recovery for a file without a GFID
+# Kill the SHD while we setup the test
+pkill -f gluster/glustershd
+TEST kill_brick $V0 $H0 $B0/${V0}1
+rm -f $GFID_LINK_B1
+rm -f $B0/${V0}1/foo/testfile
+touch $B0/${V0}1/foo/testfile
+
+# Queue the directories for healing; don't bother to queue the file
+# itself, as that shouldn't be required.
+touch $B0/${V0}3/.glusterfs/indices/xattrop/00000000-0000-0000-0000-000000000001
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
+
+TEST gluster vol start $V0 force
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+TEST stat $B0/${V0}1/foo/testfile
+
+# Prove the directory and file are removable
+TEST rm -f $B0/${V0}1/foo/testfile
+TEST rmdir $B0/${V0}1/foo
+
+cleanup
diff --git a/tests/basic/afr/shd-force-inspect.t b/tests/basic/afr/shd-force-inspect.t
new file mode 100644
index 00000000000..caceb841322
--- /dev/null
+++ b/tests/basic/afr/shd-force-inspect.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by majority on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+# Part I: FUSE Test
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+mkdir foo
+dd if=/dev/urandom of=foo/testfile bs=128k count=5 2>/dev/null
+MD5=$(md5sum foo/testfile | cut -d\ -f1)
+
+# Kill the SHD while we setup the test
+pkill -f gluster/glustershd
+
+# Grab the GFID of the file and parent dir
+GFID_PARENT_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_PARENT_FORMATTED=$(echo "$GFID_PARENT_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo/testfile 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_FORMATTED=$(echo "$GFID_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+
+# Nuke the file from brick 1
+rm -f $GFID_LINK_B1
+rm -f $B0/${V0}1/foo/testfile
+
+# Now manually queue up the parent directory for healing
+touch $B0/${V0}2/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
+
+# Kick off the SHD and wait 30 seconds for healing to take place
+TEST gluster vol start patchy force
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+
+# Verify the file was healed back to brick 1
+TEST stat $B0/${V0}1/foo/testfile
+
+cleanup
diff --git a/tests/basic/afr/shd-pgfid-heal.t b/tests/basic/afr/shd-pgfid-heal.t
new file mode 100644
index 00000000000..6213e4c6374
--- /dev/null
+++ b/tests/basic/afr/shd-pgfid-heal.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by majority on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 cluster.quorum-type none
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.pgfid-self-heal on
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+TEST $CLI volume set $V0 storage.build-pgfid on
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+mkdir -p a/b/c
+dd if=/dev/urandom of=a/b/c/testfile bs=128k count=5 2>/dev/null
+
+# Kill the SHD while we setup the test
+pkill -f gluster/glustershd
+# Kill the brick as well, so the write below misses it and must be healed
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+echo stuff >> $M0/a/b/c/testfile
+MD5=$(md5sum a/b/c/testfile | cut -d\ -f1)
+
+# Grab the GFID of the file and parent dir
+GFID_PARENT_B_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/a/b 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_PARENT_B_FORMATTED=$(echo "$GFID_PARENT_B_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_PARENT_B_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_PARENT_B_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+GFID_PARENT_C_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/a/b/c 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_PARENT_C_FORMATTED=$(echo "$GFID_PARENT_C_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_PARENT_C_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_PARENT_C_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+GFID_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/a/b/c/testfile 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_FORMATTED=$(echo "$GFID_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+
+#
+# Here we are going to create a situation such that a file 3
+# levels deep into the FS requires healing, along with 2 levels
+# of parent directories. The only signal SHD has is that the
+# file itself needs healing. The directory (entry) heals are
+# missing, simulating a crash or some sort of bug that we need
+# to be able to recover from.
+#
+
+# Nuke the file from brick 1, along with the parent directories
+# and all backend hard/symbolic links
+rm -f $B0/${V0}1/a/b/c/testfile
+rm -f $GFID_LINK_B1
+rmdir $B0/${V0}1/a/b/c
+rm -f $GFID_PARENT_C_LINK_B1
+rmdir $B0/${V0}1/a/b
+rm -f $GFID_PARENT_B_LINK_B1
+
+# Kick off the SHD and wait 30 seconds for healing to take place
+TEST gluster vol start patchy force
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+sleep 5
+
+# Verify the file was healed back to brick 1
+TEST stat $B0/${V0}1/a/b/c/testfile
+
+cleanup
diff --git a/tests/basic/bd.t b/tests/basic/bd.t
index 63622edd709..11582db81c0 100755
--- a/tests/basic/bd.t
+++ b/tests/basic/bd.t
@@ -86,6 +86,7 @@ TEST pidof glusterd
configure
TEST $CLI volume create $V0 ${H0}:/$B0/$V0?${V0}
+TEST $CLI volume set $V0 performance.stat-prefetch off
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
diff --git a/tests/basic/cache.t b/tests/basic/cache.t
new file mode 100644
index 00000000000..92251732f4a
--- /dev/null
+++ b/tests/basic/cache.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+
+FILE=/var/log/glusterfs/samples/glusterfs_patchy.samp
+rm $FILE
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function print_cnt() {
+ local FOP_TYPE=$1
+ local FOP_CNT=$(grep ,${FOP_TYPE} $FILE | wc -l)
+ echo $FOP_CNT
+}
+
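+# print_avg extracts the number that immediately follows "<FOP>," in each
+# sample row (the op's recorded latency) and averages it over all samples.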
+function print_avg() {
+ local FOP_TYPE=$1
+ local FILE=/var/log/glusterfs/samples/glusterfs_patchy.samp
+ local FOP_AVG=$(grep -oE "${FOP_TYPE},[0-9]+\." ${FILE} | grep -oE '[0-9]+' | awk 'NR == 1 { sum = 0 } { sum += $1; } END {printf "%d", sum/NR}')
+ echo $FOP_AVG
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 diagnostics.latency-measurement on
+TEST $CLI volume set $V0 diagnostics.count-fop-hits on
+TEST $CLI volume set $V0 diagnostics.fop-sample-buf-size 65535
+TEST $CLI volume set $V0 diagnostics.fop-sample-interval 1
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+for i in {1..100}
+do
+ df $M0 &> /dev/null
+done
+
+sleep 6
+
+# Get average
+STATFS_CNT0=$(print_cnt STATFS)
+TEST [ "$STATFS_CNT0" -gt "0" ]
+STATFS_AVG0=$(print_avg STATFS)
+# Make it easier to compute averages
+rm $FILE
+
+TEST $CLI volume set $V0 performance.nfs.io-cache on
+TEST $CLI volume set $V0 performance.statfs-cache on
+TEST $CLI volume set $V0 performance.statfs-cache-timeout 10
+
+for i in {1..100}
+do
+ df $M0 &> /dev/null
+done
+
+sleep 6
+
+# Get average
+STATFS_CNT1=$(print_cnt STATFS)
+TEST [ "$STATFS_CNT1" -eq "$STATFS_CNT0" ]
+STATFS_AVG1=$(print_avg STATFS)
+
+# Verify that cached average * 10 is still faster than uncached
+STATFS_AVG1x10=$(($STATFS_AVG1 * 10))
+TEST [ "$STATFS_AVG0" -gt "$STATFS_AVG1x10" ]
+#cleanup
diff --git a/tests/basic/dht-min-free-space.t b/tests/basic/dht-min-free-space.t
new file mode 100755
index 00000000000..9553f9247aa
--- /dev/null
+++ b/tests/basic/dht-min-free-space.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../traps.rc
+
+grep $B0/patchy1 /proc/mounts &> /dev/null && umount $B0/patchy1
+grep $B0/patchy2 /proc/mounts &> /dev/null && umount $B0/patchy2
+mkdir $B0/${V0}{1..2}
+
+TEST glusterd
+
+TEST truncate --size $((30*1048576)) $B0/${V0}-dev1
+push_trapfunc "rm -f $B0/${V0}-dev1"
+TEST truncate --size $((30*1048576)) $B0/${V0}-dev2
+push_trapfunc "rm -f $B0/${V0}-dev2"
+
+TEST mkfs.xfs $B0/${V0}-dev1
+TEST mkfs.xfs $B0/${V0}-dev2
+
+TEST mount -o loop $B0/${V0}-dev1 $B0/${V0}1
+TEST mount -o loop $B0/${V0}-dev2 $B0/${V0}2
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume set $V0 cluster.min-free-disk 2MB
+TEST $CLI volume set $V0 cluster.min-free-strict-mode on
+TEST $CLI volume set $V0 cluster.du-refresh-interval-sec 0
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+####################################
+# Test re-directs of file creation #
+####################################
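+# Note: DHT marks its redirect stubs ("linkto" files) with the sticky bit,
+# which is why the '-k' tests below are used to tell a link-to entry from
+# the real data file.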
+
+# This should work, no redirects
+TEST dd if=/dev/zero of=$M0/testfile1 bs=1M count=8
+TEST [ -f /d/backends/${V0}2/testfile1 ] && [ ! -k /d/backends/${V0}1/testfile1 ]
+
+TEST $CLI volume set $V0 cluster.min-free-disk 19MB
+
+# This should work, & the file redirected
+# Subvolume 2 should have the linkto &
+# Subvolume 1 should have the original
+TEST dd if=/dev/zero of=$M0/testfile3 bs=1M count=4
+TEST [ -f /d/backends/${V0}1/testfile3 ] && [ ! -k /d/backends/${V0}1/testfile3 ]
+TEST [ -k /d/backends/${V0}2/testfile3 ]
+
+# This should fail, cluster is full
+TEST ! dd if=/dev/zero of=$M0/testfile2 bs=1M count=23
+
+###################
+# Strict mode off #
+###################
+TEST $CLI volume set $V0 cluster.min-free-strict-mode off
+TEST dd if=/dev/zero of=$M0/testfile1 bs=1M count=20
+TEST rm -f $M0/testfile1
+
+###################
+# Strict mode on #
+###################
+TEST $CLI volume set $V0 cluster.min-free-strict-mode on
+TEST ! dd if=/dev/zero of=$M0/testfile1 bs=1M count=16
+TEST rm -f $M0/testfile1
+
+# Cleanup will deal with our mounts for us, and (because we used "-o loop") our
+# device files too, but not the underlying files. That will happen in the EXIT
+# trap handler instead.
+cleanup;
diff --git a/tests/basic/ec/ec-common b/tests/basic/ec/ec-common
index 83c4463a912..152e3b51236 100644
--- a/tests/basic/ec/ec-common
+++ b/tests/basic/ec/ec-common
@@ -45,7 +45,7 @@ for size in $SIZE_LIST; do
eval cs_big_truncate[$size]=$(sha1sum $tmp/big1 | awk '{ print $1 }')
done
-TEST df -h
+TEST df -h $M0
TEST stat $M0
for idx in `seq 0 $LAST_BRICK`; do
diff --git a/tests/basic/ec/self-heal.t b/tests/basic/ec/self-heal.t
index 98dd9232c73..3e3467535fb 100644
--- a/tests/basic/ec/self-heal.t
+++ b/tests/basic/ec/self-heal.t
@@ -136,7 +136,7 @@ TEST dd if=/dev/urandom of=$tmp/test bs=1024 count=1024
cs=$(sha1sum $tmp/test | awk '{ print $1 }')
-TEST df -h
+TEST df -h $M0
TEST stat $M0
for idx in {0..5}; do
diff --git a/tests/basic/exports_parsing.t b/tests/basic/exports_parsing.t
index fdaf9c2822e..da88bbcb2cc 100644
--- a/tests/basic/exports_parsing.t
+++ b/tests/basic/exports_parsing.t
@@ -32,7 +32,20 @@ function test_bad_opt ()
glusterfsd --print-exports $1 2>&1 | sed -n 1p
}
-EXPECT_KEYWORD "/test @test(rw,anonuid=0,sec=sys,) 10.35.11.31(rw,anonuid=0,sec=sys,)" test_good_file $EXP_FILES/exports
+function check_export_line() {
+ if [ "$1" == "$2" ]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+ return
+}
+
+export_result=$(test_good_file $EXP_FILES/exports)
+EXPECT "Y" check_export_line '/test @test(rw,anonuid=0,sec=sys,) 10.35.11.31(rw,anonuid=0,sec=sys,) ' "$export_result"
+
+export_result=$(test_good_file $EXP_FILES/exports-v6)
+EXPECT "Y" check_export_line '/test @test(rw,anonuid=0,sec=sys,) 2401:db00:11:1:face:0:3d:0(rw,anonuid=0,sec=sys,) ' "$export_result"
EXPECT_KEYWORD "Error parsing netgroups for:" test_bad_line $EXP_FILES/bad_exports
EXPECT_KEYWORD "Error parsing netgroups for:" test_long_netgroup $EXP_FILES/bad_exports
diff --git a/tests/basic/fop-sampling.t b/tests/basic/fop-sampling.t
index cea8aa737c0..713c7e27579 100644
--- a/tests/basic/fop-sampling.t
+++ b/tests/basic/fop-sampling.t
@@ -2,13 +2,27 @@
#
. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
. $(dirname $0)/../volume.rc
-SAMPLE_FILE="$(gluster --print-logdir)/samples/glusterfs_${V0}.samp"
+BRICK_SAMPLES="$(gluster --print-logdir)/samples/glusterfsd__d_backends_${V0}0.samp"
+NFS_SAMPLES="$(gluster --print-logdir)/samples/glusterfs_nfsd.samp"
+
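+# check_path <FOP> <path> <sample-file>: sample rows are comma-separated and
+# field 11 holds the path the FOP operated on; print "Y" if a row for this
+# FOP lists the given path, "N" otherwise.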
+function check_path {
+ op=$1
+ path=$2
+ file=$3
+ grep $op $file | awk -F, '{print $11}' | grep $path 2>&1 > /dev/null
+ if [ $? -eq 0 ]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
function print_cnt() {
local FOP_TYPE=$1
- local FOP_CNT=$(grep ,${FOP_TYPE} ${SAMPLE_FILE} | wc -l)
+ local FOP_CNT=$(grep ,${FOP_TYPE} ${BRICK_SAMPLES} | wc -l)
echo $FOP_CNT
}
@@ -42,12 +56,18 @@ TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume set $V0 nfs.disable off
TEST $CLI volume set $V0 diagnostics.latency-measurement on
TEST $CLI volume set $V0 diagnostics.count-fop-hits on
-TEST $CLI volume set $V0 diagnostics.stats-dump-interval 2
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 5
TEST $CLI volume set $V0 diagnostics.fop-sample-buf-size 65535
TEST $CLI volume set $V0 diagnostics.fop-sample-interval 1
TEST $CLI volume set $V0 diagnostics.stats-dnscache-ttl-sec 3600
-
TEST $CLI volume start $V0
+
+>${NFS_SAMPLES}
+>${BRICK_SAMPLES}
+
+#################
+# Basic Samples #
+#################
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
for i in {1..5}
@@ -58,4 +78,52 @@ done
TEST ls -l $M0
EXPECT_WITHIN 6 "OK" check_samples
-cleanup
+sleep 2
+
+################################
+# Paths in the samples #
+################################
+
+TEST mount_nfs $H0:$V0 $N0
+
+ls $N0 &> /dev/null
+touch $N0/file1
+stat $N0/file1 &> /dev/null
+echo "some data" > $N0/file1
+dd if=/dev/zero of=$N0/file2 bs=1M count=10 conv=fsync
+dd if=/dev/zero of=$N0/file1 bs=1M count=1
+cat $N0/file2 &> /dev/null
+mkdir -p $N0/dir1
+rmdir $N0/dir1
+rm $N0/file1
+rm $N0/file2
+
+EXPECT_WITHIN 10 "Y" check_path CREATE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path LOOKUP /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path SETATTR /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path WRITE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FINODELK /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ENTRYLK / $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FLUSH /file2 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path TRUNCATE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path MKDIR /dir1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path RMDIR /dir1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file2 $BRICK_SAMPLES
+
+
+EXPECT_WITHIN 10 "Y" check_path CREATE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path LOOKUP /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ACCESS /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path SETATTR /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path WRITE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FLUSH /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ACCESS /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path READ /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path TRUNCATE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path MKDIR /dir1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path RMDIR /dir1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file2 $NFS_SAMPLES
+
+cleanup;
diff --git a/tests/basic/fops-sanity-gfproxy.t b/tests/basic/fops-sanity-gfproxy.t
new file mode 100755
index 00000000000..b3bb8a502cc
--- /dev/null
+++ b/tests/basic/fops-sanity-gfproxy.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#gfproxy server
+TEST glusterfs --volfile-id=gfproxy/$V0 --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy.log
+
+#mount on a random dir
+TEST glusterfs --entry-timeout=3600 --attribute-timeout=3600 -s $H0 --volfile-id=gfproxy-client/$V0 $M0 --direct-io-mode=yes
+TEST grep gfproxy-client /proc/mounts
+
+build_tester $(dirname $0)/fops-sanity.c
+
+TEST cp $(dirname $0)/fops-sanity $M0
+cd $M0
+TEST ./fops-sanity $V0
+cd -
+rm -f $(dirname $0)/fops-sanity
+
+cleanup;
diff --git a/tests/basic/gfid-access.t b/tests/basic/gfid-access.t
index 19b6564e676..fc29a19fc6c 100644
--- a/tests/basic/gfid-access.t
+++ b/tests/basic/gfid-access.t
@@ -8,6 +8,7 @@ cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume start $V0
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 --aux-gfid-mount;
TEST mkdir $M0/a
diff --git a/tests/basic/gfproxy.t b/tests/basic/gfproxy.t
new file mode 100644
index 00000000000..71c6788db76
--- /dev/null
+++ b/tests/basic/gfproxy.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
+function start_gfproxyd {
+ glusterfs --volfile-id=gfproxy/${V0} --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy.log
+}
+
+function restart_gfproxyd {
+ pkill -f gfproxy/${V0}
+ start_gfproxyd
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 config.gfproxyd-remote-host $H0
+TEST $CLI volume start $V0
+
+sleep 2
+
+REGULAR_CLIENT_VOLFILE="/var/lib/glusterd/vols/${V0}/trusted-${V0}.tcp-fuse.vol"
+GFPROXY_CLIENT_VOLFILE="/var/lib/glusterd/vols/${V0}/trusted-${V0}.tcp-gfproxy-fuse.vol"
+GFPROXYD_VOLFILE="/var/lib/glusterd/vols/${V0}/${V0}.gfproxyd.vol"
+
+# Client volfile must exist
+TEST [ -f $GFPROXY_CLIENT_VOLFILE ]
+
+# AHA & write-behind translators must exist
+TEST grep "cluster/aha" $GFPROXY_CLIENT_VOLFILE
+TEST grep "performance/write-behind" $GFPROXY_CLIENT_VOLFILE
+
+# Make sure we didn't screw up the existing client
+TEST grep "performance/write-behind" $REGULAR_CLIENT_VOLFILE
+TEST grep "cluster/replicate" $REGULAR_CLIENT_VOLFILE
+TEST grep "cluster/distribute" $REGULAR_CLIENT_VOLFILE
+
+TEST [ -f $GFPROXYD_VOLFILE ]
+
+TEST grep "cluster/replicate" $GFPROXYD_VOLFILE
+TEST grep "cluster/distribute" $GFPROXYD_VOLFILE
+
+# AHA & write-behind must *not* exist
+TEST ! grep "cluster/aha" $GFPROXYD_VOLFILE
+TEST ! grep "performance/write-behind" $GFPROXYD_VOLFILE
+
+# Test that we can start the server and the client
+TEST start_gfproxyd
+TEST glusterfs --volfile-id=gfproxy-client/${V0} --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy-client.log $M0
+sleep 2
+TEST grep gfproxy-client/${V0} /proc/mounts
+
+# Write data to the mount and checksum it
+TEST dd if=/dev/urandom bs=1M count=10 of=/tmp/testfile1
+md5=$(md5sum /tmp/testfile1 | awk '{print $1}')
+TEST cp -v /tmp/testfile1 $M0/testfile1
+TEST [ "$(md5sum $M0/testfile1 | awk '{print $1}')" == "$md5" ]
+
+rm /tmp/testfile1
+
+dd if=/dev/zero of=$N0/bigfile bs=1M count=3072 &
+BG_STRESS_PID=$!
+
+sleep 3
+
+restart_gfproxyd
+
+TEST wait $BG_STRESS_PID
+
+cleanup;
diff --git a/tests/basic/glusterd/volfile_server_switch.t b/tests/basic/glusterd/volfile_server_switch.t
index 0b0e6470244..0b01398215c 100644
--- a/tests/basic/glusterd/volfile_server_switch.t
+++ b/tests/basic/glusterd/volfile_server_switch.t
@@ -1,5 +1,8 @@
#!/bin/bash
+#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
+
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
diff --git a/tests/basic/halo-failover-disabled.t b/tests/basic/halo-failover-disabled.t
new file mode 100644
index 00000000000..f3655eaef3b
--- /dev/null
+++ b/tests/basic/halo-failover-disabled.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+#
+# Tests that fail-over works correctly for Halo Geo-replication
+#
+# 1. Create a volume @ 3x replication w/ halo + quorum enabled
+# 2. Write some data, background it & fail a brick
+# 3. The expected result is that the writes fail-over to the 3rd
+#    brick immediately, and md5s will show they are equal once
+# the write completes.
+# 4. The mount should also be RW after the brick is killed as
+# quorum will be immediately restored by swapping in the
+# other brick.
+#
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../halo.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-max-latency 9999
+TEST $CLI volume set $V0 cluster.halo-shd-max-latency 9999
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.halo-failover-enabled off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 2
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+TEST $CLI volume set $V0 nfs.log-level DEBUG
+
+# Use a large ping time here so the spare brick is not marked up
+# based on the ping time. The only way it can get marked up is
+# by being swapped in via the down event (which is what we are disabling).
+TEST $CLI volume set $V0 network.ping-timeout 1000
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# Make sure two children are up and one is down.
+EXPECT_WITHIN 10 "2 1" halo_sum_child_states 3
+
+# Write some data to the mount
+TEST dd if=/dev/urandom of=$M0/test bs=1k count=200 conv=fsync
+
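+# Find which child the halo logic most recently marked UP in the client log;
+# that is the brick we kill to simulate the failure.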
+UP_IDX=$(cat /var/log/glusterfs/$M0LOG | grep "halo state: UP" | tail -n1 | grep -Eo "Child [0-9]+" | grep -Eo "[0-9]+")
+TEST kill_brick $V0 $H0 $B0/${V0}${UP_IDX}
+
+# Make sure two children are down and one is up.
+EXPECT_WITHIN 10 "1 2" halo_sum_child_states 3
+
+# Test that quorum fails and the mount is RO. The reason is that although
+# there _is_ another brick running which _could_ take the failed brick's
+# place, it is not marked "up", so quorum will not be fulfilled. If we
+# waited 1000 seconds the brick would
+# indeed be activated based on ping time, but for our test we want
+# the decision to be solely "down event" driven, not ping driven.
+TEST ! dd if=/dev/urandom of=$M0/test_rw bs=1M count=1 conv=fsync
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 $UP_IDX
+
+# Test that quorum should be restored and the file is writable
+TEST dd if=/dev/urandom of=$M0/test_rw bs=1M count=1
+
+cleanup
diff --git a/tests/basic/halo-failover-enabled.t b/tests/basic/halo-failover-enabled.t
new file mode 100644
index 00000000000..7d23d80968a
--- /dev/null
+++ b/tests/basic/halo-failover-enabled.t
@@ -0,0 +1,85 @@
+#!/bin/bash
+#
+# Tests that fail-over works correctly for Halo Geo-replication
+#
+# 1. Create a volume @ 3x replication w/ halo + quorum enabled
+# 2. Write some data, background it & fail a brick
+# 3. The expected result is that the writes fail-over to the 3rd
+#    brick immediately, and md5s will show they are equal once
+# the write completes.
+# 4. The mount should also be RW after the brick is killed as
+# quorum will be immediately restored by swapping in the
+# other brick.
+#
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../halo.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-failover-enabled on
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 2
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 network.ping-timeout 20
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+TEST $CLI volume set $V0 nfs.log-level DEBUG
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# Make sure two children are up and one is down.
+EXPECT_WITHIN 10 "2 1" halo_sum_child_states 3
+
+# Write some data to the mount
+TEST dd if=/dev/urandom of=$M0/test bs=1k count=200 conv=fsync
+
+KILL_IDX=$(cat /var/log/glusterfs/$M0LOG | grep "halo state: UP" | tail -n1 | grep -Eo "Child [0-9]+" | grep -Eo "[0-9]+")
+TEST [ -n "$KILL_IDX" ]
+# NB: UP_CHILDREN is the set of children that should be up after we kill
+# the brick indicated by KILL_IDX, *not* the set of children which are
+# currently up!
+UP_CHILDREN=($(echo "0 1 2" | sed "s/${KILL_IDX}//g"))
+UP1_HAS_TEST="$(ls $B0/${V0}${UP_CHILDREN[0]}/test 2>/dev/null)"
+UP2_HAS_TEST="$(ls $B0/${V0}${UP_CHILDREN[1]}/test 2>/dev/null)"
+VICTIM_HAS_TEST="$(ls $B0/${V0}${KILL_IDX}/test 2>/dev/null)"
+
+# The victim brick should have a copy of the file.
+TEST [ -n "$VICTIM_HAS_TEST" ]
+
+# Of the bricks which will remain standing, there should be only one
+# brick which has the file called test. If both have the first
+# test file, the test is invalid: all the bricks are up and
+# halo-max-replicas is not being honored, i.e. a bug exists.
+TEST [ $([ -z "$UP1_HAS_TEST" ]) = $([ -z "$UP2_HAS_TEST" ]) ]
+
+echo "Failing child ${KILL_IDX}..."
+TEST kill_brick $V0 $H0 $B0/${V0}${KILL_IDX}
+
+# Test the mount is still RW (i.e. quorum works)
+TEST dd if=/dev/urandom of=$M0/test_failover bs=1M count=1 conv=fsync
+
+# Calculate the MD5s
+MD5_UP1=$(md5sum $B0/${V0}${UP_CHILDREN[0]}/test_failover | cut -d' ' -f1)
+MD5_UP2=$(md5sum $B0/${V0}${UP_CHILDREN[1]}/test_failover | cut -d' ' -f1)
+
+# Verify the two up bricks have identical MD5s; if both are identical
+# then we must have successfully failed over to the brick which was
+# previously proven to be down (via the ONLY_ONE test).
+TEST [ "$MD5_UP1" == "$MD5_UP2" ]
+
+cleanup
diff --git a/tests/basic/halo-hybrid.t b/tests/basic/halo-hybrid.t
new file mode 100644
index 00000000000..4574fdfe41e
--- /dev/null
+++ b/tests/basic/halo-hybrid.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# Test for the Halo hybrid feature
+#
+# 1. Create volume w/ 3x replication w/ max-replicas = 2 for clients,
+# heal daemon is off to start.
+# 2. Write some data
+# 3. Verify hybrid code chose children for lookups
+# 4. Verify hybrid code chose child for reads
+# 5. Verify hybrid code wrote synchronously to all replicas
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function found_fuse_log_msg {
+ local dir="$1"
+ local msg="$2"
+ local cnt=$(cat /var/log/glusterfs/$M0LOG | grep "$msg" | tail -n1 | wc -l)
+ if (( $cnt == 1 )); then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-hybrid-mode True
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 diagnostics.client-log-level TRACE
+TEST $CLI volume start $V0
+
+# Start a synchronous mount
+TEST glusterfs --volfile-id=/$V0 \
+ --xlator-option *replicate*.halo-max-latency=9999 \
+ --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+cd $M0
+
+TEST mkdir testdir
+TEST cd testdir
+for i in {1..5}
+do
+ dd if=/dev/urandom of=testfile$i bs=1M count=1 2>/dev/null
+done
+TEST ls -l
+
+EXPECT_WITHIN "60" "Y" found_fuse_log_msg "children for LOOKUPs"
+EXPECT_WITHIN "60" "Y" found_fuse_log_msg "Selected hybrid child"
+
+B0_CNT=$(ls $B0/${V0}0/testdir | wc -l)
+B1_CNT=$(ls $B0/${V0}1/testdir | wc -l)
+B2_CNT=$(ls $B0/${V0}2/testdir | wc -l)
+
+# Writes should be synchronous, all should have same
+# file count
+TEST "(($B0_CNT == 5 && $B1_CNT == 5 && $B2_CNT == 5))"
+
+cleanup
diff --git a/tests/basic/halo.t b/tests/basic/halo.t
new file mode 100644
index 00000000000..25aca3442ab
--- /dev/null
+++ b/tests/basic/halo.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Test for the Halo geo-replication feature
+#
+# 1. Create volume w/ 3x replication w/ max-replicas = 2 for clients,
+# heal daemon is off to start.
+# 2. Write some data
+# 3. Verify at least one of the bricks did not receive the writes.
+# 4. Turn the heal daemon on
+# 5. Within 30 seconds the SHD should async heal the data over
+# to the 3rd brick.
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+
+for i in {1..5}
+do
+ dd if=/dev/urandom of=f bs=1M count=1 2>/dev/null
+ mkdir a; cd a;
+done
+
+B0_CNT=$(ls $B0/${V0}0 | wc -l)
+B1_CNT=$(ls $B0/${V0}1 | wc -l)
+B2_CNT=$(ls $B0/${V0}2 | wc -l)
+
+# One of the brick dirs should be empty
+TEST "(($B0_CNT == 0 || $B1_CNT == 0 || $B2_CNT == 0))"
+
+# Ok, turn the heal daemon on and verify it heals it up
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+cleanup
diff --git a/tests/basic/mount-nfs-auth.t b/tests/basic/mount-nfs-auth.t
index 9df5cb45c3b..7f990c9aeb2 100755
--- a/tests/basic/mount-nfs-auth.t
+++ b/tests/basic/mount-nfs-auth.t
@@ -3,6 +3,13 @@
. $(dirname $0)/../include.rc
. $(dirname $0)/../nfs.rc
+# On test systems, connecting to ourselves by hostname appears at the other end
+# as coming from localhost, so that's what needs to go in exports files etc.
+# The only place we really need to use the actual hostname is in the Gluster
+# volume-create thing. Maybe it's an IPv6 thing, maybe it's just a crazy
+# resolver configuration, but this lets the test work.
+H0=localhost
+
# Our mount timeout must be as long as the time for a regular configuration
# change to be acted upon *plus* AUTH_REFRESH_TIMEOUT, not one replacing the
# other. Otherwise this process races vs. the one making the change we're
@@ -15,6 +22,9 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
+H0IP=$(ip addr show |grep -w inet |grep -v 127.0.0.1|awk '{ print $2 }'| cut -d "/" -f 1)
+H0IP6=$(host $HOSTNAME | grep IPv6 | awk '{print $NF}')
+
# Export variables for allow & deny
EXPORT_ALLOW="/$V0 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
EXPORT_ALLOW_SLASH="/$V0/ $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
@@ -28,13 +38,21 @@ V0L1="$V0/L1"
V0L2="$V0L1/L2"
V0L3="$V0L2/L3"
+NETGROUP_COMPLEX_ALLOW="storage storage.region\nstorage.region (1.2.3.4,,)\nngtop ng1\nng1 ($H0,,)"
+EXPORT_COMPLEX_RO_ALLOW="/$V0L1 @storage(sec=sys,rw,anonuid=0) @ngtop(sec=sys,ro,anonuid=0)"
+
# Other variations for allow & deny
+EXPORT_ALLOW_NETGROUP_RO="/$V0 @ngtop(sec=sys,ro,anonuid=0)"
EXPORT_ALLOW_RO="/$V0 $H0(sec=sys,ro,anonuid=0) @ngtop(sec=sys,ro,anonuid=0)"
EXPORT_ALLOW_L1="/$V0L1 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
EXPORT_WILDCARD="/$V0 *(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
function build_dirs () {
- mkdir -p $B0/b{0,1,2}/L1/L2/L3
+ mkdir -p $B0/b{0,1,2,3,4,5}/L1/L2/L3
+}
+
+function export_allow_this_host_ipv6 () {
+ printf "$EXPORT_ALLOW6\n" > ${NFSDIR}/exports
}
function export_allow_this_host () {
@@ -46,6 +64,9 @@ function export_allow_this_host_with_slash () {
}
function export_deny_this_host () {
+ if [[ "$1" && "$1" != "$V0" ]]; then
+ local EXPORT_DENY=$(echo $EXPORT_DENY | sed "s/$V0/$1/")
+ fi
printf "$EXPORT_DENY\n" > ${NFSDIR}/exports
}
@@ -61,6 +82,10 @@ function export_allow_this_host_ro () {
printf "$EXPORT_ALLOW_RO\n" > ${NFSDIR}/exports
}
+function export_allow_netgroup_ro () {
+ printf "$EXPORT_ALLOW_NETGROUP_RO\n" > ${NFSDIR}/exports
+}
+
function netgroup_allow_this_host () {
printf "$NETGROUP_ALLOW\n" > ${NFSDIR}/netgroups
}
@@ -69,8 +94,16 @@ function netgroup_deny_this_host () {
printf "$NETGROUP_DENY\n" > ${NFSDIR}/netgroups
}
+function netgroup_complex_allow() {
+ printf "$NETGROUP_COMPLEX_ALLOW\n" > ${NFSDIR}/netgroup
+}
+
+function export_complex_ro_allow() {
+ printf "$EXPORT_COMPLEX_RO_ALLOW\n" > ${NFSDIR}/exports
+}
+
function create_vol () {
- $CLI vol create $V0 $H0:$B0/b0
+ $CLI vol create $V0 $(hostname):$B0/b0
}
function setup_cluster() {
@@ -104,6 +137,10 @@ function check_mount_failure {
fi
}
+function do_mount () {
+ mount_nfs $H0:/$1 $N0 nolock
+}
+
function small_write () {
dd if=/dev/zero of=$N0/test-small-write count=1 bs=1k 2>&1
if [ $? -ne 0 ]; then
@@ -150,10 +187,7 @@ setup_cluster
TEST $CLI vol set $V0 nfs.disable off
TEST $CLI vol start $V0
-# Get NFS state directory
-NFSDIR=$( $CLI volume get patchy nfs.mount-rmtab | \
- awk '/^nfs.mount-rmtab/{print $2}' | \
- xargs dirname )
+NFSDIR=/var/lib/glusterd/nfs
## Wait for volume to register with rpc.mountd
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
@@ -186,6 +220,11 @@ EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
## Mount NFS
EXPECT "Y" check_mount_success $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount NFS using the IPv6 export
+export_allow_this_host_ipv6
+EXPECT "Y" check_mount_success $V0
## Disallow host
TEST export_deny_this_host
@@ -260,6 +299,31 @@ TEST ! create # Create should not be allowed
TEST stat_nfs # Stat should be allowed
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+TEST export_allow_netgroup_ro
+TEST netgroup_allow_this_host
+sleep $((AUTH_REFRESH_INTERVAL+1))
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
+# TBD: figure out why these two tests fail, so they can be reenabled
+#EST ! small_write # Writes should not be allowed
+#EST ! create # Create should not be allowed
+TEST stat_nfs # Stat should be allowed
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+# This test checks the case where the exports file
+# has a 'rw' perm set for a netgroup followed
+# by a 'ro' perm for a different netgroup.
+TEST netgroup_complex_allow
+TEST export_complex_ro_allow
+sleep $((AUTH_REFRESH_INTERVAL+1))
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0L1
+# TBD: figure out why these two tests fail, so they can be reenabled
+#EST ! small_write # Writes should not be allowed
+#EST ! create # Create should not be allowed
+TEST stat_nfs # Stat should be allowed
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
TEST export_deny_this_host
TEST netgroup_deny_this_host
TEST export_allow_this_host_l1 # Allow this host at L1
@@ -320,9 +384,40 @@ TEST $CLI vol set $V0 nfs.auth-refresh-interval-sec 20
## Do a simple test to see if the volume option exists
TEST $CLI vol set $V0 nfs.auth-cache-ttl-sec 400
+## Test exports authentication enabled on only one of two volumes
+ME=$(hostname)
+TEST $CLI vol create $V1 replica 3 $ME:$B0/b3 $ME:$B0/b4 $ME:$B0/b5
+TEST $CLI vol set $V1 cluster.self-heal-daemon off
+TEST $CLI vol set $V1 nfs.disable off
+TEST $CLI vol set $V1 cluster.choose-local off
+TEST $CLI vol start $V1
+TEST $CLI volume info $V1;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "2" is_nfs_export_available $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available $V1
+TEST $CLI vol set $V0 nfs.exports-auth-enable on
+TEST $CLI vol set $V1 nfs.exports-auth-enable off
+# Deny this host for both volumes; only effective on $V0, where exports auth is enabled
+TEST export_deny_this_host $V0
+TEST netgroup_deny_this_host
+TEST export_deny_this_host $V1
+
+sleep $AUTH_REFRESH_INTERVAL
+TEST ! do_mount $V0 # Do a mount & test
+TEST do_mount $V1 # Do a mount & test
+
+TEST touch /tmp/foo
+TEST cp /tmp/foo $N0/
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
## Finish up
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
+TEST $CLI volume stop $V1
+TEST $CLI volume delete $V1;
+TEST ! $CLI volume info $V1;
+
cleanup
diff --git a/tests/basic/pgfid-feat.t b/tests/basic/pgfid-feat.t
index a7baeec7b7a..615a0cd867e 100644
--- a/tests/basic/pgfid-feat.t
+++ b/tests/basic/pgfid-feat.t
@@ -16,6 +16,7 @@ TEST pidof glusterd
TEST $CLI volume info;
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 build-pgfid on;
TEST $CLI volume start $V0;
diff --git a/tests/basic/quota-anon-fd-nfs.t b/tests/basic/quota-anon-fd-nfs.t
index d911cc90b87..a6dec6bfcf8 100755
--- a/tests/basic/quota-anon-fd-nfs.t
+++ b/tests/basic/quota-anon-fd-nfs.t
@@ -17,6 +17,7 @@ TEST pidof glusterd
TEST $CLI volume info;
TEST $CLI volume create $V0 $H0:$B0/brick1;
+TEST $CLI volume set $V0 performance.stat-prefetch off
EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume set $V0 nfs.disable false
diff --git a/tests/basic/quota.t b/tests/basic/quota.t
index 7f8b21de6f8..99af5a4e7e4 100755
--- a/tests/basic/quota.t
+++ b/tests/basic/quota.t
@@ -19,6 +19,7 @@ TEST pidof glusterd
TEST $CLI volume info;
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+TEST $CLI volume set $V0 performance.stat-prefetch off
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
diff --git a/tests/basic/rpc-coverage.t b/tests/basic/rpc-coverage.t
index a76ba7084eb..b5221dcd9dd 100755..100644
--- a/tests/basic/rpc-coverage.t
+++ b/tests/basic/rpc-coverage.t
@@ -10,6 +10,7 @@ TEST pidof glusterd
TEST $CLI volume info;
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume set $V0 performance.stat-prefetch off
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
diff --git a/tests/basic/stats-dump.t b/tests/basic/stats-dump.t
index 7da6e0605a4..2840498218b 100644
--- a/tests/basic/stats-dump.t
+++ b/tests/basic/stats-dump.t
@@ -12,6 +12,7 @@ TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume set $V0 diagnostics.latency-measurement on
TEST $CLI volume set $V0 diagnostics.count-fop-hits on
TEST $CLI volume set $V0 diagnostics.stats-dump-interval 1
+TEST $CLI volume set $V0 performance.nfs.io-threads on
TEST $CLI volume set $V0 nfs.disable off
TEST $CLI volume start $V0
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
@@ -36,6 +37,10 @@ NFSD_RET="$?"
FUSE_OUTPUT="$(grep 'aggr.fop.write.count": "0"' ${GLUSTERD_WORKDIR}/stats/glusterfs_patchy.dump)"
FUSE_RET="$?"
+# Test that io-stats is getting queue sizes from io-threads
+TEST grep 'queue_size' ${GLUSTERD_WORKDIR}/stats/glusterfs_nfsd.dump
+TEST grep 'queue_size' ${GLUSTERD_WORKDIR}/stats/glusterfsd__d_backends_patchy?.dump
+
TEST [ 0 -ne "$BRICK_RET" ]
TEST [ 0 -ne "$NFSD_RET" ]
TEST [ 0 -ne "$FUSE_RET" ]
diff --git a/tests/basic/uss.t b/tests/basic/uss.t
index 6cfc0303895..d6ca416bd65 100644
--- a/tests/basic/uss.t
+++ b/tests/basic/uss.t
@@ -382,3 +382,5 @@ TEST ls $M0/.history/snap6/;
TEST ! stat $M0/.history/snap6/aaa;
cleanup;
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/basic/write-behind.t b/tests/basic/write-behind.t
new file mode 100644
index 00000000000..edad59786af
--- /dev/null
+++ b/tests/basic/write-behind.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function clear_stats {
+    > /var/lib/glusterd/stats/glusterfsd__d_backends_${V0}0.dump
+}
+
+function got_expected_write_count {
+ expected_size=$1
+ expected_value=$2
+ grep aggr.write_${expected_size} "/var/lib/glusterd/stats/glusterfsd__d_backends_${V0}0.dump" | grep $expected_value
+ if [ $? == 0 ]; then
+ echo "Y";
+ else
+ echo "N";
+ fi
+}
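+# (Assumes the io-stats dump contains per-size aggregate write counters, i.e.
+# a line mentioning both "aggr.write_<size>" and the count, which is what the
+# two greps above look for.)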
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+
+# These are needed for our tracking of write sizes
+TEST $CLI volume set $V0 diagnostics.latency-measurement on
+TEST $CLI volume set $V0 diagnostics.count-fop-hits on
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 2
+
+# Disable this in testing to get deterministic results
+TEST $CLI volume set $V0 performance.write-behind-trickling-writes off
+
+TEST $CLI volume start $V0
+
+sleep 2;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+# Write a 100MB file with a window size of 1MB; we should get 100 writes of 1MB each
+TEST dd if=/dev/zero of=$M0/100mb_file bs=1M count=100
+EXPECT_WITHIN 5 "Y" got_expected_write_count "1mb" 100
+
+TEST $CLI volume set $V0 performance.write-behind-window-size 512KB
+
+# Write a 100MB file with a window size of 512KB; we should get 200 writes of 512KB each
+TEST dd if=/dev/zero of=$M0/100mb_file_2 bs=1M count=100
+EXPECT_WITHIN 5 "Y" got_expected_write_count "512kb" 200
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1099890.t b/tests/bugs/distribute/bug-1099890.t
index 1a19ba880c0..9f8ae1487cc 100644
--- a/tests/bugs/distribute/bug-1099890.t
+++ b/tests/bugs/distribute/bug-1099890.t
@@ -44,6 +44,8 @@ TEST $CLI volume set $V0 features.quota-deem-statfs on
TEST $CLI volume quota $V0 limit-usage / 150MB;
+TEST $CLI volume set $V0 cluster.du-refresh-interval-sec 1
+
TEST $CLI volume set $V0 cluster.min-free-disk 50%
TEST glusterfs -s $H0 --volfile-id=$V0 $M0
diff --git a/tests/bugs/distribute/bug-1161311.t b/tests/bugs/distribute/bug-1161311.t
index c5a7f041ac8..8cf905a8f0b 100755
--- a/tests/bugs/distribute/bug-1161311.t
+++ b/tests/bugs/distribute/bug-1161311.t
@@ -53,8 +53,14 @@ TEST glusterfs -s $H0 --volfile-id $V0 $M0;
TEST mkdir $M0/dir1
TEST mkdir -p $M0/dir2/dir3
-# Create a large file (1GB), so that rebalance takes time
-dd if=/dev/urandom of=$M0/dir1/FILE2 bs=64k count=10240
+# Create a large file (6.4 GB), so that rebalance takes time.
+# Reading from /dev/urandom is slow, so generate 640 MB once and concatenate it ten times.
+dd if=/dev/urandom of=/tmp/FILE2 bs=64k count=10240
+for i in {1..10}; do
+ cat /tmp/FILE2 >> $M0/dir1/FILE2
+done
+
# Rename the file to create a linkto, for rebalance to
# act on the file
diff --git a/tests/bugs/fb4482137.t b/tests/bugs/fb4482137.t
new file mode 100755
index 00000000000..bd3be89326b
--- /dev/null
+++ b/tests/bugs/fb4482137.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+#
+# Test the scenario where an SHD daemon suffers a frame timeout during a
+# crawl. The expected behavior is that the present crawl will continue
+# after the timeout and not deadlock.
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+function wait_for_shd_no_sink() {
+ local TIMEOUT=$1
+ # If we see the "no active sinks" log message we know
+ # the heal is alive. It cannot proceed as the "sink"
+ # is hung, but it's at least alive and trying.
+ timeout $TIMEOUT grep -q 'replicate-0: no active sinks for' \
+ <(tail -fn0 /var/log/glusterfs/glustershd.log)
+ return $?
+}
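+# (wait_for_shd_no_sink follows only new glustershd.log lines via tail -fn0 in a
+# process substitution; timeout bounds the wait, so it returns success only if
+# the "no active sinks" message appears within TIMEOUT seconds.)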
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+# Set up a replica-3 volume with client-side self-heals disabled
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 network.frame-timeout 2
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 cluster.heal-timeout 10
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+# Kill brick 1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+sleep 1
+
+# Write some data into the mount which will require healing
+cd $M0
+for i in {1..1000}; do
+ dd if=/dev/urandom of=testdata_$i bs=64k count=1 2>/dev/null
+done
+
+# Re-start the brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+sleep 1
+TEST hang_brick $V0 $H0 $B0/${V0}1
+sleep 4
+TEST wait_for_shd_no_sink 20
+cleanup
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/fb8149516.t b/tests/bugs/fb8149516.t
new file mode 100644
index 00000000000..54372794c6f
--- /dev/null
+++ b/tests/bugs/fb8149516.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.read-subvolume-index 2
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.heal-timeout 30
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+for i in {1..10}
+do
+ dd if=/dev/urandom of=testfile$i bs=1M count=1 2>/dev/null
+done
+cd ~
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST rm -rf $B0/${V0}2/testfile*
+TEST rm -rf $B0/${V0}2/.glusterfs
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 2
+
+# Verify we see all ten files when listing; without the fix this would
+# return no files and fail.
+FILE_LIST=($(\ls $M0))
+TEST "((${#FILE_LIST[@]} == 10))"
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+
+cleanup
diff --git a/tests/bugs/fuse/bug-858488-min-free-disk.t b/tests/bugs/fuse/bug-858488-min-free-disk.t
index 635dc04d1e6..ab636575d3f 100644
--- a/tests/bugs/fuse/bug-858488-min-free-disk.t
+++ b/tests/bugs/fuse/bug-858488-min-free-disk.t
@@ -23,6 +23,7 @@ TEST MOUNT_LOOP $LO2 $B0/${V0}2
## Lets create volume
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 cluster.du-refresh-interval-sec 1
## Verify volume is created
EXPECT "$V0" volinfo_field $V0 'Volume Name';
diff --git a/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t b/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
index 9fc7ac3b845..3bc80ab9dab 100644
--- a/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
+++ b/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
@@ -1,6 +1,6 @@
#!/bin/bash
-## Test case for cluster.min-free-disk option validation.
+## Test case for cluster.min-free-disk option validation.
. $(dirname $0)/../../include.rc
@@ -17,21 +17,21 @@ TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
TEST $CLI volume start $V0
## Setting invalid value for option cluster.min-free-disk should fail
-TEST ! $CLI volume set $V0 min-free-disk ""
-TEST ! $CLI volume set $V0 min-free-disk 143.!/12
-TEST ! $CLI volume set $V0 min-free-disk 123%
-TEST ! $CLI volume set $V0 min-free-disk 194.34%
+TEST ! $CLI volume set $V0 cluster.min-free-disk ""
+TEST ! $CLI volume set $V0 cluster.min-free-disk 143.!/12
+TEST ! $CLI volume set $V0 cluster.min-free-disk 123%
+TEST ! $CLI volume set $V0 cluster.min-free-disk 194.34%
## Setting fractional value as a size (unit is byte) for option
## cluster.min-free-disk should fail
-TEST ! $CLI volume set $V0 min-free-disk 199.051
-TEST ! $CLI volume set $V0 min-free-disk 111.999
+TEST ! $CLI volume set $V0 cluster.min-free-disk 199.051
+TEST ! $CLI volume set $V0 cluster.min-free-disk 111.999
## Setting valid value for option cluster.min-free-disk should pass
-TEST $CLI volume set $V0 min-free-disk 12%
-TEST $CLI volume set $V0 min-free-disk 56.7%
-TEST $CLI volume set $V0 min-free-disk 120
-TEST $CLI volume set $V0 min-free-disk 369.0000
+TEST $CLI volume set $V0 cluster.min-free-disk 12%
+TEST $CLI volume set $V0 cluster.min-free-disk 56.7%
+TEST $CLI volume set $V0 cluster.min-free-disk 120
+TEST $CLI volume set $V0 cluster.min-free-disk 369.0000
cleanup;
diff --git a/tests/bugs/glusterd/bug-859927.t b/tests/bugs/glusterd/bug-859927.t
index c30d2b852d4..1b9ca18c08a 100755
--- a/tests/bugs/glusterd/bug-859927.t
+++ b/tests/bugs/glusterd/bug-859927.t
@@ -44,12 +44,12 @@ TEST ! $CLI volume set $V0 min-free-inodes " "
TEST $CLI volume set $V0 min-free-inodes 60%
EXPECT "60%" volume_option $V0 cluster.min-free-inodes
-TEST ! $CLI volume set $V0 min-free-disk ""
-TEST ! $CLI volume set $V0 min-free-disk " "
-TEST $CLI volume set $V0 min-free-disk 60%
+TEST ! $CLI volume set $V0 cluster.min-free-disk ""
+TEST ! $CLI volume set $V0 cluster.min-free-disk " "
+TEST $CLI volume set $V0 cluster.min-free-disk 60%
EXPECT "60%" volume_option $V0 cluster.min-free-disk
-TEST $CLI volume set $V0 min-free-disk 120
+TEST $CLI volume set $V0 cluster.min-free-disk 120
EXPECT "120" volume_option $V0 cluster.min-free-disk
TEST ! $CLI volume set $V0 frame-timeout ""
diff --git a/tests/bugs/nfs/bug-1166862.t b/tests/bugs/nfs/bug-1166862.t
index f986fe36ab7..fd57ccb992b 100755
--- a/tests/bugs/nfs/bug-1166862.t
+++ b/tests/bugs/nfs/bug-1166862.t
@@ -65,3 +65,7 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
EXPECT '0' count_lines cat $GLUSTERD_WORKDIR/nfs/rmtab
cleanup
+
+# rmtab support permanently hacked out on FB branch.
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/nfs/bug-904065.t b/tests/bugs/nfs/bug-904065.t
index 0becb756da4..0d539a2341c 100755
--- a/tests/bugs/nfs/bug-904065.t
+++ b/tests/bugs/nfs/bug-904065.t
@@ -90,3 +90,7 @@ EXPECT '2' count_lines $M0/rmtab
# rmtab.
cleanup
+
+# rmtab support permanently hacked out on FB branch.
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/quota/bug-1292020.t b/tests/bugs/quota/bug-1292020.t
index 14b311c9d76..f713c74859b 100644
--- a/tests/bugs/quota/bug-1292020.t
+++ b/tests/bugs/quota/bug-1292020.t
@@ -4,10 +4,12 @@
. $(dirname $0)/../../volume.rc
function write_sample_data () {
- dd if=/dev/zero of=$M0/f1 bs=256k count=400 2>&1 | grep -i exceeded
+ dd if=/dev/zero of=$M0/f1 bs=256k count=400 2>&1 |
+ egrep -i 'exceeded|no space' && echo 'passed'
}
cleanup;
+rm -f /tmp/kbv.log
TEST glusterd;
TEST pidof glusterd;
@@ -18,7 +20,8 @@ TEST $CLI volume quota $V0 enable;
TEST $CLI volume quota $V0 limit-usage / 1
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
-EXPECT "exceeded" write_sample_data
+
+EXPECT "passed" write_sample_data
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0
diff --git a/tests/bugs/replicate/bug-859581.t b/tests/bugs/replicate/bug-859581.t
index d8b45a257a1..313067b6049 100755
--- a/tests/bugs/replicate/bug-859581.t
+++ b/tests/bugs/replicate/bug-859581.t
@@ -51,3 +51,5 @@ TEST $CLI volume delete $V0
cleanup
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/cluster.rc b/tests/cluster.rc
index 467bbcb06e1..42547f09e37 100644
--- a/tests/cluster.rc
+++ b/tests/cluster.rc
@@ -46,17 +46,18 @@ function define_glusterds() {
bopt="management.transport.socket.bind-address=${!h}";
popt="--pid-file=${!b}/glusterd.pid";
sopt="management.glusterd-sockfile=${!b}/glusterd/gd.sock"
+ aopt="*.transport.address-family=inet"
#Get the logdir
logdir=`gluster --print-logdir`
#Fetch the testcases name and prefix the glusterd log with it
logfile=`echo ${0##*/}`_glusterd$i.log
lopt="--log-file=$logdir/$logfile"
if [ "$2" == "-LDEBUG" ]; then
- eval "glusterd_$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'";
- eval "glusterd$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'";
+ eval "glusterd_$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt --xlator-option $aopt $lopt $popt'";
+ eval "glusterd$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt --xlator-option $aopt $lopt $popt'";
else
- eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'";
- eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'";
+ eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt --xlator-option $aopt $lopt $popt'";
+ eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt --xlator-option $aopt $lopt $popt'";
fi
done
}
diff --git a/tests/configfiles/exports-v6 b/tests/configfiles/exports-v6
new file mode 100644
index 00000000000..426b1ef5705
--- /dev/null
+++ b/tests/configfiles/exports-v6
@@ -0,0 +1 @@
+/test @test(rw,anonuid=0,sec=sys,) 2401:db00:11:1:face:0:3d:0(rw,anonuid=0,sec=sys,)
diff --git a/tests/env.rc.in b/tests/env.rc.in
index 82971c4a8de..87befc3711d 100644
--- a/tests/env.rc.in
+++ b/tests/env.rc.in
@@ -28,3 +28,6 @@ export PYTHON
PYTHONPATH=@BUILD_PYTHON_SITE_PACKAGES@:$PYTHON_PATH
export PYTHONPATH
+
+TESTER_CFLAGS="@TESTER_CFLAGS@"
+export TESTER_CFLAGS
diff --git a/tests/features/brick-min-free-space.t b/tests/features/brick-min-free-space.t
new file mode 100755
index 00000000000..0fc5a241534
--- /dev/null
+++ b/tests/features/brick-min-free-space.t
@@ -0,0 +1,121 @@
+#!/bin/bash
+#
+# Test that the storage.min-free-disk option works.
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST truncate -s 16M $B0/brick0
+TEST LOOPDEV=$(losetup --find --show $B0/brick0)
+TEST mkfs.xfs $LOOPDEV
+
+mkdir -p $B0/$V0
+
+TEST mount -t xfs $LOOPDEV $B0/$V0
+
+###########
+# AIO on #
+###########
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 readdir-ahead on
+TEST $CLI vol set $V0 storage.linux-aio on
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Filesystem has ~12MB capacity after XFS and glusterfs overhead.
+# A 16MB write should blow up.
+TEST ! dd if=/dev/zero of=$M0/test bs=1M count=16 oflag=direct
+TEST rm $M0/test
+
+# But we should be able to write 10MB
+TEST dd if=/dev/zero of=$M0/test bs=1M count=10 oflag=direct
+
+# Now enable the free-space check and require at least 8MB of free space
+TEST $CLI volume set $V0 storage.freespace-check-interval 1
+TEST $CLI volume set $V0 storage.min-free-disk 8388608
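+# (8388608 bytes = 8 MiB. With the 10MB file above still present on this ~12MB
+# filesystem, only ~2MB remain free, which is below the threshold.)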
+
+sleep 5
+
+# Now even a tiny write ought to fail.
+TEST ! dd if=/dev/zero of=$M0/test1 bs=1M count=1 oflag=direct
+TEST rm $M0/test1
+
+# Repeat using percent syntax.
+TEST $CLI volume set $V0 storage.min-free-disk 33%
+
+sleep 5
+
+TEST ! dd if=/dev/zero of=$M0/test1 bs=4K count=1 oflag=direct
+TEST rm $M0/test1
+
+# Disable limit.
+TEST $CLI volume set $V0 storage.freespace-check-interval 0
+
+# Now we can write again.
+TEST dd if=/dev/zero of=$M0/test1 bs=4K count=1 oflag=direct
+
+TEST rm $M0/test1
+TEST rm $M0/test
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+############
+# AIO off #
+############
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 readdir-ahead on
+TEST $CLI vol set $V0 storage.linux-aio off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Filesystem has ~12MB capacity after XFS and glusterfs overhead.
+# A 16MB write should blow up.
+TEST ! dd if=/dev/zero of=$M0/test bs=1M count=16 oflag=direct
+TEST rm $M0/test
+
+# But we should be able to write 10MB
+TEST dd if=/dev/zero of=$M0/test bs=1M count=10 oflag=direct
+
+# Now enable the free-space check and require at least 8MB of free space
+TEST $CLI volume set $V0 storage.freespace-check-interval 1
+TEST $CLI volume set $V0 storage.min-free-disk 8388608
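+# (Same arithmetic as the AIO-on case: the 8 MiB threshold exceeds the ~2MB
+# actually free, so writes must be rejected.)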
+
+sleep 5
+
+# Now even a tiny write ought to fail.
+TEST ! dd if=/dev/zero of=$M0/test1 bs=1M count=1 oflag=direct
+TEST rm $M0/test1
+
+# Repeat using percent syntax.
+TEST $CLI volume set $V0 storage.min-free-disk 33%
+
+sleep 5
+
+TEST ! dd if=/dev/zero of=$M0/test1 bs=4K count=1 oflag=direct
+TEST rm $M0/test1
+
+# Disable limit.
+TEST $CLI volume set $V0 storage.freespace-check-interval 0
+
+# Now we can write again.
+TEST dd if=/dev/zero of=$M0/test1 bs=4K count=1 oflag=direct
+
+TEST rm $M0/test1
+TEST rm $M0/test
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/features/lock_revocation.t b/tests/features/lock_revocation.t
new file mode 100644
index 00000000000..cbf21b71650
--- /dev/null
+++ b/tests/features/lock_revocation.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+logdir=$(gluster --print-logdir)
+BRICK_LOGFILES="$logdir/bricks/d-backends-brick?.log"
+rm -f $BRICK_LOGFILES &> /dev/null
+
+# Test that lock revocation works
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+cleanup;
+
+function deadlock_fop() {
+ local MNT=$1
+ for i in {1..1000}; do
+ dd if=/dev/zero of=$MNT/testfile bs=1k count=10 &> /dev/null
+ if grep "MONKEY LOCKING" $BRICK_LOGFILES &> /dev/null; then
+ break
+ fi
+ done
+}
+
+function monkey_unlock() {
+ grep "MONKEY LOCKING" $BRICK_LOGFILES &> /dev/null && echo SUCCESS
+ return 0
+}
+
+function append_to_file() {
+ local FILE_PATH=$1
+ echo "hello" >> $FILE_PATH
+ return 0
+}
+
+#Init
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 features.locks-monkey-unlocking on
+TEST $CLI volume set $V0 features.locks-revocation-secs 2
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 -s $H0 $M0;
+TEST $GFS --volfile-id=$V0 -s $H0 $M1;
+
+# Cause writes to a file to deadlock, using monkey unlocking
+deadlock_fop $M0 &
+EXPECT_WITHIN 60 "SUCCESS" monkey_unlock
+
+# Sleep longer than the revocation timeout, then attempt to write to the file
+sleep 3
+TEST append_to_file $M1/testfile
+
+cleanup
diff --git a/tests/halo.rc b/tests/halo.rc
new file mode 100644
index 00000000000..4cb7c81da85
--- /dev/null
+++ b/tests/halo.rc
@@ -0,0 +1,52 @@
+# Return the current Halo state of a given child (by index, i.e. 0
+# is first child).
+function halo_child_state {
+ grep "Child $1 .*halo state: " /var/log/glusterfs/$M0LOG |
+ tail -n1 | sed 's/^.* halo state: //' | sed 's/ .*$//'
+}
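+# (Assumes AFR logs lines of the form "... Child <N> ... halo state: <STATE> ..."
+# to the client log $M0LOG; the sed calls above strip everything but <STATE>.)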
+
+# Return number of Halo children which are in a given state.
+# First parameter is total # children.
+# Second parameter is state to match (e.g. "UP").
+function halo_children_in_state {
+ local CHILD_COUNT=$1
+ local SUM=0
+ for CHILD in $(seq 0 $((CHILD_COUNT-1))); do
+ if [ x"$(halo_child_state $CHILD)" == x"$2" ]; then
+ SUM=$((SUM+1))
+ fi
+ done
+ echo $SUM
+}
+
+# Return the number of halo children that are up.
+# First parameter is the total number of children.
+function halo_children_up {
+ echo $(halo_children_in_state $1 "UP")
+}
+
+# Return the number of halo children that are down.
+# First parameter is the total number of children.
+function halo_children_down {
+ echo $(halo_children_in_state $1 "DOWN")
+}
+
+# Return number of up & down halo children.
+# First parameter is total number of children.
+function halo_sum_child_states {
+ local CHILD_COUNT=$1
+
+ local UP=0
+ local DOWN=0
+
+ for CHILD in $(seq 0 $((CHILD_COUNT-1))); do
+ local STATE=$(halo_child_state $CHILD)
+ if [ x"$STATE" == x"UP" ]; then
+ UP=$((UP+1))
+ elif [ x"$STATE" == x"DOWN" ]; then
+ DOWN=$((DOWN+1))
+ fi
+ done
+
+ echo "$UP $DOWN"
+}
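+
+# Illustrative usage from a halo test (hypothetical child count and values):
+#   EXPECT_WITHIN 60 "2" halo_children_up 3        # expect 2 of 3 children up
+#   EXPECT_WITHIN 60 "2 1" halo_sum_child_states 3 # expect 2 up, 1 down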
diff --git a/tests/include.rc b/tests/include.rc
index 492e35a7b6c..8b6504e6c58 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -19,11 +19,13 @@ META_MNT=${META_MNT:=/var/run/gluster/shared_storage}; # Mount point of shared g
CC=cc
OSTYPE=$(uname -s)
-ENV_RC=$(dirname $0)/../env.rc
+M0LOG=${M0LOG:="mnt-glusterfs-0.log"}; # Log file for 0th FUSE mount point
+
+ENV_RC=$(dirname $0)/env.rc
if [ ! -f $ENV_RC ]; then
- ENV_RC=$(dirname $0)/../../env.rc
+ ENV_RC=$(dirname $0)/../env.rc
if [ ! -f $ENV_RC ]; then
- ENV_RC=$(dirname $0)/../../../env.rc
+ ENV_RC=$(dirname $0)/../../env.rc
fi
fi
@@ -171,6 +173,7 @@ function test_footer()
echo "FAILED COMMAND: $saved_cmd"
fi
if [ "$EXIT_EARLY" = "1" ]; then
+ cleanup
exit $RET
fi
fi
@@ -350,6 +353,7 @@ which killall > /dev/null || {
which pidof > /dev/null || {
pidof() {
+
$PYTHON pidof.py $@
}
}
@@ -422,11 +426,13 @@ stat -c %s /dev/null > /dev/null 2>&1 || {
function cleanup()
{
+ local OLDPWD=$PWD
+ cd # Things go pear-shaped if we're inside a Gluster mount.
# Prepare flags for umount
case `uname -s` in
Linux)
- flag="-l"
+ flag="-l -f --no-canonicalize"
;;
NetBSD)
flag="-f -R"
@@ -573,6 +579,8 @@ function cleanup()
# above to fail, promoting that into a failure of the whole test (and
# thus of an entire regression-test run) seems a bit excessive. Make
# sure we return good status anyway.
+
+ cd $OLDPWD
return 0
}
@@ -612,6 +620,7 @@ function build_tester ()
then
cflags="$cflags $(pkg-config glusterfs-api --cflags-only-I --libs-only-L)"
fi
+ cflags="$cflags ${TESTER_CFLAGS}"
$CC -g -o $(dirname $cfile)/$execname $cfile $cflags
}
@@ -1163,3 +1172,5 @@ function STAT_INO()
echo 0
fi
}
+
+systemctl stop nfs-mountd
diff --git a/tests/nfs.rc b/tests/nfs.rc
index 2140f311c33..ee52d96e6d3 100644
--- a/tests/nfs.rc
+++ b/tests/nfs.rc
@@ -23,7 +23,7 @@ function mount_nfs ()
local m=$2
local opt=$3
if [ ! -z "$opt" ]; then opt=",$opt"; fi
- opt="soft,intr,vers=3$opt"
+ opt="soft,intr,nfsvers=3,proto=tcp$opt"
nopt=""
for o in ${opt//,/ }; do
diff --git a/tests/volume.rc b/tests/volume.rc
index f95c0013b2e..84630f3d4b4 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -237,6 +237,13 @@ function kill_brick {
kill -9 $(get_brick_pid $vol $host $brick)
}
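+# Suspend a brick process with SIGSTOP so it stops responding without exiting;
+# used by tests that need to simulate a hung (rather than dead) brick.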
+function hang_brick {
+ local vol=$1
+ local host=$2
+ local brick=$3
+ kill -STOP $(get_brick_pid $vol $host $brick)
+}
+
function check_option_help_presence {
local option=$1
$CLI volume set help | grep "^Option:" | grep -w $option