Diffstat (limited to 'tests/basic')
-rw-r--r--  tests/basic/accept-v6v4.t                     122
-rwxr-xr-x  tests/basic/dht-min-free-space.t               78
-rw-r--r--  tests/basic/ec/ec-common                        2
-rw-r--r--  tests/basic/ec/self-heal.t                      2
-rw-r--r--  tests/basic/exports_parsing.t                  15
-rw-r--r--  tests/basic/fop-sampling.t                     78
-rwxr-xr-x  tests/basic/fops-sanity-gfproxy.t              32
-rw-r--r--  tests/basic/gfproxy.t                          74
-rw-r--r--  tests/basic/glusterd/volfile_server_switch.t    3
-rw-r--r--  tests/basic/halo-failover-disabled.t           77
-rw-r--r--  tests/basic/halo-failover-enabled.t            87
-rw-r--r--  tests/basic/halo-hybrid.t                      70
-rw-r--r--  tests/basic/halo.t                             51
-rwxr-xr-x  tests/basic/mount-nfs-auth.t                   17
-rw-r--r--  tests/basic/uss.t                               2
-rw-r--r--  tests/basic/write-behind.t                     53
16 files changed, 751 insertions(+), 12 deletions(-)
diff --git a/tests/basic/accept-v6v4.t b/tests/basic/accept-v6v4.t
new file mode 100644
index 00000000000..7128c12c6be
--- /dev/null
+++ b/tests/basic/accept-v6v4.t
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+. $(dirname $0)/../nfs.rc
+
+#
+# This test ensures that GlusterFS provides its NFS, mount, and management
+# daemons over both IPv4 and IPv6. It probes the services listening on the
+# IPv4 & IPv6 addresses (via bash's /dev/tcp) and performs an NFS mount to
+# verify that mount & nfs actually work.
+#
+
+IPV4_SUPPORT=false
+IPV6_SUPPORT=false
+
+host $HOSTNAME | grep -q "has address" && IPV4_SUPPORT=true
+host $HOSTNAME | grep -q "has IPv6 address" && IPV6_SUPPORT=true
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+mkdir -p $B0/b{0,1,2}
+
+# make sure no registered rpcbind services are running
+service rpcbind restart
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI vol create $V0 replica 3 $H0:$B0/b0 $H0:$B0/b1 $H0:$B0/b2
+
+TEST $CLI vol set $V0 cluster.self-heal-daemon off
+TEST $CLI vol set $V0 nfs.disable off
+TEST $CLI vol set $V0 cluster.choose-local off
+TEST $CLI vol start $V0
+
+MOUNTD_PORT=38465
+MGMTD_PORT=24007
+NFSD_PORT=2049
+
+function check_ip_port {
+ ip=$1
+ port=$2
+ type=$3
+
+    # An address of NONE means this family is unsupported on the host,
+    # so pass the check vacuously.
+    if [ "$ip" == "NONE" ]; then
+        echo "Y"
+        return
+    fi
+
+ if exec 3<>/dev/tcp/$ip/$port; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function check_nfs {
+ ip=$1
+ type=$2
+
+ if [ "$ip" == "NONE" ]; then
+ echo "Y"
+ return
+ fi
+
+ if [ "$type" == "v6" ]; then
+ addr="[$ip]"
+ else
+ addr="$ip"
+ fi
+
+ if mount_nfs $addr:/$V0 $N0; then
+ umount_nfs $N0
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+# $IPV4_SUPPORT/$IPV6_SUPPORT hold the strings "true"/"false", so run them
+# as commands; "[ ! $IPV4_SUPPORT ]" is always false for a non-empty string.
+if ! $IPV4_SUPPORT && ! $IPV6_SUPPORT; then
+ exit 1
+fi
+
+# Get the V4 & V6 addresses of this host
+if $IPV4_SUPPORT; then
+ V4=$(host $HOSTNAME | head -n1 | awk -F ' ' '{print $4}')
+else
+ V4="NONE"
+fi
+
+if $IPV6_SUPPORT; then
+ V6=$(host $HOSTNAME | tail -n1 | awk -F ' ' '{print $5}')
+else
+ V6="NONE"
+fi
+
+# First check the management daemon
+EXPECT "Y" check_ip_port $V6 $MGMTD_PORT "v6"
+EXPECT "Y" check_ip_port $V4 $MGMTD_PORT "v4"
+
+# Give the MOUNT/NFS Daemon some time to start up
+sleep 4
+
+EXPECT "Y" check_ip_port $V4 $MOUNTD_PORT "v6"
+EXPECT "Y" check_ip_port $V6 $MOUNTD_PORT "v4"
+
+EXPECT "Y" check_ip_port $V4 $NFSD_PORT "v6"
+EXPECT "Y" check_ip_port $V6 $NFSD_PORT "v4"
+
+# Mount the file system
+EXPECT "Y" check_nfs $V6 "v6"
+EXPECT "Y" check_nfs $V4 "v4"
+
+cleanup;
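
The check_ip_port helper above probes ports with bash's /dev/tcp pseudo-device
rather than netcat. A minimal standalone sketch of the same technique (host and
port values are illustrative; the timeout wrapper is an addition here, since a
bare /dev/tcp open can block for a long time on a filtered port):

    #!/bin/bash
    # Probe a TCP port via bash's built-in /dev/tcp redirection.
    probe() {
        local host=$1 port=$2
        if timeout 5 bash -c "exec 3<>/dev/tcp/$host/$port" 2>/dev/null; then
            echo "open"
        else
            echo "closed"
        fi
    }
    probe 127.0.0.1 24007    # glusterd management port, as in the test above
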
diff --git a/tests/basic/dht-min-free-space.t b/tests/basic/dht-min-free-space.t
new file mode 100755
index 00000000000..17d10cc39a5
--- /dev/null
+++ b/tests/basic/dht-min-free-space.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+grep $B0/${V0}1 /proc/mounts &> /dev/null && umount $B0/${V0}1
+grep $B0/${V0}2 /proc/mounts &> /dev/null && umount $B0/${V0}2
+losetup -d /dev/loop0 2> /dev/null
+losetup -d /dev/loop1 2> /dev/null
+mkdir $B0/${V0}{1..2}
+
+TEST glusterd
+
+TEST dd if=/dev/zero of=/tmp/${V0}-dev1 bs=1M count=30
+TEST dd if=/dev/zero of=/tmp/${V0}-dev2 bs=1M count=30
+
+TEST losetup /dev/loop0 /tmp/${V0}-dev1
+TEST losetup /dev/loop1 /tmp/${V0}-dev2
+
+TEST mkfs.xfs /dev/loop0
+TEST mkfs.xfs /dev/loop1
+
+TEST mount /dev/loop0 $B0/${V0}1
+TEST mount /dev/loop1 $B0/${V0}2
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume set $V0 cluster.min-free-disk 2MB
+TEST $CLI volume set $V0 cluster.min-free-strict-mode on
+TEST $CLI volume set $V0 cluster.du-refresh-interval-sec 0
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+####################################
+# Test re-directs of file creation #
+####################################
+
+# This should work with no redirects
+TEST dd if=/dev/zero of=$M0/testfile1 bs=1M count=8
+TEST [ -f $B0/${V0}2/testfile1 ] && [ ! -k $B0/${V0}1/testfile1 ]
+
+TEST $CLI volume set $V0 cluster.min-free-disk 19MB
+
+# This should work, and the file should be redirected:
+# subvolume 2 gets the linkto file and
+# subvolume 1 gets the original.
+TEST dd if=/dev/zero of=$M0/testfile3 bs=1M count=4
+TEST [ -f $B0/${V0}1/testfile3 ] && [ ! -k $B0/${V0}1/testfile3 ]
+TEST [ -k $B0/${V0}2/testfile3 ]
+
+# This should fail: the cluster is full
+TEST ! dd if=/dev/zero of=$M0/testfile2 bs=1M count=23
+
+###################
+# Strict mode off #
+###################
+TEST $CLI volume set $V0 cluster.min-free-strict-mode off
+TEST dd if=/dev/zero of=$M0/testfile1 bs=1M count=20
+TEST rm -f $M0/testfile1
+
+###################
+# Strict mode on #
+###################
+TEST $CLI volume set $V0 cluster.min-free-strict-mode on
+TEST ! dd if=/dev/zero of=$M0/testfile1 bs=1M count=16
+TEST rm -f $M0/testfile1
+
+killall gluster{fs,fsd,d}
+
+umount -lf $B0/${V0}1
+umount -lf $B0/${V0}2
+
+losetup -d /dev/loop0
+losetup -d /dev/loop1
+
+cleanup;
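
This test pins its bricks to /dev/loop0 and /dev/loop1, which can collide with
other loop-device users on the box. A sketch of the same brick setup that lets
losetup allocate a free device instead (helper name is hypothetical):

    # Build a 30MB XFS brick backed by a dynamically allocated loop device.
    make_brick() {
        local img=$1 mnt=$2 dev
        dd if=/dev/zero of="$img" bs=1M count=30 2>/dev/null
        dev=$(losetup -f --show "$img")   # -f picks the first free loop device
        mkfs.xfs -q "$dev"
        mkdir -p "$mnt"
        mount "$dev" "$mnt"
        echo "$dev"                       # keep for "losetup -d" at teardown
    }
    dev1=$(make_brick /tmp/${V0}-dev1 $B0/${V0}1)
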
diff --git a/tests/basic/ec/ec-common b/tests/basic/ec/ec-common
index 83c4463a912..152e3b51236 100644
--- a/tests/basic/ec/ec-common
+++ b/tests/basic/ec/ec-common
@@ -45,7 +45,7 @@ for size in $SIZE_LIST; do
eval cs_big_truncate[$size]=$(sha1sum $tmp/big1 | awk '{ print $1 }')
done
-TEST df -h
+TEST df -h $M0
TEST stat $M0
for idx in `seq 0 $LAST_BRICK`; do
diff --git a/tests/basic/ec/self-heal.t b/tests/basic/ec/self-heal.t
index 98dd9232c73..3e3467535fb 100644
--- a/tests/basic/ec/self-heal.t
+++ b/tests/basic/ec/self-heal.t
@@ -136,7 +136,7 @@ TEST dd if=/dev/urandom of=$tmp/test bs=1024 count=1024
cs=$(sha1sum $tmp/test | awk '{ print $1 }')
-TEST df -h
+TEST df -h $M0
TEST stat $M0
for idx in {0..5}; do
diff --git a/tests/basic/exports_parsing.t b/tests/basic/exports_parsing.t
index fdaf9c2822e..da88bbcb2cc 100644
--- a/tests/basic/exports_parsing.t
+++ b/tests/basic/exports_parsing.t
@@ -32,7 +32,20 @@ function test_bad_opt ()
glusterfsd --print-exports $1 2>&1 | sed -n 1p
}
-EXPECT_KEYWORD "/test @test(rw,anonuid=0,sec=sys,) 10.35.11.31(rw,anonuid=0,sec=sys,)" test_good_file $EXP_FILES/exports
+function check_export_line() {
+    if [ "$1" == "$2" ]; then
+        echo "Y"
+    else
+        echo "N"
+    fi
+}
+
+export_result=$(test_good_file $EXP_FILES/exports)
+EXPECT "Y" check_export_line '/test @test(rw,anonuid=0,sec=sys,) 10.35.11.31(rw,anonuid=0,sec=sys,) ' "$export_result"
+
+export_result=$(test_good_file $EXP_FILES/exports-v6)
+EXPECT "Y" check_export_line '/test @test(rw,anonuid=0,sec=sys,) 2401:db00:11:1:face:0:3d:0(rw,anonuid=0,sec=sys,) ' "$export_result"
EXPECT_KEYWORD "Error parsing netgroups for:" test_bad_line $EXP_FILES/bad_exports
EXPECT_KEYWORD "Error parsing netgroups for:" test_long_netgroup $EXP_FILES/bad_exports
diff --git a/tests/basic/fop-sampling.t b/tests/basic/fop-sampling.t
index cea8aa737c0..713c7e27579 100644
--- a/tests/basic/fop-sampling.t
+++ b/tests/basic/fop-sampling.t
@@ -2,13 +2,27 @@
#
. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
. $(dirname $0)/../volume.rc
-SAMPLE_FILE="$(gluster --print-logdir)/samples/glusterfs_${V0}.samp"
+BRICK_SAMPLES="$(gluster --print-logdir)/samples/glusterfsd__d_backends_${V0}0.samp"
+NFS_SAMPLES="$(gluster --print-logdir)/samples/glusterfs_nfsd.samp"
+
+function check_path {
+    op=$1
+    path=$2
+    file=$3
+    # The redirect must be "> /dev/null 2>&1"; the other order
+    # ("2>&1 > /dev/null") leaves stderr on the terminal.
+    grep $op $file | awk -F, '{print $11}' | grep $path > /dev/null 2>&1
+    if [ $? -eq 0 ]; then
+        echo "Y"
+    else
+        echo "N"
+    fi
+}
function print_cnt() {
local FOP_TYPE=$1
- local FOP_CNT=$(grep ,${FOP_TYPE} ${SAMPLE_FILE} | wc -l)
+ local FOP_CNT=$(grep ,${FOP_TYPE} ${BRICK_SAMPLES} | wc -l)
echo $FOP_CNT
}
@@ -42,12 +56,18 @@ TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume set $V0 nfs.disable off
TEST $CLI volume set $V0 diagnostics.latency-measurement on
TEST $CLI volume set $V0 diagnostics.count-fop-hits on
-TEST $CLI volume set $V0 diagnostics.stats-dump-interval 2
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 5
TEST $CLI volume set $V0 diagnostics.fop-sample-buf-size 65535
TEST $CLI volume set $V0 diagnostics.fop-sample-interval 1
TEST $CLI volume set $V0 diagnostics.stats-dnscache-ttl-sec 3600
-
TEST $CLI volume start $V0
+
+>${NFS_SAMPLES}
+>${BRICK_SAMPLES}
+
+#################
+# Basic Samples #
+#################
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
for i in {1..5}
@@ -58,4 +78,52 @@ done
TEST ls -l $M0
EXPECT_WITHIN 6 "OK" check_samples
-cleanup
+sleep 2
+
+################################
+# Paths in the samples #
+################################
+
+TEST mount_nfs $H0:$V0 $N0
+
+ls $N0 &> /dev/null
+touch $N0/file1
+stat $N0/file1 &> /dev/null
+echo "some data" > $N0/file1
+dd if=/dev/zero of=$N0/file2 bs=1M count=10 conv=fsync
+dd if=/dev/zero of=$N0/file1 bs=1M count=1
+cat $N0/file2 &> /dev/null
+mkdir -p $N0/dir1
+rmdir $N0/dir1
+rm $N0/file1
+rm $N0/file2
+
+EXPECT_WITHIN 10 "Y" check_path CREATE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path LOOKUP /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path SETATTR /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path WRITE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FINODELK /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ENTRYLK / $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FLUSH /file2 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path TRUNCATE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path MKDIR /dir1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path RMDIR /dir1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file2 $BRICK_SAMPLES
+
+
+EXPECT_WITHIN 10 "Y" check_path CREATE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path LOOKUP /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ACCESS /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path SETATTR /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path WRITE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FLUSH /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ACCESS /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path READ /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path TRUNCATE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path MKDIR /dir1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path RMDIR /dir1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file2 $NFS_SAMPLES
+
+cleanup;
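
Per the check_path helper, each sample line is CSV with the fop's path in
field 11. A quick way to eyeball what has been recorded for one fop type
(layout inferred from the helper above, not from sampler documentation):

    # Print the path column of every WRITE sample captured on the brick.
    awk -F, '/,WRITE/ {print $11}' \
        "$(gluster --print-logdir)/samples/glusterfsd__d_backends_${V0}0.samp"
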
diff --git a/tests/basic/fops-sanity-gfproxy.t b/tests/basic/fops-sanity-gfproxy.t
new file mode 100755
index 00000000000..b3bb8a502cc
--- /dev/null
+++ b/tests/basic/fops-sanity-gfproxy.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#gfproxy server
+TEST glusterfs --volfile-id=gfproxy/$V0 --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy.log
+
+#mount on a random dir
+TEST glusterfs --entry-timeout=3600 --attribute-timeout=3600 -s $H0 --volfile-id=gfproxy-client/$V0 $M0 --direct-io-mode=yes
+TEST grep gfproxy-client /proc/mounts
+
+build_tester $(dirname $0)/fops-sanity.c
+
+TEST cp $(dirname $0)/fops-sanity $M0
+cd $M0
+TEST ./fops-sanity $V0
+cd -
+rm -f $(dirname $0)/fops-sanity
+
+cleanup;
diff --git a/tests/basic/gfproxy.t b/tests/basic/gfproxy.t
new file mode 100644
index 00000000000..71c6788db76
--- /dev/null
+++ b/tests/basic/gfproxy.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
+function start_gfproxyd {
+ glusterfs --volfile-id=gfproxy/${V0} --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy.log
+}
+
+function restart_gfproxyd {
+ pkill -f gfproxy/${V0}
+ start_gfproxyd
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 config.gfproxyd-remote-host $H0
+TEST $CLI volume start $V0
+
+sleep 2
+
+REGULAR_CLIENT_VOLFILE="/var/lib/glusterd/vols/${V0}/trusted-${V0}.tcp-fuse.vol"
+GFPROXY_CLIENT_VOLFILE="/var/lib/glusterd/vols/${V0}/trusted-${V0}.tcp-gfproxy-fuse.vol"
+GFPROXYD_VOLFILE="/var/lib/glusterd/vols/${V0}/${V0}.gfproxyd.vol"
+
+# Client volfile must exist
+TEST [ -f $GFPROXY_CLIENT_VOLFILE ]
+
+# AHA & write-behind translators must exist
+TEST grep "cluster/aha" $GFPROXY_CLIENT_VOLFILE
+TEST grep "performance/write-behind" $GFPROXY_CLIENT_VOLFILE
+
+# Make sure we didn't screw up the existing client
+TEST grep "performance/write-behind" $REGULAR_CLIENT_VOLFILE
+TEST grep "cluster/replicate" $REGULAR_CLIENT_VOLFILE
+TEST grep "cluster/distribute" $REGULAR_CLIENT_VOLFILE
+
+TEST [ -f $GFPROXYD_VOLFILE ]
+
+TEST grep "cluster/replicate" $GFPROXYD_VOLFILE
+TEST grep "cluster/distribute" $GFPROXYD_VOLFILE
+
+# AHA & write-behind must *not* exist
+TEST ! grep "cluster/aha" $GFPROXYD_VOLFILE
+TEST ! grep "performance/write-behind" $GFPROXYD_VOLFILE
+
+# Test that we can start the server and the client
+TEST start_gfproxyd
+TEST glusterfs --volfile-id=gfproxy-client/${V0} --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy-client.log $M0
+sleep 2
+TEST grep gfproxy-client/${V0} /proc/mounts
+
+# Write data to the mount and checksum it
+TEST dd if=/dev/urandom bs=1M count=10 of=/tmp/testfile1
+md5=$(md5sum /tmp/testfile1 | awk '{print $1}')
+TEST cp -v /tmp/testfile1 $M0/testfile1
+TEST [ "$(md5sum $M0/testfile1 | awk '{print $1}')" == "$md5" ]
+
+rm /tmp/testfile1
+
+# The stress write must target the gfproxy mount at $M0; nothing is
+# mounted at $N0 in this test.
+dd if=/dev/zero of=$M0/bigfile bs=1M count=3072 &
+BG_STRESS_PID=$!
+
+sleep 3
+
+restart_gfproxyd
+
+TEST wait $BG_STRESS_PID
+
+cleanup;
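
The tail of this test is a restart-under-IO check: a large write runs in the
background while the gfproxy daemon is bounced, and the final wait asserts the
writer still exited 0 (presumably the cluster/aha translator asserted in the
client volfile is what carries the in-flight requests across the restart). The
bare pattern, as a sketch:

    dd if=/dev/zero of=$M0/stressfile bs=1M count=1024 &
    WRITER=$!
    sleep 3                    # let some writes get in flight
    pkill -f gfproxy/${V0}     # kill the proxy mid-stream
    start_gfproxyd
    wait $WRITER               # a nonzero exit here means IO was lost
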
diff --git a/tests/basic/glusterd/volfile_server_switch.t b/tests/basic/glusterd/volfile_server_switch.t
index 0b0e6470244..0b01398215c 100644
--- a/tests/basic/glusterd/volfile_server_switch.t
+++ b/tests/basic/glusterd/volfile_server_switch.t
@@ -1,5 +1,8 @@
#!/bin/bash
+#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
+
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
diff --git a/tests/basic/halo-failover-disabled.t b/tests/basic/halo-failover-disabled.t
new file mode 100644
index 00000000000..f3655eaef3b
--- /dev/null
+++ b/tests/basic/halo-failover-disabled.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+#
+# Tests that fail-over is correctly suppressed for Halo Geo-replication
+# when cluster.halo-failover-enabled is off.
+#
+# 1. Create a volume @ 3x replication w/ halo + quorum enabled and
+#    fail-over disabled.
+# 2. Write some data, then kill the brick that halo marked up.
+# 3. The expected result is that the spare brick is NOT swapped in,
+#    so quorum is lost and the mount becomes read-only.
+# 4. After "volume start force" brings the killed brick back, quorum
+#    is restored and the mount is writable again.
+#
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../halo.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-max-latency 9999
+TEST $CLI volume set $V0 cluster.halo-shd-max-latency 9999
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.halo-failover-enabled off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 2
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+TEST $CLI volume set $V0 nfs.log-level DEBUG
+
+# Use a large ping time here so the spare brick is not marked up
+# based on the ping time. The only way it can get marked up is
+# by being swapped in via the down event (which is what we are disabling).
+TEST $CLI volume set $V0 network.ping-timeout 1000
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# Make sure two children are up and one is down.
+EXPECT_WITHIN 10 "2 1" halo_sum_child_states 3
+
+# Write some data to the mount
+TEST dd if=/dev/urandom of=$M0/test bs=1k count=200 conv=fsync
+
+UP_IDX=$(cat /var/log/glusterfs/$M0LOG | grep "halo state: UP" | tail -n1 | grep -Eo "Child [0-9]+" | grep -Eo "[0-9]+")
+TEST kill_brick $V0 $H0 $B0/${V0}${UP_IDX}
+
+# Make sure two children are down and one is up.
+EXPECT_WITHIN 10 "1 2" halo_sum_child_states 3
+
+# Quorum should now fail and the mount should be read-only. Although
+# there _is_ another brick running which _could_ take the failed
+# brick's place, it is not marked "up", so quorum is not fulfilled.
+# If we waited 1000 seconds the brick would indeed be activated based
+# on ping time, but for this test we want the decision to be solely
+# "down event" driven, not ping driven.
+TEST ! dd if=/dev/urandom of=$M0/test_rw bs=1M count=1 conv=fsync
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 $UP_IDX
+
+# Test that quorum should be restored and the file is writable
+TEST dd if=/dev/urandom of=$M0/test_rw bs=1M count=1
+
+cleanup
diff --git a/tests/basic/halo-failover-enabled.t b/tests/basic/halo-failover-enabled.t
new file mode 100644
index 00000000000..2dddf9951fa
--- /dev/null
+++ b/tests/basic/halo-failover-enabled.t
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+# Tests that fail-over works correctly for Halo Geo-replication
+#
+# 1. Create a volume @ 3x replication w/ halo + quorum enabled
+# 2. Write some data, background it & fail a brick
+# 3. The expected result is that the writes fail-over to the 3rd
+#    brick immediately, and md5s will show they are equal once
+# the write completes.
+# 4. The mount should also be RW after the brick is killed as
+# quorum will be immediately restored by swapping in the
+# other brick.
+#
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../halo.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-failover-enabled on
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 2
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 network.ping-timeout 20
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+TEST $CLI volume set $V0 nfs.log-level DEBUG
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# Make sure two children are up and one is down.
+EXPECT_WITHIN 10 "2 1" halo_sum_child_states 3
+
+# Write some data to the mount
+TEST dd if=/dev/urandom of=$M0/test bs=1k count=200 conv=fsync
+
+KILL_IDX=$(cat /var/log/glusterfs/$M0LOG | grep "halo state: UP" | tail -n1 | grep -Eo "Child [0-9]+" | grep -Eo "[0-9]+")
+TEST [ -n "$KILL_IDX" ]
+# NB: UP_CHILDREN is the set of children that should be up after we kill
+# the brick indicated by KILL_IDX, *not* the set of children which are
+# currently up!
+UP_CHILDREN=($(echo "0 1 2" | sed "s/${KILL_IDX}//g"))
+UP1_HAS_TEST="$(ls $B0/${V0}${UP_CHILDREN[0]}/test 2>/dev/null)"
+UP2_HAS_TEST="$(ls $B0/${V0}${UP_CHILDREN[1]}/test 2>/dev/null)"
+VICTIM_HAS_TEST="$(ls $B0/${V0}${KILL_IDX}/test 2>/dev/null)"
+
+# The victim brick should have a copy of the file.
+TEST [ -n "$VICTIM_HAS_TEST" ]
+
+# Of the bricks which will remain standing, exactly one should have
+# the file called "test". If both have it, the test is invalid: all
+# the bricks were up and halo-max-replicas was not honored, i.e. a
+# bug exists.
+# Note the space in "$( (": without it bash may try to parse the
+# expression as arithmetic expansion.
+ONLY_ONE=$( ( [ -z "$UP2_HAS_TEST" ] || [ -z "$UP1_HAS_TEST" ] ) &&
+            ( [ -n "$UP2_HAS_TEST" ] || [ -n "$UP1_HAS_TEST" ] ) && echo true )
+TEST [ "x$ONLY_ONE" == "xtrue" ]
+
+echo "Failing child ${KILL_IDX}..."
+TEST kill_brick $V0 $H0 $B0/${V0}${KILL_IDX}
+
+# Test the mount is still RW (i.e. quorum works)
+TEST dd if=/dev/urandom of=$M0/test_failover bs=1M count=1 conv=fsync
+
+# Calculate the MD5s
+MD5_UP1=$(md5sum $B0/${V0}${UP_CHILDREN[0]}/test_failover | cut -d' ' -f1)
+MD5_UP2=$(md5sum $B0/${V0}${UP_CHILDREN[1]}/test_failover | cut -d' ' -f1)
+
+# Verify the two up bricks have identical MD5s, if both are identical
+# then we must have successfully failed-over to the brick which was
+# previously proven to be down (via the ONLY_ONE test).
+TEST [ "$MD5_UP1" == "$MD5_UP2" ]
+
+cleanup
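
The ONLY_ONE expression encodes an exclusive-or over the two surviving bricks.
The same check reads more directly as a helper (a sketch; the function name is
hypothetical):

    # Succeed iff exactly one of the two strings is non-empty.
    exactly_one() {
        { [ -n "$1" ] && [ -z "$2" ]; } || { [ -z "$1" ] && [ -n "$2" ]; }
    }
    exactly_one "$UP1_HAS_TEST" "$UP2_HAS_TEST" && ONLY_ONE=true
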
diff --git a/tests/basic/halo-hybrid.t b/tests/basic/halo-hybrid.t
new file mode 100644
index 00000000000..4574fdfe41e
--- /dev/null
+++ b/tests/basic/halo-hybrid.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# Test for the Halo hybrid feature
+#
+# 1. Create volume w/ 3x replication w/ max-replicas = 2 for clients,
+# heal daemon is off to start.
+# 2. Write some data
+# 3. Verify hybrid code chose children for lookups
+# 4. Verify hybrid code chose child for reads
+# 5. Verify hybrid code wrote synchronously to all replicas
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function found_fuse_log_msg {
+    # Callers pass a single argument: the message to look for.
+    local msg="$1"
+    local cnt=$(grep "$msg" /var/log/glusterfs/$M0LOG | tail -n1 | wc -l)
+    if (( $cnt == 1 )); then
+        echo "Y"
+    else
+        echo "N"
+    fi
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-hybrid-mode True
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 diagnostics.client-log-level TRACE
+TEST $CLI volume start $V0
+
+# Start a synchronous mount
+TEST glusterfs --volfile-id=/$V0 \
+ --xlator-option *replicate*.halo-max-latency=9999 \
+ --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+cd $M0
+
+TEST mkdir testdir
+TEST cd testdir
+for i in {1..5}
+do
+ dd if=/dev/urandom of=testfile$i bs=1M count=1 2>/dev/null
+done
+TEST ls -l
+
+EXPECT_WITHIN "60" "Y" found_fuse_log_msg "children for LOOKUPs"
+EXPECT_WITHIN "60" "Y" found_fuse_log_msg "Selected hybrid child"
+
+B0_CNT=$(ls $B0/${V0}0/testdir | wc -l)
+B1_CNT=$(ls $B0/${V0}1/testdir | wc -l)
+B2_CNT=$(ls $B0/${V0}2/testdir | wc -l)
+
+# Writes should be synchronous, all should have same
+# file count
+TEST "(($B0_CNT == 5 && $B1_CNT == 5 && $B2_CNT == 5))"
+
+cleanup
diff --git a/tests/basic/halo.t b/tests/basic/halo.t
new file mode 100644
index 00000000000..25aca3442ab
--- /dev/null
+++ b/tests/basic/halo.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Test for the Halo geo-replication feature
+#
+# 1. Create volume w/ 3x replication w/ max-replicas = 2 for clients,
+# heal daemon is off to start.
+# 2. Write some data
+# 3. Verify at least one of the bricks did not receive the writes.
+# 4. Turn the heal daemon on
+# 5. Within 30 seconds the SHD should async heal the data over
+# to the 3rd brick.
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+
+for i in {1..5}
+do
+ dd if=/dev/urandom of=f bs=1M count=1 2>/dev/null
+ mkdir a; cd a;
+done
+
+B0_CNT=$(ls $B0/${V0}0 | wc -l)
+B1_CNT=$(ls $B0/${V0}1 | wc -l)
+B2_CNT=$(ls $B0/${V0}2 | wc -l)
+
+# One of the brick dirs should be empty
+TEST "(($B0_CNT == 0 || $B1_CNT == 0 || $B2_CNT == 0))"
+
+# Ok, turn the heal daemon on and verify it heals it up
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+cleanup
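
With halo-max-replicas=2 on a 3-way replica, the client writes to only two
bricks, so one brick directory stays empty until the self-heal daemon is turned
back on; the per-brick entry counts are what the test compares. A one-liner to
watch the counts converge while the heal runs (paths as in the test):

    for b in 0 1 2; do echo "brick$b: $(ls $B0/${V0}$b | wc -l) entries"; done
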
diff --git a/tests/basic/mount-nfs-auth.t b/tests/basic/mount-nfs-auth.t
index 9df5cb45c3b..99f032cbd44 100755
--- a/tests/basic/mount-nfs-auth.t
+++ b/tests/basic/mount-nfs-auth.t
@@ -15,6 +15,9 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
+H0IP=$(ip addr show | grep -w inet | grep -v 127.0.0.1 | awk '{ print $2 }' | cut -d "/" -f 1)
+H0IP6=$(host $HOSTNAME | grep IPv6 | awk '{print $NF}')
+
# Export variables for allow & deny
EXPORT_ALLOW="/$V0 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
EXPORT_ALLOW_SLASH="/$V0/ $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
@@ -37,6 +40,10 @@ function build_dirs () {
mkdir -p $B0/b{0,1,2}/L1/L2/L3
}
+function export_allow_this_host_ipv6 () {
+ printf "$EXPORT_ALLOW6\n" > ${NFSDIR}/exports
+}
+
function export_allow_this_host () {
printf "$EXPORT_ALLOW\n" > ${NFSDIR}/exports
}
@@ -150,10 +157,7 @@ setup_cluster
TEST $CLI vol set $V0 nfs.disable off
TEST $CLI vol start $V0
-# Get NFS state directory
-NFSDIR=$( $CLI volume get patchy nfs.mount-rmtab | \
- awk '/^nfs.mount-rmtab/{print $2}' | \
- xargs dirname )
+NFSDIR=/var/lib/glusterd/nfs
## Wait for volume to register with rpc.mountd
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
@@ -186,6 +190,11 @@ EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
## Mount NFS
EXPECT "Y" check_mount_success $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount NFS using the IPv6 export
+export_allow_this_host_ipv6
+EXPECT "Y" check_mount_success $V0
## Disallow host
TEST export_deny_this_host
diff --git a/tests/basic/uss.t b/tests/basic/uss.t
index 6cfc0303895..d6ca416bd65 100644
--- a/tests/basic/uss.t
+++ b/tests/basic/uss.t
@@ -382,3 +382,5 @@ TEST ls $M0/.history/snap6/;
TEST ! stat $M0/.history/snap6/aaa;
cleanup;
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/basic/write-behind.t b/tests/basic/write-behind.t
new file mode 100644
index 00000000000..edad59786af
--- /dev/null
+++ b/tests/basic/write-behind.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function clear_stats {
+    # Must match the dump file read by got_expected_write_count below.
+    > /var/lib/glusterd/stats/glusterfsd__d_backends_${V0}0.dump
+}
+
+function got_expected_write_count {
+ expected_size=$1
+ expected_value=$2
+    # -q keeps grep's match off stdout so the function emits only Y/N.
+    grep aggr.write_${expected_size} "/var/lib/glusterd/stats/glusterfsd__d_backends_${V0}0.dump" | grep -q $expected_value
+    if [ $? -eq 0 ]; then
+ echo "Y";
+ else
+ echo "N";
+ fi
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+
+# These are needed for our tracking of write sizes
+TEST $CLI volume set $V0 diagnostics.latency-measurement on
+TEST $CLI volume set $V0 diagnostics.count-fop-hits on
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 2
+
+# Disable this in testing to get deterministic results
+TEST $CLI volume set $V0 performance.write-behind-trickling-writes off
+
+TEST $CLI volume start $V0
+
+sleep 2;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+# Write a 100MB file with a window-size 1MB, we should get 100 writes of 1MB each
+TEST dd if=/dev/zero of=$M0/100mb_file bs=1M count=100
+EXPECT_WITHIN 5 "Y" got_expected_write_count "1mb" 100
+
+TEST $CLI volume set $V0 performance.write-behind-window-size 512KB
+
+# Write a 100MB file with a window-size 512KB, we should get 200 writes of 512KB each
+TEST dd if=/dev/zero of=$M0/100mb_file_2 bs=1M count=100
+EXPECT_WITHIN 5 "Y" got_expected_write_count "512kb" 200
+
+cleanup;
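
The expected counts follow from dividing the file size by the write-behind
window: each full window flushes as one brick-level write, which the stats
dump aggregates by size bucket (aggr.write_1mb, aggr.write_512kb, ...). A
quick sanity check of the arithmetic in bash:

    file_kb=$((100 * 1024))        # the 100MB test file, in KB
    echo $((file_kb / 1024))       # 1MB window   -> 100 writes
    echo $((file_kb / 512))        # 512KB window -> 200 writes
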