author     Kevin Vigor <kvigor@fb.com>    2017-01-05 12:21:20 -0800
committer  Kevin Vigor <kvigor@fb.com>    2017-01-05 12:21:20 -0800
commit     c27aa58e72cf528583c585691e65abdb765535e5
tree       fae75e5b924ac4fb80a3d4ed42203638732fbb52 /tests
parent     63403742f53ec59a6acbe26ff4c39bab1b0842ed
parent     cb8bc3396d16e777d9a2683886fefd43e747e8a3
Merge remote-tracking branch 'origin/release-3.8' into merge-3.8-again
Change-Id: I844adf2aef161a44d446f8cd9b7ebcb224ee618a
Signed-off-by: Kevin Vigor <kvigor@fb.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/basic/afr/granular-esh/add-brick.t                              |   2
-rw-r--r--  tests/basic/afr/granular-esh/cli.t                                    | 142
-rw-r--r--  tests/basic/afr/granular-esh/conservative-merge.t                     |   4
-rw-r--r--  tests/basic/afr/granular-esh/granular-esh.t                           |   2
-rw-r--r--  tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t |  76
-rw-r--r--  tests/basic/afr/granular-esh/replace-brick.t                          |   2
-rw-r--r--  tests/basic/afr/split-brain-favorite-child-policy.t                   |  18
-rw-r--r--  tests/basic/afr/split-brain-healing.t                                 |   3
-rwxr-xr-x  tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t                      |  41
-rwxr-xr-x  tests/bugs/glusterd/1313628-import-brick-ports-always.t               |  47
-rw-r--r--  tests/bugs/io-cache/bug-read-hang.c                                   | 125
-rwxr-xr-x  tests/bugs/io-cache/bug-read-hang.t                                   |  30
-rw-r--r--  tests/bugs/replicate/bug-1402730.t                                    |  42
-rw-r--r--  tests/bugs/replicate/bug-1408712.t                                    |  87
-rw-r--r--  tests/bugs/snapshot/bug-1316437.t                                     |   3
-rwxr-xr-x  tests/bugs/snapshot/bug-1399598-uss-with-ssl.t                        |  98
-rwxr-xr-x  tests/bugs/upcall/bug-1394131.t                                       |  29
-rw-r--r--  tests/features/ssl-ciphers.t                                          |   5
-rw-r--r--  tests/include.rc                                                      |   2
19 files changed, 700 insertions(+), 58 deletions(-)
diff --git a/tests/basic/afr/granular-esh/add-brick.t b/tests/basic/afr/granular-esh/add-brick.t
index f3125d7fe7d..270cf1d32a6 100644
--- a/tests/basic/afr/granular-esh/add-brick.t
+++ b/tests/basic/afr/granular-esh/add-brick.t
@@ -14,7 +14,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
diff --git a/tests/basic/afr/granular-esh/cli.t b/tests/basic/afr/granular-esh/cli.t
new file mode 100644
index 00000000000..a655180a095
--- /dev/null
+++ b/tests/basic/afr/granular-esh/cli.t
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+# Test that toggling the option via volume set works on a newly created volume
+TEST $CLI volume set $V0 cluster.granular-entry-heal on
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+
+#########################
+##### DISPERSE TEST #####
+#########################
+# Execute the same command on a disperse volume and make sure it fails.
+TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+TEST $CLI volume start $V1
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+TEST ! $CLI volume heal $V1 granular-entry-heal disable
+
+#######################
+###### TIER TEST ######
+#######################
+# Execute the same command on a disperse + replicate tiered volume and make
+# sure the option is set on the replicate leg of the volume
+TEST $CLI volume attach-tier $V1 replica 2 $H0:$B0/${V1}{3,4}
+TEST $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume heal $V1 granular-entry-heal disable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+# Kill a disperse brick and make heal be pending on the volume.
+TEST kill_brick $V1 $H0 $B0/${V1}0
+
+# Now make sure that one offline brick in disperse does not affect enabling the
+# option on the volume.
+TEST $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume heal $V1 granular-entry-heal disable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+# Now kill a replicate brick.
+TEST kill_brick $V1 $H0 $B0/${V1}3
+# Now make sure that one offline brick in the replicate leg causes the
+# command to fail.
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+######################
+### REPLICATE TEST ###
+######################
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+# Test that the volume-set way of enabling the option is disallowed
+TEST ! $CLI volume set $V0 granular-entry-heal on
+# Test that the volume-heal way of enabling the option is allowed
+TEST $CLI volume heal $V0 granular-entry-heal enable
+# Volume-reset of the option should be allowed
+TEST $CLI volume reset $V0 granular-entry-heal
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+EXPECT "enable" volume_option $V0 cluster.granular-entry-heal
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Disabling the option should work even when one or more bricks are down
+TEST $CLI volume heal $V0 granular-entry-heal disable
+# When a brick is down, an 'enable' attempt should fail
+TEST ! $CLI volume heal $V0 granular-entry-heal enable
+
+# Restart the killed brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# When all bricks are up, it should be possible to enable the option
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+# Kill brick-0 again
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+ echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Perform a volume-reset-all-options operation
+TEST $CLI volume reset $V0
+# Ensure that granular entry heal is also disabled
+EXPECT "no" volume_get_field $V0 cluster.granular-entry-heal
+EXPECT "on" volume_get_field $V0 cluster.entry-self-heal
+
+cleanup
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1399038
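
The cli.t test above encodes the rule that, once a replicate volume is started, cluster.granular-entry-heal may only be toggled through the heal command and no longer through volume set. A minimal command-line sketch of that behaviour, assuming a started two-brick replica volume named testvol (a placeholder name) and the stock gluster CLI:

    # Rejected on a started replicate volume: the volume-set path is disallowed
    gluster volume set testvol granular-entry-heal on
    # Allowed path: succeeds as long as all bricks are up
    gluster volume heal testvol granular-entry-heal enable
    gluster volume get testvol cluster.granular-entry-heal   # should now report: enable
    gluster volume heal testvol granular-entry-heal disable
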
diff --git a/tests/basic/afr/granular-esh/conservative-merge.t b/tests/basic/afr/granular-esh/conservative-merge.t
index b566a0ea4d3..b170e47e0cb 100644
--- a/tests/basic/afr/granular-esh/conservative-merge.t
+++ b/tests/basic/afr/granular-esh/conservative-merge.t
@@ -11,13 +11,13 @@ TESTS_EXPECTED_IN_LOOP=4
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 data-self-heal off
TEST $CLI volume set $V0 metadata-self-heal off
TEST $CLI volume set $V0 entry-self-heal off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
-TEST $CLI volume start $V0
TEST $GFS --volfile-id=$V0 -s $H0 $M0
TEST mkdir $M0/dir
diff --git a/tests/basic/afr/granular-esh/granular-esh.t b/tests/basic/afr/granular-esh/granular-esh.t
index ee53878e004..de0e8f4290b 100644
--- a/tests/basic/afr/granular-esh/granular-esh.t
+++ b/tests/basic/afr/granular-esh/granular-esh.t
@@ -16,7 +16,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
diff --git a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
new file mode 100644
index 00000000000..1b5421bf4b6
--- /dev/null
+++ b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+ echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+# Now disable granular-entry-heal
+TEST $CLI volume heal $V0 granular-entry-heal disable
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# The indices created are granular but the heal is going to be of the normal
+# (non-granular) kind. Make sure that heal still completes fine and that the
+# stale granular indices are deleted.
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+cleanup
diff --git a/tests/basic/afr/granular-esh/replace-brick.t b/tests/basic/afr/granular-esh/replace-brick.t
index aaa54da2a2c..639ed81b95c 100644
--- a/tests/basic/afr/granular-esh/replace-brick.t
+++ b/tests/basic/afr/granular-esh/replace-brick.t
@@ -12,7 +12,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
diff --git a/tests/basic/afr/split-brain-favorite-child-policy.t b/tests/basic/afr/split-brain-favorite-child-policy.t
index 7a14852685c..3df8e718bf0 100644
--- a/tests/basic/afr/split-brain-favorite-child-policy.t
+++ b/tests/basic/afr/split-brain-favorite-child-policy.t
@@ -42,8 +42,15 @@ TEST $CLI volume heal $V0
cat $M0/file > /dev/null
EXPECT "1" echo $?
-#We know that the first brick has latest ctime.
-LATEST_CTIME_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+# Umount to prevent further FOPS on the file, then find the brick with latest ctime.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+ctime1=`stat -c "%.Z" $B0/${V0}0/file`
+ctime2=`stat -c "%.Z" $B0/${V0}1/file`
+if (( $(echo "$ctime1 > $ctime2" | bc -l) )); then
+ LATEST_CTIME_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+else
+ LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+fi
TEST $CLI volume set $V0 cluster.favorite-child-policy ctime
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
@@ -51,10 +58,13 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+TEST [ "$LATEST_CTIME_MD5" == "$B0_MD5" ]
+TEST [ "$LATEST_CTIME_MD5" == "$B1_MD5" ]
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
cat $M0/file > /dev/null
EXPECT "0" echo $?
-HEALED_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
-TEST [ "$LATEST_CTIME_MD5" == "$HEALED_MD5" ]
############ Healing using favorite-child-policy = mtime #################
TEST $CLI volume set $V0 cluster.favorite-child-policy none
diff --git a/tests/basic/afr/split-brain-healing.t b/tests/basic/afr/split-brain-healing.t
index 302a3e6144b..c66bb5d44df 100644
--- a/tests/basic/afr/split-brain-healing.t
+++ b/tests/basic/afr/split-brain-healing.t
@@ -31,6 +31,9 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
diff --git a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
new file mode 100755
index 00000000000..6351ba22511
--- /dev/null
+++ b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.shd-wait-qlength 100
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+touch $M0/file{1..200}
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+for i in {1..200}; do echo hello>$M0/file$i; done
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT "200" get_pending_heal_count $V0
+TEST $CLI volume set $V0 self-heal-daemon on
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+TEST $CLI volume set $V0 self-heal-daemon off
+EXPECT_NOT "^0$" get_pending_heal_count $V0
+TEST $CLI volume set $V0 self-heal-daemon on
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+TEST umount $M0
+cleanup;
diff --git a/tests/bugs/glusterd/1313628-import-brick-ports-always.t b/tests/bugs/glusterd/1313628-import-brick-ports-always.t
deleted file mode 100755
index d04c4293466..00000000000
--- a/tests/bugs/glusterd/1313628-import-brick-ports-always.t
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-## Check that brick ports are always copied on import
-## --------------------------------------------------
-## This test checks that the brick ports are copied on import by checking that
-## they don't change when the following happens,
-## - Stop a volume
-## - Stop glusterd
-## - Start the stopped volume
-## - Start the stopped glusterd
-
-function get_brick_port() {
- local VOL=$1
- local BRICK=$2
- $CLI2 volume status $VOL $BRICK --xml | sed -ne 's/.*<port>\([0-9]*\)<\/port>/\1/p'
-}
-
-
-cleanup
-
-TEST launch_cluster 2
-TEST $CLI1 peer probe $H2
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Create and start volume so that brick port assignment happens
-TEST $CLI1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI1 volume start $V0
-
-# Save port for 2nd brick
-BPORT_ORIG=$(get_brick_port $V0 $H2:$B2/$V0)
-
-# Stop volume, stop 2nd glusterd, start volume, start 2nd glusterd
-TEST $CLI1 volume stop $V0
-TEST kill_glusterd 2
-
-TEST $CLI1 volume start $V0
-TEST start_glusterd 2
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Get new port and compare with old one
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT $BPORT_ORIG get_brick_port $V0 $H2:$B2/$V0
-
-$CLI1 volume stop $V0
-
-cleanup
diff --git a/tests/bugs/io-cache/bug-read-hang.c b/tests/bugs/io-cache/bug-read-hang.c
new file mode 100644
index 00000000000..74dfddd7a6e
--- /dev/null
+++ b/tests/bugs/io-cache/bug-read-hang.c
@@ -0,0 +1,125 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define NO_INIT 1
+
+int count = 0;
+void
+read_cbk (glfs_fd_t *fd, ssize_t ret, void *data) {
+ count++;
+}
+
+glfs_t *
+setup_new_client(char *hostname, char *volname, char *log_file, int flag)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new (volname);
+ if (!fs) {
+ fprintf (stderr, "\nglfs_new: returned NULL (%s)\n",
+ strerror (errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server (fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf (stderr, "\nglfs_set_volfile_server failed ret:%d (%s)\n",
+ ret, strerror (errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging (fs, log_file, 7);
+ if (ret < 0) {
+ fprintf (stderr, "\nglfs_set_logging failed with ret: %d (%s)\n",
+ ret, strerror (errno));
+ goto error;
+ }
+
+ if (flag == NO_INIT)
+ goto out;
+
+ ret = glfs_init (fs);
+ if (ret < 0) {
+ fprintf (stderr, "\nglfs_init failed with ret: %d (%s)\n",
+ ret, strerror (errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+int
+main (int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ struct glfs_fd *fd = NULL;
+ char *volname = NULL;
+ char *log_file = NULL;
+ char *hostname = NULL;
+ char *buf = NULL;
+ struct stat stat;
+
+ if (argc != 4) {
+ fprintf (stderr,
+ "Expect following args %s <hostname> <Vol> <log file location>\n"
+ , argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ log_file = argv[3];
+
+ fs = setup_new_client (hostname, volname, log_file, 0);
+ if (!fs) {
+ fprintf (stderr, "\nsetup_new_client: returned NULL (%s)\n",
+ strerror (errno));
+ goto error;
+ }
+
+ fd = glfs_opendir (fs, "/");
+ if (!fd) {
+ fprintf (stderr, "/: %s\n", strerror (errno));
+ return -1;
+ }
+
+ glfs_readdirplus (fd, &stat);
+
+ fd = glfs_open (fs, "/test", O_RDWR);
+ if (fd == NULL) {
+ fprintf (stderr, "glfs_open: returned NULL\n");
+ goto error;
+ }
+
+ buf = (char *) malloc (5);
+
+ ret = glfs_pread (fd, buf, 5, 0, 0);
+ if (ret < 0) {
+ fprintf (stderr, "Read(%s): %d (%s)\n", "test", ret,
+ strerror (errno));
+ return ret;
+ }
+
+ free (buf);
+ glfs_close (fd);
+
+ ret = glfs_fini (fs);
+ if (ret < 0) {
+ fprintf (stderr, "glfs_fini failed with ret: %d (%s)\n",
+ ret, strerror (errno));
+ return -1;
+ }
+
+ return 0;
+error:
+ return -1;
+}
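
The companion bug-read-hang.t below builds this reproducer with build_tester and links it against gfapi. For running it by hand outside the test harness, a rough sketch (host name, volume name and log path are placeholders, and it assumes the gfapi development headers are installed):

    # Hypothetical manual build and run of the reproducer above
    gcc bug-read-hang.c -o bug-read-hang $(pkg-config --cflags --libs glusterfs-api)
    # Usage: ./bug-read-hang <hostname> <volname> <log file location>
    ./bug-read-hang server1 patchy /var/log/glusterfs/bug-read-hang.log
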
diff --git a/tests/bugs/io-cache/bug-read-hang.t b/tests/bugs/io-cache/bug-read-hang.t
new file mode 100755
index 00000000000..fb20c2c5515
--- /dev/null
+++ b/tests/bugs/io-cache/bug-read-hang.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+#. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..2};
+
+TEST $CLI volume set $V0 performance.md-cache-timeout 60
+TEST $CLI volume set $V0 open-behind off
+
+logdir=`gluster --print-logdir`
+
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+echo "Hello" > $M0/test
+
+TEST build_tester $(dirname $0)/bug-read-hang.c -lgfapi
+TEST $(dirname $0)/bug-read-hang $H0 $V0 $logdir/bug-read-hang.log
+
+cleanup_tester $(dirname $0)/bug-read-hang
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1402730.t b/tests/bugs/replicate/bug-1402730.t
new file mode 100644
index 00000000000..dcde60dbdf7
--- /dev/null
+++ b/tests/bugs/replicate/bug-1402730.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0
+
+TEST mkdir -p $M0/a/b/c
+cd $M0/a/b/c
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+rm -rf $B0/${V0}2/*
+rm -rf $B0/${V0}2/.glusterfs
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch file
+
+GFID_C=$(get_gfid_string $M0/a/b/c)
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$GFID_C/file
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$GFID_C/file
+
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/a/b/c trusted.afr.$V0-client-2 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a/b/c trusted.afr.$V0-client-2 entry
+
+cd ~
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1408712.t b/tests/bugs/replicate/bug-1408712.t
new file mode 100644
index 00000000000..b26e8a06923
--- /dev/null
+++ b/tests/bugs/replicate/bug-1408712.t
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=12
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume heal $V0 granular-entry-heal enable
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 performance.flush-behind off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1
+
+cd $M0
+TEST dd if=/dev/zero of=file bs=1M count=8
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST "dd if=/dev/zero bs=1M count=8 >> file"
+
+FILE_GFID=$(get_gfid_string $M0/file)
+
+# Test that the index associated with '/.shard' is created on B1 and B2.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+# Check for successful creation of granular entry indices
+for i in {2..3}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+cd ~
+TEST md5sum $M1/file
+
+# Test that the index associated with '/.shard' and the created shards do not disappear on B1 and B2.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+for i in {2..3}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST ! stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+
+for i in {2..3}
+do
+ TEST_IN_LOOP ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP ! stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+cleanup
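
bug-1408712.t above relies on the on-disk layout of granular name indices: each pending entry is recorded under .glusterfs/indices/entry-changes/<parent-gfid>/ on the healthy bricks, and for shards the parent is /.shard, whose fixed GFID is added to include.rc at the end of this patch. A hypothetical sketch of inspecting that layout by hand, with the brick path and mount point as placeholders:

    # Inspect granular entry-heal indices for a sharded file (illustrative only)
    BRICK=/d/backends/patchy1                               # placeholder brick path
    DOT_SHARD_GFID=be318638-e8a0-4c6d-977d-7a937aa84806     # fixed GFID of /.shard
    FILE_GFID=$(getfattr -n glusterfs.gfid.string --only-values /mnt/glusterfs/0/file)
    # Name indices for the file's pending shards appear as <gfid>.2, <gfid>.3, ...
    ls "$BRICK/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/" | grep "^$FILE_GFID\."
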
diff --git a/tests/bugs/snapshot/bug-1316437.t b/tests/bugs/snapshot/bug-1316437.t
index 30a221e3171..0ae57a71657 100644
--- a/tests/bugs/snapshot/bug-1316437.t
+++ b/tests/bugs/snapshot/bug-1316437.t
@@ -20,8 +20,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
killall glusterd glusterfsd glusterfs
-SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
-TEST ! [ $SNAPD_PID -gt 0 ];
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_if_snapd_exist
glusterd
diff --git a/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t b/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
new file mode 100755
index 00000000000..1c50f746527
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../ssl.rc
+
+function file_exists
+{
+ if [ -f $1 ]; then echo "Y"; else echo "N"; fi
+}
+
+function volume_online_brick_count
+{
+ $CLI volume status $V0 | awk '$1 == "Brick" && $6 != "N/A" { print $6}' | wc -l;
+}
+
+cleanup;
+
+# Initialize the test setup
+TEST setup_lvm 1;
+
+TEST create_self_signed_certs
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd;
+
+# Create and start the volume
+TEST $CLI volume create $V0 $H0:$L1/b1;
+
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+
+# Mount the volume and create some files
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+TEST touch $M0/file;
+
+# Enable activate-on-create
+TEST $CLI snapshot config activate-on-create enable;
+
+# Create a snapshot
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+EXPECT "Y" file_exists $M0/file
+# A volume set can trigger a graph switch, so this request may be sent to the
+# old graph, which will not have .snaps. Therefore wait for some time.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Enable management encryption
+touch $GLUSTERD_WORKDIR/secure-access
+killall_gluster
+
+TEST glusterd
+TEST pidof glusterd;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+
+# Mount the volume
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+EXPECT "Y" file_exists $M0/file
+EXPECT "Y" file_exists $M0/.snaps/snap1/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Enable I/O encryption
+TEST $CLI volume set $V0 client.ssl on
+TEST $CLI volume set $V0 server.ssl on
+
+killall_gluster
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+
+# Mount the volume
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+EXPECT "Y" file_exists $M0/file
+EXPECT "Y" file_exists $M0/.snaps/snap1/file
+
+TEST $CLI snapshot delete all
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/upcall/bug-1394131.t b/tests/bugs/upcall/bug-1394131.t
new file mode 100755
index 00000000000..b371ce4e682
--- /dev/null
+++ b/tests/bugs/upcall/bug-1394131.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## 1. Start glusterd
+TEST glusterd;
+
+## 2. Let's create the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+## 3. Enable the upcall xlator and increase the cache-invalidation timeout to the max
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 indexing on
+
+## 4. Start the volume
+TEST $CLI volume start $V0
+
+## 5. Create a gluster mount
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+## 6. Create and rename a file from M0
+TEST touch $M0/file1
+TEST mv $M0/file1 $M0/file2
+
+cleanup;
diff --git a/tests/features/ssl-ciphers.t b/tests/features/ssl-ciphers.t
index 9ee7fc6c16f..f5909f320ac 100644
--- a/tests/features/ssl-ciphers.t
+++ b/tests/features/ssl-ciphers.t
@@ -137,6 +137,7 @@ EXPECT "`pwd`/`dirname $0`/dh1024.pem" volume_option $V0 ssl.dh-param
TEST $CLI volume stop $V0
TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+BRICK_PORT=`brick_port $V0`
EXPECT "Y" openssl_connect -cipher EDH -connect $H0:$BRICK_PORT
# Test the cipher-list option
@@ -145,6 +146,7 @@ EXPECT AES256-SHA volume_option $V0 ssl.cipher-list
TEST $CLI volume stop $V0
TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+BRICK_PORT=`brick_port $V0`
EXPECT "Y" openssl_connect -cipher AES256-SHA -connect $H0:$BRICK_PORT
EXPECT "N" openssl_connect -cipher AES128-SHA -connect $H0:$BRICK_PORT
@@ -154,6 +156,7 @@ EXPECT EECDH:EDH:!TLSv1 volume_option $V0 ssl.cipher-list
TEST $CLI volume stop $V0
TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+BRICK_PORT=`brick_port $V0`
EXPECT "N" openssl_connect -cipher AES256-SHA -connect $H0:$BRICK_PORT
EXPECT "Y" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
@@ -162,6 +165,7 @@ EXPECT invalid volume_option $V0 ssl.ec-curve
TEST $CLI volume stop $V0
TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+BRICK_PORT=`brick_port $V0`
EXPECT "N" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
TEST $CLI volume set $V0 ssl.ec-curve secp521r1
@@ -169,6 +173,7 @@ EXPECT secp521r1 volume_option $V0 ssl.ec-curve
TEST $CLI volume stop $V0
TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+BRICK_PORT=`brick_port $V0`
EXPECT "Y" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
# test revocation
diff --git a/tests/include.rc b/tests/include.rc
index d1acbee5995..9f32e88f5f5 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -11,6 +11,7 @@ B0=${B0:=/d/backends}; # top level of brick directories
WORKDIRS="$B0 $M0 $M1 $M2 $N0 $N1"
ROOT_GFID="00000000-0000-0000-0000-000000000001"
+DOT_SHARD_GFID="be318638-e8a0-4c6d-977d-7a937aa84806"
META_VOL=${META_VOL:=gluster_shared_storage}; # shared gluster storage volume used by snapshot scheduler, nfs ganesha and geo-rep.
META_MNT=${META_MNT:=/var/run/gluster/shared_storage}; # Mount point of shared gluster volume.
@@ -408,6 +409,7 @@ stat -c %s /dev/null > /dev/null 2>&1 || {
*%Y*) cmd="${cmd} s/%Y/`$( which stat ) -f %m $f`/g;" ;&
*%X*) cmd="${cmd} s/%X/`$( which stat ) -f %a $f`/g;" ;&
*%Z*) cmd="${cmd} s/%Z/`$( which stat ) -f %c $f`/g;" ;&
+ *%.Z*) cmd="${cmd} s/%.Z/`$( which stat ) -f %.9Fc $f`/g;" ;&
*%b*) cmd="${cmd} s/%b/`$( which stat ) -f %b $f`/g;" ;&
*%B*) cmd="${cmd} s/%B/512/g;" ;&
*%t*) cmd="${cmd} s/%t/`$( which stat ) -f %XHr $f`/g;" ;&
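
The include.rc hunk above teaches the BSD stat(1) shim about the %.Z format, which the reworked split-brain-favorite-child-policy.t uses to compare ctimes at sub-second precision. A hedged illustration of the two forms and of the bc comparison used in that test (file path and timestamps are placeholders):

    # GNU coreutils: change time (ctime) with fractional seconds
    stat -c %.Z /path/to/file        # e.g. 1483649280.123456789
    # BSD stat equivalent substituted by the include.rc shim above
    stat -f %.9Fc /path/to/file
    # Floating-point comparison of two such timestamps via bc, as in the test
    (( $(echo "1483649280.12 > 1483649279.98" | bc -l) )) && echo "first file is newer"
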