Diffstat (limited to 'tests/bugs')
-rwxr-xr-x  tests/bugs/859927/repl.t | 69
-rw-r--r--  tests/bugs/886998/strict-readdir.t | 52
-rw-r--r--  tests/bugs/949327.t | 23
-rwxr-xr-x  tests/bugs/bug-000000.t | 9
-rw-r--r--  tests/bugs/bug-1002207.t | 54
-rwxr-xr-x  tests/bugs/bug-1002556.t | 25
-rw-r--r--  tests/bugs/bug-1004218.t | 26
-rw-r--r--  tests/bugs/bug-1004744.t | 48
-rwxr-xr-x  tests/bugs/bug-1015990-rep.t | 81
-rwxr-xr-x  tests/bugs/bug-1015990.t | 95
-rwxr-xr-x  tests/bugs/bug-1022055.t | 26
-rw-r--r--  tests/bugs/bug-1022905.t | 39
-rw-r--r--  tests/bugs/bug-1030208.t | 35
-rw-r--r--  tests/bugs/bug-1040934.t | 37
-rw-r--r--  tests/bugs/bug-1045333.t | 48
-rwxr-xr-x  tests/bugs/bug-1049834.t | 40
-rwxr-xr-x  tests/bugs/bug-1064768.t | 20
-rwxr-xr-x  tests/bugs/bug-762989.t | 32
-rw-r--r--  tests/bugs/bug-764638.t | 13
-rwxr-xr-x  tests/bugs/bug-765230.t | 60
-rw-r--r--  tests/bugs/bug-765380.t | 39
-rwxr-xr-x  tests/bugs/bug-765473.t | 33
-rw-r--r--  tests/bugs/bug-765564.t | 83
-rwxr-xr-x  tests/bugs/bug-767095.t | 51
-rwxr-xr-x  tests/bugs/bug-767585-gfid.t | 43
-rwxr-xr-x  tests/bugs/bug-770655.t | 168
-rwxr-xr-x  tests/bugs/bug-782095.t | 48
-rwxr-xr-x  tests/bugs/bug-797171.t | 43
-rwxr-xr-x  tests/bugs/bug-802417.t | 108
-rwxr-xr-x  tests/bugs/bug-808400-dist.t | 31
-rw-r--r--  tests/bugs/bug-808400-fcntl.c | 113
-rw-r--r--  tests/bugs/bug-808400-flock.c | 92
-rwxr-xr-x  tests/bugs/bug-808400-repl.t | 30
-rwxr-xr-x  tests/bugs/bug-808400-stripe.t | 31
-rwxr-xr-x  tests/bugs/bug-808400.t | 34
-rwxr-xr-x  tests/bugs/bug-811493.t | 18
-rw-r--r--  tests/bugs/bug-821056.t | 52
-rwxr-xr-x  tests/bugs/bug-822830.t | 44
-rwxr-xr-x  tests/bugs/bug-823081.t | 40
-rw-r--r--  tests/bugs/bug-824753-file-locker.c | 42
-rwxr-xr-x  tests/bugs/bug-824753.t | 45
-rwxr-xr-x  tests/bugs/bug-830665.t | 106
-rw-r--r--  tests/bugs/bug-834465.c | 61
-rwxr-xr-x  tests/bugs/bug-834465.t | 44
-rw-r--r--  tests/bugs/bug-839595.t | 31
-rwxr-xr-x  tests/bugs/bug-844688.t | 37
-rw-r--r--  tests/bugs/bug-845213.t | 19
-rw-r--r--  tests/bugs/bug-846240.t | 58
-rwxr-xr-x  tests/bugs/bug-847622.t | 25
-rwxr-xr-x  tests/bugs/bug-847624.t | 23
-rw-r--r--  tests/bugs/bug-848251.t | 50
-rwxr-xr-x  tests/bugs/bug-852147.t | 85
-rwxr-xr-x  tests/bugs/bug-853258.t | 45
-rwxr-xr-x  tests/bugs/bug-853680.t | 52
-rwxr-xr-x  tests/bugs/bug-853690.t | 94
-rw-r--r--  tests/bugs/bug-856455.t | 42
-rw-r--r--  tests/bugs/bug-857330/common.rc | 55
-rwxr-xr-x  tests/bugs/bug-857330/normal.t | 78
-rwxr-xr-x  tests/bugs/bug-857330/xml.t | 101
-rwxr-xr-x  tests/bugs/bug-858215.t | 81
-rw-r--r--  tests/bugs/bug-858242.c | 77
-rwxr-xr-x  tests/bugs/bug-858242.t | 28
-rw-r--r--  tests/bugs/bug-858488-min-free-disk.t | 114
-rwxr-xr-x  tests/bugs/bug-859927.t | 70
-rw-r--r--  tests/bugs/bug-860297.t | 13
-rw-r--r--  tests/bugs/bug-860663.t | 51
-rw-r--r--  tests/bugs/bug-861015-index.t | 36
-rw-r--r--  tests/bugs/bug-861015-log.t | 29
-rwxr-xr-x  tests/bugs/bug-861542.t | 51
-rwxr-xr-x  tests/bugs/bug-862834.t | 46
-rw-r--r--  tests/bugs/bug-862967.t | 59
-rw-r--r--  tests/bugs/bug-863068.t | 76
-rwxr-xr-x  tests/bugs/bug-864222.t | 26
-rwxr-xr-x  tests/bugs/bug-865825.t | 76
-rw-r--r--  tests/bugs/bug-866459.t | 44
-rw-r--r--  tests/bugs/bug-867252.t | 41
-rw-r--r--  tests/bugs/bug-867253.t | 59
-rw-r--r--  tests/bugs/bug-869724.t | 37
-rwxr-xr-x  tests/bugs/bug-872923.t | 57
-rwxr-xr-x  tests/bugs/bug-873367.t | 41
-rw-r--r--  tests/bugs/bug-873549.t | 17
-rw-r--r--  tests/bugs/bug-873962-spb.t | 39
-rwxr-xr-x  tests/bugs/bug-873962.t | 108
-rw-r--r--  tests/bugs/bug-874498.t | 61
-rwxr-xr-x  tests/bugs/bug-877293.t | 41
-rwxr-xr-x  tests/bugs/bug-877885.t | 35
-rwxr-xr-x  tests/bugs/bug-877992.t | 61
-rw-r--r--  tests/bugs/bug-878004.t | 29
-rwxr-xr-x  tests/bugs/bug-879490.t | 37
-rwxr-xr-x  tests/bugs/bug-879494.t | 37
-rw-r--r--  tests/bugs/bug-880898.t | 23
-rwxr-xr-x  tests/bugs/bug-882278.t | 72
-rw-r--r--  tests/bugs/bug-884328.t | 12
-rw-r--r--  tests/bugs/bug-884452.t | 46
-rwxr-xr-x  tests/bugs/bug-884455.t | 84
-rwxr-xr-x  tests/bugs/bug-884597.t | 152
-rw-r--r--  tests/bugs/bug-886998.t | 52
-rw-r--r--  tests/bugs/bug-887098-gmount-crash.t | 48
-rwxr-xr-x  tests/bugs/bug-887145.t | 89
-rw-r--r--  tests/bugs/bug-888174.t | 65
-rw-r--r--  tests/bugs/bug-888752.t | 24
-rwxr-xr-x  tests/bugs/bug-889630.t | 56
-rw-r--r--  tests/bugs/bug-889996.t | 19
-rwxr-xr-x  tests/bugs/bug-892730.t | 76
-rw-r--r--  tests/bugs/bug-893338.t | 34
-rwxr-xr-x  tests/bugs/bug-893378.t | 73
-rw-r--r--  tests/bugs/bug-895235.t | 23
-rwxr-xr-x  tests/bugs/bug-896431.t | 124
-rwxr-xr-x  tests/bugs/bug-902610.t | 59
-rw-r--r--  tests/bugs/bug-903336.t | 13
-rwxr-xr-x  tests/bugs/bug-904065.t | 90
-rwxr-xr-x  tests/bugs/bug-904300.t | 61
-rw-r--r--  tests/bugs/bug-905307.t | 36
-rw-r--r--  tests/bugs/bug-905864.c | 82
-rw-r--r--  tests/bugs/bug-905864.t | 32
-rw-r--r--  tests/bugs/bug-906646.t | 93
-rwxr-xr-x  tests/bugs/bug-907072.t | 46
-rwxr-xr-x  tests/bugs/bug-908146.t | 39
-rwxr-xr-x  tests/bugs/bug-912297.t | 44
-rwxr-xr-x  tests/bugs/bug-912564.t | 92
-rw-r--r--  tests/bugs/bug-913051.t | 65
-rw-r--r--  tests/bugs/bug-913487.t | 14
-rw-r--r--  tests/bugs/bug-913544.t | 24
-rwxr-xr-x  tests/bugs/bug-913555.t | 54
-rwxr-xr-x  tests/bugs/bug-915280.t | 51
-rwxr-xr-x  tests/bugs/bug-915554.t | 75
-rw-r--r--  tests/bugs/bug-916226.t | 26
-rwxr-xr-x  tests/bugs/bug-916549.t | 19
-rw-r--r--  tests/bugs/bug-918437-sh-mtime.t | 52
-rwxr-xr-x  tests/bugs/bug-921072.t | 118
-rw-r--r--  tests/bugs/bug-921231.t | 31
-rwxr-xr-x  tests/bugs/bug-921408.t | 89
-rwxr-xr-x  tests/bugs/bug-924075.t | 23
-rwxr-xr-x  tests/bugs/bug-924265.t | 35
-rwxr-xr-x  tests/bugs/bug-927616.t | 61
-rw-r--r--  tests/bugs/bug-948686.t | 46
-rw-r--r--  tests/bugs/bug-948729/bug-948729-force.t | 84
-rw-r--r--  tests/bugs/bug-948729/bug-948729-mode-script.t | 85
-rw-r--r--  tests/bugs/bug-948729/bug-948729.t | 67
-rw-r--r--  tests/bugs/bug-949242.t | 54
-rw-r--r--  tests/bugs/bug-949298.t | 12
-rw-r--r--  tests/bugs/bug-949930.t | 27
-rwxr-xr-x  tests/bugs/bug-955588.t | 27
-rw-r--r--  tests/bugs/bug-957877.t | 31
-rw-r--r--  tests/bugs/bug-958691.t | 50
-rw-r--r--  tests/bugs/bug-958790.t | 21
-rw-r--r--  tests/bugs/bug-961307.t | 32
-rw-r--r--  tests/bugs/bug-961615.t | 34
-rw-r--r--  tests/bugs/bug-961669.t | 48
-rwxr-xr-x  tests/bugs/bug-963541.t | 33
-rw-r--r--  tests/bugs/bug-963678.t | 56
-rwxr-xr-x  tests/bugs/bug-964059.t | 30
-rw-r--r--  tests/bugs/bug-966018.t | 34
-rwxr-xr-x  tests/bugs/bug-969193.t | 13
-rwxr-xr-x  tests/bugs/bug-970070.t | 14
-rwxr-xr-x  tests/bugs/bug-973073.t | 48
-rw-r--r--  tests/bugs/bug-974007.t | 52
-rwxr-xr-x  tests/bugs/bug-974972.t | 36
-rw-r--r--  tests/bugs/bug-976800.t | 28
-rw-r--r--  tests/bugs/bug-977246.t | 21
-rwxr-xr-x  tests/bugs/bug-977797.t | 114
-rw-r--r--  tests/bugs/bug-978794.t | 29
-rwxr-xr-x  tests/bugs/bug-979365.t | 47
-rw-r--r--  tests/bugs/bug-982174.t | 36
-rwxr-xr-x  tests/bugs/bug-983477.t | 52
-rw-r--r--  tests/bugs/bug-985074.t | 55
-rw-r--r--  tests/bugs/bug-986429.t | 19
-rwxr-xr-x  tests/bugs/bug-986905.t | 27
-rw-r--r--  tests/bugs/bug-991622.t | 35
-rw-r--r--  tests/bugs/getlk_owner.c | 120
-rwxr-xr-x  tests/bugs/overlap.py | 59
171 files changed, 8675 insertions, 0 deletions
diff --git a/tests/bugs/859927/repl.t b/tests/bugs/859927/repl.t
new file mode 100755
index 000000000..73c86e7be
--- /dev/null
+++ b/tests/bugs/859927/repl.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+#Tests for data-self-heal-algorithm option
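+# Kill the first brick, rewrite the file from the mount so the surviving brick
+# accumulates pending changes, then force-start the volume to bring the brick back.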
+function create_setup_for_self_heal {
+ file=$1
+ kill_brick $V0 $H0 $B0/${V0}1
+        dd of=$file if=/dev/urandom bs=1M count=1 > /dev/null 2>&1
+ $CLI volume start $V0 force
+}
+
+function test_write {
+        dd of=$M0/a if=/dev/urandom bs=1k count=1 > /dev/null 2>&1
+}
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0;
+
+touch $M0/a
+
+TEST $CLI volume set $V0 cluster.data-self-heal-algorithm full
+EXPECT full volume_option $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+ls -l $file 2>&1 > /dev/null
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST $CLI volume set $V0 cluster.data-self-heal-algorithm diff
+EXPECT diff volume_option $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+ls -l $file 2>&1 > /dev/null
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST $CLI volume reset $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+ls -l $file 2>&1 > /dev/null
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST ! $CLI volume set $V0 cluster.data-self-heal-algorithm ""
+
+#Tests for quorum-type option
+TEST ! $CLI volume set $V0 cluster.quorum-type ""
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+EXPECT fixed volume_option $V0 cluster.quorum-type
+TEST $CLI volume set $V0 cluster.quorum-count 2
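+#With quorum-type fixed and quorum-count 2, writes must fail while one of the
+#two bricks is down.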
+kill_brick $V0 $H0 $B0/${V0}1
+TEST ! test_write
+TEST $CLI volume set $V0 cluster.quorum-type auto
+EXPECT auto volume_option $V0 cluster.quorum-type
+TEST ! test_write
+TEST $CLI volume set $V0 cluster.quorum-type none
+EXPECT none volume_option $V0 cluster.quorum-type
+TEST test_write
+TEST $CLI volume reset $V0 cluster.quorum-type
+TEST test_write
+cleanup;
diff --git a/tests/bugs/886998/strict-readdir.t b/tests/bugs/886998/strict-readdir.t
new file mode 100644
index 000000000..0de953e8a
--- /dev/null
+++ b/tests/bugs/886998/strict-readdir.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function num_files_in_dir {
+ d=$1
+ ls $d | sort | uniq | wc -l
+}
+
+#Basic sanity tests for readdir functionality
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2d2_0 $H0:$B0/r2d2_1 $H0:$B0/r2d2_2 $H0:$B0/r2d2_3
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
+
+TEST touch $M0/{1..100}
+EXPECT "100" num_files_in_dir $M0
+
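+#Kill one brick from each replica pair; readdir must still list all 100 files
+#from the surviving bricks.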
+TEST kill_brick $V0 $H0 $B0/r2d2_0
+TEST kill_brick $V0 $H0 $B0/r2d2_2
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/r2d2_1
+TEST kill_brick $V0 $H0 $B0/r2d2_3
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 3
+
+TEST $CLI volume set $V0 cluster.strict-readdir on
+EXPECT "on" volinfo_field $V0 cluster.strict-readdir
+TEST kill_brick $V0 $H0 $B0/r2d2_0
+TEST kill_brick $V0 $H0 $B0/r2d2_2
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/r2d2_1
+TEST kill_brick $V0 $H0 $B0/r2d2_3
+EXPECT "100" num_files_in_dir $M0
+cleanup;
diff --git a/tests/bugs/949327.t b/tests/bugs/949327.t
new file mode 100644
index 000000000..7b0c5c51a
--- /dev/null
+++ b/tests/bugs/949327.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+function tmp_file_count()
+{
+        ls -lh /tmp/tmp.* 2>/dev/null | wc -l
+}
+
+
+old_count=$(tmp_file_count);
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+new_count=$(tmp_file_count);
+
+TEST [ "$old_count" -eq "$new_count" ]
+
+cleanup
diff --git a/tests/bugs/bug-000000.t b/tests/bugs/bug-000000.t
new file mode 100755
index 000000000..7f3d15c9d
--- /dev/null
+++ b/tests/bugs/bug-000000.t
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+
+cleanup;
diff --git a/tests/bugs/bug-1002207.t b/tests/bugs/bug-1002207.t
new file mode 100644
index 000000000..50b8c7d31
--- /dev/null
+++ b/tests/bugs/bug-1002207.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount FUSE so the xattr operations below run against the volume
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+dd if=/dev/zero of=$M0/file$i.data bs=1024 count=1024 1>/dev/null 2>&1
+
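+# xattr_query_check echoes 1 when the named xattr is reported missing,
+# 0 when it is present; set_xattr echoes the exit status of setfattr.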
+function xattr_query_check()
+{
+ local path=$1
+ local xa_name=$2
+
+ local ret=`getfattr -m . -n $xa_name $path 2>&1 | grep -o "$xa_name: No such attribute" | wc -l`
+ echo $ret
+}
+
+function set_xattr()
+{
+ local path=$1
+ local xa_name=$2
+ local xa_val=$3
+
+ setfattr -n $xa_name -v $xa_val $path
+ echo $?
+}
+
+EXPECT 0 set_xattr $M0/file$i.data "trusted.name" "testofafairlylongxattrstringthatbutnotlongenoughtofailmemoryallocation"
+
+EXPECT 0 xattr_query_check $M0/file$i.data "trusted.name"
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
diff --git a/tests/bugs/bug-1002556.t b/tests/bugs/bug-1002556.t
new file mode 100755
index 000000000..a57f455d4
--- /dev/null
+++ b/tests/bugs/bug-1002556.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}1 force
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
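+#Restart glusterd and verify the 1x2 configuration was persisted in the store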
+TEST killall glusterd
+TEST glusterd
+
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+cleanup
diff --git a/tests/bugs/bug-1004218.t b/tests/bugs/bug-1004218.t
new file mode 100644
index 000000000..17eb3c65b
--- /dev/null
+++ b/tests/bugs/bug-1004218.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Test if only a single xml document is generated by 'status all'
+# when a volume is not started
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create ${V0}1 $H0:$B0/${V0}1{1,2}
+TEST $CLI volume create ${V0}2 $H0:$B0/${V0}2{1,2}
+
+TEST $CLI volume start ${V0}1
+
+function test_status_all ()
+{
+ $CLI volume status all --xml | xmllint -format -
+}
+
+TEST test_status_all
+
+TEST $CLI volume stop ${V0}1
+
+cleanup
diff --git a/tests/bugs/bug-1004744.t b/tests/bugs/bug-1004744.t
new file mode 100644
index 000000000..0290119ef
--- /dev/null
+++ b/tests/bugs/bug-1004744.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+#Test case: After a rebalance fix-layout, check if the rebalance status command
+#displays the appropriate message at the CLI.
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 2x1 distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0
+
+# Mount FUSE and create file/directory
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+for i in `seq 1 10`;
+do
+ mkdir $M0/dir_$i
+ echo file>$M0/dir_$i/file_$i
+ for j in `seq 1 100`;
+ do
+ mkdir $M0/dir_$i/dir_$j
+ echo file>$M0/dir_$i/dir_$j/file_$j
+ done
+done
+
+#add 2 bricks
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{3,4};
+
+#perform rebalance fix-layout
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN 1 "fix-layout in progress" rebalance_status_field $V0;
+
+EXPECT_WITHIN 20 "fix-layout completed" rebalance_status_field $V0;
+
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-1015990-rep.t b/tests/bugs/bug-1015990-rep.t
new file mode 100755
index 000000000..f59bb2f75
--- /dev/null
+++ b/tests/bugs/bug-1015990-rep.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../afr.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+sleep 5
+TEST kill_brick $V0 $H0 $B0/$V0"3"
+sleep 5
+
+for i in {1..100}; do echo "STRING" > $M0/File$i; done
+
+brick_2_sh_entries=$(count_sh_entries $B0/$V0"2")
+brick_4_sh_entries=$(count_sh_entries $B0/$V0"4")
+
+
+command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1")
+
+
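+#Parse the heal-count output: sum every "Number of entries:" line; after the
+#loop $value still holds the count reported for the last brick in the output.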
+substring="Number of entries:"
+count=0
+while read -r line;
+do
+ if [[ "$line" == *$substring* ]]
+ then
+ value=$(echo $line | cut -f 2 -d :)
+ count=$(($count + $value))
+ fi
+
+done <<< "$command_output"
+
+brick_2_entries_count=$(($count-$value))
+
+EXPECT "0" echo $brick_2_entries_count
+
+brick_2_entries_count=$count
+
+
+xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
+##Remove the count of the xattrop-gfid entry count as it does not contribute
+##to the number of files to be healed
+
+sub_val=1
+xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val))
+
+ret=0
+if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ]
+ then
+ ret=$(($ret + $sub_val))
+fi
+
+EXPECT "1" echo $ret
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0
+
+cleanup;
diff --git a/tests/bugs/bug-1015990.t b/tests/bugs/bug-1015990.t
new file mode 100755
index 000000000..165af5168
--- /dev/null
+++ b/tests/bugs/bug-1015990.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../afr.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+sleep 5
+TEST kill_brick $V0 $H0 $B0/$V0"3"
+sleep 5
+
+for i in {1..100}; do echo "STRING" > $M0/File$i; done
+
+brick_2_sh_entries=$(count_sh_entries $B0/$V0"2")
+brick_4_sh_entries=$(count_sh_entries $B0/$V0"4")
+
+
+command_output=$(gluster volume heal $V0 statistics heal-count)
+
+
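+#Parse the heal-count output: sum every "Number of entries:" line; after the
+#loop $value still holds the count reported for the last brick in the output.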
+substring="Number of entries:"
+count=0
+while read -r line;
+do
+ if [[ "$line" == *$substring* ]]
+ then
+ value=$(echo $line | cut -f 2 -d :)
+ count=$(($count + $value))
+ fi
+
+done <<< "$command_output"
+
+brick_2_entries_count=$(($count-$value))
+brick_4_entries_count=$value
+
+
+xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
+##Remove the count of the xattrop-gfid entry count as it does not contribute
+##to the number of files to be healed
+
+sub_val=1
+xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val))
+
+xattrop_count_brick_4=$(count_sh_entries $B0/$V0"4")
+##Remove xattrop-gfid entry count
+
+xattrop_count_brick_4=$(($xattrop_count_brick_4-$sub_val))
+
+
+ret=0
+if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ]
+ then
+ ret=$(($ret + $sub_val))
+fi
+
+EXPECT "1" echo $ret
+
+
+ret=0
+if [ "$xattrop_count_brick_4" -eq "$brick_4_entries_count" ]
+ then
+ ret=$(($ret + $sub_val))
+fi
+
+EXPECT "1" echo $ret
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0
+
+cleanup;
+
diff --git a/tests/bugs/bug-1022055.t b/tests/bugs/bug-1022055.t
new file mode 100755
index 000000000..c2f4218bb
--- /dev/null
+++ b/tests/bugs/bug-1022055.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN 20 1 check_peers;
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0;
+
+TEST $CLI_1 volume start $V0;
+
+TEST $CLI_1 volume log rotate $V0;
+
+TEST $CLI_1 volume status;
+
+cleanup;
diff --git a/tests/bugs/bug-1022905.t b/tests/bugs/bug-1022905.t
new file mode 100644
index 000000000..aef3395dd
--- /dev/null
+++ b/tests/bugs/bug-1022905.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+## Create a volume
+TEST glusterd;
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Enable a protected and a resettable/unprotected option
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+## Reset cmd resets only unprotected option(s), succeeds.
+TEST $CLI volume reset $V0;
+
+## A second reset should fail: only the protected quota option is left
+TEST ! $CLI volume reset $V0;
+
+## Set an unprotected option
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+## Now 1 protected and 1 unprotected options are set
+## Reset force should succeed
+TEST $CLI volume reset $V0 force;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/bug-1030208.t b/tests/bugs/bug-1030208.t
new file mode 100644
index 000000000..866999692
--- /dev/null
+++ b/tests/bugs/bug-1030208.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+#Test case: Hardlink test
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+#Create a file and perform fop on a DIR
+TEST touch $M0/foo
+TEST ls $M0/
+
+#Create hardlink
+TEST ln $M0/foo $M0/bar
+
+
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-1040934.t b/tests/bugs/bug-1040934.t
new file mode 100644
index 000000000..3089d7ce1
--- /dev/null
+++ b/tests/bugs/bug-1040934.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../snapshot.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN 20 1 peer_count
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1 $H2:$L2
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $CLI_1 snapshot create ${V0}_snap ${V0}
+
+TEST snapshot_exists ${V0}_snap
+TEST mount -t glusterfs $H1:/snaps/${V0}_snap/$V0 $M0
+cd $M0
+TEST ! touch a
+
+TEST $CLI_1 snapshot delete ${V0}_snap
+
+TEST ! snapshot_exists ${V0}_snap
+
+cleanup;
diff --git a/tests/bugs/bug-1045333.t b/tests/bugs/bug-1045333.t
new file mode 100644
index 000000000..d1f8069e8
--- /dev/null
+++ b/tests/bugs/bug-1045333.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../snapshot.rc
+
+cleanup;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+
+
+S1="${V0}-snap1" #Snapshot name containing a hyphen (-)
+S2="-${V0}-snap2" #Snapshot name starting with a hyphen (-)
+#Create snapshot with a long name
+S3="${V0}_single_gluster_volume_is_accessible_by_multiple_clients_offline_snapshot_is_a_long_name"
+
+TEST $CLI snapshot create $S1 $V0
+TEST snapshot_exists $S1
+
+TEST $CLI snapshot create $S2 $V0
+TEST snapshot_exists $S2
+
+TEST $CLI snapshot create $S3 $V0
+TEST snapshot_exists $S3
+
+
+TEST mount -t glusterfs $H0:/snaps/$S1/$V0 $M0
+TEST umount -f $M0
+
+TEST mount -t glusterfs $H0:/snaps/$S2/$V0 $M0
+TEST umount -f $M0
+
+TEST mount -t glusterfs $H0:/snaps/$S3/$V0 $M0
+TEST umount -f $M0
+
+#Clean up
+TEST $CLI snapshot delete $S1
+TEST $CLI snapshot delete $S2
+TEST $CLI snapshot delete $S3
+
+TEST $CLI volume stop $V0 force
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/bug-1049834.t b/tests/bugs/bug-1049834.t
new file mode 100755
index 000000000..6019a561c
--- /dev/null
+++ b/tests/bugs/bug-1049834.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../snapshot.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN 20 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Setting the snap-max-hard-limit to 4
+TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 4
+
+#Creating 4 snapshots on the volume
+TEST create_n_snapshots $V0 4 ${V0}_snap
+TEST snapshot_n_exists $V0 4 ${V0}_snap
+
+#Creating the 5th snapshots on the volume and expecting it not to be created.
+TEST ! $CLI_1 snapshot create ${V0}_snap5 ${V0}
+TEST ! snapshot_exists ${V0}_snap5
+TEST ! $CLI_1 snapshot delete ${V0}_snap5
+
+#Deleting the 4 snaps
+TEST delete_n_snapshots $V0 4 ${V0}_snap
+TEST ! snapshot_n_exists $V0 4 ${V0}_snap
+
+cleanup;
diff --git a/tests/bugs/bug-1064768.t b/tests/bugs/bug-1064768.t
new file mode 100755
index 000000000..b87168150
--- /dev/null
+++ b/tests/bugs/bug-1064768.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
+TEST $CLI volume start $V0
+EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info
+TEST $CLI volume profile $V0 stop
+
+TEST $CLI volume status
+TEST $CLI volume stop $V0
+EXPECT_WITHIN 15 'Stopped' volinfo_field $V0 'Status';
+cleanup;
diff --git a/tests/bugs/bug-762989.t b/tests/bugs/bug-762989.t
new file mode 100755
index 000000000..1794693cc
--- /dev/null
+++ b/tests/bugs/bug-762989.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+## reserve port 1023
+older_ports=$(cat /proc/sys/net/ipv4/ip_local_reserved_ports);
+echo "1023" > /proc/sys/net/ipv4/ip_local_reserved_ports;
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \
+$M0;
+
+## Wait for volume to register with rpc.mountd
+sleep 6;
+## check if port 1023 (which has been reserved) is used by the gluster processes
+op=$(netstat -ntp | grep gluster | grep -w 1023);
+EXPECT "" echo $op;
+
+#set the reserved ports to the older values
+echo $older_ports > /proc/sys/net/ipv4/ip_local_reserved_ports
+
+cleanup;
diff --git a/tests/bugs/bug-764638.t b/tests/bugs/bug-764638.t
new file mode 100644
index 000000000..816546524
--- /dev/null
+++ b/tests/bugs/bug-764638.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI pool list;
+TEST $CLI pool list --xml;
+
+cleanup;
diff --git a/tests/bugs/bug-765230.t b/tests/bugs/bug-765230.t
new file mode 100755
index 000000000..2012be5ad
--- /dev/null
+++ b/tests/bugs/bug-765230.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting quota-timeout as 20
+TEST ! $CLI volume set $V0 features.quota-timeout 20
+EXPECT '' volinfo_field $V0 'features.quota-timeout';
+
+## Enabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+## Enabling quota
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+
+## Setting quota-timeout as 20
+TEST $CLI volume set $V0 features.quota-timeout 20
+EXPECT '20' volinfo_field $V0 'features.quota-timeout';
+
+## Enabling features.quota-deem-statfs
+TEST $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+## Disabling quota
+TEST $CLI volume quota $V0 disable
+EXPECT 'off' volinfo_field $V0 'features.quota'
+
+## Setting quota-timeout as 30
+TEST ! $CLI volume set $V0 features.quota-timeout 30
+EXPECT '20' volinfo_field $V0 'features.quota-timeout';
+
+## Disabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs off
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-765380.t b/tests/bugs/bug-765380.t
new file mode 100644
index 000000000..a9784b93d
--- /dev/null
+++ b/tests/bugs/bug-765380.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+REPLICA=2
+
+TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}00 $H0:$B0/${V0}01 $H0:$B0/${V0}10 $H0:$B0/${V0}11
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
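+# trusted.glusterfs.pathinfo exposes the backend brick locations of a file;
+# count how often the given hostname (or node uuid) appears in that string.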
+function count_hostname_or_uuid_from_pathinfo()
+{
+ pathinfo=`getfattr -m . -n trusted.glusterfs.pathinfo $M0/f00f`
+ echo $pathinfo | grep -o $1 | wc -l
+}
+
+touch $M0/f00f
+
+EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $H0
+
+# turn on node-uuid-pathinfo option
+TEST $CLI volume set $V0 node-uuid-pathinfo on
+
+# do not expect hostname as part of the pathinfo string
+EXPECT 0 count_hostname_or_uuid_from_pathinfo $H0
+
+uuid=`grep UUID /var/lib/glusterd/glusterd.info | cut -f2 -d=`
+
+# ... but expect the uuid $REPLICA times
+EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $uuid
+
+cleanup;
diff --git a/tests/bugs/bug-765473.t b/tests/bugs/bug-765473.t
new file mode 100755
index 000000000..5fc0ec9d7
--- /dev/null
+++ b/tests/bugs/bug-765473.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+
+cleanup;
+
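+# Print the count from the 'Clients connected' line of 'volume status <vol> clients'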
+function clients_connected()
+{
+ volname=$1
+ gluster volume status $volname clients | grep -i 'Clients connected' | sed -e 's/[^0-9]*\(.*\)/\1/g'
+}
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0;
+
+TEST glusterfs --direct-io-mode=yes --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+TEST fd=`fd_available`
+TEST fd_open $fd 'w' "$M0/testfile"
+TEST fd_write $fd "content"
+TEST $CLI volume stop $V0
+# write some content which will result in marking fd bad
+fd_write $fd "more content"
+TEST $CLI volume start $V0
+EXPECT_WITHIN 30 2 clients_connected $V0
+TEST ! fd_write $fd "still more content"
+
+cleanup
diff --git a/tests/bugs/bug-765564.t b/tests/bugs/bug-765564.t
new file mode 100644
index 000000000..0b8b8cd4f
--- /dev/null
+++ b/tests/bugs/bug-765564.t
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+TEST $CLI volume start $V0;
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+#returns 0 (success) only if 'olddir' is absent from both replicas
+function rm_succeeded () {
+    local dir1=$1
+    [[ -d $B0/${V0}-0/$dir1 || -d $B0/${V0}-1/$dir1 ]] && return 1
+    return 0
+}
+
+#returns 0 (success) only if 'newdir' is present in both replicas
+function mv_succeeded () {
+    local dir1=$1
+    [[ -d $B0/${V0}-0/$dir1 && -d $B0/${V0}-1/$dir1 ]] && return 0
+    return 1
+}
+
+# echoes 0 when the backend is consistent: 'olddir' must be gone from both
+# replicas, and 'newdir' must either exist on both (rename won) or on neither
+# (rmdir won); anything else means the replicas diverged.
+function chk_backend_consistency(){
+    local dir1=$1
+    local dir2=$2
+    local rm_status mv_status
+    rm_succeeded $dir1
+    rm_status=$?
+    mv_succeeded $dir2
+    mv_status=$?
+    if [ $rm_status -eq 0 ] && \
+       { [ $mv_status -eq 0 ] || [[ ! -d $B0/${V0}-0/$dir2 && ! -d $B0/${V0}-1/$dir2 ]]; }; then
+        echo 0
+    else
+        echo 1
+    fi
+}
+
+#concurrent removal/rename of dirs
+function rm_mv_correctness () {
+ ret=0
+ for i in {1..100}; do
+ mkdir $M0/"dir"$i
+ rmdir $M0/"dir"$i &
+ mv $M0/"dir"$i $M0/"adir"$i &
+ wait
+ tmp_ret=$(chk_backend_consistency "dir"$i "adir"$i)
+ (( ret += tmp_ret ))
+ rm -rf $M0/"dir"$i
+ rm -rf $M0/"adir"$i
+ done
+ return $ret
+}
+
+TEST touch $M0/a;
+TEST mv $M0/a $M0/b;
+
+#test rename fop when one of the bricks is down
+kill_brick ${V0} ${H0} ${B0}/${V0}-1;
+TEST touch $M0/h;
+TEST mv $M0/h $M0/1;
+
+TEST $CLI volume start $V0 force;
+
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1;
+find $M0 | xargs stat 2>/dev/null 1>/dev/null;
+
+TEST rm_mv_correctness;
+TEST umount $M0;
+cleanup;
+
diff --git a/tests/bugs/bug-767095.t b/tests/bugs/bug-767095.t
new file mode 100755
index 000000000..a8842bd54
--- /dev/null
+++ b/tests/bugs/bug-767095.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+dump_dir='/tmp/gerrit_glusterfs'
+TEST mkdir -p $dump_dir;
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 error-gen posix;
+TEST $CLI volume set $V0 server.statedump-path $dump_dir;
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST PID=`gluster volume status $V0 | grep ${V0}1 | awk '{print $5}'`;
+TEST kill -USR1 $PID;
+sleep 2;
+for file_name in $(ls $dump_dir)
+do
+ TEST grep "error-gen.priv" $dump_dir/$file_name;
+done
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+TEST rm -rf $dump_dir;
+
+cleanup;
diff --git a/tests/bugs/bug-767585-gfid.t b/tests/bugs/bug-767585-gfid.t
new file mode 100755
index 000000000..49cf7423f
--- /dev/null
+++ b/tests/bugs/bug-767585-gfid.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#Test cases to perform gfid-self-heal
+#file 'a' should be assigned a fresh gfid
+#file 'b' should be healed with gfid1 from brick1
+#file 'c' should be healed with gfid2 from brick2
+
+gfid1="0x8428b7193a764bf8be8046fb860b8993"
+gfid2="0x85ad91afa2f74694bf52c3326d048209"
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+touch $B0/${V0}0/a $B0/${V0}1/a
+touch $B0/${V0}0/b $B0/${V0}1/b
+touch $B0/${V0}0/c $B0/${V0}1/c
+
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/b
+TEST setfattr -n trusted.gfid -v $gfid2 $B0/${V0}1/c
+
+sleep 2
+
+cd $M0
+TEST ls -l a
+TEST ls -l b
+TEST ls -l c
+
+TEST gf_get_gfid_xattr $B0/${V0}0/a
+TEST gf_get_gfid_xattr $B0/${V0}1/a
+
+EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}0/b
+EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}1/b
+
+EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}0/c
+EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}1/c
+
+cleanup;
diff --git a/tests/bugs/bug-770655.t b/tests/bugs/bug-770655.t
new file mode 100755
index 000000000..945e323bb
--- /dev/null
+++ b/tests/bugs/bug-770655.t
@@ -0,0 +1,168 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a distribute-replicate volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Distributed-Replicate' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting stripe-block-size as 10MB
+TEST ! $CLI volume set $V0 stripe-block-size 10MB
+EXPECT '' volinfo_field $V0 'cluster.stripe-block-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
+## Start and create a replicate volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Replicate' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting stripe-block-size as 10MB
+TEST ! $CLI volume set $V0 stripe-block-size 10MB
+EXPECT '' volinfo_field $V0 'cluster.stripe-block-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
+## Start and create a distribute volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Distribute' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting stripe-block-size as 10MB
+TEST ! $CLI volume set $V0 stripe-block-size 10MB
+EXPECT '' volinfo_field $V0 'cluster.stripe-block-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
+## Start and create a stripe volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 stripe 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Stripe' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting stripe-block-size as 10MB
+TEST $CLI volume set $V0 stripe-block-size 10MB
+EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
+## Start and create a distributed stripe volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 stripe 4 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Distributed-Stripe' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting stripe-block-size as 10MB
+TEST $CLI volume set $V0 stripe-block-size 10MB
+EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
+## Start and create a distributed stripe replicate volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 stripe 2 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Distributed-Striped-Replicate' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting stripe-block-size as 10MB
+TEST $CLI volume set $V0 stripe-block-size 10MB
+EXPECT '10MB' volinfo_field $V0 'cluster.stripe-block-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-782095.t b/tests/bugs/bug-782095.t
new file mode 100755
index 000000000..a0cea14ee
--- /dev/null
+++ b/tests/bugs/bug-782095.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting performance cache min size as 2MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Setting performance cache max size as 20MB
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## Trying to set performance cache min size as 25MB
+TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Able to set performance cache min size as long as its lesser than max size
+TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
+EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Trying it out with only cache-max-file-size in CLI as 10MB
+TEST ! $CLI volume set $V0 cache-max-file-size 10MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-797171.t b/tests/bugs/bug-797171.t
new file mode 100755
index 000000000..a1b28d9ff
--- /dev/null
+++ b/tests/bugs/bug-797171.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+TEST $CLI volume set $V0 debug.trace marker;
+TEST $CLI volume set $V0 debug.log-history on
+
+TEST $CLI volume start $V0;
+
+sleep 1;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \
+$M0;
+
+sleep 5;
+
+touch $M0/{1..22};
+rm -f $M0/*;
+
+pid_file=$(ls /var/lib/glusterd/vols/$V0/run);
+brick_pid=$(cat /var/lib/glusterd/vols/$V0/run/$pid_file);
+
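+#Redirect the brick's statedump output to a private directory and request all
+#dump sections via glusterdump.options.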
+mkdir $statedumpdir/statedump_tmp/;
+echo "path=$statedumpdir/statedump_tmp" > $statedumpdir/glusterdump.options;
+echo "all=yes" >> $statedumpdir/glusterdump.options;
+
+TEST $CLI volume statedump $V0 history;
+
+file_name=$(ls $statedumpdir/statedump_tmp);
+TEST grep "xlator.debug.trace.history" $statedumpdir/statedump_tmp/$file_name;
+
+TEST umount $M0
+
+rm -rf $statedumpdir/statedump_tmp;
+rm -f $statedumpdir/glusterdump.options;
+
+cleanup;
diff --git a/tests/bugs/bug-802417.t b/tests/bugs/bug-802417.t
new file mode 100755
index 000000000..314141f6b
--- /dev/null
+++ b/tests/bugs/bug-802417.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function write_file()
+{
+ path="$1"; shift
+ echo "$*" > "$path"
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure io-cache and write-behind don't interfere.
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Create a file with some recognizably stale data.
+TEST write_file $M0/a_file "old_data"
+
+## Kill two of the bricks and write some newer data.
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-1
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
+TEST write_file $M0/a_file "new_data"
+
+## Bring all the bricks up and kill one so we do a partial self-heal.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
+TEST ls -l ${M0}/a_file
+
+
+obs_path_0=${B0}/${V0}-0/a_file
+obs_path_1=${B0}/${V0}-1/a_file
+obs_path_2=${B0}/${V0}-2/a_file
+
+tgt_xattr_0="trusted.afr.${V0}-client-0"
+tgt_xattr_1="trusted.afr.${V0}-client-1"
+tgt_xattr_2="trusted.afr.${V0}-client-2"
+
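+# Bricks 0 and 1 should carry a pending data changelog only against client-2
+# (the brick that stayed down), while brick 2 itself records nothing.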
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_2)
+EXPECT "0x000000020000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_2)
+EXPECT "0x000000020000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_2)
+EXPECT "0x000000000000000000000000" echo $actual
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+TEST umount $M0;
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-808400-dist.t b/tests/bugs/bug-808400-dist.t
new file mode 100755
index 000000000..6a29eb626
--- /dev/null
+++ b/tests/bugs/bug-808400-dist.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+build_tester $(dirname $0)/bug-808400-flock.c
+build_tester $(dirname $0)/bug-808400-fcntl.c
+
+TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\'
+TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\'
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log
+
+TEST umount $MOUNTDIR -l
+
+cleanup;
\ No newline at end of file
diff --git a/tests/bugs/bug-808400-fcntl.c b/tests/bugs/bug-808400-fcntl.c
new file mode 100644
index 000000000..4deef34a5
--- /dev/null
+++ b/tests/bugs/bug-808400-fcntl.c
@@ -0,0 +1,113 @@
+#include <sys/file.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+
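+/*
+ * Child: open the same file again and use F_GETLK to confirm that the lock
+ * taken by the parent is still held after the graph switch.
+ */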
+int
+run_child (char *filename)
+{
+ int fd = -1, ret = -1;
+ struct flock lock = {0, };
+ int ppid = 0;
+
+ fd = open (filename, O_RDWR);
+ if (fd < 0) {
+ fprintf (stderr, "open failed (%s)\n", strerror (errno));
+ goto out;
+ }
+
+ ppid = getppid ();
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 0;
+
+ ret = fcntl (fd, F_GETLK, &lock);
+ if (ret < 0) {
+ fprintf (stderr, "GETLK failed (%s)\n", strerror (errno));
+ goto out;
+ }
+
+ if ((lock.l_type == F_UNLCK) ||
+ (ppid != lock.l_pid)) {
+ fprintf (stderr, "no locks present, though parent has held "
+ "one\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+main (int argc, char *argv[])
+{
+ int fd = -1, ret = -1, status = 0;
+ char *filename = NULL, *cmd = NULL;
+ struct stat stbuf = {0, };
+ struct flock lock = {0, };
+
+ if (argc != 3) {
+ fprintf (stderr, "Usage: %s <filename> "
+ "<gluster-cmd-to-trigger-graph-switch>\n", argv[0]);
+ goto out;
+ }
+
+ filename = argv[1];
+ cmd = argv[2];
+
+ fd = open (filename, O_RDWR | O_CREAT, 0);
+ if (fd < 0) {
+ fprintf (stderr, "open (%s) failed (%s)\n", filename,
+ strerror (errno));
+ goto out;
+ }
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 0;
+
+ ret = fcntl (fd, F_SETLK, &lock);
+ if (ret < 0) {
+ fprintf (stderr, "fcntl failed (%s)\n", strerror (errno));
+ goto out;
+ }
+
+ system (cmd);
+
+ /* wait till graph switch completes */
+ ret = fstat64 (fd, &stbuf);
+ if (ret < 0) {
+ fprintf (stderr, "fstat64 failure (%s)\n", strerror (errno));
+ goto out;
+ }
+
+ sleep (10);
+
+ /* By now old-graph would be disconnected and locks should be cleaned
+ * up if they are not migrated. Check that by trying to acquire a lock
+ * on a new fd opened by another process on same file.
+ */
+ ret = fork ();
+ if (ret == 0) {
+ ret = run_child (filename);
+ } else {
+ wait (&status);
+ if (WIFEXITED(status)) {
+ ret = WEXITSTATUS(status);
+ } else {
+ ret = 0;
+ }
+ }
+
+out:
+ return ret;
+}
diff --git a/tests/bugs/bug-808400-flock.c b/tests/bugs/bug-808400-flock.c
new file mode 100644
index 000000000..4770c81dc
--- /dev/null
+++ b/tests/bugs/bug-808400-flock.c
@@ -0,0 +1,92 @@
+#include <sys/file.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+
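+/*
+ * Child: a non-blocking flock() on a fresh fd must fail with EWOULDBLOCK,
+ * proving the parent's lock survived the graph switch.
+ */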
+int
+run_child (char *filename)
+{
+ int fd = -1, ret = -1;
+
+ fd = open (filename, O_RDWR);
+ if (fd < 0) {
+ fprintf (stderr, "open failed (%s)\n", strerror (errno));
+ goto out;
+ }
+
+ ret = flock (fd, LOCK_EX | LOCK_NB);
+ if ((ret == 0) || (errno != EWOULDBLOCK)) {
+ fprintf (stderr, "no locks present, though parent has held "
+ "one\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+main (int argc, char *argv[])
+{
+ int fd = -1, ret = -1, status = 0;
+ char *filename = NULL, *cmd = NULL;
+ struct stat stbuf = {0, };
+
+ if (argc != 3) {
+ fprintf (stderr, "Usage: %s <filename> "
+ "<gluster-cmd-to-trigger-graph-switch>\n", argv[0]);
+ goto out;
+ }
+
+ filename = argv[1];
+ cmd = argv[2];
+
+ fd = open (filename, O_RDWR | O_CREAT, 0);
+ if (fd < 0) {
+ fprintf (stderr, "open (%s) failed (%s)\n", filename,
+ strerror (errno));
+ goto out;
+ }
+
+ ret = flock (fd, LOCK_EX);
+ if (ret < 0) {
+ fprintf (stderr, "flock failed (%s)\n", strerror (errno));
+ goto out;
+ }
+
+ system (cmd);
+
+ /* wait till graph switch completes */
+ ret = fstat64 (fd, &stbuf);
+ if (ret < 0) {
+ fprintf (stderr, "fstat64 failure (%s)\n", strerror (errno));
+ goto out;
+ }
+
+ sleep (10);
+
+ /* By now old-graph would be disconnected and locks should be cleaned
+ * up if they are not migrated. Check that by trying to acquire a lock
+ * on a new fd opened by another process on same file
+ */
+ ret = fork ();
+ if (ret == 0) {
+ ret = run_child (filename);
+ } else {
+ wait (&status);
+ if (WIFEXITED(status)) {
+ ret = WEXITSTATUS(status);
+ } else {
+ ret = 0;
+ }
+ }
+
+out:
+ return ret;
+}
diff --git a/tests/bugs/bug-808400-repl.t b/tests/bugs/bug-808400-repl.t
new file mode 100755
index 000000000..69cd9379b
--- /dev/null
+++ b/tests/bugs/bug-808400-repl.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+build_tester $(dirname $0)/bug-808400-flock.c
+build_tester $(dirname $0)/bug-808400-fcntl.c
+
+TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\'
+TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\'
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log
+
+TEST umount $MOUNTDIR -l
+
+cleanup;
\ No newline at end of file
diff --git a/tests/bugs/bug-808400-stripe.t b/tests/bugs/bug-808400-stripe.t
new file mode 100755
index 000000000..3ab6f738e
--- /dev/null
+++ b/tests/bugs/bug-808400-stripe.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 stripe 2 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+build_tester $(dirname $0)/bug-808400-flock.c
+build_tester $(dirname $0)/bug-808400-fcntl.c
+
+TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\'
+TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\'
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log
+
+TEST umount $MOUNTDIR -l
+
+cleanup;
\ No newline at end of file
diff --git a/tests/bugs/bug-808400.t b/tests/bugs/bug-808400.t
new file mode 100755
index 000000000..49d88afd6
--- /dev/null
+++ b/tests/bugs/bug-808400.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#mount on a random dir
+TEST MOUNTDIR="/tmp/$RANDOM"
+TEST mkdir $MOUNTDIR
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+build_tester $(dirname $0)/bug-808400-flock.c
+build_tester $(dirname $0)/bug-808400-fcntl.c
+
+TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\'
+TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\'
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log
+
+TEST umount $MOUNTDIR -l
+TEST rm -rf $MOUNTDIR
+
+cleanup;
\ No newline at end of file
diff --git a/tests/bugs/bug-811493.t b/tests/bugs/bug-811493.t
new file mode 100755
index 000000000..13e99af57
--- /dev/null
+++ b/tests/bugs/bug-811493.t
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI system uuid reset;
+
+uuid1=$(grep UUID /var/lib/glusterd/glusterd.info | cut -f 2 -d "=");
+
+TEST $CLI system uuid reset;
+uuid2=$(grep UUID /var/lib/glusterd/glusterd.info | cut -f 2 -d "=");
+
+TEST [ $uuid1 != $uuid2 ]
+
+cleanup
diff --git a/tests/bugs/bug-821056.t b/tests/bugs/bug-821056.t
new file mode 100644
index 000000000..5e81541ac
--- /dev/null
+++ b/tests/bugs/bug-821056.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+touch $M0/a
+
+#Open file with fd as 5
+exec 5>$M0/a
+realpath=$(gf_get_gfid_backend_file_path $B0/${V0}0 "a")
+
+kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+EXPECT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+
+kill_brick $V0 $H0 $B0/${V0}0
+TEST gf_rm_file_and_gfid_link $B0/${V0}0 "a"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+ls -l $M0/a 2>&1 > /dev/null #Make sure the file is re-created
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
+
+for i in {1..1024}; do
+ echo "open sesame" >&5
+done
+
+EXPECT_WITHIN 20 "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
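+# Sketch of the mechanism exercised here (assumption, based on the checks in
+# this test): writes on the still-open fd 5 reach the restarted brick through
+# an anonymous fd, and enough write traffic makes the brick open the
+# re-created file, which is what the EXPECT_WITHIN above waits for.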
+#close the fd
+exec 5>&-
+
+#Check that anon-fd based file is not leaking.
+EXPECT_WITHIN 20 "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+cleanup;
diff --git a/tests/bugs/bug-822830.t b/tests/bugs/bug-822830.t
new file mode 100755
index 000000000..000d99f03
--- /dev/null
+++ b/tests/bugs/bug-822830.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting nfs.rpc-auth-reject as 192.*..*
+TEST ! $CLI volume set $V0 nfs.rpc-auth-reject 192.*..*
+EXPECT '' volinfo_field $V0 'nfs.rpc-auth-reject';
+
+# Setting nfs.rpc-auth-allow as a.a.
+TEST ! $CLI volume set $V0 nfs.rpc-auth-allow a.a.
+EXPECT '' volinfo_field $V0 'nfs.rpc-auth-allow';
+
+# Setting nfs.rpc-auth-allow as a.a
+TEST $CLI volume set $V0 nfs.rpc-auth-allow a.a
+EXPECT 'a.a' volinfo_field $V0 'nfs.rpc-auth-allow';
+
+## Setting nfs.rpc-auth-reject as 192.*.*
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.*.*
+EXPECT '192.*.*' volinfo_field $V0 'nfs.rpc-auth-reject';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-823081.t b/tests/bugs/bug-823081.t
new file mode 100755
index 000000000..760d9e2b6
--- /dev/null
+++ b/tests/bugs/bug-823081.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+V1=patchy2
+
+TEST glusterd
+TEST pidof glusterd
+
+logdir=`gluster --print-logdir`
+function set_tail ()
+{
+ vol=$1;
+ tail_success="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 : SUCCESS"
+ tail_failure="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 : FAILED : Volume $vol already exists"
+ tail_success_force="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 force : SUCCESS"
+ tail_failure_force="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 force : FAILED : Volume $vol already exists"
+}
+
+set_tail $V0;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+tail=`tail --lines=1 $logdir/.cmd_log_history | cut -d " " -f 5-`
+TEST [[ \"$tail\" == \"$tail_success\" ]]
+
+TEST ! $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+tail=`tail --lines=1 $logdir/.cmd_log_history | cut -d " " -f 5-`
+TEST [[ \"$tail\" == \"$tail_failure\" ]]
+
+set_tail $V1;
+TEST gluster volume create $V1 $H0:$B0/${V1}{1,2} force;
+tail=`tail --lines=1 $logdir/.cmd_log_history | cut -d " " -f 5-`
+TEST [[ \"$tail\" == \"$tail_success_force\" ]]
+
+TEST ! gluster volume create $V1 $H0:$B0/${V1}{1,2} force;
+tail=`tail --lines=1 $logdir/.cmd_log_history | cut -d " " -f 5-`
+TEST [[ \"$tail\" == \"$tail_failure_force\" ]]
+
+cleanup;
diff --git a/tests/bugs/bug-824753-file-locker.c b/tests/bugs/bug-824753-file-locker.c
new file mode 100644
index 000000000..903e23e0a
--- /dev/null
+++ b/tests/bugs/bug-824753-file-locker.c
@@ -0,0 +1,42 @@
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+int main (int argc, char *argv[])
+{
+ int fd = -1;
+ int ret = -1;
+ char command[2048] = "";
+ char filepath[255] = "";
+ struct flock fl;
+
+ fl.l_type = F_WRLCK;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = 7;
+ fl.l_len = 1;
+ fl.l_pid = getpid();
+
+ snprintf(filepath, 255, "%s/%s", argv[4], argv[5]);
+
+ fd = open(filepath, O_RDWR);
+
+ if (fd == -1)
+ return -1;
+
+ if (fcntl(fd, F_SETLKW, &fl) == -1) {
+ return -1;
+ }
+
+ snprintf(command, sizeof(command),
+ "gluster volume clear-locks %s /%s kind all posix 0,7-1 |"
+ " grep %s | awk -F'..: ' '{print $1}' | grep %s:%s/%s",
+ argv[1], argv[5], argv[2], argv[2], argv[3], argv[1]);
+
+ ret = system (command);
+ close(fd);
+
+ if (ret)
+ return -1;
+ else
+ return 0;
+}
diff --git a/tests/bugs/bug-824753.t b/tests/bugs/bug-824753.t
new file mode 100755
index 000000000..38f6bf696
--- /dev/null
+++ b/tests/bugs/bug-824753.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+touch $M0/file1;
+
+TEST gcc -g $(dirname $0)/bug-824753-file-locker.c -o $(dirname $0)/file-locker
+
+TEST $(dirname $0)/file-locker $V0 $H0 $B0 $M0 file1
+
+## Finish up
+TEST rm -f $(dirname $0)/file-locker
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-830665.t b/tests/bugs/bug-830665.t
new file mode 100755
index 000000000..0073ff1d9
--- /dev/null
+++ b/tests/bugs/bug-830665.t
@@ -0,0 +1,106 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+function recreate {
+ rm -rf $1 && mkdir -p $1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+recreate ${B0}/${V0}-0
+recreate ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure stat-prefetch doesn't prevent self-heal checks.
+TEST $CLI volume set $V0 performance.stat-prefetch off;
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+## Wait for volume to register with rpc.mountd
+sleep 5;
+
+## Mount NFS
+TEST mount -t nfs -o vers=3,nolock,soft,intr $H0:/$V0 $N0;
+
+## Create some files and directories
+echo "test_data" > $N0/a_file;
+mkdir $N0/a_dir;
+echo "more_test_data" > $N0/a_dir/another_file;
+
+## Unmount and stop the volume.
+TEST umount $N0;
+TEST $CLI volume stop $V0;
+
+# Recreate the brick. Note that because of http://review.gluster.org/#change,4202
+# we need to preserve and restore the volume ID or else the brick (and thus the
+# entire not-very-HA-any-more volume) won't start. When that bug is fixed, we can
+# remove the [gs]etxattr calls.
+volid=$(getfattr -e hex -n trusted.glusterfs.volume-id $B0/${V0}-0 2> /dev/null \
+ | grep = | cut -d= -f2)
+rm -rf $B0/${V0}-0;
+mkdir $B0/${V0}-0;
+setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0
+
+## Restart and remount. Note that we use actimeo=0 so that the stat calls
+## we need for self-heal don't get blocked by the NFS client.
+TEST $CLI volume start $V0;
+sleep 5
+TEST mount -t nfs -o vers=3,nolock,soft,intr,actimeo=0 $H0:/$V0 $N0;
+
+## The Linux NFS client has a really charming habit of caching stuff right
+## after mount, even though we set actimeo=0 above. Life would be much easier
+## if NFS developers cared as much about correctness as they do about shaving
+## a few seconds off of benchmarks.
+ls -l $N0 &> /dev/null;
+sleep 5;
+
+## Force entry self-heal.
+find $N0 | xargs stat > /dev/null;
+#ls -lR $N0 > /dev/null;
+
+## Do NOT check through the NFS mount here. That will force a new self-heal
+## check, but we want to test whether self-heal already happened.
+
+## Make sure everything's in order on the recreated brick.
+EXPECT 'test_data' cat $B0/${V0}-0/a_file;
+EXPECT 'more_test_data' cat $B0/${V0}-0/a_dir/another_file;
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+TEST umount $N0;
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-834465.c b/tests/bugs/bug-834465.c
new file mode 100644
index 000000000..61d3deac0
--- /dev/null
+++ b/tests/bugs/bug-834465.c
@@ -0,0 +1,61 @@
+#include <sys/file.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+int
+main (int argc, char *argv[])
+{
+ int fd = -1;
+ char *filename = NULL;
+ struct flock lock = {0, };
+ int i = 0;
+ int ret = -1;
+
+ if (argc != 2) {
+ fprintf (stderr, "Usage: %s <filename> ", argv[0]);
+ goto out;
+ }
+
+ filename = argv[1];
+
+ fd = open (filename, O_RDWR | O_CREAT, 0);
+ if (fd < 0) {
+ fprintf (stderr, "open (%s) failed (%s)\n", filename,
+ strerror (errno));
+ goto out;
+ }
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 1;
+ lock.l_len = 1;
+
+ while (i < 100) {
+ lock.l_type = F_WRLCK;
+ ret = fcntl (fd, F_SETLK, &lock);
+ if (ret < 0) {
+ fprintf (stderr, "fcntl setlk failed (%s)\n",
+ strerror (errno));
+ goto out;
+ }
+
+ lock.l_type = F_UNLCK;
+ ret = fcntl (fd, F_SETLK, &lock);
+ if (ret < 0) {
+ fprintf (stderr, "fcntl setlk failed (%s)\n",
+ strerror (errno));
+ goto out;
+ }
+
+ i++;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
diff --git a/tests/bugs/bug-834465.t b/tests/bugs/bug-834465.t
new file mode 100755
index 000000000..af7f4bd12
--- /dev/null
+++ b/tests/bugs/bug-834465.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+TEST glusterfs --mem-accounting --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+sdump1=$(generate_mount_statedump $V0);
+nalloc1=0
+grep -A2 "fuse - usage-type 85" $sdump1
+if [ $? -eq '0' ]
+then
+ nalloc1=`grep -A2 "fuse - usage-type 85" $sdump1 | grep num_allocs | cut -d '=' -f2`
+fi
+
+build_tester $(dirname $0)/bug-834465.c
+
+TEST $(dirname $0)/bug-834465 $M0/testfile
+
+sdump2=$(generate_mount_statedump $V0);
+nalloc2=`grep -A2 "fuse - usage-type 85" $sdump2 | grep num_allocs | cut -d '=' -f2`
+
+TEST [ $nalloc1 -eq $nalloc2 ];
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-834465
+cleanup_mount_statedump $V0
+
+TEST umount $MOUNTDIR -l
+
+cleanup;
diff --git a/tests/bugs/bug-839595.t b/tests/bugs/bug-839595.t
new file mode 100644
index 000000000..979827fa7
--- /dev/null
+++ b/tests/bugs/bug-839595.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 cluster.server-quorum-type server
+EXPECT "server" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume set $V0 cluster.server-quorum-type none
+EXPECT "none" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume reset $V0 cluster.server-quorum-type
+TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
+TEST ! $CLI volume set all cluster.server-quorum-type none
+TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
+
+TEST ! $CLI volume set all cluster.server-quorum-ratio abc
+TEST ! $CLI volume set all cluster.server-quorum-ratio -1
+TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
+TEST $CLI volume set all cluster.server-quorum-ratio 0
+EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100
+EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
+EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100%
+EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
+cleanup;
diff --git a/tests/bugs/bug-844688.t b/tests/bugs/bug-844688.t
new file mode 100755
index 000000000..154d35e48
--- /dev/null
+++ b/tests/bugs/bug-844688.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick0
+TEST $CLI volume start $V0
+
+sleep 5
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+mount_pid=$(get_mount_process_pid $V0);
+# enable dumping of call stack creation and frame creation times in statedump
+kill -USR2 $mount_pid;
+
+TEST touch $M0/touchfile;
+(dd if=/dev/urandom of=$M0/file bs=5K 2>/dev/null 1>/dev/null)&
+back_pid=$!;
+statedump_file=$(generate_mount_statedump $V0);
+grep "callstack-creation-time" $statedump_file 2>/dev/null 1>/dev/null;
+TEST [ $? -eq 0 ];
+grep "frame-creation-time" $statedump_file 2>/dev/null 1>/dev/null;
+TEST [ $? -eq 0 ];
+
+kill -SIGTERM $back_pid;
+wait >/dev/null 2>&1;
+
+TEST rm -f $M0/touchfile $M0/file;
+TEST umount $M0;
+
+rm -f $statedumpdir/glusterdump.$mount_pid.*;
+cleanup
diff --git a/tests/bugs/bug-845213.t b/tests/bugs/bug-845213.t
new file mode 100644
index 000000000..e79b37109
--- /dev/null
+++ b/tests/bugs/bug-845213.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Create and start a volume with aio enabled
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 remote-dio enable;
+TEST $CLI volume set $V0 network.remote-dio disable;
+
+cleanup;
+
diff --git a/tests/bugs/bug-846240.t b/tests/bugs/bug-846240.t
new file mode 100644
index 000000000..12e4949ef
--- /dev/null
+++ b/tests/bugs/bug-846240.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M1;
+
+TEST touch $M0/testfile;
+
+# open the file with the fd as 4
+TEST fd=`fd_available`;
+TEST fd_open $fd 'w' "$M0/testfile";
+
+# remove the file from the other mount point. If unlink were sent from
+# $M0 itself, the file would actually be opened by open-behind, which
+# we don't want for this test case
+TEST rm -f $M1/testfile;
+
+# The command below opens the file and writes to it.
+# On open, open-behind unwinds the open call with success.
+# When the write arrives, open-behind actually opens the file
+# and then sends the write on that fd. But before the open is sent,
+# the file has already been removed via the other mount $M1, so open() gets
+# an error and the write call, which was put into a stub (the open had to be
+# sent first), should unwind with the error received by the open call.
+echo "data" >> $M0/testfile 2>/dev/null 1>/dev/null;
+TEST [ $? -ne 0 ]
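+# Note (illustrative): the same error is expected for any further fop issued
+# on the still-open fd, since each of them forces open-behind to perform the
+# real open on a path that no longer exists on the bricks.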
+
+TEST fd_close $fd;
+
+TEST rm -rf $MOUNTDIR/*
+
+TEST umount $MOUNTDIR -l
+
+cleanup;
diff --git a/tests/bugs/bug-847622.t b/tests/bugs/bug-847622.t
new file mode 100755
index 000000000..138499527
--- /dev/null
+++ b/tests/bugs/bug-847622.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick0
+TEST $CLI volume start $V0
+
+sleep 5
+
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0
+cd $N0
+
+# simple getfacl setfacl commands
+TEST touch testfile
+TEST setfacl -m u:14:r testfile
+TEST getfacl testfile
+
+cd
+TEST umount $N0
+cleanup
+
diff --git a/tests/bugs/bug-847624.t b/tests/bugs/bug-847624.t
new file mode 100755
index 000000000..f4e9942e9
--- /dev/null
+++ b/tests/bugs/bug-847624.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+cleanup
+
+#1
+TEST glusterd
+TEST pidof glusterd
+#3
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.drc on
+TEST $CLI volume start $V0
+sleep 5
+TEST mount -t nfs -o vers=3,nolock,soft,intr $H0:/$V0 $N0
+cd $N0
+#7
+TEST dbench -t 10 10
+TEST rm -rf $N0/*
+cd
+TEST umount $N0
+#10
+TEST $CLI volume set $V0 nfs.drc-size 10000
+cleanup
diff --git a/tests/bugs/bug-848251.t b/tests/bugs/bug-848251.t
new file mode 100644
index 000000000..dda393272
--- /dev/null
+++ b/tests/bugs/bug-848251.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+
+TEST $CLI volume start $V0;
+
+#enable quota
+TEST $CLI volume quota $V0 enable;
+
+#mount on a random dir
+TEST MOUNTDIR="/tmp/$RANDOM"
+TEST mkdir $MOUNTDIR
+TEST glusterfs -s $H0 --volfile-id=$V0 $MOUNTDIR
+
+function set_quota(){
+ mkdir "$MOUNTDIR/$name"
+ $CLI volume quota $V0 limit-usage /$name 50KB
+}
+
+function quota_list(){
+ $CLI volume quota $V0 list | grep -- /$name | awk '{print $3}'
+}
+
+TEST name=":d1"
+# directory name containing ':' at the start
+TEST set_quota
+EXPECT "0Bytes" quota_list
+
+TEST name=":d1/d:1"
+# directory name containing ':' in the middle
+TEST set_quota
+EXPECT "0Bytes" quota_list
+
+TEST name=":d1/d:1/d1:"
+# directory name containing ':' at the end
+TEST set_quota
+EXPECT "0Bytes" quota_list
+
+TEST umount $MOUNTDIR
+TEST rm -rf $MOUNTDIR
+
+cleanup;
diff --git a/tests/bugs/bug-852147.t b/tests/bugs/bug-852147.t
new file mode 100755
index 000000000..0e7923086
--- /dev/null
+++ b/tests/bugs/bug-852147.t
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+logdir=`gluster --print-logdir`"/bricks"
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+touch $M0/file1;
+
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 10MB
+
+EXPECT "20MB" volinfo_field $V0 'performance.cache-max-file-size';
+EXPECT "10MB" volinfo_field $V0 'performance.cache-min-file-size';
+
+#Performing volume reset and verifying.
+TEST $CLI volume reset $V0
+EXPECT "" volinfo_field $V0 'performance.cache-max-file-size';
+EXPECT "" volinfo_field $V0 'performance.cache-min-file-size';
+
+# Verifying volume profile start, info and stop
+EXPECT "Starting volume profile on $V0 has been successful " $CLI volume profile $V0 start
+
+function vol_prof_info()
+{
+ $CLI volume profile $V0 info | grep Brick | wc -l
+}
+EXPECT "8" vol_prof_info
+
+EXPECT "Stopping volume profile on $V0 has been successful " $CLI volume profile $V0 stop
+
+function log-file-name()
+{
+ logfilename=$B0"/"$V0"1.log"
+ echo ${logfilename:1} | tr / -
+}
+
+function file-size()
+{
+ ls -lrt $1 | awk '{print $5}'
+}
+
+#Finding the current log file's size
+log_file=$logdir"/"`log-file-name`
+log_file_size=`file-size $log_file`
+
+#Removing the old backup log files
+ren_file=$log_file".*"
+rm -rf $ren_file
+
+#Initiating log rotate
+TEST $CLI volume log rotate $V0
+
+#Capturing new log file's size
+new_file_size=`file-size $log_file`
+
+#Verifying the size of the new log file and the creation of the backup log file
+TEST ! [ $new_file_size -eq $log_file_size ]
+TEST ls -lrt $ren_file
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-853258.t b/tests/bugs/bug-853258.t
new file mode 100755
index 000000000..faa9d4465
--- /dev/null
+++ b/tests/bugs/bug-853258.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+mkdir -p $B0/${V0}0
+mkdir -p $B0/${V0}1
+mkdir -p $B0/${V0}2
+mkdir -p $B0/${V0}3
+
+# Create and start a volume.
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+
+# Force assignment of initial ranges.
+TEST $CLI volume rebalance $V0 fix-layout start
+EXPECT_WITHIN 15 "fix-layout completed" rebalance_status_field $V0
+
+# Get the original values.
+xattrs=""
+for i in $(seq 0 2); do
+ xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)"
+done
+
+# Expand the volume and force assignment of new ranges.
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
+# Force assignment of initial ranges.
+TEST $CLI volume rebalance $V0 fix-layout start
+EXPECT_WITHIN 15 "fix-layout completed" rebalance_status_field $V0
+
+for i in $(seq 0 3); do
+ xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)"
+done
+
+overlap=$(python2 $(dirname $0)/overlap.py $xattrs)
+# 2863311531 = 0xaaaaaaab = 2/3 overlap
+TEST [ "$overlap" -ge 2863311531 ]
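+# Where the magic number comes from (sketch): the assertion requires that at
+# least two thirds of the 32-bit hash space keeps its old assignment, and
+# 2/3 * 2^32 ~= 2863311531 = 0xaaaaaaab.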
+
+cleanup
diff --git a/tests/bugs/bug-853680.t b/tests/bugs/bug-853680.t
new file mode 100755
index 000000000..72d53ae6c
--- /dev/null
+++ b/tests/bugs/bug-853680.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+#
+# Bug 853680
+#
+# Test that io-threads least-rate-limit throttling functions as expected. Set
+# a limit, perform a few operations with a least-priority mount and verify
+# said operations take a minimum amount of time according to the limit.
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+#Accept min val
+TEST $CLI volume set $V0 performance.least-rate-limit 0
+#Accept some value in between
+TEST $CLI volume set $V0 performance.least-rate-limit 1035
+#Accept max val INT_MAX
+TEST $CLI volume set $V0 performance.least-rate-limit 2147483647
+
+#Reject other values
+TEST ! $CLI volume set $V0 performance.least-rate-limit 2147483648
+TEST ! $CLI volume set $V0 performance.least-rate-limit -8
+TEST ! $CLI volume set $V0 performance.least-rate-limit abc
+TEST ! $CLI volume set $V0 performance.least-rate-limit 0.0
+TEST ! $CLI volume set $V0 performance.least-rate-limit -10.0
+TEST ! $CLI volume set $V0 performance.least-rate-limit 1%
+
+# set rate limit to 1 operation/sec
+TEST $CLI volume set $V0 performance.least-rate-limit 1
+
+# use client-pid=-1 for least priority mount
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --client-pid=-1
+
+# create a few files and verify this takes more than a few seconds
+date1=`date +%s`
+TEST touch $M0/file{0..2}
+date2=`date +%s`
+
+optime=$(($date2 - $date1))
+TEST [ $optime -ge 3 ]
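+# Arithmetic behind the threshold above (assuming each create is queued as a
+# least-priority op): 3 file creates throttled to 1 op/sec need at least
+# roughly 3 seconds of wall-clock time.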
+
+TEST umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/bug-853690.t b/tests/bugs/bug-853690.t
new file mode 100755
index 000000000..77a581f54
--- /dev/null
+++ b/tests/bugs/bug-853690.t
@@ -0,0 +1,94 @@
+#!/bin/bash
+#
+# Bug 853690 - Test that short writes do not lead to corruption.
+#
+# Mismanagement of short writes in AFR leads to corruption and immediately
+# detectable split-brain. Write a file to a replica volume using error-gen
+# to cause short writes on one replica.
+#
+# Short writes are also possible during heal. If ignored, the files are marked
+# consistent and silently differ. After reading the file, cause a lookup, wait
+# for self-heal and verify that the afr xattrs do not match.
+#
+########
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST mkdir -p $B0/test{1,2}
+
+# Our graph is a two brick replica with 100% frequency of short writes on one
+# side of the replica. This guarantees a single write fop leads to an out-of-sync
+# situation.
+cat > $B0/test.vol <<EOF
+volume test-posix-0
+ type storage/posix
+ option directory $B0/test1
+end-volume
+
+volume test-error-0
+ type debug/error-gen
+ option failure 100
+ option enable writev
+ option error-no GF_ERROR_SHORT_WRITE
+ subvolumes test-posix-0
+end-volume
+
+volume test-locks-0
+ type features/locks
+ subvolumes test-error-0
+end-volume
+
+volume test-posix-1
+ type storage/posix
+ option directory $B0/test2
+end-volume
+
+volume test-locks-1
+ type features/locks
+ subvolumes test-posix-1
+end-volume
+
+volume test-replicate-0
+ type cluster/replicate
+ option background-self-heal-count 0
+ subvolumes test-locks-0 test-locks-1
+end-volume
+EOF
+
+TEST glusterd
+
+TEST glusterfs --volfile=$B0/test.vol --attribute-timeout=0 --entry-timeout=0 $M0
+
+# Send a single write, guaranteed to be short on one replica, and attempt to
+# read the data back. Failure to detect the short write results in different
+# file sizes and immediate split-brain (EIO).
+TEST dd if=/dev/zero of=$M0/file bs=128k count=1
+TEST dd if=$M0/file of=/dev/null bs=128k count=1
+
+########
+#
+# Test self-heal with short writes...
+#
+########
+
+# Cause a lookup and wait a few seconds for posterity. This self-heal also fails
+# due to a short write.
+TEST ls $M0/file
+
+# Verify the attributes on the healthy replica do not reflect consistency with
+# the other replica.
+TEST "getfattr -n trusted.afr.test-locks-0 $B0/test2/file --only-values > $B0/out1 2> /dev/null"
+TEST "getfattr -n trusted.afr.test-locks-1 $B0/test2/file --only-values > $B0/out2 2> /dev/null"
+TEST ! cmp $B0/out1 $B0/out2
+
+TEST rm -f $B0/out1 $B0/out2
+TEST rm -f $M0/file
+TEST umount $M0
+
+rm -f $B0/test.vol
+rm -rf $B0/test1 $B0/test2
+
+cleanup;
+
diff --git a/tests/bugs/bug-856455.t b/tests/bugs/bug-856455.t
new file mode 100644
index 000000000..becb20222
--- /dev/null
+++ b/tests/bugs/bug-856455.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+function query_pathinfo()
+{
+ local path=$1;
+ local retval;
+
+ local pathinfo=`getfattr -m . -n trusted.glusterfs.pathinfo $path`;
+ retval=`echo $pathinfo | grep -o 'POSIX' | wc -l`;
+ echo $retval
+}
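+# Illustrative shape of the xattr being counted (exact formatting may vary):
+# the pathinfo value contains one "POSIX(...)" entry per backend brick that
+# holds the entry, so a file yields one match and a directory one per brick.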
+
+touch $M0/f00f;
+mkdir $M0/f00d;
+
+# verify pathinfo for a file and directory
+EXPECT 1 query_pathinfo $M0/f00f;
+EXPECT $BRICK_COUNT query_pathinfo $M0/f00d;
+
+# Kill a brick process and then query for pathinfo
+# for directories, pathinfo should list backend paths from the available (up) subvolumes
+
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
+
+EXPECT `expr $BRICK_COUNT - 1` query_pathinfo $M0/f00d;
+
+cleanup;
diff --git a/tests/bugs/bug-857330/common.rc b/tests/bugs/bug-857330/common.rc
new file mode 100644
index 000000000..e5a7cd79a
--- /dev/null
+++ b/tests/bugs/bug-857330/common.rc
@@ -0,0 +1,55 @@
+. $(dirname $0)/../../include.rc
+
+UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+
+TASK_ID=""
+COMMAND=""
+PATTERN=""
+
+function check-and-store-task-id()
+{
+ TASK_ID=""
+
+ local task_id=$($CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX")
+
+ if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
+ return 1
+ fi
+
+ TASK_ID=$task_id
+ return 0;
+}
+
+function get-task-id()
+{
+ $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" | tail -n1
+
+}
+
+function check-and-store-task-id-xml()
+{
+ TASK_ID=""
+
+ local task_id=$($CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX")
+
+ if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
+ return 1
+ fi
+
+ TASK_ID=$task_id
+ return 0;
+}
+
+function get-task-id-xml()
+{
+ $CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX"
+}
+
+function get-task-status()
+{
+ $CLI $COMMAND | grep -o $PATTERN
+ if [ ${PIPESTATUS[0]} -ne 0 ]; then
+ return 1
+ fi
+ return 0
+}
diff --git a/tests/bugs/bug-857330/normal.t b/tests/bugs/bug-857330/normal.t
new file mode 100755
index 000000000..24dfe52c4
--- /dev/null
+++ b/tests/bugs/bug-857330/normal.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+. $(dirname $0)/common.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+TEST $CLI volume info $V0;
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
+
+TEST python2 $(dirname $0)/../../utils/create-files.py --multi -b 10 -d 10 -n 10 $M0;
+
+TEST umount $M0;
+
+###############
+## Rebalance ##
+###############
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2;
+
+COMMAND="volume rebalance $V0 start"
+PATTERN="ID:"
+TEST check-and-store-task-id
+
+COMMAND="volume status $V0"
+PATTERN="ID"
+EXPECT $TASK_ID get-task-id
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="completed"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+###################
+## Replace-brick ##
+###################
+REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3"
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start"
+PATTERN="ID:"
+TEST check-and-store-task-id
+
+COMMAND="volume status $V0"
+PATTERN="ID"
+EXPECT $TASK_ID get-task-id
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
+PATTERN="complete"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+TEST $CLI volume replace-brick $V0 $REP_BRICK_PAIR commit;
+
+##################
+## Remove-brick ##
+##################
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start"
+PATTERN="ID:"
+TEST check-and-store-task-id
+
+COMMAND="volume status $V0"
+PATTERN="ID"
+EXPECT $TASK_ID get-task-id
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
+PATTERN="completed"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-857330/xml.t b/tests/bugs/bug-857330/xml.t
new file mode 100755
index 000000000..688f46619
--- /dev/null
+++ b/tests/bugs/bug-857330/xml.t
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+. $(dirname $0)/common.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+TEST $CLI volume info $V0;
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
+
+TEST python2 $(dirname $0)/../../utils/create-files.py --multi -b 10 -d 10 -n 10 $M0;
+
+TEST umount $M0;
+
+
+###############
+## Rebalance ##
+###############
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2;
+
+COMMAND="volume rebalance $V0 start"
+PATTERN="task-id"
+TEST check-and-store-task-id-xml
+
+COMMAND="volume status $V0"
+PATTERN="id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+## TODO: Add tests for rebalance stop
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="completed"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+###################
+## Replace-brick ##
+###################
+REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3"
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start"
+PATTERN="task-id"
+TEST check-and-store-task-id-xml
+
+COMMAND="volume status $V0"
+PATTERN="id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+## TODO: Add more tests for replace-brick pause|abort
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
+PATTERN="complete"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR commit"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+##################
+## Remove-brick ##
+##################
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start"
+PATTERN="task-id"
+TEST check-and-store-task-id-xml
+
+COMMAND="volume status $V0"
+PATTERN="id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
+PATTERN="completed"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+## TODO: Add tests for remove-brick stop
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 commit"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-858215.t b/tests/bugs/bug-858215.t
new file mode 100755
index 000000000..aee7d5fcb
--- /dev/null
+++ b/tests/bugs/bug-858215.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+## Give the FUSE mount a few seconds to settle
+sleep 5;
+
+## Test for checking whether the fops have been saved in the event-history
+TEST ! stat $M0/newfile;
+TEST touch $M0/newfile;
+TEST stat $M0/newfile;
+TEST rm $M0/newfile;
+
+nfs_pid=$(cat /var/lib/glusterd/nfs/run/nfs.pid);
+glustershd_pid=$(cat /var/lib/glusterd/glustershd/run/glustershd.pid);
+
+pids=$(pidof glusterfs);
+for i in $pids
+do
+ if [ $i -ne $nfs_pid ] && [ $i -ne $glustershd_pid ]; then
+ mount_pid=$i;
+ break;
+ fi
+done
+
+dump_dir='/tmp/gerrit_glusterfs'
+cat >$statedumpdir/glusterdump.options <<EOF
+all=yes
+path=$dump_dir
+EOF
+
+TEST mkdir -p $dump_dir;
+TEST kill -USR1 $mount_pid;
+sleep 2;
+for file_name in $(ls $dump_dir)
+do
+ TEST grep "xlator.mount.fuse.history" $dump_dir/$file_name;
+done
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+TEST rm -rf $dump_dir;
+TEST rm $statedumpdir/glusterdump.options;
+
+cleanup;
diff --git a/tests/bugs/bug-858242.c b/tests/bugs/bug-858242.c
new file mode 100644
index 000000000..00a3a2d5f
--- /dev/null
+++ b/tests/bugs/bug-858242.c
@@ -0,0 +1,77 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+int
+main (int argc, char *argv[])
+{
+ char *filename = NULL, *volname = NULL, *cmd = NULL;
+ char buffer[1024] = {0, };
+ int fd = -1;
+ int ret = -1;
+ struct stat statbuf = {0, };
+
+ if (argc != 3) {
+ fprintf (stderr, "usage: %s <file-name> <volname>\n", argv[0]);
+ goto out;
+ }
+
+ filename = argv[1];
+ volname = argv[2];
+
+ fd = open (filename, O_RDWR | O_CREAT, 0);
+ if (fd < 0) {
+ fprintf (stderr, "open (%s) failed (%s)\n", filename,
+ strerror (errno));
+ goto out;
+ }
+
+ ret = write (fd, "test-content", 12);
+ if (ret < 0) {
+ fprintf (stderr, "write failed (%s)", strerror (errno));
+ goto out;
+ }
+
+ ret = fsync (fd);
+ if (ret < 0) {
+ fprintf (stderr, "fsync failed (%s)", strerror (errno));
+ goto out;
+ }
+
+ ret = fstat64 (fd, &statbuf);
+ if (ret < 0) {
+ fprintf (stderr, "fstat64 failed (%s)", strerror (errno));
+ goto out;
+ }
+
+ ret = asprintf (&cmd, "gluster --mode=script volume stop %s force",
+ volname);
+ if (ret < 0) {
+ fprintf (stderr, "cannot construct cli command string (%s)",
+ strerror (errno));
+ goto out;
+ }
+
+ ret = system (cmd);
+ if (ret < 0) {
+ fprintf (stderr, "stopping volume (%s) failed", volname);
+ goto out;
+ }
+
+ ret = read (fd, buffer, 1024);
+ if (ret >= 0) {
+ fprintf (stderr, "read should've returned error, "
+ "but is successful\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/tests/bugs/bug-858242.t b/tests/bugs/bug-858242.t
new file mode 100755
index 000000000..e93c2d244
--- /dev/null
+++ b/tests/bugs/bug-858242.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 performance.quick-read off
+
+# mount with long caching timeouts and direct-io enabled
+TEST glusterfs --entry-timeout=3600 --attribute-timeout=3600 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=yes
+
+build_tester $(dirname $0)/bug-858242.c
+
+TEST $(dirname $0)/bug-858242 $M0/testfile $V0
+
+TEST rm -rf $(dirname $0)/bug-858242
+cleanup;
+
diff --git a/tests/bugs/bug-858488-min-free-disk.t b/tests/bugs/bug-858488-min-free-disk.t
new file mode 100644
index 000000000..ae5ac3bde
--- /dev/null
+++ b/tests/bugs/bug-858488-min-free-disk.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+function pidgrep()
+{
+ ps ax | grep "$1" | grep -v grep | awk '{print $1}' | head -1
+}
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Lets create partitions for bricks
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 200M $B0/brick2
+TEST LO1=`losetup --find --show $B0/brick1`
+TEST mkfs.xfs $LO1
+TEST LO2=`losetup --find --show $B0/brick2`
+TEST mkfs.xfs $LO2
+TEST mkdir -p $B0/${V0}1 $B0/${V0}2
+TEST mount -t xfs $LO1 $B0/${V0}1
+TEST mount -t xfs $LO2 $B0/${V0}2
+
+
+## Lets create volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id=$V0 --acl $M0
+MOUNT_PID=`ps ax |grep "glusterfs -s $H0 --volfile-id=$V0 --acl $M0" | awk '{print $1}' | head -1`
+## Real test starts here
+## ----------------------------------------------------------------------------
+
+MINFREEDISKVALUE=90
+
+## Set min free disk to MINFREEDISKVALUE percent
+TEST $CLI volume set $V0 cluster.min-free-disk $MINFREEDISKVALUE
+
+## We need to have file name to brick map based on hash.
+## We will use this info in test case 0.
+i=1
+CONTINUE=2
+BRICK1FILE=0
+BRICK2FILE=0
+while [[ $CONTINUE -ne 0 ]]
+do
+ dd if=/dev/zero of=$M0/file$i.data bs=1024 count=1024 1>/dev/null 2>&1
+
+ if [[ -e $B0/${V0}1/file$i.data && $BRICK1FILE = "0" ]]
+ then
+ BRICK1FILE=file$i.data
+        CONTINUE=$((CONTINUE-1))
+ fi
+
+ if [[ -e $B0/${V0}2/file$i.data && $BRICK2FILE = "0" ]]
+ then
+ BRICK2FILE=file$i.data
+        CONTINUE=$((CONTINUE-1))
+ fi
+
+ rm $M0/file$i.data
+ let i++
+done
+
+
+## Bring free space on one of the bricks to less than minfree value by
+## creating one big file.
+dd if=/dev/zero of=$M0/fillonebrick.data bs=1024 count=25600 1>/dev/null 2>&1
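+# Sizing sketch: the loopback bricks are ~100 MiB and ~200 MiB, so this ~25 MiB
+# file drops free space on whichever brick it hashes to below the 90%
+# min-free-disk threshold, making DHT treat that brick as full.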
+
+#Lets find out where it was created
+if [ -f $B0/${V0}1/fillonebrick.data ]
+then
+ FILETOCREATE=$BRICK1FILE
+ OTHERBRICK=$B0/${V0}2
+else
+ FILETOCREATE=$BRICK2FILE
+ OTHERBRICK=$B0/${V0}1
+fi
+
+##--------------------------------TEST CASE 0-----------------------------------
+## If we try to create a file which should go into full brick as per hash, it
+## should go into the other brick instead.
+
+## Before that let us create files just to make gluster refresh the stat
+## Using touch so it should not change the disk usage stats
+for k in {1..20};
+do
+ touch $M0/dummyfile$k
+done
+
+dd if=/dev/zero of=$M0/$FILETOCREATE bs=1024 count=2048 1>/dev/null 2>&1
+TEST [ -e $OTHERBRICK/$FILETOCREATE ]
+
+## Done testing, lets clean up
+EXPECT "$MOUNT_PID" pidgrep $MOUNT_PID
+TEST rm -rf $M0/*
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+$CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-859927.t b/tests/bugs/bug-859927.t
new file mode 100755
index 000000000..ed74d3eb8
--- /dev/null
+++ b/tests/bugs/bug-859927.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+cleanup;
+
+glusterd;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+TEST ! $CLI volume set $V0 statedump-path ""
+TEST ! $CLI volume set $V0 statedump-path " "
+TEST $CLI volume set $V0 statedump-path "/home/"
+EXPECT "/home/" volume_option $V0 server.statedump-path
+
+TEST ! $CLI volume set $V0 background-self-heal-count ""
+TEST ! $CLI volume set $V0 background-self-heal-count " "
+TEST $CLI volume set $V0 background-self-heal-count 10
+EXPECT "10" volume_option $V0 cluster.background-self-heal-count
+
+TEST ! $CLI volume set $V0 cache-size ""
+TEST ! $CLI volume set $V0 cache-size " "
+TEST $CLI volume set $V0 cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.cache-size
+
+TEST ! $CLI volume set $V0 self-heal-daemon ""
+TEST ! $CLI volume set $V0 self-heal-daemon " "
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT "on" volume_option $V0 cluster.self-heal-daemon
+
+TEST ! $CLI volume set $V0 read-subvolume ""
+TEST ! $CLI volume set $V0 read-subvolume " "
+TEST $CLI volume set $V0 read-subvolume $V0-client-0
+EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
+
+TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
+TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
+TEST ! $CLI volume set $V0 data-self-heal-algorithm on
+TEST $CLI volume set $V0 data-self-heal-algorithm full
+EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
+
+TEST ! $CLI volume set $V0 min-free-inodes ""
+TEST ! $CLI volume set $V0 min-free-inodes " "
+TEST $CLI volume set $V0 min-free-inodes 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-inodes
+
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk " "
+TEST $CLI volume set $V0 min-free-disk 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-disk
+
+TEST $CLI volume set $V0 min-free-disk 120
+EXPECT "120" volume_option $V0 cluster.min-free-disk
+
+TEST ! $CLI volume set $V0 frame-timeout ""
+TEST ! $CLI volume set $V0 frame-timeout " "
+TEST $CLI volume set $V0 frame-timeout 0
+EXPECT "0" volume_option $V0 network.frame-timeout
+
+TEST ! $CLI volume set $V0 auth.allow ""
+TEST ! $CLI volume set $V0 auth.allow " "
+TEST $CLI volume set $V0 auth.allow 192.168.122.1
+EXPECT "192.168.122.1" volume_option $V0 auth.allow
+
+TEST ! $CLI volume set $V0 stripe-block-size ""
+TEST ! $CLI volume set $V0 stripe-block-size " "
+TEST $CLI volume set $V0 stripe-block-size 512MB
+EXPECT "512MB" volume_option $V0 cluster.stripe-block-size
+
+cleanup;
diff --git a/tests/bugs/bug-860297.t b/tests/bugs/bug-860297.t
new file mode 100644
index 000000000..2a3ca7a7a
--- /dev/null
+++ b/tests/bugs/bug-860297.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+TEST $CLI volume create $V0 $H0:$B0/brick1
+setfattr -x trusted.glusterfs.volume-id $B0/brick1
+## If Extended attribute trusted.glusterfs.volume-id is not present
+## then volume should not be able to start
+TEST ! $CLI volume start $V0;
+cleanup;
diff --git a/tests/bugs/bug-860663.t b/tests/bugs/bug-860663.t
new file mode 100644
index 000000000..05dea5fbc
--- /dev/null
+++ b/tests/bugs/bug-860663.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+function file_count()
+{
+ val=1
+
+ if [ "$1" == "$2" ]
+ then
+ val=0
+ fi
+ echo $val
+}
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+sleep 5;
+
+TEST touch $M0/files{1..10000};
+
+ORIG_FILE_COUNT=`ls -l $M0 | wc -l`;
+
+# Kill a brick process
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
+
+TEST $CLI volume rebalance $V0 fix-layout start
+
+sleep 30;
+
+TEST ! touch $M0/files{1..10000};
+
+TEST $CLI volume start $V0 force
+
+sleep 5;
+
+NEW_FILE_COUNT=`ls -l $M0 | wc -l`;
+
+EXPECT "0" file_count $ORIG_FILE_COUNT $NEW_FILE_COUNT
+
+cleanup;
diff --git a/tests/bugs/bug-861015-index.t b/tests/bugs/bug-861015-index.t
new file mode 100644
index 000000000..4b148e6cc
--- /dev/null
+++ b/tests/bugs/bug-861015-index.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume start $V0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}4
+cd $M0
+HEAL_FILES=0
+for i in {1..10}
+do
+ echo "abc" > $i
+ HEAL_FILES=$(($HEAL_FILES+1))
+done
+HEAL_FILES=$(($HEAL_FILES+3)) # the brick root is counted once per distribute subvolume
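+# Arithmetic sketch: 10 dirty files plus the brick root counted once for each
+# of the 3 distribute subvolumes gives 13 pending entries, which is the value
+# afr_get_pending_heal_count is expected to report below.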
+
+cd ~
+EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+TEST rm -f $M0/*
+TEST umount $M0
+TEST $CLI volume heal $V0 info
+#Only root dir should be present now in the indices
+EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}1
+EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}3
+EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}5
+cleanup
diff --git a/tests/bugs/bug-861015-log.t b/tests/bugs/bug-861015-log.t
new file mode 100644
index 000000000..032032470
--- /dev/null
+++ b/tests/bugs/bug-861015-log.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+log_wd=$(gluster --print-logdir)
+TEST glusterd
+TEST pidof glusterd
+rm -f $log_wd/glustershd.log
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+cd $M0
+for i in {1..10}
+do
+ dd if=/dev/urandom of=f bs=1M count=10 2>/dev/null
+done
+
+cd ~
+TEST $CLI volume heal $V0 info
+function count_inode_link_failures {
+ logfile=$1
+ grep "inode link failed on the inode" $logfile | wc -l
+}
+EXPECT "0" count_inode_link_failures $log_wd/glustershd.log
+cleanup
diff --git a/tests/bugs/bug-861542.t b/tests/bugs/bug-861542.t
new file mode 100755
index 000000000..5fd08f12d
--- /dev/null
+++ b/tests/bugs/bug-861542.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+# A distributed volume with a single brick was chosen solely for ease of
+# implementing the test case (to be precise, for ease of extracting the port number).
+TEST $CLI volume create $V0 $H0:$B0/brick0;
+
+TEST $CLI volume start $V0;
+
+function port_field()
+{
+ local vol=$1;
+ local opt=$2;
+ if [ $opt -eq '0' ]; then
+ $CLI volume status $vol | grep "brick0" | awk '{print $3}';
+ else
+ $CLI volume status $vol detail | grep "^Port " | awk '{print $3}';
+ fi
+}
+
+function xml_port_field()
+{
+ local vol=$1;
+ local opt=$2;
+    # Find the first occurrence of the string between <port> and </port>
+    $CLI --xml volume status $vol $opt | tr -d '\n' | \
+        sed -r 's/<port>/&\n/;s/<\/port>/\n&/;s/^.*\n(.*)\n.*$/\1/'| \
+        grep -v xml | tr -d '\n';
+}
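+# Illustrative: the function above prints whatever sits between the first
+# <port> and </port> pair of the XML status output, e.g. a brick port number
+# while the brick runs, or the literal "N/A" once it has been killed.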
+
+TEST $CLI volume status $V0;
+TEST $CLI volume status $V0 detail;
+TEST $CLI --xml volume status $V0;
+TEST $CLI --xml volume status $V0 detail;
+
+# Kill the brick process. After this, port number for the killed (in this case brick) process must be "N/A".
+kill `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-brick0.pid`
+
+EXPECT "N/A" port_field $V0 '0'; # volume status
+EXPECT "N/A" port_field $V0 '1'; # volume status detail
+
+EXPECT "N/A" xml_port_field $V0 '';
+EXPECT "N/A" xml_port_field $V0 'detail';
+
+cleanup;
diff --git a/tests/bugs/bug-862834.t b/tests/bugs/bug-862834.t
new file mode 100755
index 000000000..33aaea1a8
--- /dev/null
+++ b/tests/bugs/bug-862834.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+V1="patchy2"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+function check_brick()
+{
+ vol=$1;
+ num=$2
+ $CLI volume info $V0 | grep "Brick$num" | awk '{print $2}';
+}
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '2' brick_count $V0
+
+
+EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
+EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
+
+TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
+
+cleanup;
diff --git a/tests/bugs/bug-862967.t b/tests/bugs/bug-862967.t
new file mode 100644
index 000000000..00fa88440
--- /dev/null
+++ b/tests/bugs/bug-862967.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+function uid_gid_compare()
+{
+ val=1
+
+ if [ "$1" == "$3" ]
+ then
+ if [ "$2" == "$4" ]
+ then
+ val=0
+ fi
+ fi
+ echo "$val"
+}
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 --gid-timeout=-1 -s $H0 --volfile-id $V0 $M0;
+
+# change dir permissions
+mkdir $M0/dir;
+chown 1:1 $M0/dir;
+
+# Kill a brick process
+
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
+# change dir ownership
+NEW_UID=36;
+NEW_GID=36;
+chown $NEW_UID:$NEW_GID $M0/dir;
+
+# bring the brick back up
+TEST $CLI volume start $V0 force
+
+sleep 10;
+
+ls -l $M0/dir;
+
+# check if uid/gid is healed on backend brick which was taken down
+BACKEND_UID=`stat --printf=%u $B0/${V0}1/dir`;
+BACKEND_GID=`stat --printf=%g $B0/${V0}1/dir`;
+
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+
+cleanup;
diff --git a/tests/bugs/bug-863068.t b/tests/bugs/bug-863068.t
new file mode 100644
index 000000000..931aad623
--- /dev/null
+++ b/tests/bugs/bug-863068.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+## This function takes the number of entries reported by
+## "gluster volume heal <volname> info healed" for brick1 and brick2
+## and compares the initial values (before "volume heal full") with the
+## final values (after "gluster volume heal <volname> full").
+
+function getdiff()
+{
+ val=10
+ if [ "$1" == "$3" ]
+ then
+ if [ "$2" == "$4" ]
+ then
+ val=0
+ else
+ val=20
+ fi
+ fi
+
+ echo $val
+}
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2;
+TEST $CLI volume start $V0;
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+B0_hiphenated=`echo $B0 | tr '/' '-'`
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0$B0_hiphenated-brick1.pid` ;
+
+mkdir $M0/{a,b,c};
+echo "GLUSTERFS" >> $M0/a/file;
+
+TEST $CLI volume start $V0 force;
+sleep 5
+TEST $CLI volume heal $V0 full;
+sleep 5
+
+##First Brick Initial(Before full type self heal) value
+FBI=`gluster volume heal $V0 info healed | grep entries | awk '{print $4}' | head -n 1`
+
+##Second Brick Initial Value
+SBI=`gluster volume heal $V0 info healed | grep entries | awk '{print $4}' | tail -n 1`
+TEST $CLI volume heal $V0 full;
+
+sleep 5
+
+##First Brick Final value
+##Number of entries from output of <gluster volume heal volname info healed>
+
+FBF=`gluster volume heal $V0 info healed | grep entries | awk '{print $4}' | head -n 1`
+
+##Second Brick Final Value
+SBF=`gluster volume heal $V0 info healed | grep entries | awk '{print $4}' | tail -n 1`
+
+##get the difference of values
+EXPECT "0" getdiff $FBI $SBI $FBF $SBF;
+
+## Tests after this comment checks for the background self heal
+
+TEST mkdir $M0/d
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0$B0_hiphenated-brick1.pid` ;
+TEST $CLI volume set $V0 self-heal-daemon off
+dd if=/dev/urandom of=$M0/d/file1 bs=100M count=1 2>/dev/null;
+TEST $CLI volume start $V0 force
+sleep 3
+TEST ls -l $M0/d
+
+cleanup;
diff --git a/tests/bugs/bug-864222.t b/tests/bugs/bug-864222.t
new file mode 100755
index 000000000..6e02ab60b
--- /dev/null
+++ b/tests/bugs/bug-864222.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick0
+TEST $CLI volume start $V0
+
+sleep 5
+
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0
+cd $N0
+
+TEST ls
+
+TEST $CLI volume set $V0 nfs.enable-ino32 on
+# Main test. This should pass.
+TEST ls
+
+cd
+TEST umount $N0
+cleanup
+
diff --git a/tests/bugs/bug-865825.t b/tests/bugs/bug-865825.t
new file mode 100755
index 000000000..6bb1c2348
--- /dev/null
+++ b/tests/bugs/bug-865825.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure io-cache and write-behind don't interfere.
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Create a file with some recognizable contents.
+echo "test_data" > $M0/a_file;
+
+## Unmount.
+TEST umount $M0;
+
+## Mess with the flags as though brick-0 accuses brick-2 while brick-1 is
+## missing its brick-2 changelog altogether.
+value=0x000000010000000000000000
+setfattr -n trusted.afr.${V0}-client-2 -v $value $B0/${V0}-0/a_file
+setfattr -x trusted.afr.${V0}-client-2 $B0/${V0}-1/a_file
+echo "wrong_data" > $B0/${V0}-2/a_file
+
+## Remount and force a self-heal.
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+stat ${M0}/a_file > /dev/null
+
+## Make sure brick 2 now has the correct contents.
+EXPECT "test_data" cat $B0/${V0}-2/a_file
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+TEST umount $M0;
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-866459.t b/tests/bugs/bug-866459.t
new file mode 100644
index 000000000..d66f70c69
--- /dev/null
+++ b/tests/bugs/bug-866459.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Create and start a volume with aio enabled
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 linux-aio on
+TEST $CLI volume set $V0 background-self-heal-count 0
+TEST $CLI volume set $V0 performance.stat-prefetch off;
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+dd of=$M0/a if=/dev/urandom bs=1M count=1 2>&1 > /dev/null
+B0_hiphenated=`echo $B0 | tr '/' '-'`
+## Bring a brick down
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0$B0_hiphenated-${V0}1.pid`
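+## Only one glusterfsd (the surviving brick) should remain.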
+EXPECT '1' echo `pgrep glusterfsd | wc -l`
+## Rewrite the file
+dd of=$M0/a if=/dev/urandom bs=1M count=1 2>&1 > /dev/null
+TEST $CLI volume start $V0 force
+## Wait for the brick to give CHILD_UP in client protocol
+sleep 5
+md5offile2=`md5sum $B0/${V0}2/a | awk '{print $1}'`
+
+##trigger self-heal
+ls -l $M0/a
+
+EXPECT "$md5offile2" echo `md5sum $B0/${V0}1/a | awk '{print $1}'`
+
+## Finish up
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-867252.t b/tests/bugs/bug-867252.t
new file mode 100644
index 000000000..8309ed9b9
--- /dev/null
+++ b/tests/bugs/bug-867252.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '1' brick_count $V0
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2;
+EXPECT '2' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2;
+EXPECT '1' brick_count $V0
+
+cleanup;
diff --git a/tests/bugs/bug-867253.t b/tests/bugs/bug-867253.t
new file mode 100644
index 000000000..ae4e243af
--- /dev/null
+++ b/tests/bugs/bug-867253.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
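+## file_count <status1> <status2>: prints 0 only if both ls exit statuses were 0.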
+function file_count()
+{
+ val=1
+
+ if [ "$1" == "0" ]
+ then
+ if [ "$2" == "0" ]
+ then
+ val=0
+ fi
+ fi
+ echo $val
+}
+
+BRICK_COUNT=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+sleep 5;
+## Mount nfs, with nocache option
+TEST mount -o vers=3,nolock,noac -t nfs $H0:/$V0 $M0;
+
+touch $M0/files{1..1000};
+
+# Kill a brick process
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}0.pid`;
+
+echo 3 >/proc/sys/vm/drop_caches;
+
+ls -l $M0 >/dev/null;
+
+NEW_FILE_COUNT=`echo $?`;
+
+TEST $CLI volume start $V0 force
+
+# Kill a brick process
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
+
+echo 3 >/proc/sys/vm/drop_caches;
+
+ls -l $M0 >/dev/null;
+
+NEW_FILE_COUNT1=`echo $?`;
+
+EXPECT "0" file_count $NEW_FILE_COUNT $NEW_FILE_COUNT1
+
+TEST umount -l $M0
+
+cleanup
diff --git a/tests/bugs/bug-869724.t b/tests/bugs/bug-869724.t
new file mode 100644
index 000000000..eec5d344c
--- /dev/null
+++ b/tests/bugs/bug-869724.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+## Make volume tightly consistent for metadata
+TEST $CLI volume set $V0 performance.stat-prefetch off;
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+touch $M0/test;
+build_tester $(dirname $0)/getlk_owner.c
+
+TEST $(dirname $0)/getlk_owner $M0/test;
+
+rm -f $(dirname $0)/getlk_owner
+cleanup;
+
diff --git a/tests/bugs/bug-872923.t b/tests/bugs/bug-872923.t
new file mode 100755
index 000000000..6757846dc
--- /dev/null
+++ b/tests/bugs/bug-872923.t
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
+TEST $CLI volume start $V0
+sleep 5
+
+mount -t nfs -o vers=3,nolock `hostname`:/$V0 $N0
+
+cd $N0
+mkdir test_hardlink_self_heal;
+cd test_hardlink_self_heal;
+
+for i in `seq 1 5`;
+do
+ mkdir dir.$i;
+ for j in `seq 1 10`;
+ do
+ dd if=/dev/zero of=dir.$i/file.$j bs=1k count=$j > /dev/null 2>&1;
+ done;
+done;
+
+cd ..
+kill `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-brick0.pid`
+sleep 2
+
+
+cd test_hardlink_self_heal;
+
+RET=0
+for i in `seq 1 5`;
+do
+ for j in `seq 1 10`;
+ do
+ ln dir.$i/file.$j dir.$i/link_file.$j > /dev/null 2>&1;
+ RET=$?
+ if [ $RET -ne 0 ]; then
+ break;
+ fi
+ done ;
+ if [ $RET -ne 0 ]; then
+ break;
+ fi
+done;
+
+cd
+umount $N0
+
+EXPECT "0" echo $RET;
+
+cleanup;
diff --git a/tests/bugs/bug-873367.t b/tests/bugs/bug-873367.t
new file mode 100755
index 000000000..cfbbc98d0
--- /dev/null
+++ b/tests/bugs/bug-873367.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+SSL_BASE=/etc/ssl
+SSL_KEY=$SSL_BASE/glusterfs.key
+SSL_CERT=$SSL_BASE/glusterfs.pem
+SSL_CA=$SSL_BASE/glusterfs.ca
+
+cleanup;
+rm -f $SSL_BASE/glusterfs.*
+mkdir -p $B0/1
+mkdir -p $M0
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST openssl genrsa -out $SSL_KEY 1024
+TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
+ln $SSL_CERT $SSL_CA
+
+TEST $CLI volume create $V0 $H0:$B0/1
+TEST $CLI volume set $V0 server.ssl on
+TEST $CLI volume set $V0 client.ssl on
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+echo some_data > $M0/data_file
+TEST umount $M0
+
+# If the bug is not fixed, the next mount will fail.
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+EXPECT some_data cat $M0/data_file
+
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/bug-873549.t b/tests/bugs/bug-873549.t
new file mode 100644
index 000000000..5b541de6c
--- /dev/null
+++ b/tests/bugs/bug-873549.t
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd -LDEBUG;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+TEST $CLI volume set $V0 performance.cache-size 512MB
+TEST $CLI volume start $V0
+TEST $CLI volume statedump $V0 all
+
+cleanup;
diff --git a/tests/bugs/bug-873962-spb.t b/tests/bugs/bug-873962-spb.t
new file mode 100644
index 000000000..62a8318ed
--- /dev/null
+++ b/tests/bugs/bug-873962-spb.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+touch $M0/a
+
+exec 5<$M0/a
+
+kill_brick $V0 $H0 $B0/${V0}0
+echo "hi" > $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+kill_brick $V0 $H0 $B0/${V0}1
+echo "bye" > $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+
+TEST ! cat $M0/a #To mark split-brain
+
+TEST ! read -u 5 line
+exec 5<&-
+
+cleanup;
diff --git a/tests/bugs/bug-873962.t b/tests/bugs/bug-873962.t
new file mode 100755
index 000000000..b245cc3da
--- /dev/null
+++ b/tests/bugs/bug-873962.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+#AFR TEST-IDENTIFIER SPLIT-BRAIN
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+B0_hiphenated=`echo $B0 | tr '/' '-'`
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+
+# If we allow self-heal to happen in the background, we'll get spurious
+# failures - especially at the point labeled "FAIL HERE" but
+# occasionally elsewhere. This behavior is very timing-dependent. It
+# doesn't show up in Jenkins, but it does on JD's and KP's machines, and
+# it got sharply worse because of an unrelated fsync change (6ae6f3d)
+# which changed timing. Putting anything at the FAIL HERE marker tends
+# to make it go away most of the time on affected machines, even if the
+# "anything" is unrelated.
+#
+# What's going on is that the I/O on the first mountpoint is allowed to
+# complete even though self-heal is still in progress and the state on
+# disk does not reflect its result. In fact, the state changes during
+# self-heal create the appearance of split brain when the second I/O
+# comes in, so that fails even though we haven't actually been in split
+# brain since the manual xattr operations. By disallowing background
+# self-heal, we ensure that the second I/O can't happen before self-heal
+# is complete, because it has to follow the first I/O which now has to
+# follow self-heal.
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+#Make sure self-heal is not triggered when the bricks are re-started
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+TEST touch $M0/a
+TEST touch $M0/b
+TEST touch $M0/c
+TEST touch $M0/d
+echo "1" > $M0/b
+echo "1" > $M0/d
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "1" > $M0/a
+echo "1" > $M0/c
+TEST setfattr -n trusted.mdata -v abc $M0/b
+TEST setfattr -n trusted.mdata -v abc $M0/d
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+echo "2" > $M0/a
+echo "2" > $M0/c
+TEST setfattr -n trusted.mdata -v def $M0/b
+TEST setfattr -n trusted.mdata -v def $M0/d
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable
+#Files are in split-brain, so open should fail
+TEST ! cat $M0/a;
+TEST ! cat $M1/a;
+TEST ! cat $M0/b;
+TEST ! cat $M1/b;
+
+#Reset split-brain status
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/a;
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/b;
+
+#The operations should do self-heal and give correct output
+EXPECT "2" cat $M0/a;
+# FAIL HERE - see comment about cluster.background-self-heal-count above.
+EXPECT "2" cat $M1/a;
+EXPECT "def" getfattr -n trusted.mdata --only-values $M0/b 2>/dev/null
+EXPECT "def" getfattr -n trusted.mdata --only-values $M1/b 2>/dev/null
+
+TEST umount $M0
+TEST umount $M1
+
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable
+
+#Files are in split-brain, so open should fail
+TEST ! cat $M0/c
+TEST ! cat $M1/c
+TEST ! cat $M0/d
+TEST ! cat $M1/d
+
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/c
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/d
+
+#The operations should NOT do self-heal but give correct output
+EXPECT "2" cat $M0/c
+EXPECT "2" cat $M1/c
+EXPECT "1" cat $M0/d
+EXPECT "1" cat $M1/d
+
+#Check that the self-heal is not triggered.
+EXPECT "1" cat $B0/${V0}1/c
+EXPECT "abc" getfattr -n trusted.mdata --only-values $B0/${V0}1/d 2>/dev/null
+cleanup;
diff --git a/tests/bugs/bug-874498.t b/tests/bugs/bug-874498.t
new file mode 100644
index 000000000..0b5991011
--- /dev/null
+++ b/tests/bugs/bug-874498.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2;
+TEST $CLI volume start $V0;
+
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+B0_hiphenated=`echo $B0 | tr '/' '-'`
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0$B0_hiphenated-brick1.pid` ;
+
+echo "GLUSTER FILE SYSTEM" > $M0/FILE1
+echo "GLUSTER FILE SYSTEM" > $M0/FILE2
+
+FILEN=$B0"/brick2"
+XATTROP=$FILEN/.glusterfs/indices/xattrop
+
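+## get_gfid <path>: prints the trusted.gfid xattr of <path>, reformatted as a UUID.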
+function get_gfid()
+{
+path_of_file=$1
+
+gfid_value=`getfattr -d -m . $path_of_file -e hex 2>/dev/null | grep trusted.gfid | cut --complement -c -15 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'`
+
+echo $gfid_value
+}
+
+GFID_ROOT=`get_gfid $B0/brick2`
+GFID_FILE1=`get_gfid $B0/brick2/FILE1`
+GFID_FILE2=`get_gfid $B0/brick2/FILE2`
+
+
+count=0
+for i in `ls $XATTROP`
+do
+ if [ "$i" == "$GFID_ROOT" ] || [ "$i" == "$GFID_FILE1" ] || [ "$i" == "$GFID_FILE2" ]
+ then
+ count=$(( count + 1 ))
+ fi
+done
+
+EXPECT "3" echo $count
+
+
+TEST $CLI volume start $V0 force
+sleep 5
+TEST $CLI volume heal $V0
+
+
+##The expected number of entries in the .glusterfs/indices/xattrop directory is 0
+EXPECT_WITHIN 60 '0' count_sh_entries $FILEN;
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-877293.t b/tests/bugs/bug-877293.t
new file mode 100755
index 000000000..774c2a0cc
--- /dev/null
+++ b/tests/bugs/bug-877293.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a replicated volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+TEST $CLI volume set $V0 indexing on
+
+TEST $CLI volume start $V0;
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Mount client-pid=-1
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --client-pid=-1 $M1
+
+TEST touch $M0
+
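+# Derive the volume UUID from the trusted.glusterfs.volume-mark xattr and use it
+# to build the per-volume xtime xattr key.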
+vol_uuid=`getfattr -n trusted.glusterfs.volume-mark -ehex $M1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'`
+xtime=trusted.glusterfs.$vol_uuid.xtime
+
+TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+
+TEST kill_brick $V0 $H0 $B0/${V0}-0
+
+TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+
+TEST umount $M0
+TEST umount $M1
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup
diff --git a/tests/bugs/bug-877885.t b/tests/bugs/bug-877885.t
new file mode 100755
index 000000000..0d4620b00
--- /dev/null
+++ b/tests/bugs/bug-877885.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
+TEST $CLI volume start $V0
+
+sleep 5
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \
+$M0;
+
+TEST touch $M0/file
+TEST mkdir $M0/dir
+
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0
+cd $N0
+
+rm -rf * &
+
+TEST mount -t nfs -o retry=0,nolock,vers=3 $H0:/$V0 $N1;
+
+cd;
+
+kill %1;
+
+TEST umount $N0
+TEST umount $N1;
+
+cleanup
diff --git a/tests/bugs/bug-877992.t b/tests/bugs/bug-877992.t
new file mode 100755
index 000000000..932ecc77b
--- /dev/null
+++ b/tests/bugs/bug-877992.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd -LDEBUG
+TEST pidof glusterd
+
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+function hooks_prep ()
+{
+ local event=$1
+ touch /tmp/pre.out /tmp/post.out
+ touch /var/lib/glusterd/hooks/1/"$event"/pre/Spre.sh
+ touch /var/lib/glusterd/hooks/1/"$event"/post/Spost.sh
+
+ printf "#! /bin/bash\necho "$event"Pre > /tmp/pre.out\n" > /var/lib/glusterd/hooks/1/"$event"/pre/Spre.sh
+ printf "#! /bin/bash\necho "$event"Post > /tmp/post.out\n" > /var/lib/glusterd/hooks/1/"$event"/post/Spost.sh
+ chmod a+x /var/lib/glusterd/hooks/1/"$event"/pre/Spre.sh
+ chmod a+x /var/lib/glusterd/hooks/1/"$event"/post/Spost.sh
+}
+
+function hooks_cleanup ()
+{
+ local event=$1
+ rm /tmp/pre.out /tmp/post.out
+ rm /var/lib/glusterd/hooks/1/"$event"/pre/Spre.sh
+ rm /var/lib/glusterd/hooks/1/"$event"/post/Spost.sh
+}
+
+## Verify volume is created and its hooks script ran
+hooks_prep 'create'
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'createPre' cat /tmp/pre.out;
+EXPECT 'createPost' cat /tmp/post.out;
+hooks_cleanup 'create'
+
+
+## Start volume and verify that its hooks script ran
+hooks_prep 'start'
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT 'startPre' cat /tmp/pre.out;
+EXPECT 'startPost' cat /tmp/post.out;
+hooks_cleanup 'start'
+
+cleanup;
diff --git a/tests/bugs/bug-878004.t b/tests/bugs/bug-878004.t
new file mode 100644
index 000000000..5bee4c62f
--- /dev/null
+++ b/tests/bugs/bug-878004.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3;
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+
+TEST $CLI volume start $V0
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2;
+EXPECT '2' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3;
+EXPECT '1' brick_count $V0
+
+cleanup;
+
diff --git a/tests/bugs/bug-879490.t b/tests/bugs/bug-879490.t
new file mode 100755
index 000000000..5b9ae7bb9
--- /dev/null
+++ b/tests/bugs/bug-879490.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+function peer_probe()
+{
+ $CLI peer probe a.b.c.d --xml | xmllint --format - | grep "<opErrstr>"
+}
+
+EXPECT " <opErrstr>Probe returned with unknown errno 107</opErrstr>" peer_probe
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-879494.t b/tests/bugs/bug-879494.t
new file mode 100755
index 000000000..5caca7922
--- /dev/null
+++ b/tests/bugs/bug-879494.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+function peer_probe()
+{
+ $CLI peer detach a.b.c.d --xml | xmllint --format - | grep "<opErrstr>"
+}
+
+EXPECT " <opErrstr>a.b.c.d is not part of cluster</opErrstr>" peer_probe
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-880898.t b/tests/bugs/bug-880898.t
new file mode 100644
index 000000000..a069d4a8a
--- /dev/null
+++ b/tests/bugs/bug-880898.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+pkill glusterfs
+uuid=""
+for line in $(cat /var/lib/glusterd/glusterd.info)
+do
+ if [[ $line == UUID* ]]
+ then
+ uuid=`echo $line | sed -r 's/^.{5}//'`
+ fi
+done
+
+gluster volume heal $V0 info | grep "Status: self-heal-daemon is not running on $uuid";
+EXPECT "0" echo $?
+
+cleanup;
diff --git a/tests/bugs/bug-882278.t b/tests/bugs/bug-882278.t
new file mode 100755
index 000000000..7933e1863
--- /dev/null
+++ b/tests/bugs/bug-882278.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+cleanup
+
+# Is there a good reason to require --fqdn elsewhere? It's worse than useless
+# here.
+H0=$(hostname -s)
+
+function recreate {
+ # The rm is necessary so we don't get fooled by leftovers from old runs.
+ rm -rf $1 && mkdir -p $1
+}
+
+function count_lines {
+ grep "$1" $2/* | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+TEST recreate ${B0}/${V0}-0
+TEST recreate ${B0}/${V0}-1
+TEST $CLI volume create $V0 $H0:$B0/${V0}-{0,1}
+TEST $CLI volume set $V0 cluster.nufa on
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+special_option="--xlator-option ${V0}-dht.local-volume-name=${V0}-client-1"
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $special_option $M0
+
+## Create a bunch of test files.
+for i in $(seq 0 99); do
+ echo hello > $(printf $M0/file%02d $i)
+done
+
+## Make sure the files went to the right place. There might be link files in
+## the other brick, but they won't have any contents.
+EXPECT "0" count_lines hello ${B0}/${V0}-0
+EXPECT "100" count_lines hello ${B0}/${V0}-1
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+TEST umount $M0;
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-884328.t b/tests/bugs/bug-884328.t
new file mode 100644
index 000000000..ee5509bbc
--- /dev/null
+++ b/tests/bugs/bug-884328.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST check_option_help_presence "cluster.quorum-type"
+TEST check_option_help_presence "cluster.quorum-count"
+cleanup;
diff --git a/tests/bugs/bug-884452.t b/tests/bugs/bug-884452.t
new file mode 100644
index 000000000..d07651e46
--- /dev/null
+++ b/tests/bugs/bug-884452.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/{1..10000}
+
+RUN_LS_LOOP_FILE="$M0/run-ls-loop"
+function ls-loop
+{
+ while [ -f $RUN_LS_LOOP_FILE ]; do
+ ls -lR $M0 1>/dev/null 2>&1
+ done;
+}
+
+touch $RUN_LS_LOOP_FILE
+ls-loop &
+
+function vol-status-loop
+{
+ for i in {1..1000}; do
+ $CLI volume status $V0 clients >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+ done;
+
+ return 0
+}
+
+TEST vol-status-loop
+
+rm -f $RUN_LS_LOOP_FILE
+wait
+
+TEST umount $M0
+
+cleanup;
diff --git a/tests/bugs/bug-884455.t b/tests/bugs/bug-884455.t
new file mode 100755
index 000000000..3b3a2241e
--- /dev/null
+++ b/tests/bugs/bug-884455.t
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../dht.rc
+
+cleanup;
+
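+## layout_compare <l1> <l2> <l3>: prints 1 if any two of the three layouts are
+## identical (i.e. they overlap), 0 otherwise.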
+function layout_compare()
+{
+ res=0
+
+ if [ "$1" == "$2" ]
+ then
+ res=1
+ fi
+ if [ "$1" == "$3" ]
+ then
+ res=1
+ fi
+ if [ "$2" == "$3" ]
+ then
+ res=1
+ fi
+
+ echo $res
+}
+
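+## get_layout <dir1> <dir2> <dir3>: prints 1 when the dht layouts on the three
+## bricks overlap, 0 when they are all distinct.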
+function get_layout()
+{
+ layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1|grep dht |cut -d = -f2`
+ layout2=`getfattr -n trusted.glusterfs.dht -e hex $2 2>&1|grep dht |cut -d = -f2`
+ layout3=`getfattr -n trusted.glusterfs.dht -e hex $3 2>&1|grep dht |cut -d = -f2`
+
+ ret=$(layout_compare $layout1 $layout2 $layout3)
+
+ if [ $ret -ne 0 ]
+ then
+ echo 1
+ else
+ echo 0
+ fi
+
+}
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+## set subvols-per-dir option
+TEST $CLI volume set $V0 subvols-per-directory 2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir{1..10} 2>/dev/null;
+
+## Add a brick and run rebalance to force a rewrite of the layout
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2
+sleep 5;
+
+## trigger dir self heal on client
+TEST ls -l $M0 2>/dev/null;
+
+TEST $CLI volume rebalance $V0 start force
+
+EXPECT_WITHIN 30 "0" rebalance_completed
+
+## check for layout overlaps.
+EXPECT "0" get_layout $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+EXPECT "0" get_layout $B0/${V0}0/dir1 $B0/${V0}1/dir1 $B0/${V0}2/dir1
+EXPECT "0" get_layout $B0/${V0}0/dir2 $B0/${V0}1/dir2 $B0/${V0}2/dir2
+EXPECT "0" get_layout $B0/${V0}0/dir3 $B0/${V0}1/dir3 $B0/${V0}2/dir3
+EXPECT "0" get_layout $B0/${V0}0/dir4 $B0/${V0}1/dir4 $B0/${V0}2/dir4
+EXPECT "0" get_layout $B0/${V0}0/dir5 $B0/${V0}1/dir5 $B0/${V0}2/dir5
+EXPECT "0" get_layout $B0/${V0}0/dir6 $B0/${V0}1/dir6 $B0/${V0}2/dir6
+EXPECT "0" get_layout $B0/${V0}0/dir7 $B0/${V0}1/dir7 $B0/${V0}2/dir7
+EXPECT "0" get_layout $B0/${V0}0/dir8 $B0/${V0}1/dir8 $B0/${V0}2/dir8
+EXPECT "0" get_layout $B0/${V0}0/dir9 $B0/${V0}1/dir9 $B0/${V0}2/dir9
+EXPECT "0" get_layout $B0/${V0}0/dir10 $B0/${V0}1/dir10 $B0/${V0}2/dir10
+
+cleanup;
diff --git a/tests/bugs/bug-884597.t b/tests/bugs/bug-884597.t
new file mode 100755
index 000000000..8eb1f330b
--- /dev/null
+++ b/tests/bugs/bug-884597.t
@@ -0,0 +1,152 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../dht.rc
+
+cleanup;
+BRICK_COUNT=3
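+## uid_gid_compare <uid1> <gid1> <uid2> <gid2>: prints 0 when both pairs match,
+## 1 otherwise.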
+function uid_gid_compare()
+{
+ val=1
+
+ if [ "$1" == "$3" ]
+ then
+ if [ "$2" == "$4" ]
+ then
+ val=0
+ fi
+ fi
+ echo "$val"
+}
+
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+i=1
+NEW_UID=36
+NEW_GID=36
+
+TEST touch $M0/$i
+
+chown $NEW_UID:$NEW_GID $M0/$i
+## rename till file gets a linkfile
+
+while [ $i -ne 0 ]
+do
+ TEST mv $M0/$i $M0/$(( $i+1 ))
+ let i++
+ file_has_linkfile $i
+ has_link=$?
+ if [ $has_link -eq 2 ]
+ then
+ break;
+ fi
+done
+
+get_hashed_brick $i
+cached=$?
+
+# check whether the linkfile was created with the correct uid/gid
+BACKEND_UID=`stat --printf=%u $B0/${V0}$cached/$i`;
+BACKEND_GID=`stat --printf=%g $B0/${V0}$cached/$i`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+
+# remove linkfile from backend, and trigger a lookup heal. uid/gid should match
+rm -rf $B0/${V0}$cached/$i
+
+# without an unmount, we are not able to trigger a lookup-based heal
+
+TEST umount $M0
+
+## Mount FUSE
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+lookup=`ls -l $M0/$i 2>/dev/null`
+
+# check whether the healed linkfile has the correct uid/gid
+BACKEND_UID=`stat --printf=%u $B0/${V0}$cached/$i`;
+BACKEND_GID=`stat --printf=%g $B0/${V0}$cached/$i`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+# create hardlinks. Make sure a linkfile gets created
+
+i=1
+NEW_UID=36
+NEW_GID=36
+
+TEST touch $M0/file
+chown $NEW_UID:$NEW_GID $M0/file;
+
+## ln till file gets a linkfile
+
+while [ $i -ne 0 ]
+do
+ TEST ln $M0/file $M0/link$i
+
+ file_has_linkfile link$i
+ has_link=$?
+ if [ $has_link -eq 2 ]
+ then
+ break;
+ fi
+ let i++
+done
+
+get_hashed_brick link$i
+cached=$?
+
+# check whether the linkfile was created with the correct uid/gid
+BACKEND_UID=`stat --printf=%u $B0/${V0}$cached/link$i`;
+BACKEND_GID=`stat --printf=%g $B0/${V0}$cached/link$i`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+
+## UID/GID creation as different user
+i=1
+NEW_UID=36
+NEW_GID=36
+
+TEST touch $M0/user_file1
+TEST chown $NEW_UID:$NEW_GID $M0/user_file1;
+
+## Give permissions on the volume so that different users can perform renames
+
+TEST chmod 0777 $M0
+
+## Add a user known as ABC and perform renames
+TEST `useradd -M ABC 2>/dev/null`
+
+TEST cd $M0
+## rename as different user till file gets a linkfile
+
+while [ $i -ne 0 ]
+do
+ su -c "mv $M0/user_file$i $M0/user_file$(( $i+1 ))" ABC
+ let i++
+ file_has_linkfile user_file$i
+ has_link=$?
+ if [ $has_link -eq 2 ]
+ then
+ break;
+ fi
+done
+
+## del user ABC
+TEST userdel ABC
+
+get_hashed_brick user_file$i
+cached=$?
+
+# check whether the linkfile was created with the correct uid/gid
+BACKEND_UID=`stat --printf=%u $B0/${V0}$cached/user_file$i`;
+BACKEND_GID=`stat --printf=%g $B0/${V0}$cached/user_file$i`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+cleanup;
diff --git a/tests/bugs/bug-886998.t b/tests/bugs/bug-886998.t
new file mode 100644
index 000000000..7a905a113
--- /dev/null
+++ b/tests/bugs/bug-886998.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+# This tests that the replicate trash directory (.landfill) has the following
+# properties.
+# Note: This is for backward compatibility with 3.3 glusterfs.
+# In the latest releases this dir is present inside .glusterfs of the brick.
+# 1) lookup of the trash dir fails
+# 2) readdir does not show this directory
+# 3) Self-heal never heals these directories.
+gfid1="0xc2e75dde97f346e7842d1076a8e699f8"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+
+TEST mkdir $B0/${V0}1/.landfill
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}1/.landfill
+TEST mkdir $B0/${V0}0/.landfill
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/.landfill
+
+TEST ! stat $M0/.landfill
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+
+TEST rmdir $B0/${V0}0/.landfill
+#Force a conservative merge; it should not create .landfill
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/
+
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/
+
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+TEST ! stat $B0/${V0}0/.landfill
+TEST stat $B0/${V0}1/.landfill
+
+#Test that the dir is not deleted even when the xattrs suggest deleting it
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/
+
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/
+
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+TEST ! stat $B0/${V0}0/.landfill
+TEST stat $B0/${V0}1/.landfill
+cleanup;
diff --git a/tests/bugs/bug-887098-gmount-crash.t b/tests/bugs/bug-887098-gmount-crash.t
new file mode 100644
index 000000000..1998b4062
--- /dev/null
+++ b/tests/bugs/bug-887098-gmount-crash.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
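+## pidgrep <pattern>: prints the pid of the first ps entry matching <pattern>;
+## used below to verify the fuse mount process is still alive.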
+function pidgrep()
+{
+ ps ax | grep "$1" | grep -v grep | awk '{print $1}' | head -1
+}
+
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 --acl $M0
+MOUNT_PID=`ps ax |grep "glusterfs -s $H0 --volfile-id=$V0 --acl $M0" | grep -v grep | awk '{print $1}' | head -1`
+
+for i in {1..25};
+do
+ mkdir $M0/tmp_$i && cat /etc/hosts > $M0/tmp_$i/file
+ cp -RPp $M0/tmp_$i $M0/newtmp_$i && cat /etc/hosts > $M0/newtmp_$i/newfile
+done
+
+EXPECT "$MOUNT_PID" pidgrep $MOUNT_PID
+TEST rm -rf $M0/*
+umount $M0
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-887145.t b/tests/bugs/bug-887145.t
new file mode 100755
index 000000000..e2013e50b
--- /dev/null
+++ b/tests/bugs/bug-887145.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 performance.open-behind off;
+TEST $CLI volume start $V0
+
+sleep 2;
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+EXPECT_WITHIN 20 "1" is_nfs_export_available;
+
+
+useradd tmp_user 2>/dev/null 1>/dev/null;
+mkdir $M0/dir;
+mkdir $M0/other;
+cp /etc/passwd $M0/;
+cp $M0/passwd $M0/file;
+chmod 600 $M0/file;
+
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0;
+
+chown -R nfsnobody:nfsnobody $M0/dir;
+chown -R tmp_user:tmp_user $M0/other;
+
+TEST $CLI volume set $V0 server.root-squash on;
+
+sleep 2;
+
+EXPECT_WITHIN 20 "1" is_nfs_export_available;
+
+# Creating files and directories in the root of the glusterfs and nfs mounts,
+# which is owned by root, should fail with EACCES because the fops are
+# executed as nfsnobody.
+touch $M0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+touch $N0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+cp $M0/file $M0/tmp_file 2>/dev/null;
+TEST [ $? -ne 0 ]
+cp $N0/file $N0/tmp_file 2>/dev/null;
+TEST [ $? -ne 0 ]
+cat $M0/file 2>/dev/null;
+TEST [ $? -ne 0 ]
+# here the read should be allowed because even though the file "passwd" is
+# owned by root, its permissions allow other users to read it.
+cat $M0/passwd 1>/dev/null;
+TEST [ $? -eq 0 ]
+cat $N0/passwd 1>/dev/null;
+TEST [ $? -eq 0 ]
+
+# creating files and directories should succeed, as the fops are executed
+# inside a directory owned by nfsnobody
+TEST touch $M0/dir/file;
+TEST touch $N0/dir/foo;
+TEST mkdir $M0/dir/new;
+TEST mkdir $N0/dir/other;
+TEST rm -f $M0/dir/file $M0/dir/foo;
+TEST rmdir $N0/dir/*;
+
+# creating files and directories here should fail, as the "other" directory is
+# owned by tmp_user.
+touch $M0/other/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+touch $N0/other/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/other/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/other/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+userdel tmp_user;
+rm -rf /home/tmp_user;
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-888174.t b/tests/bugs/bug-888174.t
new file mode 100644
index 000000000..4ea34645b
--- /dev/null
+++ b/tests/bugs/bug-888174.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This tests whether flush and fsync wake up the delayed post-op.
+#If it is not woken up, the INODELK from the next command waits
+#for post-op-delay-secs, and a pending changelog remains even after the command
+#completes.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
+
+TEST $CLI volume set $V0 cluster.eager-lock on
+
+TEST $CLI volume set $V0 performance.flush-behind off
+EXPECT "off" volume_option $V0 performance.flush-behind
+
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 3
+EXPECT "3" volume_option $V0 cluster.post-op-delay-secs
+
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0
+
+#Check that INODELK MAX latency is not in the order of seconds
+TEST gluster volume profile $V0 start
+for i in {1..5}
+do
+ echo hi > $M0/a
+done
+#Fail if the MAX INODELK fop latency is on the order of seconds.
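+#The awk below picks the maximum INODELK latency (column 6); egrep flags it only
+#if its integer part has 7 or more digits, i.e. on the order of seconds in usec.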
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+
+TEST [ -z $inodelk_max_latency ]
+
+TEST dd of=$M0/a if=/dev/urandom bs=1M count=10 conv=fsync
+#Check for no trace of pending changelog. Flush should make sure of it.
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.$V0-client-1
+
+dd of=$M0/a if=/dev/urandom bs=1M count=1024 2>/dev/null &
+p=$!
+#trigger graph switches; test that fsync does not leave any pending flags
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+kill -SIGTERM $p
+#wait for dd to exit
+wait > /dev/null 2>&1
+
+#The goal is to check that no permanent FOOL changelog is left behind
+sleep 5
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.$V0-client-1
+
+cleanup;
diff --git a/tests/bugs/bug-888752.t b/tests/bugs/bug-888752.t
new file mode 100644
index 000000000..56d3f9ffb
--- /dev/null
+++ b/tests/bugs/bug-888752.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+
+# Check if XML output is generated correctly for volume status of a single brick
+# present on another peer when no async tasks are running.
+
+function get_peer_count {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+cleanup
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN 5 1 get_peer_count
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
+
+TEST $CLI_1 volume stop $V0
+
+cleanup
diff --git a/tests/bugs/bug-889630.t b/tests/bugs/bug-889630.t
new file mode 100755
index 000000000..b04eb3407
--- /dev/null
+++ b/tests/bugs/bug-889630.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function volume_count {
+ local cli=$1;
+ if [ $cli -eq '1' ] ; then
+ $CLI_1 volume info | grep 'Volume Name' | wc -l;
+ else
+ $CLI_2 volume info | grep 'Volume Name' | wc -l;
+ fi
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN 20 1 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+b="B1";
+
+#Create an extra file in the originator's volume store
+touch ${!b}/glusterd/vols/$V0/run/file
+
+TEST $CLI_1 volume stop $V0
+#Test for self-commit failure
+TEST $CLI_1 volume delete $V0
+
+#Check whether delete succeeded on both nodes
+EXPECT "0" volume_count '1'
+EXPECT "0" volume_count '2'
+
+#Check whether the volume name can be reused after deletion
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
+TEST $CLI_1 volume start $V0
+
+#Create an extra file in the peer's volume store
+touch ${!b}/glusterd/vols/$V0/run/file
+
+TEST $CLI_1 volume stop $V0
+#Test for commit failure on the other node
+TEST $CLI_2 volume delete $V0
+
+EXPECT "0" volume_count '1';
+EXPECT "0" volume_count '2';
+
+cleanup;
diff --git a/tests/bugs/bug-889996.t b/tests/bugs/bug-889996.t
new file mode 100644
index 000000000..6b07d8918
--- /dev/null
+++ b/tests/bugs/bug-889996.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+rm -rf $B0/${V0}1;
+
+TEST ! $CLI volume start $V0;
+EXPECT 0 online_brick_count;
+
+cleanup;
diff --git a/tests/bugs/bug-892730.t b/tests/bugs/bug-892730.t
new file mode 100755
index 000000000..0a677069e
--- /dev/null
+++ b/tests/bugs/bug-892730.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+#
+# Bug 892730 - Verify that afr handles EIO errors from the brick properly.
+#
+# The associated bug describes a problem where EIO errors returned from the
+# local filesystem of a brick that is part of a replica volume are exposed to
+# the user. This test simulates such failures and verifies that the volume
+# operates as expected.
+#
+########
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST mkdir -p $B0/test{1,2}
+
+# The graph is a two brick replica with error-gen enabled on the second brick
+# and configured to return EIO lookup errors 100% of the time. This simulates
+# a brick with a crashed or shut down local filesystem. Note that the order in
+# which errors occur is a factor in reproducing the original bug (error-gen
+# must be enabled in the second brick for this test to be effective).
+
+cat > $B0/test.vol <<EOF
+volume test-posix-0
+ type storage/posix
+ option directory $B0/test1
+end-volume
+
+volume test-locks-0
+ type features/locks
+ subvolumes test-posix-0
+end-volume
+
+volume test-posix-1
+ type storage/posix
+ option directory $B0/test2
+end-volume
+
+volume test-error-1
+ type debug/error-gen
+ option failure 100
+ option enable lookup
+ option error-no EIO
+ subvolumes test-posix-1
+end-volume
+
+volume test-locks-1
+ type features/locks
+ subvolumes test-error-1
+end-volume
+
+volume test-replicate-0
+ type cluster/replicate
+ option background-self-heal-count 0
+ subvolumes test-locks-0 test-locks-1
+end-volume
+EOF
+
+TEST glusterd
+
+TEST glusterfs --volfile=$B0/test.vol --attribute-timeout=0 --entry-timeout=0 $M0
+
+# We should be able to create and remove a file without interference from the
+# "broken" brick.
+
+TEST touch $M0/file
+TEST rm $M0/file
+
+TEST umount $M0
+
+rm -f $B0/test.vol
+rm -rf $B0/test1 $B0/test2
+
+cleanup;
+
diff --git a/tests/bugs/bug-893338.t b/tests/bugs/bug-893338.t
new file mode 100644
index 000000000..cc39f28e3
--- /dev/null
+++ b/tests/bugs/bug-893338.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+## Test for symlink success
+TEST touch $M0/reg_file
+TEST ln -s $M0/reg_file $M0/symlink
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-893378.t b/tests/bugs/bug-893378.t
new file mode 100755
index 000000000..fd8b9a7ce
--- /dev/null
+++ b/tests/bugs/bug-893378.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+cleanup;
+BRICK_COUNT=3
+
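+## file_has_linkfile <name>: returns (via $?) the number of bricks on which
+## <name> exists; BRICK1, BRICK2, ... record the matching brick indices.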
+function file_has_linkfile()
+{
+ i=0
+ j=0
+ while [ $i -lt $BRICK_COUNT ]
+ do
+ stat=`stat $B0/${V0}$i/$1 2>/dev/null`
+ if [ $? -eq 0 ]
+ then
+ let j++
+ let "BRICK${j}=$i"
+
+ fi
+ let i++
+ done
+ return $j
+}
+
+function get_cached_brick()
+{
+ i=1
+ while [ $i -lt 3 ]
+ do
+ test=`getfattr -n trusted.glusterfs.dht.linkto -e text $B0/${V0}$BRICK$i 2>&1`
+ if [ $? -eq 1 ]
+ then
+ cached=$BRICK"$i"
+ i=$(( $i+3 ))
+ fi
+ let i++
+ done
+
+ return $cached
+}
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+## create a linkfile on subvolume 0
+TEST touch $M0/1
+TEST mv $M0/1 $M0/2
+
+file_has_linkfile 2
+has_link=$?
+if [ $has_link -eq 2 ]
+then
+ get_cached_brick
+ CACHED=$?
+ # Kill a brick process
+ kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}$CACHED.pid`;
+fi
+
+## trigger a lookup
+ls -l $M0/2 2>/dev/null
+
+## dd with conv=excl must fail because the file already exists.
+
+dd if=/dev/zero of=$M0/2 bs=1 count=1 conv=excl 2>/dev/null
+EXPECT "1" echo $?
+
+cleanup;
diff --git a/tests/bugs/bug-895235.t b/tests/bugs/bug-895235.t
new file mode 100644
index 000000000..0764b50d4
--- /dev/null
+++ b/tests/bugs/bug-895235.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+
+TEST gluster volume profile $V0 start
+TEST dd of=$M0/a if=/dev/zero bs=1M count=1 oflag=append
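+#Flag FINODELK only if its maximum latency's integer part has 7 or more digits,
+#i.e. on the order of seconds in usec.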
+finodelk_max_latency=$($CLI volume profile $V0 info | grep FINODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+
+TEST [ -z $finodelk_max_latency ]
+
+cleanup;
diff --git a/tests/bugs/bug-896431.t b/tests/bugs/bug-896431.t
new file mode 100755
index 000000000..f968e59c1
--- /dev/null
+++ b/tests/bugs/bug-896431.t
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting cluster.subvols-per-directory as -5
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory -5
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory -5
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 0
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory 0
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory 0
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 4 (the total number of bricks)
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory 4
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory 4
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 2 (the total number of subvolumes)
+TEST $CLI volume set $V0 cluster.subvols-per-directory 2
+EXPECT '2' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 1
+TEST $CLI volume set $V0 subvols-per-directory 1
+EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
+## Start and create a pure replicate volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Replicate' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting cluster.subvols-per-directory as 8 for a replicate volume
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory 8
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory 8
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 1 for a replicate volume
+TEST $CLI volume set $V0 cluster.subvols-per-directory 1
+EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST $CLI volume set $V0 subvols-per-directory 1
+EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
+## Start and create a pure stripe volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 stripe 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Stripe' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting cluster.subvols-per-directory as 8 for a stripe volume
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory 8
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory 8
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 1 for a stripe volume
+TEST $CLI volume set $V0 cluster.subvols-per-directory 1
+EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST $CLI volume set $V0 subvols-per-directory 1
+EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-902610.t b/tests/bugs/bug-902610.t
new file mode 100755
index 000000000..00ba03adf
--- /dev/null
+++ b/tests/bugs/bug-902610.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+## Layout-spread is set to 3, but only 2 subvols are up, so the layout should split 50-50
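+## get_layout <dir-on-brick0> <dir-on-brick1>: returns 0 when the two layouts
+## split the hash range evenly between the bricks, 1 otherwise.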
+function get_layout()
+{
+ layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1|grep dht |cut -d = -f2`
+ layout2=`getfattr -n trusted.glusterfs.dht -e hex $2 2>&1|grep dht |cut -d = -f2`
+
+ if [ $layout1 == "0x0000000100000000000000007ffffffe" ]
+ then
+ if [ $layout2 == "0x00000001000000007fffffffffffffff" ]
+ then
+ return 0
+ else
+ return 1
+ fi
+ fi
+
+ if [ $layout2 == "0x0000000100000000000000007ffffffe" ]
+ then
+ if [ $layout1 == "0x00000001000000007fffffffffffffff" ]
+ then
+ return 0
+ else
+ return 1
+ fi
+ fi
+ return 1
+}
+
+BRICK_COUNT=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+## set subvols-per-dir option
+TEST $CLI volume set $V0 subvols-per-directory 3
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+TEST ls -l $M0
+
+## kill 2 bricks so that the number of available subvols is less than the spread count
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}2.pid`;
+kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}3.pid`;
+
+mkdir $M0/dir1 2>/dev/null
+
+get_layout $B0/${V0}0/dir1 $B0/${V0}1/dir1
+EXPECT "0" echo $?
+
+cleanup;
diff --git a/tests/bugs/bug-903336.t b/tests/bugs/bug-903336.t
new file mode 100644
index 000000000..c1f91312a
--- /dev/null
+++ b/tests/bugs/bug-903336.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST setfattr -n trusted.io-stats-dump -v /tmp $M0
+cleanup
diff --git a/tests/bugs/bug-904065.t b/tests/bugs/bug-904065.t
new file mode 100755
index 000000000..505854d9b
--- /dev/null
+++ b/tests/bugs/bug-904065.t
@@ -0,0 +1,90 @@
+#!/bin/bash
+#
+# This test does not use 'showmount' from the nfs-utils package; it would
+# require setting up a portmapper (either rpcbind or portmap, depending on the
+# Linux distribution used for testing). The persistency of the rmtab should not
+# affect the current showmount outputs, so existing regression tests should be
+# sufficient.
+#
+
+# count the lines of a file, return 0 if the file does not exist
+function count_lines()
+{
+ if [ -e "$1" ]
+ then
+ wc -l < $1
+ else
+ echo 0
+ fi
+}
+
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+# glusterfs/nfs needs some time to start up in the background
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+# before mounting, the rmtab should be empty
+EXPECT '0' count_lines /var/lib/glusterd/nfs/rmtab
+
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0
+# the output would look similar to:
+#
+# hostname-0=172.31.122.104
+# mountpoint-0=/ufo
+#
+EXPECT '2' count_lines /var/lib/glusterd/nfs/rmtab
+
+# duplicate mounts should not be recorded (client could have crashed)
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N1
+EXPECT '2' count_lines /var/lib/glusterd/nfs/rmtab
+
+# removing a mount (even if there are two) should remove the entry
+TEST umount $N1
+EXPECT '0' count_lines /var/lib/glusterd/nfs/rmtab
+
+# unmounting the other mount should work flawlessly
+TEST umount $N0
+EXPECT '0' count_lines /var/lib/glusterd/nfs/rmtab
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $M0
+
+# we'll create a fake rmtab here, similar to what another storage server would do,
+# using an invalid IP address to prevent (unlikely) collisions on the test machine
+cat << EOF > $M0/rmtab
+hostname-0=127.0.0.256
+mountpoint-0=/ufo
+EOF
+EXPECT '2' count_lines $M0/rmtab
+
+# reconfigure merges the rmtab with the one on the volume
+TEST gluster volume set $V0 nfs.mount-rmtab $M0/rmtab
+
+# glusterfs/nfs needs some time to restart
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+# a new mount should be added to the rmtab, not overwrite existing ones
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0
+EXPECT '4' count_lines $M0/rmtab
+
+TEST umount $N0
+EXPECT '2' count_lines $M0/rmtab
+
+# TODO: nfs/reconfigure() is never called and is therefore disabled. When the
+# NFS-server supports reloading and does not get restarted anymore, we should
+# add a test that includes the merging of entries in the old rmtab with the new
+# rmtab.
+
+cleanup
diff --git a/tests/bugs/bug-904300.t b/tests/bugs/bug-904300.t
new file mode 100755
index 000000000..4276ee229
--- /dev/null
+++ b/tests/bugs/bug-904300.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
+# 1-8
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0;
+TEST $CLI volume start $V0
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr $H0:/$V0 $N0
+TEST mkdir $N0/dir1
+TEST umount $N0
+
+#
+# Case 1: Allow "dir1" to be mounted only from 127.0.0.1
+# 9-12
+TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.1)"\"
+EXPECT_WITHIN 20 2 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0/dir1 $N0
+TEST umount $N0
+
+#
+# Case 2: Allow "dir1" to be mounted only from 8.8.8.8. This is
+# a negative test case therefore the mount should fail.
+# 13-16
+TEST $CLI volume set $V0 export-dir \""/dir1(8.8.8.8)"\"
+EXPECT_WITHIN 20 2 is_nfs_export_available
+
+TEST ! mount -t nfs -o vers=3,nolock,soft,intr $H0:/$V0/dir1 $N0
+TEST ! umount $N0
+
+
+# Case 3: Variation of test case1. Here we are checking with hostname
+# instead of ip address.
+# 17-20
+TEST $CLI volume set $V0 export-dir \""/dir1($H0)"\"
+EXPECT_WITHIN 20 2 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr $H0:/$V0/dir1 $N0
+TEST umount $N0
+
+# Case 4: Variation of test case1. Here we are checking with IP range
+# 21-24
+TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.0/24)"\"
+EXPECT_WITHIN 20 2 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0/dir1 $N0
+TEST umount $N0
+
+## Finish up
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-905307.t b/tests/bugs/bug-905307.t
new file mode 100644
index 000000000..d81d81c9f
--- /dev/null
+++ b/tests/bugs/bug-905307.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+#test functionality of post-op-delay-secs
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+
+#Strings should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
+
+#-ve ints should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
+
+#INT_MAX+1 should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
+
+#floats should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
+
+#min val 0 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
+EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
+
+#max val 2147483647 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
+EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
+
+#some middle val in range 2147 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
+EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
+cleanup;
diff --git a/tests/bugs/bug-905864.c b/tests/bugs/bug-905864.c
new file mode 100644
index 000000000..ed09b6e2b
--- /dev/null
+++ b/tests/bugs/bug-905864.c
@@ -0,0 +1,82 @@
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+
+
+pthread_t th[5] = {0};
+void
+flock_init (struct flock *f, short int type, off_t start, off_t len)
+{
+ f->l_type = type;
+ f->l_start = start;
+ f->l_len = len;
+}
+
+int
+flock_range_in_steps (int fd, int is_set, short l_type,
+ int start, int end, int step)
+{
+ int ret = 0;
+ int i = 0;
+ struct flock f = {0,};
+
+ for (i = start; i+step < end; i += step) {
+ flock_init (&f, l_type, i, step);
+ ret = fcntl (fd, (is_set)? F_SETLKW:F_GETLK, &f);
+ if (ret) {
+ perror ("fcntl");
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+void *
+random_locker (void *arg)
+{
+ int fd = *(int *)arg;
+ int i = 0;
+ int is_set = 0;
+
+ /* use thread id to choose GETLK or SETLK operation*/
+ is_set = pthread_self () % 2;
+ (void)flock_range_in_steps (fd, is_set, F_WRLCK, 0, 400, 1);
+
+ return NULL;
+}
+
+
+int main (int argc, char **argv)
+{
+ int fd = -1;
+ int ret = 1;
+ int i = 0;
+ char *fname = NULL;
+
+ if (argc < 2)
+ goto out;
+
+ fname = argv[1];
+ fd = open (fname, O_RDWR);
+ if (fd == -1) {
+ perror ("open");
+ goto out;
+ }
+
+ ret = flock_range_in_steps (fd, 1, F_WRLCK, 0, 2000, 2);
+ for (i = 0; i < 5; i++) {
+ pthread_create (&th[i], NULL, random_locker, (void *) &fd);
+ }
+ ret = flock_range_in_steps (fd, 1, F_WRLCK, 0, 2000, 2);
+ for (i = 0; i < 5; i++) {
+ pthread_join (th[i], NULL);
+ }
+out:
+ if (fd != -1)
+ close (fd);
+
+ return ret;
+}
diff --git a/tests/bugs/bug-905864.t b/tests/bugs/bug-905864.t
new file mode 100644
index 000000000..44bb469f2
--- /dev/null
+++ b/tests/bugs/bug-905864.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1;
+
+TEST touch $M0/file1;
+
+#the following C program tries to expose race(s), if any, in the F_GETLK/F_SETLKW
+#codepaths of the locks xlator
+gcc -lpthread -g3 $(dirname $0)/bug-905864.c -o $(dirname $0)/bug-905864
+$(dirname $0)/bug-905864 $M0/file1 &
+$(dirname $0)/bug-905864 $M1/file1;
+wait
+rm -f $(dirname $0)/bug-905864
+
+EXPECT $(brick_count $V0) online_brick_count
+
+cleanup
+
diff --git a/tests/bugs/bug-906646.t b/tests/bugs/bug-906646.t
new file mode 100644
index 000000000..0e6a3bcb6
--- /dev/null
+++ b/tests/bugs/bug-906646.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+REPLICA=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
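+# echo 1 if the xattr is absent on the path ("No such attribute"), 0 if present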
+function xattr_query_check()
+{
+ local path=$1
+ local xa_name=$2
+
+ local ret=`getfattr -m . -n $xa_name $path 2>&1 | grep -o "$xa_name: No such attribute" | wc -l`
+ echo $ret
+}
+
+function set_xattr()
+{
+ local path=$1
+ local xa_name=$2
+ local xa_val=$3
+
+ setfattr -n $xa_name -v $xa_val $path
+ echo $?
+}
+
+function remove_xattr()
+{
+ local path=$1
+ local xa_name=$2
+
+ setfattr -x $xa_name $path
+ echo $?
+}
+
+f=f00f
+pth=$M0/$f
+
+touch $pth
+
+# fetch backend paths
+backend_paths=`get_backend_paths $pth`
+
+# convert it into an array
+backend_paths_array=($backend_paths)
+
+# setxattr xattr for this file
+EXPECT 0 set_xattr $pth "trusted.name" "test"
+
+# confirm the set on backend
+EXPECT 0 xattr_query_check ${backend_paths_array[0]} "trusted.name"
+EXPECT 0 xattr_query_check ${backend_paths_array[1]} "trusted.name"
+
+brick_path=`echo ${backend_paths_array[0]} | sed -n 's/\(.*\)\/'$f'/\1/p'`
+brick_id=`$CLI volume info $V0 | grep "Brick[[:digit:]]" | grep -n $brick_path | cut -f1 -d:`
+
+# Kill a brick process
+TEST kill_brick $V0 $H0 $brick_path
+
+# remove the xattr from the mount point
+EXPECT 0 remove_xattr $pth "trusted.name"
+
+# we killed the brick holding ${backend_paths_array[0]} - so expect the xattr
+# to still be present on that backend
+EXPECT 0 xattr_query_check ${backend_paths_array[0]} "trusted.name"
+EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name"
+
+# restart the brick process
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 `expr $brick_id - 1`
+
+stat $pth
+
+# check backends - xattr should not be present anywhere
+EXPECT 1 xattr_query_check ${backend_paths_array[0]} "trusted.name"
+EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name"
+
+cleanup;
diff --git a/tests/bugs/bug-907072.t b/tests/bugs/bug-907072.t
new file mode 100755
index 000000000..49b477767
--- /dev/null
+++ b/tests/bugs/bug-907072.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../fileio.rc
+. $(dirname $0)/../dht.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3};
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/test;
+
+OLD_LAYOUT0=`get_layout $B0/${V0}0/test`;
+OLD_LAYOUT1=`get_layout $B0/${V0}1/test`;
+OLD_LAYOUT2=`get_layout $B0/${V0}2/test`;
+OLD_LAYOUT3=`get_layout $B0/${V0}3/test`;
+
+TEST killall glusterfsd;
+
+# Delete directory on one brick
+TEST rm -rf $B0/${V0}1/test;
+
+# And only layout xattr on another brick
+TEST setfattr -x trusted.glusterfs.dht $B0/${V0}2/test;
+
+TEST $CLI volume start $V0 force;
+
+TEST umount $M0;
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST stat $M0/test;
+
+NEW_LAYOUT0=`get_layout $B0/${V0}0/test`;
+NEW_LAYOUT1=`get_layout $B0/${V0}1/test`;
+NEW_LAYOUT2=`get_layout $B0/${V0}2/test`;
+NEW_LAYOUT3=`get_layout $B0/${V0}3/test`;
+
+EXPECT $OLD_LAYOUT0 echo $NEW_LAYOUT0;
+EXPECT $OLD_LAYOUT1 echo $NEW_LAYOUT1;
+EXPECT $OLD_LAYOUT2 echo $NEW_LAYOUT2;
+EXPECT $OLD_LAYOUT3 echo $NEW_LAYOUT3;
diff --git a/tests/bugs/bug-908146.t b/tests/bugs/bug-908146.t
new file mode 100755
index 000000000..87b456e6e
--- /dev/null
+++ b/tests/bugs/bug-908146.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
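+# count the open fds for a file on a brick by looking up its gfid in a brick
+# statedump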
+function get_fd_count {
+ local vol=$1
+ local host=$2
+ local brick=$3
+ local fname=$4
+ local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
+ local statedump=$(generate_brick_statedump $vol $host $brick)
+ local count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+ rm -f $statedump
+ echo $count
+}
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+
+TEST touch $M0/a
+
+exec 4>"$M0/a"
+exec 5>"$M1/a"
+EXPECT "2" get_fd_count $V0 $H0 $B0/${V0}0 a
+
+exec 4>&-
+EXPECT "1" get_fd_count $V0 $H0 $B0/${V0}0 a
+
+exec 5>&-
+EXPECT "0" get_fd_count $V0 $H0 $B0/${V0}0 a
+
+cleanup
diff --git a/tests/bugs/bug-912297.t b/tests/bugs/bug-912297.t
new file mode 100755
index 000000000..f5a5babf5
--- /dev/null
+++ b/tests/bugs/bug-912297.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting owner-uid as -12
+TEST ! $CLI volume set $V0 owner-uid -12
+EXPECT '' volinfo_field $V0 'storage.owner-uid'
+
+## Setting owner-gid as -5
+TEST ! $CLI volume set $V0 owner-gid -5
+EXPECT '' volinfo_field $V0 'storage.owner-gid'
+
+## Setting owner-uid as 36
+TEST $CLI volume set $V0 owner-uid 36
+EXPECT '36' volinfo_field $V0 'storage.owner-uid'
+
+## Setting owner-gid as 36
+TEST $CLI volume set $V0 owner-gid 36
+EXPECT '36' volinfo_field $V0 'storage.owner-gid'
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-912564.t b/tests/bugs/bug-912564.t
new file mode 100755
index 000000000..b24268fbc
--- /dev/null
+++ b/tests/bugs/bug-912564.t
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+# Test that the rsync and "extra" regexes cause rename-in-place without
+# creating linkfiles, when they're supposed to. Without the regex we'd have a
+# 1/4 chance of each file being assigned to the right place, so with 16 files
+# we have a 1/2^32 chance of getting the correct result by accident.
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
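+# Count DHT linkfiles (sticky-bit files) on bricks ${1}$2 through ${1}$3.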
+function count_linkfiles {
+ local i
+ local count=0
+ for i in $(seq $2 $3); do
+ x=$(find $1$i -perm -1000 | wc -l)
+ # Divide by two because of the .glusterfs links.
+ count=$((count+x/2))
+ done
+ echo $count
+}
+
+# This function only exists to get around quoting difficulties in TEST.
+function set_regex {
+ $CLI volume set $1 cluster.extra-hash-regex '^foo(.+)bar$'
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+mkdir -p $B0/${V0}0
+mkdir -p $B0/${V0}1
+mkdir -p $B0/${V0}2
+mkdir -p $B0/${V0}3
+
+# Create and start a volume.
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V0
+EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+
+# Mount it.
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+# Make sure the rsync regex works, by verifying that no linkfiles are
+# created.
+rm -f $M0/file*
+for i in $(seq 0 15); do
+ fn=$(printf file%x $i)
+ tmp_fn=$(printf .%s.%d $fn $RANDOM)
+ echo testing > $M0/$tmp_fn
+ mv $M0/$tmp_fn $M0/$fn
+done
+lf=$(count_linkfiles $B0/$V0 0 3)
+TEST [ "$lf" -eq "0" ]
+
+# Make sure that linkfiles *are* created for normal files.
+rm -f $M0/file*
+for i in $(seq 0 15); do
+ fn=$(printf file%x $i)
+ tmp_fn=$(printf foo%sbar $fn)
+ echo testing > $M0/$tmp_fn
+ mv $M0/$tmp_fn $M0/$fn
+done
+lf=$(count_linkfiles $B0/$V0 0 3)
+TEST [ "$lf" -ne "0" ]
+
+# Make sure that setting an extra regex suppresses the linkfiles.
+TEST set_regex $V0
+rm -f $M0/file*
+for i in $(seq 0 15); do
+ fn=$(printf file%x $i)
+ tmp_fn=$(printf foo%sbar $fn)
+ echo testing > $M0/$tmp_fn
+ mv $M0/$tmp_fn $M0/$fn
+done
+lf=$(count_linkfiles $B0/$V0 0 3)
+TEST [ "$lf" -eq "0" ]
+
+# Re-test the rsync regex, to make sure the extra one didn't break it.
+rm -f $M0/file*
+for i in $(seq 0 15); do
+ fn=$(printf file%x $i)
+ tmp_fn=$(printf .%s.%d $fn $RANDOM)
+ echo testing > $M0/$tmp_fn
+ mv $M0/$tmp_fn $M0/$fn
+done
+lf=$(count_linkfiles $B0/$V0 0 3)
+TEST [ "$lf" -eq "0" ]
+
+cleanup
diff --git a/tests/bugs/bug-913051.t b/tests/bugs/bug-913051.t
new file mode 100644
index 000000000..69e90cf66
--- /dev/null
+++ b/tests/bugs/bug-913051.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+
+cleanup;
+
+#Test that afr opens the file on the bricks that were offline at the time of
+#open, once those bricks come back online. This tests writev and readv
+#triggering open-fd-fix in afr.
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/a
+TEST touch $M0/dir/b
+echo abc > $M0/dir/b
+
+TEST wfd=`fd_available`
+TEST fd_open $wfd "w" $M0/dir/a
+TEST rfd=`fd_available`
+TEST fd_open $rfd "r" $M0/dir/b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+#check that the files are not opened on brick-0
+realpatha=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/a")
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha"
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/a
+
+realpathb=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/b")
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb"
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/b
+
+#attempt self-heal so that the files are created on brick-0
+
+TEST ls -l $M0/dir/a
+TEST ls -l $M0/dir/b
+
+#trigger writev for attempting open-fd-fix in afr
+TEST fd_write $wfd "open sesame"
+
+#trigger readv for attempting open-fd-fix in afr
+TEST fd_cat $rfd
+
+EXPECT_WITHIN 20 "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/a
+EXPECT_WITHIN 20 "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/b
+
+TEST fd_close $wfd
+TEST fd_close $rfd
+cleanup;
diff --git a/tests/bugs/bug-913487.t b/tests/bugs/bug-913487.t
new file mode 100644
index 000000000..2095903d9
--- /dev/null
+++ b/tests/bugs/bug-913487.t
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST ! $CLI volume set $V0 performance.open-behind off;
+
+TEST pidof glusterd;
+
+cleanup;
diff --git a/tests/bugs/bug-913544.t b/tests/bugs/bug-913544.t
new file mode 100644
index 000000000..790bc0898
--- /dev/null
+++ b/tests/bugs/bug-913544.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+#simulate a split-brain of a file and do truncate. This should not crash the mount point
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+TEST touch a
+#simulate no-changelog data split-brain
+echo "abc" > $B0/${V0}1/a
+echo "abcd" > $B0/${V0}0/a
+TEST ! truncate -s 0 a
+TEST ls
+cd
+
+cleanup
diff --git a/tests/bugs/bug-913555.t b/tests/bugs/bug-913555.t
new file mode 100755
index 000000000..f58d7bd6d
--- /dev/null
+++ b/tests/bugs/bug-913555.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Test that a volume becomes unwritable when the cluster loses quorum.
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../cluster.rc
+
+
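+# echo 0 if the mount is accessible (df succeeds), non-zero otherwise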
+function check_fs {
+ df $1 &> /dev/null
+ echo $?
+}
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function glusterfsd_count {
+ pidof glusterfsd | wc -w;
+}
+
+cleanup;
+
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+
+EXPECT_WITHIN 20 2 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+# Kill one pseudo-node, make sure the others survive and volume stays up.
+TEST kill_node 3;
+EXPECT_WITHIN 20 1 check_peers;
+EXPECT 0 check_fs $M0;
+EXPECT 2 glusterfsd_count;
+
+# Kill another pseudo-node, make sure the last one dies and volume goes down.
+TEST kill_node 2;
+EXPECT_WITHIN 20 0 check_peers
+EXPECT 1 check_fs $M0;
+EXPECT 0 glusterfsd_count; # quorum is lost, so the glusterfsd of the
+                           # surviving glusterd must be dead as well
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN 20 3 glusterfsd_count; # restore quorum, all ok
+EXPECT_WITHIN 5 0 check_fs $M0;
+
+cleanup
diff --git a/tests/bugs/bug-915280.t b/tests/bugs/bug-915280.t
new file mode 100755
index 000000000..a1f92f201
--- /dev/null
+++ b/tests/bugs/bug-915280.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+sleep 3
+
+MOUNTDIR=$N0;
+TEST mount -t nfs -o vers=3,nolock,soft,timeo=30,retrans=1 $H0:/$V0 $N0
+TEST touch $N0/testfile
+
+TEST $CLI volume set $V0 debug.error-gen client
+TEST $CLI volume set $V0 debug.error-fops stat
+TEST $CLI volume set $V0 debug.error-failure 100
+
+sleep 1
+
+pid_file=$(read_nfs_pidfile);
+
+getfacl $N0/testfile 2>/dev/null
+
+nfs_pid=$(get_nfs_pid);
+if [ ! $nfs_pid ]
+then
+ nfs_pid=0;
+fi
+
+TEST [ $nfs_pid -eq $pid_file ]
+
+TEST umount $MOUNTDIR -l
+
+cleanup;
diff --git a/tests/bugs/bug-915554.t b/tests/bugs/bug-915554.t
new file mode 100755
index 000000000..beb669f8c
--- /dev/null
+++ b/tests/bugs/bug-915554.t
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Bug <915554>
+#
+# This test checks for a condition where a rebalance migrates a file and does
+# not preserve the original file size. This can occur due to hole preservation
+# logic in the file migration code. If a file size is aligned to a disk sector
+# boundary (512b) and the tail portion of the file is zero-filled, the file
+# may end up truncated to the end of the last data region in the file.
+#
+###
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../dht.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+BRICK_COUNT=3
+# create, start and mount a three-brick DHT volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 --gid-timeout=-1 -s $H0 --volfile-id $V0 $M0;
+
+i=1
+# Write some data to a file and extend such that the file is sparse to a sector
+# aligned boundary.
+echo test > $M0/$i
+TEST truncate --size=1m $M0/$i
+
+# cache the original size
+SIZE1=`stat -c %s $M0/$i`
+
+# rename till file gets a linkfile
+
+while [ $i -ne 0 ]
+do
+ test=`mv $M0/$i $M0/$(( $i+1 )) 2>/dev/null`
+ if [ $? -ne 0 ]
+ then
+ echo "rename failed"
+ break
+ fi
+ let i++
+ file_has_linkfile $i
+ has_link=$?
+ if [ $has_link -eq 2 ]
+ then
+ break;
+ fi
+done
+
+# start a rebalance (force option to override checks) to trigger migration of
+# the file
+
+TEST $CLI volume rebalance $V0 start force
+
+# wait up to 30 secs for the rebalance to complete
+
+EXPECT_WITHIN 30 "0" rebalance_completed
+
+# validate the file size after the migration
+SIZE2=`stat -c %s $M0/$i`
+
+TEST [ $SIZE1 -eq $SIZE2 ]
+
+TEST rm -f $M0/$i
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/bug-916226.t b/tests/bugs/bug-916226.t
new file mode 100644
index 000000000..2abfa1fc6
--- /dev/null
+++ b/tests/bugs/bug-916226.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir{1..10};
+TEST touch $M0/dir{1..10}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
+
+cleanup;
diff --git a/tests/bugs/bug-916549.t b/tests/bugs/bug-916549.t
new file mode 100755
index 000000000..d6a45b827
--- /dev/null
+++ b/tests/bugs/bug-916549.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd;
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+TEST $CLI volume start $V0;
+
+pid_file=$(ls /var/lib/glusterd/vols/$V0/run);
+brick_pid=$(cat /var/lib/glusterd/vols/$V0/run/$pid_file);
+
+
+kill -SIGKILL $brick_pid;
+TEST $CLI volume start $V0 force;
+TEST process_leak_count $(pidof glusterd);
+
+cleanup;
diff --git a/tests/bugs/bug-918437-sh-mtime.t b/tests/bugs/bug-918437-sh-mtime.t
new file mode 100644
index 000000000..080956f51
--- /dev/null
+++ b/tests/bugs/bug-918437-sh-mtime.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
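+# print the mtime of a file with second precision (fractional part stripped)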
+function get_mtime {
+ local f=$1
+ stat $f | grep Modify | awk '{print $2 $3}' | cut -f1 -d'.'
+}
+cleanup;
+
+## Tests if mtime is correct after self-heal.
+TEST glusterd
+TEST pidof glusterd
+TEST mkdir -p $B0/gfs0/brick0{1,2}
+TEST $CLI volume create $V0 replica 2 transport tcp $H0:$B0/gfs0/brick01 $H0:$B0/gfs0/brick02
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --direct-io-mode=enable
+# file 'a' is healed from brick02 to brick01 whereas file 'b' is healed from
+# brick01 to brick02
+
+TEST cp -p /etc/passwd $M0/a
+TEST cp -p /etc/passwd $M0/b
+
+#Store mtimes before self-heals
+TEST modify_atstamp=$(get_mtime $B0/gfs0/brick02/a)
+TEST modify_btstamp=$(get_mtime $B0/gfs0/brick02/b)
+
+TEST $CLI volume stop $V0
+TEST gf_rm_file_and_gfid_link $B0/gfs0/brick01 a
+TEST gf_rm_file_and_gfid_link $B0/gfs0/brick02 b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+
+find $M0 | xargs stat 1>/dev/null
+
+TEST modify_atstamp1=$(get_mtime $B0/gfs0/brick01/a)
+TEST modify_atstamp2=$(get_mtime $B0/gfs0/brick02/a)
+EXPECT $modify_atstamp echo $modify_atstamp1
+EXPECT $modify_atstamp echo $modify_atstamp2
+
+TEST modify_btstamp1=$(get_mtime $B0/gfs0/brick01/b)
+TEST modify_btstamp2=$(get_mtime $B0/gfs0/brick02/b)
+EXPECT $modify_btstamp echo $modify_btstamp1
+EXPECT $modify_btstamp echo $modify_btstamp2
+cleanup;
diff --git a/tests/bugs/bug-921072.t b/tests/bugs/bug-921072.t
new file mode 100755
index 000000000..e101d5b46
--- /dev/null
+++ b/tests/bugs/bug-921072.t
@@ -0,0 +1,118 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
+#1
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN 20 1 is_nfs_export_available
+TEST mount -t nfs -o vers=3,nolock,soft,intr $H0:/$V0 $N0
+TEST umount $N0
+
+# based on ip addresses (1-4)
+# case 1: allow only localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+TEST umount $N0
+
+# case 2: allow only non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1
+EXPECT_WITHIN 20 1 is_nfs_export_available
+#11
+TEST ! mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+TEST $CLI volume reset --mode=script $V0
+# case 3: reject only localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST ! mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+
+# case 4: reject only non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+TEST umount $N0
+
+
+
+# NEED TO CHECK BOTH IP AND NAME BASED AUTH.
+# CASES WITH NFS.ADDR-NAMELOOKUP ON (5-12)
+TEST $CLI volume reset --mode=script $V0
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+EXPECT_WITHIN 20 1 is_nfs_export_available
+#20
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+TEST umount $N0
+
+# case 5: allow only localhost
+TEST $CLI volume set $V0 nfs.rpc-auth-allow localhost
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+TEST umount $N0
+
+# case 6: allow only somehost
+TEST $CLI volume set $V0 nfs.rpc-auth-allow somehost
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST ! mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+
+# case 7: reject only localhost
+TEST $CLI volume reset --mode=script $V0
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-reject localhost
+EXPECT_WITHIN 20 1 is_nfs_export_available
+#30
+TEST ! mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+
+# case 8: reject only somehost
+TEST $CLI volume set $V0 nfs.rpc-auth-reject somehost
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+TEST umount $N0
+
+# based on ip addresses: repeat of cases 1-4
+# case 9: allow only localhost ip
+TEST $CLI volume reset --mode=script $V0
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+TEST umount $N0
+
+# case 10: allow a non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1
+EXPECT_WITHIN 20 1 is_nfs_export_available
+#40
+TEST ! mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+
+# case 11: reject only localhost ip
+TEST $CLI volume reset --mode=script $V0
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST ! mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+
+# case 12: reject only non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1
+EXPECT_WITHIN 20 1 is_nfs_export_available
+
+TEST mount -t nfs -o vers=3,nolock,soft,intr localhost:/$V0 $N0
+TEST umount $N0
+
+TEST $CLI volume stop --mode=script $V0
+#49
+TEST $CLI volume delete --mode=script $V0
+cleanup
diff --git a/tests/bugs/bug-921231.t b/tests/bugs/bug-921231.t
new file mode 100644
index 000000000..db9cf3b6f
--- /dev/null
+++ b/tests/bugs/bug-921231.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# This test writes to the same file with 2 fds and checks that eager-lock does
+# not cause extra delay because of post-op-delay-secs
+cleanup;
+
+function write_to_file {
+ dd of=$M0/1 if=/dev/zero bs=1M count=128 oflag=append 2>&1 >/dev/null
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 post-op-delay-secs 3
+TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 ensure-durability off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+write_to_file &
+write_to_file &
+wait
+#Fail if the MAX [F]INODELK fop latency is of the order of seconds (the egrep
+#for 7+ digits matches any latency of at least 10^6 usec, i.e. one second).
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+TEST [ -z $inodelk_max_latency ]
+
+cleanup;
diff --git a/tests/bugs/bug-921408.t b/tests/bugs/bug-921408.t
new file mode 100755
index 000000000..ef2b4fb21
--- /dev/null
+++ b/tests/bugs/bug-921408.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../fileio.rc
+. $(dirname $0)/../dht.rc
+
+cleanup;
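+# poll for up to $1 seconds until the rebalance completes; return 0 on
+# completion, 1 on timeout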
+wait_check_status ()
+{
+ n=0
+ while [ $n -lt $1 ]
+ do
+ ret=$(rebalance_completed)
+ if [ $ret == "0" ]
+ then
+ return 0;
+ else
+ sleep 1
+ n=`expr $n + 1`;
+ fi
+ done
+ return 1;
+}
+
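+# keep adding bricks (starting at suffix $1) and running fix-layout until the
+# layout of the first brick changes; return 0 once it changes, 1 otherwise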
+addbr_rebal_till_layout_change()
+{
+ val=1
+ l=$1
+ i=1
+ while [ $i -lt 5 ]
+ do
+ $CLI volume add-brick $V0 $H0:$B0/${V0}$l &>/dev/null
+ $CLI volume rebalance $V0 fix-layout start &>/dev/null
+ wait_check_status 15
+ if [ $? -eq 1 ]
+ then
+ break
+ fi
+ NEW_LAYOUT=`get_layout $B0/${V0}0`
+ if [ $OLD_LAYOUT == $NEW_LAYOUT ]
+ then
+ i=`expr $i + 1`;
+ l=`expr $l + 1`;
+ else
+ val=0
+ break
+ fi
+ done
+ return $val
+}
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 subvols-per-directory 1
+TEST $CLI volume start $V0
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/test
+TEST touch $M0/test/test
+
+fd=`fd_available`
+TEST fd_open $fd "rw" $M0/test/test
+
+OLD_LAYOUT=`get_layout $B0/${V0}0`
+
+addbr_rebal_till_layout_change 1
+
+TEST [ $? -eq 0 ]
+
+for i in $(seq 1 1000)
+do
+ ls -l $M0/ >/dev/null
+ ret=$?
+ if [ $ret != 0 ]
+ then
+ break
+ fi
+done
+
+TEST [ $ret == 0 ];
+TEST fd_close $fd;
+
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/bug-924075.t b/tests/bugs/bug-924075.t
new file mode 100755
index 000000000..f4e03e33a
--- /dev/null
+++ b/tests/bugs/bug-924075.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+#FIXME: there is another patch which moves the following function into
+#include.rc
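+# return the number of deleted-but-still-open fds of the process, so TEST
+# passes only when there are no leaked fds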
+function process_leak_count ()
+{
+ local pid=$1;
+ return $(ls -lh /proc/$pid/fd | grep "(deleted)" | wc -l)
+}
+
+TEST glusterd;
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+TEST $CLI volume start $V0;
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+mount_pid=$(get_mount_process_pid $V0);
+TEST process_leak_count $mount_pid;
+
+cleanup;
diff --git a/tests/bugs/bug-924265.t b/tests/bugs/bug-924265.t
new file mode 100755
index 000000000..13491356d
--- /dev/null
+++ b/tests/bugs/bug-924265.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Test that setting cluster.dht-xattr-name works, and that DHT consistently
+# uses the specified name instead of the default.
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# We only care about the exit code, so keep it quiet.
+function silent_getfattr {
+ getfattr $* &> /dev/null
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+mkdir -p $B0/${V0}0
+
+# Create a volume and set the option.
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 cluster.dht-xattr-name trusted.foo.bar
+
+# Start and mount the volume.
+TEST $CLI volume start $V0
+EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+# Create a directory and make sure it has the right xattr.
+mkdir $M0/test
+TEST ! silent_getfattr -n trusted.glusterfs.dht $B0/${V0}0/test
+TEST silent_getfattr -n trusted.foo.bar $B0/${V0}0/test
+
+cleanup
diff --git a/tests/bugs/bug-927616.t b/tests/bugs/bug-927616.t
new file mode 100755
index 000000000..22b20aff2
--- /dev/null
+++ b/tests/bugs/bug-927616.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 performance.open-behind off;
+TEST $CLI volume start $V0
+
+sleep 1;
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+sleep 1;
+
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0;
+
+TEST mkdir $M0/dir;
+
+mkdir $M0/other;
+cp /etc/passwd $M0/;
+cp $M0/passwd $M0/file;
+chmod 600 $M0/file;
+
+chown -R nfsnobody:nfsnobody $M0/dir;
+
+TEST $CLI volume set $V0 server.root-squash on;
+
+sleep 1;
+
+# tests should fail.
+touch $M0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+touch $N0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+TEST $CLI volume set $V0 server.root-squash off;
+
+sleep 1;
+
+# tests should pass.
+touch $M0/foo 2>/dev/null;
+TEST [ $? -eq 0 ]
+touch $N0/bar 2>/dev/null;
+TEST [ $? -eq 0 ]
+mkdir $M0/new 2>/dev/null;
+TEST [ $? -eq 0 ]
+mkdir $N0/old 2>/dev/null;
+TEST [ $? -eq 0 ]
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-948686.t b/tests/bugs/bug-948686.t
new file mode 100644
index 000000000..db9c198a9
--- /dev/null
+++ b/tests/bugs/bug-948686.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+cleanup;
+#setup cluster and test volume
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+
+EXPECT_WITHIN 20 2 check_peers;
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+#kill a node
+TEST kill_node 3
+
+#modify volume config to see change in volume-sync
+TEST $CLI_1 volume set $V0 write-behind off
+#add some files to the volume to see effect of volume-heal cmd
+TEST touch $M0/{1..100};
+TEST $CLI_1 volume stop $V0;
+TEST $glusterd_3;
+sleep 3;
+TEST $CLI_3 volume start $V0;
+TEST $CLI_2 volume stop $V0;
+TEST $CLI_2 volume delete $V0;
+
+cleanup;
+
+TEST glusterd;
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+TEST $CLI volume status $V0
+
+cleanup;
diff --git a/tests/bugs/bug-948729/bug-948729-force.t b/tests/bugs/bug-948729/bug-948729-force.t
new file mode 100644
index 000000000..d14e94061
--- /dev/null
+++ b/tests/bugs/bug-948729/bug-948729-force.t
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+V1=patchy1
+V2=patchy2
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN 20 1 check_peers;
+
+B3=/d/backends/3
+B4=/d/backends/4
+B5=/d/backends/5
+B6=/d/backends/6
+
+mkdir -p $B3 $B4 $B5 $B6
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+TEST truncate -s 16M $B4/brick4
+TEST truncate -s 16M $B5/brick5
+TEST truncate -s 16M $B6/brick6
+
+TEST LD1=`losetup --find --show $B1/brick1`
+TEST mkfs.xfs $LD1
+TEST LD2=`losetup --find --show $B2/brick2`
+TEST mkfs.xfs $LD2
+TEST LD3=`losetup --find --show $B3/brick3`
+TEST mkfs.xfs $LD3
+TEST LD4=`losetup --find --show $B4/brick4`
+TEST mkfs.xfs $LD4
+TEST LD5=`losetup --find --show $B5/brick5`
+TEST mkfs.xfs $LD5
+TEST LD6=`losetup --find --show $B6/brick6`
+TEST mkfs.xfs $LD6
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 $B4/$V0 $B5/$V0 $B6/$V0
+
+TEST mount -t xfs $LD1 $B1/$V0
+TEST mount -t xfs $LD2 $B2/$V0
+TEST mount -t xfs $LD3 $B3/$V0
+TEST mount -t xfs $LD4 $B4/$V0
+TEST mount -t xfs $LD5 $B5/$V0
+TEST mount -t xfs $LD6 $B6/$V0
+
+#Case 0: Parent directory of the brick is absent
+TEST ! $CLI_1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 force
+
+#Case 1: File system root is being used as brick directory
+TEST $CLI_1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0 force
+
+#Case 2: Brick directory contains only one component
+TEST $CLI_1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2 force
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $CLI_1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 force
+
+#add-brick tests
+TEST ! $CLI_1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 force
+TEST $CLI_1 volume add-brick $V0 $H1:$B3/$V0 force
+TEST $CLI_1 volume add-brick $V1 $H1:/$uuid3 force
+TEST $CLI_1 volume add-brick $V2 $H1:$B4/$V0/brick3 force
+
+#####replace-brick tests
+#FIX-ME: replace-brick does not work with the newly introduced cluster test
+#####framework
+
+rmdir /$uuid1 /$uuid2 /$uuid3;
+
+cleanup;
diff --git a/tests/bugs/bug-948729/bug-948729-mode-script.t b/tests/bugs/bug-948729/bug-948729-mode-script.t
new file mode 100644
index 000000000..541ca897d
--- /dev/null
+++ b/tests/bugs/bug-948729/bug-948729-mode-script.t
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+V1=patchy1
+V2=patchy2
+V3=patchy3
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN 20 1 check_peers;
+
+B3=/d/backends/3
+B4=/d/backends/4
+B5=/d/backends/5
+B6=/d/backends/6
+mkdir -p $B3 $B4 $B5 $B6
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+TEST truncate -s 16M $B4/brick4
+TEST truncate -s 16M $B5/brick5
+TEST truncate -s 16M $B6/brick6
+
+TEST LD1=`losetup --find --show $B1/brick1`
+TEST mkfs.xfs $LD1
+TEST LD2=`losetup --find --show $B2/brick2`
+TEST mkfs.xfs $LD2
+TEST LD3=`losetup --find --show $B3/brick3`
+TEST mkfs.xfs $LD3
+TEST LD4=`losetup --find --show $B4/brick4`
+TEST mkfs.xfs $LD4
+TEST LD5=`losetup --find --show $B5/brick5`
+TEST mkfs.xfs $LD5
+TEST LD6=`losetup --find --show $B6/brick6`
+TEST mkfs.xfs $LD6
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 $B4/$V0 $B5/$V0 $B6/$V0
+
+TEST mount -t xfs $LD1 $B1/$V0
+TEST mount -t xfs $LD2 $B2/$V0
+TEST mount -t xfs $LD3 $B3/$V0
+TEST mount -t xfs $LD4 $B4/$V0
+TEST mount -t xfs $LD5 $B5/$V0
+TEST mount -t xfs $LD6 $B6/$V0
+
+#Case 0: Parent directory of the brick is absent
+TEST ! $CLI_1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2
+
+#Case 1: File system root being used as brick directory
+TEST $CLI_1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0
+
+#Case 2: Brick directory contains only one component
+TEST $CLI_1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $CLI_1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2
+
+#add-brick tests
+TEST ! $CLI_1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3
+TEST $CLI_1 volume add-brick $V0 $H1:$B3/$V0
+TEST $CLI_1 volume add-brick $V1 $H1:/$uuid3
+TEST $CLI_1 volume add-brick $V2 $H1:$B4/$V0/brick3
+
+#####replace-brick tests
+#FIX-ME : replace-brick does not currently work in the newly introduced
+#####cluster test framework
+
+rmdir /$uuid1 /$uuid2 /$uuid3
+
+cleanup;
diff --git a/tests/bugs/bug-948729/bug-948729.t b/tests/bugs/bug-948729/bug-948729.t
new file mode 100644
index 000000000..f94db1ea0
--- /dev/null
+++ b/tests/bugs/bug-948729/bug-948729.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN 20 1 check_peers;
+
+B3=/d/backends/3
+
+mkdir -p $B3
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+
+TEST LD1=`losetup --find --show $B1/brick1`
+TEST mkfs.xfs $LD1
+TEST LD2=`losetup --find --show $B2/brick2`
+TEST mkfs.xfs $LD2
+TEST LD3=`losetup --find --show $B3/brick3`
+TEST mkfs.xfs $LD3
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0
+
+TEST mount -t xfs $LD1 $B1/$V0
+TEST mount -t xfs $LD2 $B2/$V0
+TEST mount -t xfs $LD3 $B3/$V0
+
+#Tests without --mode=script option
+cli1=$(echo $CLI_1 | sed 's/ --mode=script//')
+#Case 0: Parent directory of the brick is absent
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2
+
+#Case 1: File system root being used as brick directory
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+
+#Case 2: Brick directory contains only one component
+TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2
+
+#add-brick tests
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/b3
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0
+TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3
+TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3
+
+#####replace-brick tests
+#FIX-ME: Replace-brick does not work currently in the newly introduced cluster
+#####test framework.
+
+cleanup;
diff --git a/tests/bugs/bug-949242.t b/tests/bugs/bug-949242.t
new file mode 100644
index 000000000..717084673
--- /dev/null
+++ b/tests/bugs/bug-949242.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Bug 949242 - Test basic fallocate functionality.
+#
+# Run several commands to verify basic fallocate functionality. We verify that
+# fallocate creates and allocates blocks to a file. We also verify that the keep
+# size option does not modify the file size.
+###
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../fallocate.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# check for fallocate support before continuing the test
+require_fallocate -l 1m -n $M0/file && rm -f $M0/file
+
+# fallocate a file and verify blocks are allocated
+TEST fallocate -l 1m $M0/file
+blksz=`stat --printf=%B $M0/file`
+nblks=`stat --printf=%b $M0/file`
+TEST [ $(($blksz * $nblks)) -eq 1048576 ]
+
+TEST unlink $M0/file
+
+# truncate a file to a fixed size, fallocate and verify that the size does not
+# change
+TEST truncate --size=1m $M0/file
+TEST fallocate -l 2m -n $M0/file
+blksz=`stat --printf=%B $M0/file`
+nblks=`stat --printf=%b $M0/file`
+sz=`stat --printf=%s $M0/file`
+TEST [ $sz -eq 1048576 ]
+# Note that gluster currently incorporates a hack that caps the number of
+# blocks reported as allocated at the file size. We have allocated beyond the
+# file size here, so just check for a non-zero allocation to avoid setting a
+# land mine for if/when that behavior might change.
+TEST [ ! $(($blksz * $nblks)) -eq 0 ]
+
+TEST unlink $M0/file
+
+TEST umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/bug-949298.t b/tests/bugs/bug-949298.t
new file mode 100644
index 000000000..1394127ec
--- /dev/null
+++ b/tests/bugs/bug-949298.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI --xml volume info $V0
+
+cleanup;
diff --git a/tests/bugs/bug-949930.t b/tests/bugs/bug-949930.t
new file mode 100644
index 000000000..4a738befa
--- /dev/null
+++ b/tests/bugs/bug-949930.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+V1=patchy2
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume start $V1;
+
+TEST ! $CLI volume set $V0 performance.nfs.read-ahead blah
+EXPECT '' volume_option $V0 performance.nfs.read-ahead
+
+TEST $CLI volume set $V0 performance.nfs.read-ahead on
+EXPECT "on" volume_option $V0 performance.nfs.read-ahead
+
+EXPECT '' volume_option $V1 performance.nfs.read-ahead
+
+cleanup;
+
diff --git a/tests/bugs/bug-955588.t b/tests/bugs/bug-955588.t
new file mode 100755
index 000000000..3f0361167
--- /dev/null
+++ b/tests/bugs/bug-955588.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
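+# extract the brick host UUIDs from 'volume info --xml' and echo the first one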
+function get_brick_host_uuid()
+{
+ local vol=$1;
+ local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+ local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
+
+ echo $host_uuid_list | awk '{print $1}'
+}
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+
+uuid=`grep UUID /var/lib/glusterd/glusterd.info | cut -f2 -d=`
+EXPECT $uuid get_brick_host_uuid $V0
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-957877.t b/tests/bugs/bug-957877.t
new file mode 100644
index 000000000..23aefea25
--- /dev/null
+++ b/tests/bugs/bug-957877.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/f1
+TEST setfattr -n "user.foo" -v "test" $M0/f1
+
+BRICK=$B0"/${V0}1"
+
+TEST $CLI volume start $V0 force
+sleep 5
+TEST $CLI volume heal $V0
+
+# Wait for self-heal to complete
+EXPECT_WITHIN 30 '0' count_sh_entries $BRICK;
+
+TEST getfattr -n "user.foo" $B0/${V0}0/f1;
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-958691.t b/tests/bugs/bug-958691.t
new file mode 100644
index 000000000..a5ac406c9
--- /dev/null
+++ b/tests/bugs/bug-958691.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+sleep 1;
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0;
+
+sleep 2;
+
+# Tests for the fuse mount
+TEST mkdir $M0/dir;
+TEST chmod 1777 $M0/dir;
+TEST touch $M0/dir/file{1,2};
+
+TEST $CLI volume set $V0 server.root-squash enable;
+
+mv $M0/dir/file1 $M0/dir/file11 2>/dev/null;
+TEST [ $? -ne 0 ];
+
+TEST $CLI volume set $V0 server.root-squash disable;
+TEST rm -rf $M0/dir;
+
+sleep 1;
+
+# tests for nfs mount
+TEST mkdir $N0/dir;
+TEST chmod 1777 $N0/dir;
+TEST touch $N0/dir/file{1,2};
+
+TEST $CLI volume set $V0 server.root-squash enable;
+
+mv $N0/dir/file1 $N0/dir/file11 2>/dev/null;
+TEST [ $? -ne 0 ];
+
+TEST $CLI volume set $V0 server.root-squash disable;
+TEST rm -rf $N0/dir;
+TEST umount $N0;
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-958790.t b/tests/bugs/bug-958790.t
new file mode 100644
index 000000000..6cc799c25
--- /dev/null
+++ b/tests/bugs/bug-958790.t
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+touch /var/lib/glusterd/groups/test
+echo "read-ahead=off" > /var/lib/glusterd/groups/test
+echo "open-behind=off" >> /var/lib/glusterd/groups/test
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 group test
+EXPECT "off" volume_option $V0 performance.read-ahead
+EXPECT "off" volume_option $V0 performance.open-behind
+
+cleanup;
diff --git a/tests/bugs/bug-961307.t b/tests/bugs/bug-961307.t
new file mode 100644
index 000000000..96e93a56f
--- /dev/null
+++ b/tests/bugs/bug-961307.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+REPLICA=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11
+TEST $CLI volume start $V0
+
+var1=$(gluster volume remove-brick $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 start 2>&1)
+var2="volume remove-brick start: failed: Volume $H0:$B0/${V0}-00 does not exist"
+
+
+function compare_string()
+{
+ val="-1"
+ if [ "$1" == "$2" ]; then
+ val="0"
+ else
+ val="1"
+ fi
+ echo $val
+}
+
+EXPECT 0 compare_string "$var1" "$var2"
+cleanup;
diff --git a/tests/bugs/bug-961615.t b/tests/bugs/bug-961615.t
new file mode 100644
index 000000000..d183e6c52
--- /dev/null
+++ b/tests/bugs/bug-961615.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This test verifies that an extra fd_unref does not happen in the rebalance
+#migration completion check code path in dht
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST touch $M0/1
+#This rename creates a link file for 10 in the other volume.
+TEST mv $M0/1 $M0/10
+#Lets keep writing to the file which will trigger rebalance completion check
+dd if=/dev/zero of=$M0/10 bs=1k &
+bg_pid=$!
+#Now rebalance force will migrate file '10'
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
+#If the bug exists mount would have crashed by now
+TEST ls $M0
+kill -9 $bg_pid > /dev/null 2>&1
+wait > /dev/null 2>&1
+cleanup
diff --git a/tests/bugs/bug-961669.t b/tests/bugs/bug-961669.t
new file mode 100644
index 000000000..751a63df2
--- /dev/null
+++ b/tests/bugs/bug-961669.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+#Test case: Fail remove-brick 'start' variant when reducing the replica count of a volume.
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 3x3 dist-rep volume
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8};
+TEST $CLI volume start $V0
+
+# Mount FUSE and create file/directory
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/zerobytefile.txt
+TEST mkdir $M0/test_dir
+TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
+
+function remove_brick_start {
+ $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} start 2>&1|grep -oE 'success|failed'
+}
+
+function remove_brick {
+ $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} 2>&1|grep -oE 'success|failed'
+}
+
+#remove-brick start variant
+#Actual message displayed at cli is:
+#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
+EXPECT "failed" remove_brick_start;
+
+#remove-brick commit-force
+#Actual message displayed at cli is:
+#"volume remove-brick commit force: success"
+EXPECT "success" remove_brick
+
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-963541.t b/tests/bugs/bug-963541.t
new file mode 100755
index 000000000..950c7db54
--- /dev/null
+++ b/tests/bugs/bug-963541.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+TEST $CLI volume start $V0;
+
+# Start a remove-brick and try to start a rebalance/remove-brick without committing
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+TEST ! $CLI volume rebalance $V0 start
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+
+#Try to start rebalance/remove-brick again after commit
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
+
+gluster volume status
+
+TEST $CLI volume rebalance $V0 start
+TEST $CLI volume rebalance $V0 stop
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+TEST $CLI volume stop $V0
+
+cleanup;
+
diff --git a/tests/bugs/bug-963678.t b/tests/bugs/bug-963678.t
new file mode 100644
index 000000000..14d566579
--- /dev/null
+++ b/tests/bugs/bug-963678.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+# Bug 963678 - Test discard functionality
+#
+# Test that basic discard (hole punch) functionality works via the fallocate
+# command line tool. Hole punch deallocates a region of a file, creating a hole
+# and a zero-filled data region. We verify that hole punch works, frees blocks
+# and that subsequent reads do not read stale data (caches are invalidated).
+#
+# NOTE: fuse fallocate is known to be broken with regard to cache invalidation
+# up to 3.9.0 kernels. Therefore, FOPEN_KEEP_CACHE is not used in this
+# test (opens will invalidate the fuse cache).
+###
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../fallocate.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# check for fallocate and hole punch support
+require_fallocate -l 1m $M0/file
+require_fallocate -p -l 512k $M0/file && rm -f $M0/file
+
+# allocate some blocks, punch a hole and verify block allocation
+TEST fallocate -l 1m $M0/file
+blksz=`stat --printf=%B $M0/file`
+nblks=`stat --printf=%b $M0/file`
+TEST [ $(($blksz * $nblks)) -ge 1048576 ]
+TEST fallocate -p -o 512k -l 128k $M0/file
+
+nblks=`stat --printf=%b $M0/file`
+# allow some room for xattr blocks
+TEST [ $(($blksz * $nblks)) -lt $((917504 + 16384)) ]
+TEST unlink $M0/file
+
+# write some data, punch a hole and verify the file content changes
+TEST dd if=/dev/urandom of=$M0/file bs=1M count=1
+TEST cp $M0/file $M0/file.copy.pre
+TEST fallocate -p -o 512k -l 128k $M0/file
+TEST cp $M0/file $M0/file.copy.post
+TEST ! cmp $M0/file.copy.pre $M0/file.copy.post
+TEST unlink $M0/file
+
+TEST umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
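For reference, the bounds in the allocation checks above are plain arithmetic on the sizes used by the test; a quick shell sanity check (the 16384-byte slack for metadata/xattr blocks is the test's own allowance, not a fixed constant):

    echo $(( 1024 * 1024 ))            # 1048576 -> the full 1 MiB preallocation
    echo $(( 1048576 - 128 * 1024 ))   # 917504  -> expected allocation after punching 128 KiB
    echo $(( 917504 + 16384 ))         # 933888  -> upper bound accepted by the test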
diff --git a/tests/bugs/bug-964059.t b/tests/bugs/bug-964059.t
new file mode 100755
index 000000000..df07f95ee
--- /dev/null
+++ b/tests/bugs/bug-964059.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function volume_count {
+ local cli=$1;
+ if [ $cli -eq '1' ] ; then
+ $CLI_1 volume info | grep 'Volume Name' | wc -l;
+ else
+ $CLI_2 volume info | grep 'Volume Name' | wc -l;
+ fi
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN 20 1 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/$V0 start
+TEST $CLI_1 volume status
+cleanup;
diff --git a/tests/bugs/bug-966018.t b/tests/bugs/bug-966018.t
new file mode 100644
index 000000000..2a4697241
--- /dev/null
+++ b/tests/bugs/bug-966018.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This tests whether eager-lock blocks metadata operations on nfs/fuse mounts.
+#If the eager lock is not released promptly, the INODELK from the next command
+#waits for post-op-delay secs.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 3
+
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+sleep 5
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0
+echo 1 > $N0/1 && chmod +x $N0/1
+echo 1 > $M0/1 && chmod +x $M0/1
+
+#Check that the MAX INODELK fop latency is not of the order of seconds.
+#The egrep below matches only latencies of seven or more digits, so the variable must stay empty.
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+
+TEST [ -z $inodelk_max_latency ]
+TEST umount $N0
+
+cleanup;
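The INODELK latency filter above is dense; the sketch below unpacks it, assuming the profile output prints Max-Latency in microseconds as the sixth whitespace-separated field of the INODELK row:

    # hypothetical step-by-step rewrite of the one-liner above
    max_us=$($CLI volume profile $V0 info | grep INODELK | \
             awk 'BEGIN {max = 0} {if ($6 > max) max = $6} END {print max}')
    int_us=$(echo $max_us | cut -d. -f1)      # keep only the integer part of the latency
    echo $int_us | egrep "[0-9]{7,}"          # matches only values >= 1000000 us, i.e. ~1 s or more
    # TEST [ -z ... ] passes only when nothing matches, so the max INODELK latency stayed below ~1 s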
diff --git a/tests/bugs/bug-969193.t b/tests/bugs/bug-969193.t
new file mode 100755
index 000000000..e78a2980e
--- /dev/null
+++ b/tests/bugs/bug-969193.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Test that "system getspec" works without op_version problems.
+
+. $(dirname $0)/../include.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+TEST $CLI volume create $V0 $H0:$B0/brick1
+TEST $CLI system getspec $V0
+cleanup;
diff --git a/tests/bugs/bug-970070.t b/tests/bugs/bug-970070.t
new file mode 100755
index 000000000..da28b1ed7
--- /dev/null
+++ b/tests/bugs/bug-970070.t
@@ -0,0 +1,14 @@
+#!/bin/bash
+# TEST the nfs.acl option
+. $(dirname $0)/../include.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+sleep 5
+TEST $CLI volume set $V0 nfs.acl off
+TEST $CLI volume set $V0 nfs.acl on
+cleanup
diff --git a/tests/bugs/bug-973073.t b/tests/bugs/bug-973073.t
new file mode 100755
index 000000000..83e2839c6
--- /dev/null
+++ b/tests/bugs/bug-973073.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../dht.rc
+
+## Steps followed are the ones described in the bugzilla report
+
+cleanup;
+
+function get_layout()
+{
+ layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1`
+
+ if [ $? -ne 0 ]
+ then
+ echo 1
+ else
+ echo 0
+ fi
+
+}
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+
+## remove-brick status is tracked via the rebalance status; wait for it to complete
+EXPECT_WITHIN 30 "0" rebalance_completed
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN 30 "0" rebalance_completed
+
+TEST mkdir $M0/dir 2>/dev/null;
+
+EXPECT "0" get_layout $B0/${V0}2/dir
+cleanup;
diff --git a/tests/bugs/bug-974007.t b/tests/bugs/bug-974007.t
new file mode 100644
index 000000000..c8c1c862b
--- /dev/null
+++ b/tests/bugs/bug-974007.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+#Test case: Create a distributed replicate volume, and remove multiple
+#replica pairs in a single remove-brick command.
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 3X2 distributed-replicate volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+# Mount FUSE and create files
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file{1..10}
+
+# Remove bricks from two sub-volumes to make it a 1x2 vol.
+# Bricks in question are given in a random order but from the same subvols.
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}5 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_start_status;
+
+# Wait for rebalance to complete
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}6 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}5"
+
+# Check commit status
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}5 commit 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_commit_status;
+
+# Check the volume type
+EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'`
+
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-974972.t b/tests/bugs/bug-974972.t
new file mode 100755
index 000000000..15deac090
--- /dev/null
+++ b/tests/bugs/bug-974972.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This script checks that an nfs mount does not fail lookup on files in split-brain
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume start $V0
+sleep 5
+TEST mount -t nfs -o vers=3 $H0:/$V0 $N0
+TEST touch $N0/1
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}1
+echo abc > $N0/1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "Y" nfs_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_nfs $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_nfs $V0 1
+
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}0
+echo def > $N0/1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "Y" nfs_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_nfs $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_nfs $V0 1
+
+#Lookup should not fail
+TEST ls $N0/1
+TEST ! cat $N0/1
+
+TEST umount $N0
+cleanup
diff --git a/tests/bugs/bug-976800.t b/tests/bugs/bug-976800.t
new file mode 100644
index 000000000..2aee8cc11
--- /dev/null
+++ b/tests/bugs/bug-976800.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# This test checks that no fds remain open on the brick
+# even after the file is closed on the mount. This particular
+# test uses dd with conv=fsync to exercise afr's fsync codepath.
+cleanup;
+
+function is_fd_open {
+ local v=$1
+ local h=$2
+ local b=$3
+ local bpid=$(get_brick_pid $v $h $b)
+ ls -l /proc/$bpid/fd | grep -w "\-> $b/1"
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST dd of=$M0/1 if=/dev/zero bs=1k count=1 conv=fsync
+TEST ! is_fd_open $V0 $H0 $B0/${V0}0
+cleanup;
diff --git a/tests/bugs/bug-977246.t b/tests/bugs/bug-977246.t
new file mode 100644
index 000000000..e07ee1919
--- /dev/null
+++ b/tests/bugs/bug-977246.t
@@ -0,0 +1,21 @@
+#! /bin/bash
+
+# This test checks that address validation correctly catches hostnames
+# with consecutive dots, such as 'example..org', as invalid
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume info $V0
+TEST $CLI volume start $V0
+
+TEST ! $CLI volume set $V0 auth.allow example..org
+
+TEST $CLI volume stop $V0
+
+cleanup;
diff --git a/tests/bugs/bug-977797.t b/tests/bugs/bug-977797.t
new file mode 100755
index 000000000..08cdbe8f1
--- /dev/null
+++ b/tests/bugs/bug-977797.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 open-behind off
+TEST $CLI volume set $V0 quick-read off
+TEST $CLI volume set $V0 read-ahead off
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 io-cache off
+TEST $CLI volume set $V0 background-self-heal-count 0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+
+TEST mkdir -p $M0/a
+TEST `echo "GLUSTERFS" > $M0/a/file`
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+TEST chown root $M0/a
+TEST chown root $M0/a/file
+TEST `echo "GLUSTER-FILE-SYSTEM" > $M0/a/file`
+TEST mkdir $M0/a/b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0;
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"2"
+
+TEST chmod 757 $M0/a
+TEST chmod 757 $M0/a/file
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1;
+
+TEST ls -l $M0/a/file
+
+b1c0dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \
+ trusted.afr.$V0-client-0 "entry")
+b1c1dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \
+ trusted.afr.$V0-client-1 "entry")
+b2c0dir=$(afr_get_specific_changelog_xattr \
+ $B0/$V0"2"/a trusted.afr.$V0-client-0 "entry")
+b2c1dir=$(afr_get_specific_changelog_xattr \
+ $B0/$V0"2"/a trusted.afr.$V0-client-1 "entry")
+
+
+b1c0f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \
+ trusted.afr.$V0-client-0 "data")
+b1c1f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \
+ trusted.afr.$V0-client-1 "data")
+b2c0f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \
+ trusted.afr.$V0-client-0 "data")
+b2c1f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \
+ trusted.afr.$V0-client-1 "data")
+
+EXPECT "00000000" echo $b1c0f
+EXPECT "00000000" echo $b1c1f
+EXPECT "00000000" echo $b2c0f
+EXPECT "00000000" echo $b2c1f
+
+EXPECT "00000000" echo $b1c0dir
+EXPECT "00000000" echo $b1c1dir
+EXPECT "00000000" echo $b2c0dir
+EXPECT "00000000" echo $b2c1dir
+
+contains() {
+ string="$1"
+ substring="$2"
+ var="-1"
+ if test "${string#*$substring}" != "$string"
+ then
+ var="0" # $substring is in $string
+ else
+ var="1" # $substring is not in $string
+ fi
+ echo $var
+}
+
+var1=$(cat $M0/a/file 2>&1)
+var2="Input/output error"
+
+
+EXPECT "0" contains "$var1" "$var2"
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
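The eight-zero expectations above depend on how AFR packs its pending changelog into the trusted.afr.* xattrs; as a rough illustration (an assumption about the on-disk format, not something this test spells out), each value holds three 32-bit counters and afr_get_specific_changelog_xattr extracts one of them:

    # hypothetical inspection of one such xattr on a brick
    # value layout (hex): 0x<data:8><metadata:8><entry:8>
    getfattr -n trusted.afr.$V0-client-0 -e hex $B0/${V0}1/a/file
    # "00000000" in the requested slot means no pending operations are recorded
    # against that brick, which is what the EXPECT lines above assert.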
diff --git a/tests/bugs/bug-978794.t b/tests/bugs/bug-978794.t
new file mode 100644
index 000000000..d22d3cde3
--- /dev/null
+++ b/tests/bugs/bug-978794.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+
+
+# This test opens 100 fds and triggers graph switches to check whether the fsync
+# done as part of the graph-switch causes a crash.
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/{1..100}
+for i in {1..100}; do fd[$i]=`fd_available`; fd_open ${fd[$i]} 'w' $M0/$i; done
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{2,3}
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 120 "completed" rebalance_status_field $V0
+TEST cat $M0/{1..100}
+for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5}
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 120 "completed" rebalance_status_field $V0
+for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done
+TEST cat $M0/{1..100}
+cleanup
diff --git a/tests/bugs/bug-979365.t b/tests/bugs/bug-979365.t
new file mode 100755
index 000000000..e94dc9aa8
--- /dev/null
+++ b/tests/bugs/bug-979365.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This script checks that the ensure-durability option enables/disables
+#afr's sending of fsyncs
+cleanup;
+
+function num_fsyncs {
+ $CLI volume profile $V0 info | grep -w FSYNC | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability on
+TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd of=$M0/a if=/dev/zero bs=1M count=10
+#fsyncs take a while to complete.
+sleep 5
+
+# There can be zero or more fsyncs, depending on the order
+# in which the writes reached the server, in turn deciding
+# whether they were treated as "appending" writes or not.
+
+TEST [[ $(num_fsyncs) -ge 0 ]]
+#Stop the volume to erase the profile info of old operations
+TEST $CLI volume profile $V0 stop
+TEST $CLI volume stop $V0
+umount $M0
+#Disable ensure-durability now to disable fsyncs in afr.
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume start $V0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST $CLI volume profile $V0 start
+TEST dd of=$M0/a if=/dev/zero bs=1M count=10
+#fsyncs take a while to complete.
+sleep 5
+TEST [[ $(num_fsyncs) -eq 0 ]]
+
+cleanup
diff --git a/tests/bugs/bug-982174.t b/tests/bugs/bug-982174.t
new file mode 100644
index 000000000..460af7511
--- /dev/null
+++ b/tests/bugs/bug-982174.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Test to check the handling of invalid log-level values
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#Check that incorrect log-level keywords do not crash the CLI
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+
+function set_log_level_status {
+ local level=$1
+ $CLI volume set $V0 diagnostics.client-log-level $level 2>&1 |grep -oE 'success|failed'
+}
+
+
+LOG_LEVEL="trace"
+EXPECT "failed" set_log_level_status $LOG_LEVEL
+
+
+LOG_LEVEL="error-gen"
+EXPECT "failed" set_log_level_status $LOG_LEVEL
+
+
+LOG_LEVEL="TRACE"
+EXPECT "success" set_log_level_status $LOG_LEVEL
+
+EXPECT "$LOG_LEVEL" echo `$CLI volume info | grep diagnostics | awk '{print $2}'`
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/bug-983477.t b/tests/bugs/bug-983477.t
new file mode 100755
index 000000000..c19fa96c8
--- /dev/null
+++ b/tests/bugs/bug-983477.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This script checks that the use-readdirp mount option works as expected
+
+function get_use_readdirp_value {
+ local vol=$1
+ local statedump=$(generate_mount_statedump $vol)
+ local val=$(grep "use_readdirp=" $statedump | cut -f2 -d'=' | tail -1)
+ rm -f $statedump
+ echo $val
+}
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume start $V0
+#If readdirp is enabled the statedump should reflect it
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=yes
+TEST cd $M0
+EXPECT_WITHIN 20 "1" get_use_readdirp_value $V0
+TEST cd -
+TEST umount $M0
+
+#If readdirp is disabled the statedump should reflect it
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=no
+TEST cd $M0
+EXPECT_WITHIN 20 "0" get_use_readdirp_value $V0
+TEST cd -
+TEST umount $M0
+
+#Since the value is optional for this argument, specifying just "--use-readdirp" should also turn it on, not off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp
+TEST cd $M0
+EXPECT_WITHIN 20 "1" get_use_readdirp_value $V0
+TEST cd -
+TEST umount $M0
+
+#By default it is enabled.
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST cd $M0
+EXPECT_WITHIN 20 "1" get_use_readdirp_value $V0
+TEST cd -
+TEST umount $M0
+
+#Invalid values for use-readdirp should not be accepted
+TEST ! glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=please-fail
+
+cleanup
diff --git a/tests/bugs/bug-985074.t b/tests/bugs/bug-985074.t
new file mode 100644
index 000000000..80052129e
--- /dev/null
+++ b/tests/bugs/bug-985074.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Bug 985074 - Verify stale inode/dentry mappings are cleaned out.
+#
+# This test verifies that an inode/dentry mapping for a file removed via a
+# separate mount point is cleaned up appropriately. We create a file and hard
+# link from client 1. Next we remove the link via client 2. Finally, from client
+# 1 we attempt to rename the original filename to the name of the just removed
+# hard link.
+#
+# If the inode is not unlinked properly, the removed directory entry can resolve
+# to an inode (on the client that never saw the rm) that ends up passed down
+# through the lookup call. If md-cache holds valid metadata on the inode (due to
+# a large timeout value or recent lookup on the valid name), it is tricked into
+# returning a successful lookup that should have returned ENOENT. This manifests
+# as an error from the mv command in the following test sequence because file
+# and file.link resolve to the same file:
+#
+# # mv /mnt/glusterfs/0/file /mnt/glusterfs/0/file.link
+# mv: `/mnt/glusterfs/0/file' and `/mnt/glusterfs/0/file.link' are the same file
+#
+###
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 md-cache-timeout 3
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1 --entry-timeout=0 --attribute-timeout=0
+
+TEST touch $M0/file
+TEST ln $M0/file $M0/file.link
+TEST ls -ali $M0 $M1
+TEST rm -f $M1/file.link
+TEST ls -ali $M0 $M1
+# expire the md-cache timeout
+sleep 3
+TEST mv $M0/file $M0/file.link
+TEST stat $M0/file.link
+TEST ! stat $M0/file
+
+TEST umount $M1
+TEST umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/bug-986429.t b/tests/bugs/bug-986429.t
new file mode 100644
index 000000000..6e43f72b7
--- /dev/null
+++ b/tests/bugs/bug-986429.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+## This tests the volfile-server failover achieved by providing
+## multiple servers from the trusted pool for fetching the volume
+## specification
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s non-existent -s $H0 --volfile-id=/$V0 $M0
+
+cleanup;
diff --git a/tests/bugs/bug-986905.t b/tests/bugs/bug-986905.t
new file mode 100755
index 000000000..0fac40fb4
--- /dev/null
+++ b/tests/bugs/bug-986905.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This script checks if hardlinks that are created while a brick is down are
+#healed properly.
+
+cleanup;
+function get_inum {
+ ls -i $1 | awk '{print $1}'
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/a
+TEST ln $M0/a $M0/link_a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+TEST ls -l $M0
+inum=$(get_inum $B0/${V0}0/a)
+EXPECT "$inum" get_inum $B0/${V0}0/link_a
+cleanup
diff --git a/tests/bugs/bug-991622.t b/tests/bugs/bug-991622.t
new file mode 100644
index 000000000..5c3243465
--- /dev/null
+++ b/tests/bugs/bug-991622.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+
+#This tests that no fd leaks are observed after unlink/rename when open-behind is enabled
+function leaked_fds {
+ ls -l /proc/$(get_brick_pid $V0 $H0 $B0/$V0)/fd | grep deleted
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 open-behind on
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' "$M0/testfile1"
+TEST fd_write $fd1 "content"
+
+TEST fd2=`fd_available`
+TEST fd_open $fd2 'w' "$M0/testfile2"
+TEST fd_write $fd2 "content"
+
+TEST touch $M0/a
+TEST rm $M0/testfile1
+TEST mv $M0/a $M0/testfile2
+TEST fd_close $fd1
+TEST fd_close $fd2
+TEST ! leaked_fds
+cleanup;
diff --git a/tests/bugs/getlk_owner.c b/tests/bugs/getlk_owner.c
new file mode 100644
index 000000000..619c2e32d
--- /dev/null
+++ b/tests/bugs/getlk_owner.c
@@ -0,0 +1,120 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+
+#define GETLK_OWNER_CHECK(f, cp, label) \
+ do { \
+ switch (f.l_type) { \
+ case F_RDLCK: \
+ case F_WRLCK: \
+ ret = 1; \
+ goto label; \
+ case F_UNLCK: \
+ if (!are_flocks_sane (&f, &cp)) { \
+ ret = 1; \
+ goto label; \
+ } \
+ break; \
+ } \
+ } while (0)
+
+void
+flock_init (struct flock *f, short int type, off_t start, off_t len)
+{
+ f->l_type = type;
+ f->l_start = start;
+ f->l_len = len;
+}
+
+void
+flock_cp (struct flock *dst, struct flock *src)
+{
+ memcpy ((void *) dst, (void *) src, sizeof (struct flock));
+}
+
+int
+are_flocks_sane (struct flock *src, struct flock *cpy)
+{
+ return ((src->l_whence == cpy->l_whence) &&
+ (src->l_start == cpy->l_start) &&
+ (src->l_len == cpy->l_len));
+}
+
+/*
+ * Test description:
+ * SETLK (0,3), F_WRLCK
+ * SETLK (3,3), F_WRLCK
+ *
+ * the following GETLK requests must return the flock struct unmodified,
+ * except for l_type, which must be set to F_UNLCK
+ * GETLK (3,3), F_WRLCK
+ * GETLK (3,3), F_RDLCK
+ *
+ * */
+
+int main (int argc, char **argv)
+{
+ int fd = -1;
+ int ret = 1;
+ char *fname = NULL;
+ struct flock f = {0,};
+ struct flock cp = {0,};
+
+ if (argc < 2)
+ goto out;
+
+ fname = argv[1];
+ fd = open (fname, O_RDWR);
+ if (fd == -1) {
+ perror ("open");
+ goto out;
+ }
+
+ flock_init (&f, F_WRLCK, 0, 3);
+ flock_cp (&cp, &f);
+ ret = fcntl (fd, F_SETLK, &f);
+ if (ret) {
+ perror ("fcntl");
+ goto out;
+ }
+ if (!are_flocks_sane (&f, &cp)) {
+ ret = 1;
+ goto out;
+ }
+
+ flock_init (&f, F_WRLCK, 3, 3);
+ flock_cp (&cp, &f);
+ ret = fcntl (fd, F_SETLK, &f);
+ if (ret) {
+ perror ("fcntl");
+ goto out;
+ }
+ if (!are_flocks_sane (&f, &cp)) {
+ ret = 1;
+ goto out;
+ }
+
+ flock_init (&f, F_WRLCK, 3, 3);
+ flock_cp (&cp, &f);
+ ret = fcntl (fd, F_GETLK, &f);
+ if (ret) {
+ perror ("fcntl");
+ return 1;
+ }
+ GETLK_OWNER_CHECK (f, cp, out);
+
+ flock_init (&f, F_RDLCK, 3, 3);
+ flock_cp (&cp, &f);
+ ret = fcntl (fd, F_GETLK, &f);
+ if (ret) {
+ perror ("fcntl");
+ return 1;
+ }
+ GETLK_OWNER_CHECK (f, cp, out);
+
+out:
+ if (fd != -1)
+ close (fd);
+ return ret;
+}
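The helper above is intended to be driven from a test script; a minimal sketch of how it could be compiled and run (the build step and file name here are illustrative assumptions, not taken from an accompanying test):

    # build the helper and run the GETLK owner check against a scratch file on the mount
    gcc -o $(dirname $0)/getlk_owner $(dirname $0)/getlk_owner.c
    TEST touch $M0/getlk_owner_file
    TEST $(dirname $0)/getlk_owner $M0/getlk_owner_file   # exits 0 only if GETLK returned the flock intact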
diff --git a/tests/bugs/overlap.py b/tests/bugs/overlap.py
new file mode 100755
index 000000000..15f2da473
--- /dev/null
+++ b/tests/bugs/overlap.py
@@ -0,0 +1,59 @@
+#!/usr/bin/python
+
+import sys
+
+def calculate_one (ov, nv):
+ old_start = int(ov[18:26],16)
+ old_end = int(ov[26:34],16)
+ new_start = int(nv[18:26],16)
+ new_end = int(nv[26:34],16)
+ if (new_end < old_start) or (new_start > old_end):
+ #print '%s, %s -> ZERO' % (ov, nv)
+ return 0
+ all_start = max(old_start,new_start)
+ all_end = min(old_end,new_end)
+ #print '%s, %s -> %08x' % (ov, nv, all_end - all_start + 1)
+ return all_end - all_start + 1
+
+def calculate_all (values):
+ total = 0
+ nv_index = len(values) / 2
+ for old_val in values[:nv_index]:
+ new_val = values[nv_index]
+ nv_index += 1
+ total += calculate_one(old_val,new_val)
+ return total
+
+"""
+test1_vals = [
+ '0x0000000000000000000000003fffffff', # first quarter
+ '0x0000000000000000400000007fffffff', # second quarter
+ '0x000000000000000080000000ffffffff', # second half
+ '0x00000000000000000000000055555554', # first third
+ '0x000000000000000055555555aaaaaaa9', # second third
+ '0x0000000000000000aaaaaaaaffffffff', # last third
+]
+
+test2_vals = [
+ '0x0000000000000000000000003fffffff', # first quarter
+ '0x0000000000000000400000007fffffff', # second quarter
+ '0x000000000000000080000000ffffffff', # second half
+ '0x00000000000000000000000055555554', # first third
+ # Next two are (incorrectly) swapped.
+ '0x0000000000000000aaaaaaaaffffffff', # last third
+ '0x000000000000000055555555aaaaaaa9', # second third
+]
+
+print '%08x' % calculate_one(test1_vals[0],test1_vals[3])
+print '%08x' % calculate_one(test1_vals[1],test1_vals[4])
+print '%08x' % calculate_one(test1_vals[2],test1_vals[5])
+print '= %08x' % calculate_all(test1_vals)
+print '%08x' % calculate_one(test2_vals[0],test2_vals[3])
+print '%08x' % calculate_one(test2_vals[1],test2_vals[4])
+print '%08x' % calculate_one(test2_vals[2],test2_vals[5])
+print '= %08x' % calculate_all(test2_vals)
+"""
+
+if __name__ == '__main__':
+ # Return decimal so bash can reason about it.
+ print '%d' % calculate_all(sys.argv[1:])
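The script above expects an even number of layout values, the old assignments followed by the new ones, each in the same 0x<32-hex-digit> form that getfattr -e hex prints for a dht layout xattr (an assumption based on the [18:26] and [26:34] slices in calculate_one), and it prints the total count of overlapping hash values in decimal. A small usage sketch under that assumption:

    # hypothetical invocation: one old range and one new range for the same brick
    OLD=0x0000000000000000000000007ffffffe   # old layout covered hashes 0x0 .. 0x7ffffffe
    NEW=0x000000000000000000000000ffffffff   # new layout covers the whole 32-bit hash space
    overlap=$(./tests/bugs/overlap.py $OLD $NEW)
    echo "overlapping hash values: $overlap"  # prints 2147483647, the old range is fully retained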