Diffstat (limited to 'tests/bugs/replicate')
-rw-r--r--  tests/bugs/replicate/886998/strict-readdir.t | 52
-rwxr-xr-x  tests/bugs/replicate/bug-1015990-rep.t | 61
-rwxr-xr-x  tests/bugs/replicate/bug-1015990.t | 69
-rw-r--r--  tests/bugs/replicate/bug-1032927.t | 32
-rwxr-xr-x  tests/bugs/replicate/bug-1037501.t | 104
-rwxr-xr-x  tests/bugs/replicate/bug-1046624.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1058797.t | 48
-rw-r--r--  tests/bugs/replicate/bug-1101647.t | 31
-rw-r--r--  tests/bugs/replicate/bug-1130892.t | 76
-rw-r--r--  tests/bugs/replicate/bug-1132102.t | 28
-rw-r--r--  tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t | 56
-rw-r--r--  tests/bugs/replicate/bug-1139230.t | 58
-rw-r--r--  tests/bugs/replicate/bug-1180545.t | 77
-rw-r--r--  tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t | 57
-rw-r--r--  tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t | 42
-rw-r--r--  tests/bugs/replicate/bug-1238398-split-brain-resolution.t | 51
-rw-r--r--  tests/bugs/replicate/bug-1238508-self-heal.t | 51
-rw-r--r--  tests/bugs/replicate/bug-1250170-fsync.c | 56
-rw-r--r--  tests/bugs/replicate/bug-1250170-fsync.t | 35
-rw-r--r--  tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t | 54
-rw-r--r--  tests/bugs/replicate/bug-1292379.t | 58
-rw-r--r--  tests/bugs/replicate/bug-1297695.t | 43
-rw-r--r--  tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t | 40
-rw-r--r--  tests/bugs/replicate/bug-1325792.t | 25
-rw-r--r--  tests/bugs/replicate/bug-1335652.t | 29
-rw-r--r--  tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t | 46
-rw-r--r--  tests/bugs/replicate/bug-1341650.t | 63
-rw-r--r--  tests/bugs/replicate/bug-1363721.t | 118
-rw-r--r--  tests/bugs/replicate/bug-1365455.t | 54
-rw-r--r--  tests/bugs/replicate/bug-1386188-sbrain-fav-child.t | 82
-rw-r--r--  tests/bugs/replicate/bug-1402730.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1408712.t | 101
-rw-r--r--  tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t | 69
-rw-r--r--  tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t | 79
-rw-r--r--  tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t | 46
-rw-r--r--  tests/bugs/replicate/bug-1448804-check-quorum-type-values.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1473026.t | 31
-rw-r--r--  tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t | 52
-rw-r--r--  tests/bugs/replicate/bug-1480525.t | 18
-rw-r--r--  tests/bugs/replicate/bug-1493415-gfid-heal.t | 78
-rw-r--r--  tests/bugs/replicate/bug-1498570-client-iot-graph-check.t | 48
-rwxr-xr-x  tests/bugs/replicate/bug-1539358-split-brain-detection.t | 89
-rw-r--r--  tests/bugs/replicate/bug-1561129-enospc.t | 24
-rw-r--r--  tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t | 72
-rw-r--r--  tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t | 128
-rw-r--r--  tests/bugs/replicate/bug-1626994-info-split-brain.t | 62
-rw-r--r--  tests/bugs/replicate/bug-1637249-gfid-heal.t | 149
-rw-r--r--  tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t | 45
-rw-r--r--  tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t | 55
-rwxr-xr-x  tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t | 55
-rw-r--r--  tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t | 95
-rw-r--r--  tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t | 40
-rw-r--r--  tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t | 38
-rwxr-xr-x  tests/bugs/replicate/bug-1696599-io-hang.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t | 136
-rw-r--r--  tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t | 116
-rw-r--r--  tests/bugs/replicate/bug-1728770-pass-xattrs.t | 52
-rw-r--r--  tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t | 102
-rw-r--r--  tests/bugs/replicate/bug-1744548-heal-timeout.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t | 89
-rw-r--r--  tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t | 111
-rw-r--r--  tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t | 74
-rw-r--r--  tests/bugs/replicate/bug-1801624-entry-heal.t | 58
-rw-r--r--  tests/bugs/replicate/bug-765564.t | 86
-rwxr-xr-x  tests/bugs/replicate/bug-767585-gfid.t | 42
-rwxr-xr-x  tests/bugs/replicate/bug-802417.t | 120
-rw-r--r--  tests/bugs/replicate/bug-821056.t | 52
-rwxr-xr-x  tests/bugs/replicate/bug-830665.t | 127
-rwxr-xr-x  tests/bugs/replicate/bug-859581.t | 53
-rwxr-xr-x  tests/bugs/replicate/bug-865825.t | 82
-rw-r--r--  tests/bugs/replicate/bug-880898.t | 30
-rw-r--r--  tests/bugs/replicate/bug-884328.t | 12
-rw-r--r--  tests/bugs/replicate/bug-886998.t | 52
-rw-r--r--  tests/bugs/replicate/bug-888174.t | 62
-rw-r--r--  tests/bugs/replicate/bug-913051.t | 56
-rw-r--r--  tests/bugs/replicate/bug-916226.t | 26
-rw-r--r--  tests/bugs/replicate/bug-918437-sh-mtime.t | 71
-rwxr-xr-x  tests/bugs/replicate/bug-921231.t | 31
-rw-r--r--  tests/bugs/replicate/bug-957877.t | 33
-rw-r--r--  tests/bugs/replicate/bug-976800.t | 29
-rwxr-xr-x  tests/bugs/replicate/bug-977797.t | 96
-rw-r--r--  tests/bugs/replicate/bug-978794.t | 29
-rwxr-xr-x  tests/bugs/replicate/bug-979365.t | 47
-rwxr-xr-x  tests/bugs/replicate/bug-986905.t | 27
-rw-r--r--  tests/bugs/replicate/issue-1254-prioritize-enospc.t | 80
-rw-r--r--  tests/bugs/replicate/mdata-heal-no-xattrs.t | 59
-rw-r--r--  tests/bugs/replicate/ta-inode-refresh-read.t | 40
87 files changed, 5285 insertions, 0 deletions
diff --git a/tests/bugs/replicate/886998/strict-readdir.t b/tests/bugs/replicate/886998/strict-readdir.t
new file mode 100644
index 00000000000..63fe313b201
--- /dev/null
+++ b/tests/bugs/replicate/886998/strict-readdir.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+
+function num_files_in_dir {
+ d=$1
+ ls $d | sort | uniq | wc -l
+}
+
+#Basic sanity tests for readdir functionality
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2d2_0 $H0:$B0/r2d2_1 $H0:$B0/r2d2_2 $H0:$B0/r2d2_3
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
+
+TEST touch $M0/{1..100}
+EXPECT "100" num_files_in_dir $M0
+
+TEST kill_brick $V0 $H0 $B0/r2d2_0
+TEST kill_brick $V0 $H0 $B0/r2d2_2
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/r2d2_1
+TEST kill_brick $V0 $H0 $B0/r2d2_3
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+TEST $CLI volume set $V0 cluster.strict-readdir on
+EXPECT "on" volinfo_field $V0 cluster.strict-readdir
+TEST kill_brick $V0 $H0 $B0/r2d2_0
+TEST kill_brick $V0 $H0 $B0/r2d2_2
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/r2d2_1
+TEST kill_brick $V0 $H0 $B0/r2d2_3
+EXPECT "100" num_files_in_dir $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1015990-rep.t b/tests/bugs/replicate/bug-1015990-rep.t
new file mode 100755
index 00000000000..ab8166e372a
--- /dev/null
+++ b/tests/bugs/replicate/bug-1015990-rep.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST kill_brick $V0 $H0 $B0/$V0"3"
+
+for i in {1..100}; do echo "STRING" > $M0/File$i; done
+
+# Check shd is connected to all up bricks before running statistics command.
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+
+command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1")
+substring="Number of entries:"
+count=0
+while read -r line;
+do
+ if [[ "$line" == *$substring* ]]
+ then
+ value=$(echo $line | cut -f 2 -d :)
+ count=$(($count + $value))
+ fi
+
+done <<< "$command_output"
+
+brick_2_entries_count=$count
+xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
+EXPECT $brick_2_entries_count echo $xattrop_count_brick_2
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0
+
+cleanup;
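The read loop above tallies every "Number of entries:" line printed by the heal-count command. For reference, the same total can be computed in a single awk pass; a sketch against the same CLI output, not part of the test:

    gluster volume heal $V0 statistics heal-count replica $H0:$B0/${V0}1 | \
        awk -F: '/Number of entries:/ {sum += $2} END {print sum + 0}'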
diff --git a/tests/bugs/replicate/bug-1015990.t b/tests/bugs/replicate/bug-1015990.t
new file mode 100755
index 00000000000..a8b12f2c202
--- /dev/null
+++ b/tests/bugs/replicate/bug-1015990.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+sleep 5
+TEST kill_brick $V0 $H0 $B0/$V0"3"
+sleep 5
+
+for i in {1..100}; do echo "STRING" > $M0/File$i; done
+
+brick_2_sh_entries=$(count_sh_entries $B0/$V0"2")
+brick_4_sh_entries=$(count_sh_entries $B0/$V0"4")
+
+
+command_output=$(gluster volume heal $V0 statistics heal-count)
+
+
+substring="Number of entries:"
+count=0
+while read -r line;
+do
+ if [[ "$line" == *$substring* ]]
+ then
+ value=$(echo $line | cut -f 2 -d :)
+ count=$(($count + $value))
+ fi
+
+done <<< "$command_output"
+
+brick_2_entries_count=$(($count-$value))
+brick_4_entries_count=$value
+
+xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
+xattrop_count_brick_4=$(count_sh_entries $B0/$V0"4")
+EXPECT $brick_2_entries_count echo $xattrop_count_brick_2
+EXPECT $brick_4_entries_count echo $xattrop_count_brick_4
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0
+
+cleanup;
+
diff --git a/tests/bugs/replicate/bug-1032927.t b/tests/bugs/replicate/bug-1032927.t
new file mode 100644
index 00000000000..eb663d03fed
--- /dev/null
+++ b/tests/bugs/replicate/bug-1032927.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This tests whether a pathinfo getxattr fails when one of the bricks is down
+#Let's hope it doesn't
+
+cleanup;
+function get_pathinfo_in_loop {
+ failed=0
+ for i in {1..1000}
+ do
+ getfattr -n trusted.glusterfs.pathinfo $M0 2>/dev/null
+ if [ $? -ne 0 ]; then failed=1;break; fi
+ done
+ return $failed
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#When one of the bricks is down, getfattr of pathinfo should not fail.
+#Run the test 1000 times to see if we hit the race.
+TEST get_pathinfo_in_loop
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1037501.t b/tests/bugs/replicate/bug-1037501.t
new file mode 100755
index 00000000000..ce079555b50
--- /dev/null
+++ b/tests/bugs/replicate/bug-1037501.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function write_file()
+{
+ path="$1"; shift
+ echo "$*" > "$path"
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+TEST `echo "TEST-FILE" > $M0/File`
+TEST `mkdir $M0/Dir`
+TEST `ln $M0/File $M0/Link`
+TEST `mknod $M0/FIFO p`
+
+TEST $CLI volume add-brick $V0 replica 4 $H0:$B0/$V0-3 force
+TEST $CLI volume add-brick $V0 replica 5 $H0:$B0/$V0-4 force
+TEST $CLI volume add-brick $V0 replica 6 $H0:$B0/$V0-5 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5
+TEST gluster volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/File
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Link
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Dir
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/FIFO
+
+EXPECT 10 stat -c '%s' $B0/$V0-0/File
+EXPECT 10 stat -c '%s' $B0/$V0-1/File
+EXPECT 10 stat -c '%s' $B0/$V0-2/File
+EXPECT 10 stat -c '%s' $B0/$V0-3/File
+EXPECT 10 stat -c '%s' $B0/$V0-4/File
+EXPECT 10 stat -c '%s' $B0/$V0-5/File
+
+EXPECT 3 stat -c '%h' $B0/$V0-0/Link
+EXPECT 3 stat -c '%h' $B0/$V0-1/Link
+EXPECT 3 stat -c '%h' $B0/$V0-2/Link
+EXPECT 3 stat -c '%h' $B0/$V0-3/Link
+EXPECT 3 stat -c '%h' $B0/$V0-4/Link
+EXPECT 3 stat -c '%h' $B0/$V0-5/Link
+
+EXPECT 'directory' stat -c '%F' $B0/$V0-0/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-1/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-2/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-3/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-4/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-5/Dir
+
+EXPECT 'fifo' stat -c '%F' $B0/$V0-0/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-1/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-2/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-3/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-4/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-5/FIFO
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1046624.t b/tests/bugs/replicate/bug-1046624.t
new file mode 100755
index 00000000000..e2762ea6764
--- /dev/null
+++ b/tests/bugs/replicate/bug-1046624.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume set $V0 background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST ${GFS} --volfile-server=$H0 --volfile-id=$V0 --use-readdirp=no $M0
+
+TEST `echo "TEST-FILE" > $M0/File`
+TEST `mkdir $M0/Dir`
+TEST kill_brick $V0 $H0 $B0/${V0}-0
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
+
+TEST `ln -s $M0/File $M0/Link1`
+TEST `ln -s $M0/Dir $M0/Link2`
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST `find $M0/ 2>/dev/null 1>/dev/null`
+TEST `find $M0/ | xargs stat 2>/dev/null 1>/dev/null`
+
+TEST stat $B0/${V0}-0/Link1
+TEST stat $B0/${V0}-0/Link2
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1058797.t b/tests/bugs/replicate/bug-1058797.t
new file mode 100644
index 00000000000..598062a0dab
--- /dev/null
+++ b/tests/bugs/replicate/bug-1058797.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+#Test that the setuid bit is healed correctly.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+#Basic checks
+TEST glusterd
+
+#Create a 1x2 replica volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1};
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+# FUSE mount;create a file
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file
+
+#Kill brick1 and set S_ISUID and S_ISGID bits from mount point
+kill_brick $V0 $H0 $B0/brick1
+TEST chmod +x,+s $M0/file
+
+#Get file permissions from backend brick0 and verify that S_ISUID is indeed set
+file_permissions1=`ls -l $B0/brick0/file | awk '{print $1}'| cut -d. -f1 | cut -d- -f2,3,4,5,6`
+setuid_bit1=`echo $file_permissions1 | cut -b3`
+EXPECT "s" echo $setuid_bit1
+
+#Restart volume and do lookup from mount to trigger heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST dd if=$M0/file of=/dev/null
+
+#Get file permissions from healed brick1 and verify that S_ISUID is indeed set
+file_permissions2=`ls -l $B0/brick1/file | awk '{print $1}' | cut -d. -f1 | cut -d- -f2,3,4,5,6`
+setuid_bit2=`echo $file_permissions2 | cut -b3`
+EXPECT "s" echo $setuid_bit2
+
+#Also compare the entire permission string, just to be sure
+EXPECT $file_permissions1 echo $file_permissions2
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1101647.t b/tests/bugs/replicate/bug-1101647.t
new file mode 100644
index 00000000000..708bc1a1e29
--- /dev/null
+++ b/tests/bugs/replicate/bug-1101647.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+#Create base entry in indices/xattrop
+echo "Data">$M0/file
+
+TEST $CLI volume heal $V0
+#Entries from indices/xattrop should not be cleared after a heal.
+EXPECT 1 count_index_entries $B0/$V0"1"
+EXPECT 1 count_index_entries $B0/$V0"2"
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "More data">>$M0/file
+
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count|grep "Number of entries:"|head -n1|awk '{print $4}'`
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1130892.t b/tests/bugs/replicate/bug-1130892.t
new file mode 100644
index 00000000000..c7509f33cc2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1130892.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Create a 1X2 replica
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Disable self-heal daemon
+TEST gluster volume set $V0 self-heal-daemon off
+
+# Enable Client side heal
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
+# Disable all perf-xlators
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+# Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# FUSE Mount
+TEST ${GFS} -s $H0 --volfile-id $V0 $M0
+
+# Create files and dirs
+TEST mkdir -p $M0/one/two/
+TEST `echo "Carpe diem" > $M0/one/two/three`
+
+# Simulate disk-replacement
+TEST kill_brick $V0 $H0 $B0/${V0}-1
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
+TEST rm -rf $B0/${V0}-1/one
+TEST rm -rf $B0/${V0}-1/.glusterfs
+
+#Ideally, disk replacement is done using reset-brick or replace-brick gluster CLI
+#which will create .glusterfs folder.
+mkdir $B0/${V0}-1/.glusterfs && chmod 600 $B0/${V0}-1/.glusterfs
+
+# Start force
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/one
+
+sleep 1
+
+# Check pending xattrs
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
+
+TEST gluster volume set $V0 self-heal-daemon on
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two/three
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1132102.t b/tests/bugs/replicate/bug-1132102.t
new file mode 100644
index 00000000000..c7dbbf818aa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1132102.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#This tests that mknod and create fops mark the necessary pending changelogs
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+cd $M0
+TEST mkfifo fifo
+TEST mknod block b 0 0
+TEST touch a
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 metadata
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 metadata
+cleanup
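The EXPECT/EXPECT_NOT checks above decode the trusted.afr.<volume>-client-<N> changelog xattr, whose 12-byte value packs three big-endian 32-bit counters: pending data, metadata and entry operations, in that order. The raw value can also be dumped directly on a brick; an illustrative command, not part of the test:

    # The three 8-hex-digit groups are the data, metadata and entry pending counts.
    getfattr -n trusted.afr.$V0-client-0 -e hex $B0/${V0}1/fifo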
diff --git a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
new file mode 100644
index 00000000000..b69a38ae788
--- /dev/null
+++ b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+#### Test iatt and user xattr heal from lookup code path ####
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+TEST touch file
+TEST setfattr -n user.attribute1 -v "value" $B0/brick0/file
+TEST kill_brick $V0 $H0 $B0/brick2
+TEST chmod +x file
+iatt=$(stat -c "%g:%u:%A" file)
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT 2 get_pending_heal_count $V0
+
+#Trigger metadata heal
+TEST stat file
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+#iattrs must be matching
+iatt1=$(stat -c "%g:%u:%A" $B0/brick0/file)
+iatt2=$(stat -c "%g:%u:%A" $B0/brick1/file)
+iatt3=$(stat -c "%g:%u:%A" $B0/brick2/file)
+EXPECT $iatt echo $iatt1
+EXPECT $iatt echo $iatt2
+EXPECT $iatt echo $iatt3
+
+#xattrs must be matching
+xatt1_cnt=$(getfattr -d $B0/brick0/file|wc|awk '{print $1}')
+xatt2_cnt=$(getfattr -d $B0/brick1/file|wc|awk '{print $1}')
+xatt3_cnt=$(getfattr -d $B0/brick2/file|wc|awk '{print $1}')
+EXPECT "$xatt1_cnt" echo $xatt2_cnt
+EXPECT "$xatt1_cnt" echo $xatt3_cnt
+
+#changelogs must be zero
+xattr1=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick0/file)
+xattr2=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick1/file)
+EXPECT "000000000000000000000000" echo $xattr1
+EXPECT "000000000000000000000000" echo $xattr2
+
+cd -
+cleanup;
diff --git a/tests/bugs/replicate/bug-1139230.t b/tests/bugs/replicate/bug-1139230.t
new file mode 100644
index 00000000000..9ceac6c4f4e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1139230.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Create a 1X2 replica
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# FUSE Mount
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST mkdir -p $M0/one
+
+# Kill a brick
+TEST kill_brick $V0 $H0 $B0/${V0}-1
+
+TEST `echo "A long" > $M0/one/two`
+
+# Start force
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
+
+# Pending xattrs should be set for all the bricks once self-heal is done
+# Check pending xattrs
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.dirty
+
+TEST `echo "time ago" > $M0/one/three`
+
+# Pending xattrs should be set for all the bricks once transaction is done
+# Check pending xattrs
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.dirty
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1180545.t b/tests/bugs/replicate/bug-1180545.t
new file mode 100644
index 00000000000..5e40edd6c38
--- /dev/null
+++ b/tests/bugs/replicate/bug-1180545.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+#Create gfid split-brain of directory and check if conservative merge
+#completes successfully.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+function check_sh_entries() {
+ local expected="$1"
+ local count=
+ local good="0"
+ shift
+
+ for i in $*; do
+ count="$(count_sh_entries $i)"
+ if [[ "x${count}" == "x${expected}" ]]; then
+ good="$((good + 1))"
+ fi
+ done
+ if [[ "x${good}" != "x${last_good}" ]]; then
+ last_good="${good}"
+# This triggers a sweep of the heal index. However, if more than one brick
+# tries to heal the same directory at the same time, one of them will take
+# the lock and the other will give up, waiting for the next heal cycle, which
+# is set to 60 seconds (the minimum valid value). So, each time we detect
+# that one brick has completed the heal, we trigger another heal.
+ $CLI volume heal $V0
+ fi
+
+ echo "${good}"
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 cluster.heal-timeout 60
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Create files with alternate brick down. One file has gfid mismatch.
+TEST mkdir $M0/DIR
+
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST touch $M0/DIR/FILE
+TEST touch $M0/DIR/file{1..5}
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/DIR/FILE
+TEST touch $M0/DIR/file{6..10}
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Trigger heal and verify number of entries in backend
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+last_good=""
+
+EXPECT_WITHIN $HEAL_TIMEOUT "2" check_sh_entries 2 $B0/brick{0,1}
+
+#Two entries for DIR and two for FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
+TEST diff <(ls $B0/brick0/DIR) <(ls $B0/brick1/DIR)
+cleanup
diff --git a/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t b/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
new file mode 100644
index 00000000000..fe8e992e8f8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
@@ -0,0 +1,57 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+#Stale entries in xattrop folder for files which do not need heal must be removed during the next index heal crawl.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0;
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST `echo hello>$M0/datafile`
+TEST touch $M0/mdatafile
+
+#Create split-brain and reset the afr xattrs, so that we have only the entry inside xattrop folder.
+#This is to simulate the case where the brick crashed just before pre-op happened, but index xlator created the entry inside xattrop folder.
+
+#Create data, metadata SB.
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST stat $M0/datafile
+TEST `echo append>>$M0/datafile`
+TEST chmod +x $M0/mdatafile
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+TEST kill_brick $V0 $H0 $B0/$V0"0"
+TEST stat $M0/datafile
+TEST `echo append>>$M0/datafile`
+TEST chmod -x $M0/mdatafile
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+TEST ! cat $M0/datafile
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/$V0"0"
+EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/$V0"1"
+
+#Reset xattrs and trigger heal.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}0/datafile
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/datafile
+
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}0/mdatafile
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/mdatafile
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $B0/$V0"0"
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $B0/$V0"1"
+
+cleanup
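count_sh_entries, used in the checks above, counts the gfid-named links under the brick's xattrop index, which is where the index xlator queues self-heal candidates. When debugging, that directory can be listed directly; an illustrative command, not part of the test:

    # Entries linger here while a file appears to need heal and are removed
    # by the index heal crawl once the pending xattrs are clean.
    ls $B0/${V0}0/.glusterfs/indices/xattrop/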
diff --git a/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t b/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
new file mode 100644
index 00000000000..6ff471fbf15
--- /dev/null
+++ b/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+#Allow readdirs to proceed on directories that are in split-brain
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0;
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file{1..5}
+
+#Create entry split-brain
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
+TEST touch $M0/dir/FILE
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "^Y$" force_umount $M0
+TEST $CLI volume start $V0 force
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+TEST kill_brick $V0 $H0 $B0/$V0"0"
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
+TEST touch $M0/dir/FILE
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "^Y$" force_umount $M0
+TEST $CLI volume start $V0 force
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+
+cd $M0/dir
+EXPECT "6" echo $(ls | wc -l)
+TEST ! cat FILE
+TEST `echo hello>hello.txt`
+cd -
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1238398-split-brain-resolution.t b/tests/bugs/replicate/bug-1238398-split-brain-resolution.t
new file mode 100644
index 00000000000..8ef3aae979f
--- /dev/null
+++ b/tests/bugs/replicate/bug-1238398-split-brain-resolution.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+function get_split_brain_status {
+ local path=$1
+ echo `getfattr -n replica.split-brain-status $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+#Disable self-heal-daemon
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+TEST `echo "some-data" > $M0/metadata-split-brain.txt`
+
+#Create metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 666 $M0/metadata-split-brain.txt
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST chmod 757 $M0/metadata-split-brain.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT 2 get_pending_heal_count $V0
+
+#Inspect the file in metadata-split-brain
+EXPECT "data-split-brain:no metadata-split-brain:yes Choices:patchy-client-0,patchy-client-1" get_split_brain_status $M0/metadata-split-brain.txt
+TEST setfattr -n replica.split-brain-choice -v $V0-client-0 $M0/metadata-split-brain.txt
+
+EXPECT "757" stat -c %a $M0/metadata-split-brain.txt
+
+TEST setfattr -n replica.split-brain-choice -v $V0-client-1 $M0/metadata-split-brain.txt
+EXPECT "666" stat -c %a $M0/metadata-split-brain.txt
+
+cleanup;
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
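The test inspects replica.split-brain-status and switches the read choice with replica.split-brain-choice. AFR's mount-based resolution can also finalize the heal once a source copy has been picked; a sketch of that last step, which this test does not exercise, assuming the client-0 copy is chosen:

    # Mark the client-0 copy as the heal source and resolve the split-brain.
    setfattr -n replica.split-brain-heal-finalize -v $V0-client-0 $M0/metadata-split-brain.txt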
diff --git a/tests/bugs/replicate/bug-1238508-self-heal.t b/tests/bugs/replicate/bug-1238508-self-heal.t
new file mode 100644
index 00000000000..24fb07d31f0
--- /dev/null
+++ b/tests/bugs/replicate/bug-1238508-self-heal.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+# Disable self-heal-daemon
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST mkdir $M0/olddir;
+TEST `echo "some-data" > $M0/olddir/oldfile`
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/olddir/oldfile $M0/olddir/newfile;
+TEST mv $M0/olddir $M0/newdir;
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# Test if the files are present on both the bricks
+EXPECT "newdir" ls $B0/${V0}0/
+EXPECT "newdir" ls $B0/${V0}1/
+EXPECT "newfile" ls $B0/${V0}0/newdir/
+EXPECT "newfile" ls $B0/${V0}1/newdir/
+
+# Test if gfid-link files in .glusterfs also provide correct info
+brick0gfid=$(gf_get_gfid_backend_file_path $B0/${V0}0 newdir)
+brick1gfid=$(gf_get_gfid_backend_file_path $B0/${V0}1 newdir)
+EXPECT "newfile" ls $brick0gfid
+EXPECT "newfile" ls $brick1gfid
+
+# Test if the files are accessible from the mount
+EXPECT "newdir" ls $M0/
+EXPECT "newfile" ls $M0/newdir
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1250170-fsync.c b/tests/bugs/replicate/bug-1250170-fsync.c
new file mode 100644
index 00000000000..21fd96594aa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1250170-fsync.c
@@ -0,0 +1,56 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+
+int
+main(int argc, char **argv)
+{
+ char *file = NULL;
+ int fd = -1;
+ char *buffer = NULL;
+ size_t buf_size = 0;
+ size_t written = 0;
+ int ret = 0;
+ off_t offset = 0;
+ int i = 0;
+ int loop_count = 5;
+
+ if (argc < 2) {
+ printf("Usage:%s <filename>\n", argv[0]);
+ return -1;
+ }
+
+ file = argv[1];
+ buf_size = 1024;
+ buffer = malloc(buf_size);
+ if (!buffer) {
+ perror("malloc");
+ return -1;
+ }
+ memset(buffer, 'R', buf_size);
+
+ fd = open(file, O_WRONLY);
+ if (fd == -1) {
+ perror("open");
+ return -1;
+ }
+
+ for (i = 0; i < loop_count; i++) {
+ ret = write(fd, buffer, buf_size);
+ if (ret == -1) {
+ perror("write");
+ return ret;
+ } else {
+ written += ret;
+ }
+ offset = lseek(fd, 0, SEEK_SET); /* rewind so the next write overlaps the previous one */
+ }
+
+ free(buffer);
+ return 0;
+}
diff --git a/tests/bugs/replicate/bug-1250170-fsync.t b/tests/bugs/replicate/bug-1250170-fsync.t
new file mode 100644
index 00000000000..7a3fdbf8bb5
--- /dev/null
+++ b/tests/bugs/replicate/bug-1250170-fsync.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST gcc $(dirname $0)/bug-1250170-fsync.c -o $(dirname $0)/bug-1250170-fsync
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST gluster volume profile $V0 start
+#Perform 5 non-sequential writes.
+TEST $(dirname $0)/bug-1250170-fsync $M0/file
+
+#Run profile info initially to filter out the interval statistics in the
+#subsequent runs.
+TEST $CLI volume profile $V0 info
+#We get only cumulative statistics.
+write_count=$($CLI volume profile $V0 info | grep WRITE |awk '{count += $8} END {print count}')
+fsync_count=$($CLI volume profile $V0 info | grep FSYNC |awk '{count += $8} END {print count}')
+
+EXPECT "5" echo $write_count
+TEST [ -z $fsync_count ]
+
+TEST $CLI volume profile $V0 stop
+TEST umount $M0
+rm -f $(dirname $0)/bug-1250170-fsync
+cleanup
diff --git a/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t b/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t
new file mode 100644
index 00000000000..884b7892954
--- /dev/null
+++ b/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Negative case with brick not killed && volume-id xattrs present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit force
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+# Negative case with brick killed but volume-id xattr present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
+
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 start
+# Simulated reset disk
+for i in {1..5}
+do
+ rm -rf $B0/${V0}1/file$i.txt
+done
+for i in {6..10}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Now reset the brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+EXPECT "10" echo $(ls $B0/${V0}1 | wc -l)
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1292379.t b/tests/bugs/replicate/bug-1292379.t
new file mode 100644
index 00000000000..be1bf699173
--- /dev/null
+++ b/tests/bugs/replicate/bug-1292379.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.eager-lock off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST wfd=`fd_available`
+TEST fd_open $wfd "w" $M0/a
+
+TEST fd_write $wfd "abcd"
+
+# Kill brick-0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# While brick-0 is down, rename 'a' to 'b'
+TEST mv $M0/a $M0/b
+
+TEST fd_write $wfd "lmn"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST fd_write $wfd "pqrs"
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST fd_write $wfd "xyz"
+TEST fd_close $wfd
+
+md5sum_b0=$(md5sum $B0/${V0}0/b | awk '{print $1}')
+
+EXPECT "$md5sum_b0" echo `md5sum $B0/${V0}1/b | awk '{print $1}'`
+
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1297695.t b/tests/bugs/replicate/bug-1297695.t
new file mode 100644
index 00000000000..d5c1a214fe2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1297695.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function write_to_file {
+ dd of=$M0/dir/file if=/dev/urandom bs=1024k count=128 >/dev/null 2>&1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume set $V0 post-op-delay-secs 3
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 ensure-durability off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST `echo 'abc' > $M0/dir/file`
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+write_to_file &
+#Verify that the MAX [F]INODELK fop latency is not of the order of seconds.
+EXPECT "^1$" get_pending_heal_count $V0
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+TEST [ -z $inodelk_max_latency ]
+cleanup
diff --git a/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t b/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t
new file mode 100644
index 00000000000..780ddb9250c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Test that for files in metadata-split-brain, we do not wind even a single read.
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 700 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST chmod 777 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+lines=`cat $M0/file|wc|awk '{print $1}'`
+EXPECT 0 echo $lines
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1325792.t b/tests/bugs/replicate/bug-1325792.t
new file mode 100644
index 00000000000..73190e5f341
--- /dev/null
+++ b/tests/bugs/replicate/bug-1325792.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}0 | grep -A 1 ${V0}0 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}1 | grep -A 1 ${V0}1 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}2 | grep -A 1 ${V0}2 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}3 | grep -A 1 ${V0}3 | grep "entries" | wc -l`
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1335652.t b/tests/bugs/replicate/bug-1335652.t
new file mode 100644
index 00000000000..653a1b05ce2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1335652.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 shard on
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Kill the zeroth brick so that the 1st and 2nd get marked dirty
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST dd if=/dev/urandom of=$M0/file bs=10MB count=20
+
+#At no point should the value of dirty be greater than 0 on the source bricks
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}1/.shard
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}2/.shard
+
+rm -rf $M0/file;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t b/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t
new file mode 100644
index 00000000000..6d177a7d3f8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+## Create a 2x2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r11 $H0:$B0/r12 $H0:$B0/r21 $H0:$B0/r22;
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start the volume
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## For releases > 3.7, Gluster NFS is off by default
+TEST $CLI vol set $V0 nfs.disable off;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+TEST mount_nfs $H0:/$V0 $N0;
+
+## create some directories and files inside mount
+mkdir $N0/io;
+for j in {1..10}; do mkdir $N0/io/b$j; for k in {1..10}; do touch $N0/io/b$j/c$k; done done
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/r11 $H0:$B0/r12 start;
+
+TEST mkdir $N0/dir1;
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/r11 $H0:$B0/r12"
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/r11 $H0:$B0/r12 commit;
+
+TEST mkdir $N0/dir2;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1341650.t b/tests/bugs/replicate/bug-1341650.t
new file mode 100644
index 00000000000..610342ca5bd
--- /dev/null
+++ b/tests/bugs/replicate/bug-1341650.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume set $V0 quota-deem-statfs on
+TEST $CLI volume set $V0 soft-timeout 0
+TEST $CLI volume set $V0 hard-timeout 0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST mkdir $M0/dir
+TEST $CLI volume quota $V0 limit-objects /dir 10
+
+TEST touch $M0/dir/file1
+TEST touch $M0/dir/file2
+TEST touch $M0/dir/file3
+TEST touch $M0/dir/file4
+TEST touch $M0/dir/file5
+
+# Kill 3rd brick and create entries
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST touch $M0/dir/file6
+TEST touch $M0/dir/file7
+TEST touch $M0/dir/file8
+TEST touch $M0/dir/file9
+
+# Quota object limit is reached. Remove object for create to succeed.
+TEST ! touch $M0/dir/file10
+
+TEST rm $M0/dir/file1
+TEST touch $M0/dir/file10
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}0/dir
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}1/dir
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+#Check that no conservative merge happened. file1 must not be present on any brick.
+TEST ! stat $B0/${V0}0/dir/file1
+TEST ! stat $B0/${V0}1/dir/file1
+TEST ! stat $B0/${V0}2/dir/file1
+
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1363721.t b/tests/bugs/replicate/bug-1363721.t
new file mode 100644
index 00000000000..0ed34d8a4f4
--- /dev/null
+++ b/tests/bugs/replicate/bug-1363721.t
@@ -0,0 +1,118 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+FILE_UPDATE_TIMEOUT=20
+cleanup
+
+function size_increased {
+ local file=$1
+ local size=$2
+ local new_size=$(stat -c%s $file)
+ if [ $new_size -gt $size ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function has_write_failed {
+ local pid=$1
+ if [ -d /proc/$pid ]; then echo "N"; else echo "Y"; fi
+}
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --direct-io-mode=enable $M0
+
+cd $M0
+
+# Start writing to a file.
+(dd if=/dev/urandom of=$M0/file1 bs=1k 2>/dev/null 1>/dev/null)&
+dd_pid=$!
+
+# Let IO happen
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 0
+
+# Now kill the zeroth brick
+kill_brick $V0 $H0 $B0/${V0}0
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now kill the first brick
+kill_brick $V0 $H0 $B0/${V0}1
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# Let IO continue for 3 seconds
+sleep 3
+
+# Now kill the second brick
+kill_brick $V0 $H0 $B0/${V0}2
+
+# At this point the write should have failed.
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "Y" has_write_failed $dd_pid
+
+# Also make sure that the second brick is never accused (no pending xattrs blame it).
+
+md5sum_2=$(md5sum $B0/${V0}2/file1 | awk '{print $1}')
+
+EXPECT_NOT "$md5sum_2" echo `md5sum $B0/${V0}0/file1 | awk '{print $1}'`
+EXPECT_NOT "$md5sum_2" echo `md5sum $B0/${V0}1/file1 | awk '{print $1}'`
+
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.dirty data
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.dirty data
+
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.$V0-client-2 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.$V0-client-2 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file1 trusted.afr.$V0-client-2 metadata
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Enable shd
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}0/file1 | awk '{print $1}'`
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}1/file1 | awk '{print $1}'`
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}2/file1 | awk '{print $1}'`
+
+cd ~
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1365455.t b/tests/bugs/replicate/bug-1365455.t
new file mode 100644
index 00000000000..1953e2a9327
--- /dev/null
+++ b/tests/bugs/replicate/bug-1365455.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function check_size
+{
+ for i in {1..10}; do
+ size1=`stat -c %s $B0/${V0}0/tmp$i`
+ size2=`stat -c %s $B0/${V0}1/tmp$i`
+ if [[ $size1 -eq 0 ]] || [[ $size2 -eq 0 ]] || [[ $size1 -ne $size2 ]]; then
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+for i in {1..10}
+do
+ echo abc > $M0/tmp$i
+done
+
+
+# Add Another brick
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}1
+
+#Check if self heal daemon has come up
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+
+#Check if self heal daemon is able to see all bricks
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+
+#Check size of files on bricks
+TEST check_size
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t b/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t
new file mode 100644
index 00000000000..d049d95ef9a
--- /dev/null
+++ b/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/data.txt
+TEST touch $M0/mdata.txt
+
+#Create data and metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/data.txt bs=1024 count=1024
+TEST setfattr -n user.value -v value1 $M0/mdata.txt
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/data.txt bs=1024 count=1024
+TEST setfattr -n user.value -v value2 $M0/mdata.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+## Check that the file is still in split-brain,
+ ## I/O fails
+ cat $M0/data.txt > /dev/null
+ EXPECT "1" echo $?
+ ## pending xattrs blame each other.
+ brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/data.txt)
+ brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/data.txt)
+ TEST [ $brick0_pending -ne "000000000000000000000000" ]
+ TEST [ $brick1_pending -ne "000000000000000000000000" ]
+
+ ## I/O fails
+ getfattr -n user.value $M0/mdata.txt
+ EXPECT "1" echo $?
+ brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/mdata.txt)
+ brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/mdata.txt)
+ TEST [ $brick0_pending -ne "000000000000000000000000" ]
+ TEST [ $brick1_pending -ne "000000000000000000000000" ]
+
+## Let us use mtime as the fav-child policy, so brick0 will be the source.
+ # Set dirty (data part) on the sink brick to check if it is reset later along with the pending xattr.
+ TEST setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/${V0}1/data.txt
+ # Set dirty (metadata part) on the sink brick to check if it is reset later along with the pending xattr.
+ TEST setfattr -n trusted.afr.dirty -v 0x000000000000000100000000 $B0/${V0}1/mdata.txt
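+ # For manual inspection, all AFR changelog xattrs on the sink can be dumped in
+ # hex with a plain read-only query:
+ getfattr -d -m trusted.afr -e hex $B0/${V0}1/data.txt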
+
+ TEST $CLI volume set $V0 favorite-child-policy mtime
+
+ # Reading the file should be allowed and sink brick xattrs must be reset.
+ cat $M0/data.txt > /dev/null
+ EXPECT "0" echo $?
+ TEST brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/data.txt)
+ TEST brick1_dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}1/data.txt)
+ TEST [ $brick1_dirty -eq "000000000000000000000000" ]
+ TEST [ $brick1_pending -eq "000000000000000000000000" ]
+
+ # Accessing the file should be allowed and sink brick xattrs must be reset.
+ EXPECT "value2" echo $(getfattr --only-values -n user.value $M0/mdata.txt)
+ TEST brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/mdata.txt)
+ TEST brick1_dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}1/mdata.txt)
+ TEST [ $brick1_dirty -eq "000000000000000000000000" ]
+ TEST [ $brick1_pending -eq "000000000000000000000000" ]
+
+#Enable shd and heal the file.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT 0 get_pending_heal_count $V0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1402730.t b/tests/bugs/replicate/bug-1402730.t
new file mode 100644
index 00000000000..c7866df463b
--- /dev/null
+++ b/tests/bugs/replicate/bug-1402730.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0
+
+TEST mkdir -p $M0/a/b/c
+cd $M0/a/b/c
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+rm -rf $B0/${V0}2/*
+rm -rf $B0/${V0}2/.glusterfs
+
+#Ideally, disk replacement is done using the reset-brick or replace-brick gluster
+#CLI, which will create the .glusterfs folder.
+mkdir $B0/${V0}2/.glusterfs && chmod 600 $B0/${V0}2/.glusterfs
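+#For reference, the CLI-driven replacement would look roughly like:
+#  $CLI volume reset-brick $V0 $H0:$B0/${V0}2 start
+#  $CLI volume reset-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}2 commit force
+#Here the brick backend is wiped and recreated by hand to keep the test self-contained.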
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch file
+
+GFID_C=$(get_gfid_string $M0/a/b/c)
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$GFID_C/file
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$GFID_C/file
+
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/a/b/c trusted.afr.$V0-client-2 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a/b/c trusted.afr.$V0-client-2 entry
+
+cd ~
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1408712.t b/tests/bugs/replicate/bug-1408712.t
new file mode 100644
index 00000000000..9499a598ef1
--- /dev/null
+++ b/tests/bugs/replicate/bug-1408712.t
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=12
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume heal $V0 granular-entry-heal enable
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 performance.flush-behind off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 2
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 2
+
+TEST cd $M0
+TEST dd if=/dev/zero of=file bs=1M count=8
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}0
+
+TEST "dd if=/dev/zero bs=1M count=8 >> file"
+
+FILE_GFID=$(get_gfid_string $M0/file)
+
+# Test that the index associated with '/.shard' is created on B1 and B2.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+# Check for successful creation of granular entry indices
+for i in {2..3}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
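+# The name indices are plain files named after the entries created while the
+# brick was down (here the shard names <gfid>.<n>); listing the directory shows
+# what granular entry heal will later process:
+ls $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/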
+
+TEST cd ~
+TEST md5sum $M1/file
+
+# Test that the index associated with '/.shard' and the name indices for the created shards do not disappear on B1 and B2.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+for i in {2..3}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST ! stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+
+for i in {2..3}
+do
+ TEST_IN_LOOP ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP ! stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t b/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t
new file mode 100644
index 00000000000..d0e2fee8bcd
--- /dev/null
+++ b/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/file
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=20
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST ! dd if=$M0/file of=/dev/null
+SOURCE_BRICK_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+
+# Various fav-child policies must not heal the file when some bricks are down.
+TEST $CLI volume set $V0 favorite-child-policy size
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy ctime
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy mtime
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy majority
+TEST ! dd if=$M0/file of=/dev/null
+
+# CLI/mount based split-brain resolution must also not work.
+TEST ! $CLI volume heal $V0 split-brain bigger-file /file
+TEST ! $CLI volume heal $V0 split-brain mtime /file
+TEST ! $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 /file1
+
+TEST ! getfattr -n replica.split-brain-status $M0/file
+TEST ! setfattr -n replica.split-brain-choice -v $V0-client-1 $M0/file
+
+# Bring all bricks back up and launch heal.
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT 0 get_pending_heal_count $V0
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$SOURCE_BRICK_MD5" == "$B1_MD5" ]
+TEST [ "$SOURCE_BRICK_MD5" == "$B2_MD5" ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
new file mode 100644
index 00000000000..10ce0131f4f
--- /dev/null
+++ b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Disable self-heal-daemon, client-side-heal and set quorum-type to none
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.quorum-type none
+
+#Kill bricks 0 & 1 and create a file to have pending entry for 0 & 1 on brick 2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+echo "file 1" >> $M0/f1
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#Kill bricks 1 & 2 and create a file to have pending entry for 1 & 2 on brick 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "file 2" >> $M0/f2
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Kill bricks 2 & 0 and create a file to have pending entry for 2 & 0 on brick 1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+echo "file 3" >> $M0/f3
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# We were killing one brick and checking that entry heal does not reset the
+# pending xattrs for the down brick. Now that we need all bricks to be up for
+# entry heal, I'm removing that test from the .t
+
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+TEST ls $M0
+TEST cat $M0/f1
+TEST cat $M0/f2
+TEST cat $M0/f3
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+#Check whether all the bricks contain all the 3 files.
+EXPECT "3" echo $(ls $B0/${V0}0 | wc -l)
+EXPECT "3" echo $(ls $B0/${V0}1 | wc -l)
+EXPECT "3" echo $(ls $B0/${V0}2 | wc -l)
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t b/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t
new file mode 100644
index 00000000000..cdcaf62c925
--- /dev/null
+++ b/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+NEW_USER=bug1438255
+NEW_UID=1438255
+NEW_GID=1438255
+
+TEST groupadd -o -g ${NEW_GID} ${NEW_USER}-${NEW_GID}
+TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -K MAIL_DIR=/dev/null ${NEW_USER}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+TEST touch $M0/FILE
+TEST kill_brick $V0 $H0 $B0/${V0}2
+chown $NEW_UID:$NEW_GID $M0/FILE
+EXPECT "000000000000000100000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+EXPECT "000000000000000100000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# setfattr done as NEW_USER fails on the 3rd brick with EPERM but succeeds on
+# the first 2 and hence on the mount.
+su -m bug1438255 -c "setfattr -n user.myattr -v myvalue $M0/FILE"
+TEST [ $? -eq 0 ]
+EXPECT "000000000000000200000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+EXPECT "000000000000000200000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+# Brick 3 does not have any self-blaming pending xattr.
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+TEST userdel --force ${NEW_USER}
+TEST groupdel ${NEW_USER}-${NEW_GID}
+cleanup
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t b/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t
new file mode 100644
index 00000000000..5bacf3edcfe
--- /dev/null
+++ b/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..1}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Default quorum-type for replica 2 is none. quorum-count is zero but it is not displayed.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "none" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
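+# get_quorum_type (defined in volume.rc) is assumed to read the same private
+# dump, roughly:
+#   grep quorum-type $M0/.meta/graphs/active/$V0-replicate-0/private | awk '{print $3}'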
+
+# Convert to replica-3.
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Default quorum-type for replica 3 is auto. quorum-count is INT_MAX but it is not displayed.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "auto" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+# Change the type to fixed.
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+# We haven't set quorum-count yet, so it takes the default value of zero in reconfigure() and hence the quorum-type is displayed as none.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "none" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+# set quorum-count and check.
+TEST $CLI volume set $V0 cluster.quorum-count 1
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "fixed" get_quorum_type $M0 $V0 0
+EXPECT "1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count|awk '{print $3}'`
+
+# reset to default values.
+TEST $CLI volume reset $V0 cluster.quorum-type
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "auto" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1473026.t b/tests/bugs/replicate/bug-1473026.t
new file mode 100644
index 00000000000..efb3ffa0d39
--- /dev/null
+++ b/tests/bugs/replicate/bug-1473026.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=1473026
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=1473026
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0;
+
+#kill one brick (this has some issue)
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#kill the brick to be replaced
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# We know this command will fail because the file system is read-only now
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}4 commit force
+
+TEST pkill glusterd
+
+# Glusterd should start even though the volume info and brick volfiles don't match
+TEST glusterd
+TEST pidof glusterd
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t b/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t
new file mode 100644
index 00000000000..bb858a8a63d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST mkdir -p $M0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 $M0/d2
+gfid_d1=$(gf_get_gfid_xattr $B0/${V0}0/d1)
+gfid_d2=$(gf_get_gfid_xattr $B0/${V0}0/d2)
+gfid_dir=$(gf_get_gfid_xattr $B0/${V0}0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789)
+
+gfid_str_d1=$(gf_gfid_xattr_to_str $gfid_d1)
+gfid_str_d2=$(gf_gfid_xattr_to_str $gfid_d2)
+gfid_str_d3=$(gf_gfid_xattr_to_str $gfid_dir)
+
+# Kill 3rd brick and rename the dir from mount.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST mv $M0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 $M0/d2
+
+# Bring it back and trigger heal.
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check that .glusterfs symlink for dir exists and points to d2/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
+TEST linkname=$(readlink $B0/${V0}2/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3)
+EXPECT "dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" basename $linkname
+TEST parent_dir_gfid_str=$(echo $linkname|cut -d / -f5)
+EXPECT $gfid_str_d2 echo $parent_dir_gfid_str
+
+TEST rmdir $M0/d2/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
+
+TEST ! stat $B0/${V0}0/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+TEST ! stat $B0/${V0}1/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+TEST ! stat $B0/${V0}2/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+cleanup;
diff --git a/tests/bugs/replicate/bug-1480525.t b/tests/bugs/replicate/bug-1480525.t
new file mode 100644
index 00000000000..7c63bb2e4ea
--- /dev/null
+++ b/tests/bugs/replicate/bug-1480525.t
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+EXPECT_NOT "-1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep read_child |awk '{print $3}'`
+TEST $CLI volume set $V0 choose-local off
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "-1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep read_child |awk '{print $3}'`
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1493415-gfid-heal.t b/tests/bugs/replicate/bug-1493415-gfid-heal.t
new file mode 100644
index 00000000000..8a79febf4b4
--- /dev/null
+++ b/tests/bugs/replicate/bug-1493415-gfid-heal.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST $CLI volume set $V0 self-heal-daemon off
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+
+#------------------------------------------------------------------------------#
+TEST touch $M0/f1
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/f1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+
+# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
+# brick crash at the point where file got created but no xattrs were set.
+TEST setfattr -x trusted.gfid $B0/${V0}1/f1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
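+# The gfid removal can be confirmed manually; the following query should now
+# fail with "No such attribute":
+#   getfattr -n trusted.gfid -e hex $B0/${V0}1/f1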
+
+# storage/posix treats a file without a gfid that was changed less than a
+# second ago as non-existent, so we need to wait for a second to force posix
+# to treat this as a valid file that merely lacks a gfid.
+sleep 2
+
+# Assume there were no pending xattrs on parent dir due to 1st brick crashing
+# too. Then name heal from client must heal the gfid.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST stat $M0/f1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/f1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+#------------------------------------------------------------------------------#
+TEST mkdir $M0/dir
+TEST touch $M0/dir/f2
+gfid_f2=$(gf_get_gfid_xattr $B0/${V0}0/dir/f2)
+gfid_str_f2=$(gf_gfid_xattr_to_str $gfid_f2)
+
+# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
+# brick crash at the point where file got created but no xattrs were set.
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/f2
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+#Now simulate setting of pending entry xattr on parent dir of 1st brick.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir
+create_brick_xattrop_entry $B0/${V0}0 dir
+
+# storage/posix treats a file without a gfid that was changed less than a
+# second ago as non-existent, so we need to wait for a second to force posix
+# to treat this as a valid file that merely lacks a gfid.
+sleep 2
+
+#Trigger entry-heal via shd
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_f2" gf_get_gfid_xattr $B0/${V0}1/dir/f2
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+#------------------------------------------------------------------------------#
+cleanup;
diff --git a/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t b/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t
new file mode 100644
index 00000000000..2b3b3040228
--- /dev/null
+++ b/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+TESTS_EXPECTED_IN_LOOP=21
+function reset_cluster
+{
+ cleanup
+ TEST glusterd
+ TEST pidof glusterd
+
+}
+function check_iot_option
+{
+ local enabled=$1
+ local is_loaded_in_graph=$2
+
+ EXPECT "$enabled" volume_get_field $V0 client-io-threads
+ IOT_STRING="volume\ $V0-io-threads"
+ grep "$IOT_STRING" $GLUSTERD_WORKDIR/vols/$V0/trusted-$V0.tcp-fuse.vol
+ TEST ret=$?
+ EXPECT_NOT "$is_loaded_in_graph" echo $ret
+}
+
+reset_cluster
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+check_iot_option on 1
+
+reset_cluster
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+check_iot_option off 0
+
+reset_cluster
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}1
+check_iot_option off 0
+TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/${V0}1 force
+check_iot_option on 1
+
+reset_cluster
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..5}
+TEST $CLI volume set $V0 client-io-threads on
+check_iot_option on 1
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}2 $H0:$B0/${V0}5 force
+check_iot_option on 1
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1539358-split-brain-detection.t b/tests/bugs/replicate/bug-1539358-split-brain-detection.t
new file mode 100755
index 00000000000..7b71a7a9e7d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1539358-split-brain-detection.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+###############################################################################
+# Case of 2 bricks blaming the third and the third blaming the other two.
+
+TEST `echo "hello" >> $M0/file`
+
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST `echo "append" >> $M0/file`
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/file trusted.afr.$V0-client-1 data
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-1 data
+CLIENT_MD5=$(md5sum $M0/file | cut -d\ -f1)
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file
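+# (The 24-hex-digit changelog value packs three big-endian 32-bit counters --
+# data, metadata and entry pending counts -- so 0x000000010000000000000000
+# above records one pending data operation against the blamed brick.)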
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$CLIENT_MD5" == "$B0_MD5" ]
+TEST [ "$CLIENT_MD5" == "$B1_MD5" ]
+TEST [ "$CLIENT_MD5" == "$B2_MD5" ]
+
+TEST rm $M0/file
+
+###############################################################################
+# Case of each brick blaming the next one in a cyclic manner
+
+TEST `echo "hello" >> $M0/file`
+# Mark cyclic xattrs and modify file content directly on the bricks.
+TEST $CLI volume set $V0 self-heal-daemon off
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/$V0"0"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"0"/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"2"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"2"/file
+
+TEST `echo "ab" >> $B0/$V0"0"/file`
+TEST `echo "cdef" >> $B0/$V0"1"/file`
+TEST `echo "ghi" >> $B0/$V0"2"/file`
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
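+# The index entry created above is just a hard link to the base xattrop file,
+# so listing the xattrop directory now shows the file's gfid next to the base
+# entry (a quick manual check):
+ls $xattrop_dir0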
+
+# Launch heal
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$B0_MD5" == "$B1_MD5" ]
+TEST [ "$B0_MD5" == "$B2_MD5" ]
+###############################################################################
+cleanup
diff --git a/tests/bugs/replicate/bug-1561129-enospc.t b/tests/bugs/replicate/bug-1561129-enospc.t
new file mode 100644
index 00000000000..1b402fcc781
--- /dev/null
+++ b/tests/bugs/replicate/bug-1561129-enospc.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+#Tests that hitting ENOSPC in the AFR pre-op does not crash the client mount
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST truncate -s 128M $B0/xfs_image
+TEST mkfs.xfs -f $B0/xfs_image
+TEST mkdir $B0/bricks
+TEST mount -t xfs -o loop $B0/xfs_image $B0/bricks
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/bricks/brick{0,1,3}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Write 50MB of data, which will try to consume 50x3=150MB on $B0/bricks.
+# Before that, we hit ENOSPC in pre-op cbk, which should not crash the mount.
+TEST ! dd if=/dev/zero of=$M0/a bs=1M count=50
+TEST stat $M0/a
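+# The loop-backed brick filesystem should now be nearly full, which can be
+# confirmed with a plain df on the brick mount:
+df -h $B0/bricks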
+cleanup;
diff --git a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
new file mode 100644
index 00000000000..49c4dea4e9c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function create_files {
+ local i=1
+ while (true)
+ do
+ dd if=/dev/zero of=$M0/file$i bs=1M count=10
+ if [ -e $B0/${V0}0/file$i ] || [ -e $B0/${V0}1/file$i ]; then
+ ((i++))
+ else
+ break
+ fi
+ done
+ echo $i
+}
+
+TEST glusterd
+
+#Create brick partitions
+TEST truncate -s 100M $B0/brick0
+TEST truncate -s 100M $B0/brick1
+#Make the 3rd brick larger to test the scenario where the entry transaction
+#passes on only one brick and not on the others.
+TEST truncate -s 110M $B0/brick2
+LO1=`SETUP_LOOP $B0/brick0`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO1
+LO2=`SETUP_LOOP $B0/brick1`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO2
+LO3=`SETUP_LOOP $B0/brick2`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO3
+TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+TEST MOUNT_LOOP $LO1 $B0/${V0}0
+TEST MOUNT_LOOP $LO2 $B0/${V0}1
+TEST MOUNT_LOOP $LO3 $B0/${V0}2
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+i=$(create_files)
+TEST ! ls $B0/${V0}0/file$i
+TEST ! ls $B0/${V0}1/file$i
+TEST ls $B0/${V0}2/file$i
+dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}2)
+TEST [ "$dirty" != "000000000000000000000000" ]
+
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST rm -f $M0/file1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+UMOUNT_LOOP ${B0}/${V0}{0,1,2}
+rm -f ${B0}/brick{0,1,2}
+cleanup;
diff --git a/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t b/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
new file mode 100644
index 00000000000..c6e5459e9a8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function check_gfid_and_link_count
+{
+ local file=$1
+
+ file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
+ TEST [ ! -z $file_gfid_b0 ]
+ file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
+ file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
+ EXPECT $file_gfid_b0 echo $file_gfid_b1
+ EXPECT $file_gfid_b0 echo $file_gfid_b2
+
+ EXPECT "2" stat -c %h $B0/${V0}0/$file
+ EXPECT "2" stat -c %h $B0/${V0}1/$file
+ EXPECT "2" stat -c %h $B0/${V0}2/$file
+}
+TESTS_EXPECTED_IN_LOOP=30
+
+##############################################################################
+# Test on 1x3 volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+
+# Create files directly in the backend on different bricks
+echo $RANDOM >> $B0/${V0}0/file1
+echo $RANDOM >> $B0/${V0}1/file2
+echo $RANDOM >> $B0/${V0}2/file3
+
+# Wait so that storage/posix does not take the is_fresh_file code path for these gfid-less files
+sleep 2
+
+# Access them from mount to trigger name + gfid heal.
+TEST stat $M0/file1
+TEST stat $M0/file2
+TEST stat $M0/file3
+
+# Launch index heal to complete any pending data/metadata heals.
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check each file has a gfid and the .glusterfs hardlink
+check_gfid_and_link_count file1
+check_gfid_and_link_count file2
+check_gfid_and_link_count file3
+
+TEST rm $M0/file1
+TEST rm $M0/file2
+TEST rm $M0/file3
+cleanup;
+
+##############################################################################
+# Test on 1x (2+1) volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+
+# Create files directly in the backend on different bricks
+echo $RANDOM >> $B0/${V0}0/file1
+echo $RANDOM >> $B0/${V0}1/file2
+touch $B0/${V0}2/file3
+
+# Wait so that storage/posix does not take the is_fresh_file code path for these gfid-less files
+sleep 2
+
+# Access them from mount to trigger name + gfid heal.
+TEST stat $M0/file1
+TEST stat $M0/file2
+
+# Though the file is created on all 3 bricks, lookup will fail as the arbiter
+# blames the other 2 bricks and the arbiter is not 'readable'.
+TEST ! stat $M0/file3
+
+# Launch index heal to complete any pending data/metadata heals.
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Now file3 should be accessible from the mount.
+TEST stat $M0/file3
+
+# Check each file has a gfid and the .glusterfs hardlink
+check_gfid_and_link_count file1
+check_gfid_and_link_count file2
+check_gfid_and_link_count file3
+
+TEST rm $M0/file1
+TEST rm $M0/file2
+TEST rm $M0/file3
+cleanup;
diff --git a/tests/bugs/replicate/bug-1626994-info-split-brain.t b/tests/bugs/replicate/bug-1626994-info-split-brain.t
new file mode 100644
index 00000000000..86bfecb1a9e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1626994-info-split-brain.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+# Test to check that dirs having the dirty xattr do not show up in info split-brain.
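+# "info split-brain" refers to the CLI query 'gluster volume heal <VOLNAME> info
+# split-brain'; afr_get_split_brain_count (from afr.rc) is assumed to count the
+# entries that query reports.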
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+rm -f $M0/FILE
+EXPECT "1" count_index_entries $B0/${V0}0
+EXPECT "1" count_index_entries $B0/${V0}1
+EXPECT "1" count_index_entries $B0/${V0}2
+
+TEST mkdir $M0/dirty_dir
+TEST mkdir $M0/pending_dir
+
+# Set dirty xattrs on all bricks to simulate the case where the entry transaction
+# succeeded only in the pre-op phase.
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}0/dirty_dir
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/dirty_dir
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}2/dirty_dir
+create_brick_xattrop_entry $B0/${V0}0 dirty_dir
+# Should not show up as split-brain.
+EXPECT "0" afr_get_split_brain_count $V0
+
+# The replace/reset-brick case, where the new brick has the dirty xattr and the
+# other 2 bricks blame it, should not be reported as split-brain.
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}0
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}1
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}2
+create_brick_xattrop_entry $B0/${V0}0 "/"
+# Should not show up as split-brain.
+EXPECT "0" afr_get_split_brain_count $V0
+
+# Set pending xattrs on all bricks blaming each other to simulate the case of
+# entry split-brain.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/pending_dir
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}1/pending_dir
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}2/pending_dir
+create_brick_xattrop_entry $B0/${V0}0 pending_dir
+# Should show up as split-brain.
+EXPECT "1" afr_get_split_brain_count $V0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1637249-gfid-heal.t b/tests/bugs/replicate/bug-1637249-gfid-heal.t
new file mode 100644
index 00000000000..e824f14531e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1637249-gfid-heal.t
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+###############################################################################
+
+# Test for gfid + name heal when there is no 'source' brick, i.e. parent dir
+# xattrs are in split-brain or have dirty xattrs.
+
+TEST mkdir $M0/dir_pending
+TEST dd if=/dev/urandom of=$M0/dir_pending/file1 bs=1024 count=1024
+TEST mkdir $M0/dir_pending/dir11
+TEST mkdir $M0/dir_dirty
+TEST touch $M0/dir_dirty/file2
+
+# Set pending entry xattrs on dir_pending and remove gfid of entries under it on one brick.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir_pending
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/dir_pending
+
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/file1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_pending/file1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+gfid_d11=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/dir11)
+gfid_str_d11=$(gf_gfid_xattr_to_str $gfid_d11)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_pending/dir11
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+
+# Set dirty entry xattrs on dir_dirty and remove gfid of entries under it on one brick.
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/dir_dirty
+gfid_f2=$(gf_get_gfid_xattr $B0/${V0}0/dir_dirty/file2)
+gfid_str_f2=$(gf_gfid_xattr_to_str $gfid_f2)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_dirty/file2
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+# Create a file under dir_pending directly on the backend only on 1 brick
+TEST touch $B0/${V0}1/dir_pending/file3
+
+# Create a file under dir_pending directly on the backend on all bricks
+TEST touch $B0/${V0}0/dir_pending/file4
+TEST touch $B0/${V0}1/dir_pending/file4
+
+# Stop & start the volume and mount client again.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/dir_pending/file1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/dir_pending/file1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+TEST stat $M0/dir_pending/dir11
+EXPECT "$gfid_d11" gf_get_gfid_xattr $B0/${V0}1/dir_pending/dir11
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+
+TEST stat $M0/dir_dirty/file2
+EXPECT "$gfid_f2" gf_get_gfid_xattr $B0/${V0}1/dir_dirty/file2
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+TEST stat $M0/dir_pending/file3 # This assigns a gfid on the 2nd brick and heals the entry onto the 1st brick.
+gfid_f3=$(gf_get_gfid_xattr $B0/${V0}1/dir_pending/file3)
+TEST [ ! -z "$gfid_f3" ]
+EXPECT "$gfid_f3" gf_get_gfid_xattr $B0/${V0}0/dir_pending/file3
+
+TEST stat $M0/dir_pending/file4
+gfid_f4=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/file4)
+TEST [ ! -z "$gfid_f4" ]
+EXPECT "$gfid_f4" gf_get_gfid_xattr $B0/${V0}1/dir_pending/file4
+###############################################################################
+
+# Test for gfid + name heal when all bricks are 'source', i.e. parent dir
+# does not have any pending or dirty xattrs.
+
+TEST mkdir $M0/dir_clean
+TEST dd if=/dev/urandom of=$M0/dir_clean/file1 bs=1024 count=1024
+TEST mkdir $M0/dir_clean/dir11
+
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/file1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_clean/file1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+gfid_d11=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/dir11)
+gfid_str_d11=$(gf_gfid_xattr_to_str $gfid_d11)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_clean/dir11
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+# Create a file under dir_clean directly on the backend only on 1 brick
+TEST touch $B0/${V0}1/dir_clean/file3
+
+# Create a file under dir_clean directly on the backend on all bricks
+TEST touch $B0/${V0}0/dir_clean/file4
+TEST touch $B0/${V0}1/dir_clean/file4
+
+# Stop & start the volume and mount client again.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/dir_clean/file1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/dir_clean/file1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+TEST stat $M0/dir_clean/dir11
+EXPECT "$gfid_d11" gf_get_gfid_xattr $B0/${V0}1/dir_clean/dir11
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+TEST stat $M0/dir_clean/file3 # This assigns a gfid on the 2nd brick and heals the entry onto the 1st brick.
+gfid_f3=$(gf_get_gfid_xattr $B0/${V0}1/dir_clean/file3)
+TEST [ ! -z "$gfid_f3" ]
+EXPECT "$gfid_f3" gf_get_gfid_xattr $B0/${V0}0/dir_clean/file3
+
+TEST stat $M0/dir_clean/file4
+gfid_f4=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/file4)
+TEST [ ! -z "$gfid_f4" ]
+EXPECT "$gfid_f4" gf_get_gfid_xattr $B0/${V0}1/dir_clean/file4
+###############################################################################
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
new file mode 100644
index 00000000000..d7d1f285e01
--- /dev/null
+++ b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+# Test to check that data self-heal does not leave any stale lock.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Create base entry in indices/xattrop
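+# (index entries for files needing heal are created as hard links to this
+# per-brick base entry)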
+echo "Data" > $M0/FILE
+
+# Kill arbiter brick and write to FILE.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "arbiter down" >> $M0/FILE
+EXPECT 2 get_pending_heal_count $V0
+
+# Bring it back up and let heal complete.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Write to FILE must succeed; a stale lock left behind by the heal would block it.
+echo "this must succeed" >> $M0/FILE
+TEST [ $? -eq 0 ]
+cleanup;
diff --git a/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t b/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t
new file mode 100644
index 00000000000..63f72e86bf6
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+cd $M0
+TEST mkdir dir
+
+#Create metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 757 dir
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST chmod 747 dir
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#Use size as fav-child policy.
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+
+#Enable shd and heal the file.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+
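+# The directory must remain in metadata split-brain: the 'size' policy cannot
+# pick a source for a directory, so each brick still blames the other.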
+b1c1dir=$(afr_get_specific_changelog_xattr $B0/${V0}0/dir \
+ trusted.afr.$V0-client-1 "metadata")
+b2c0dir=$(afr_get_specific_changelog_xattr $B0/${V0}1/dir \
+ trusted.afr.$V0-client-0 "metadata")
+
+EXPECT "00000001" echo $b1c1dir
+EXPECT "00000001" echo $b2c0dir
+
+#Finish up
+TEST force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t b/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t
new file mode 100755
index 00000000000..319736e1157
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+#Test the split-brain resolution CLI commands.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/file
+
+############ Healing using favorite-child-policy = size and size of bricks is same #################
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+#File is still in split-brain, so reads must fail
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT_NOT "^0$" echo $?
+
+#Both bricks have the file with the same size, so the 'size' policy cannot pick a source and the file remains in split-brain
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT_NOT "^0$" echo $?
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup
+
diff --git a/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t b/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t
new file mode 100644
index 00000000000..783016dc3c0
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+# Conversion from 2x1 to 2x3
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST mkdir $M0/dir
+TEST dd if=/dev/urandom of=$M0/dir/file bs=100K count=5
+file_md5sum=$(md5sum $M0/dir/file | awk '{print $1}')
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{2..5}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5
+
+# Trigger heal and wait for it to complete
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check whether the directory & file are healed to the newly added bricks
+TEST ls $B0/${V0}2/dir
+TEST ls $B0/${V0}3/dir
+TEST ls $B0/${V0}4/dir
+TEST ls $B0/${V0}5/dir
+
+TEST [ $file_md5sum == $(md5sum $B0/${V0}4/dir/file | awk '{print $1}') ]
+TEST [ $file_md5sum == $(md5sum $B0/${V0}5/dir/file | awk '{print $1}') ]
+
+
+# Conversion from 2x1 to 2x(2+1)
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0,1}
+EXPECT 'Created' volinfo_field $V1 'Status';
+TEST $CLI volume start $V1
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+
+TEST $GFS --volfile-id=$V1 --volfile-server=$H0 $M1;
+TEST mkdir $M1/dir
+TEST dd if=/dev/urandom of=$M1/dir/file bs=100K count=5
+file_md5sum=$(md5sum $M1/dir/file | awk '{print $1}')
+
+TEST $CLI volume add-brick $V1 replica 3 arbiter 1 $H0:$B0/${V1}{2..5}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}5
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 5
+
+# Trigger heal and wait for it to complete
+TEST $CLI volume heal $V1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V1
+
+# Check whether the directory & file are healed to the newly added bricks
+TEST ls $B0/${V1}2/dir
+TEST ls $B0/${V1}3/dir
+TEST ls $B0/${V1}4/dir
+TEST ls $B0/${V1}5/dir
+
+EXPECT "0" stat -c %s $B0/${V1}5/dir/file
+TEST [ $file_md5sum == $(md5sum $B0/${V1}4/dir/file | awk '{print $1}') ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t b/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t
new file mode 100644
index 00000000000..b180f0e1239
--- /dev/null
+++ b/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST mkdir $M0/dir
+TEST "echo abc > $M0/file1"
+TEST "echo uvw > $M0/file2"
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST "echo def > $M0/file1"
+TEST "echo xyz > $M0/file2"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+# Rename file1 and read it. Read should be served from the 3rd brick
+TEST mv $M0/file1 $M0/file3
+EXPECT "def" cat $M0/file3
+
+# Create a link to file2 and read it. Read should be served from the 3rd brick
+TEST ln $M0/file2 $M0/dir/file4
+EXPECT "xyz" cat $M0/dir/file4
+EXPECT "xyz" cat $M0/file2
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
new file mode 100644
index 00000000000..78581e99614
--- /dev/null
+++ b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+CHANGELOG_PATH_0="$B0/${V0}2/.glusterfs/changelogs"
+ROLLOVER_TIME=100
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST dd if=/dev/zero of=$M0/file1 bs=128K count=5
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST $CLI volume profile $V0 info
+truncate_count=$($CLI volume profile $V0 info | grep TRUNCATE | awk '{count += $8} END {print count}')
+
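+# Exactly one TRUNCATE must have been wound (by shd, on the arbiter brick), and
+# the arbiter's changelog must record it as a data ('D') operation.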
+EXPECT "1" echo $truncate_count
+EXPECT "1" check_changelog_op ${CHANGELOG_PATH_0} "^ D "
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1696599-io-hang.t b/tests/bugs/replicate/bug-1696599-io-hang.t
new file mode 100755
index 00000000000..869cdb94bda
--- /dev/null
+++ b/tests/bugs/replicate/bug-1696599-io-hang.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+#Tests that local structures in afr are removed from granted/blocked list of
+#locks when inodelk fails on all bricks
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.client-io-threads off
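+# Delay every finodelk on the bricks by 5000000us (roughly 5s) so that the
+# write below is still waiting for locks when the volume is stopped.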
+TEST $CLI volume set $V0 delay-gen locks
+TEST $CLI volume set $V0 delay-gen.delay-duration 5000000
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.enable finodelk
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file
+#Trigger write and stop bricks so inodelks fail on all bricks leading to
+#lock failure condition
+echo abc >> $M0/file &
+
+TEST $CLI volume stop $V0
+TEST $CLI volume reset $V0 delay-gen
+wait
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 2
+#Test that only one write succeeded; this verifies that delay-gen worked as
+#expected.
+echo abc >> $M0/file
+EXPECT "abc" cat $M0/file
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
new file mode 100644
index 00000000000..76d1f2170f2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
@@ -0,0 +1,136 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume heal $V0 disable
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+###############################################################################
+# Case of 2 bricks blaming the third and the third blaming the other two.
+
+TEST mkdir $M0/dir
+
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST setfattr -n user.metadata -v 1 $M0/dir
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/dir trusted.afr.$V0-client-1 metadata
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/dir trusted.afr.$V0-client-1 metadata
+CLIENT_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $M0/dir)
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"1"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir)
+
+TEST [ "$CLIENT_XATTR" == "$B0_XATTR" ]
+TEST [ "$CLIENT_XATTR" == "$B1_XATTR" ]
+TEST [ "$CLIENT_XATTR" == "$B2_XATTR" ]
+TEST setfattr -x user.metadata $M0/dir
+
+###############################################################################
+# Case of each brick blaming the next one in a cyclic manner
+
+TEST $CLI volume heal $V0 disable
+TEST `echo "hello" >> $M0/dir/file`
+# Mark cyclic xattrs and modify metadata directly on the bricks.
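+# B0 blames B1, B1 blames B2 and B2 blames B0.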
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000100000000 $B0/$V0"0"/dir/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"2"/dir/file
+
+setfattr -n user.metadata -v 1 $B0/$V0"0"/dir/file
+setfattr -n user.metadata -v 2 $B0/$V0"1"/dir/file
+setfattr -n user.metadata -v 3 $B0/$V0"2"/dir/file
+
+# Add entry to xattrop dir to trigger index heal.
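+# (the xattrop dir holds a base entry; hard-linking a gfid-named entry to it
+# makes the index crawl pick that gfid up for heal)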
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/file))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir/file)
+
+TEST [ "$B0_XATTR" == "$B1_XATTR" ]
+TEST [ "$B0_XATTR" == "$B2_XATTR" ]
+TEST rm -f $M0/dir/file
+
+###############################################################################
+# Case where 2 bricks are blamed with quorum and the remaining brick is blamed by only one brick.
+
+TEST $CLI volume heal $V0 disable
+TEST `echo "hello" >> $M0/dir/file`
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST setfattr -n user.metadata -v 1 $M0/dir/file
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/dir/file trusted.afr.$V0-client-1 metadata
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/dir/file trusted.afr.$V0-client-1 metadata
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+
+# B0 must blame B2
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"0"/dir/file
+
+# Modify the metadata directly on the bricks B1 & B2.
+setfattr -n user.metadata -v 2 $B0/$V0"1"/dir/file
+setfattr -n user.metadata -v 3 $B0/$V0"2"/dir/file
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir/file)
+
+TEST [ "$B0_XATTR" == "$B1_XATTR" ]
+TEST [ "$B0_XATTR" == "$B2_XATTR" ]
+
+###############################################################################
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
new file mode 100644
index 00000000000..0aeaaafc84c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+
+##########################################################################################
+# The .glusterfs link file and the gfid xattr are missing on one brick, and all the bricks are being blamed.
+
+TEST touch $M0/dir/file
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+#B0 and B2 must blame B1
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on one brick.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 seconds to force posix to consider that this is a valid file but
+# without a gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail: the file is only missing its gfid xattr and .glusterfs
+# link file, which is not an actual gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+rm -f $M0/dir/file
+
+
+###########################################################################################
+# The .glusterfs link file and the gfid xattr are missing on two bricks, and all the bricks are being blamed.
+
+TEST $CLI volume heal $V0 disable
+TEST touch $M0/dir/file
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+#B0 and B2 must blame B1
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on two bricks.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/file
+TEST rm -f $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 seconds to force posix to consider that this is a valid file but
+# without a gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail: the file is only missing its gfid xattr and .glusterfs
+# link file, which is not an actual gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}1/dir/file
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1728770-pass-xattrs.t b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
new file mode 100644
index 00000000000..159c4fcc6a1
--- /dev/null
+++ b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
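+# Echoes the exit status of the last fop (touch); this is expected to become 1
+# once the disk starts returning errors.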
+function fop_on_bad_disk {
+ local path=$1
+ mkdir $path/dir{1..1000} 2>/dev/null
+ mv $path/dir1 $path/newdir
+ touch $path/foo.txt
+ echo $?
+}
+
+function ls_fop_on_bad_disk {
+ local path=$1
+ ls $path
+ echo $?
+}
+
+TEST init_n_bricks 6;
+TEST setup_lvm 6;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3 $H0:$L4 $H0:$L5 $H0:$L6;
+TEST $CLI volume set $V0 health-check-interval 1000;
+
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+#Corrupt the last disk by overwriting its backing LVM device so that fops on it start failing
+dd if=/dev/urandom of=/dev/mapper/patchy_snap_vg_6-brick_lvm bs=512K count=200 status=progress && sync
+
+
+# Test the disk is now returning EIO for touch and ls
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^1$" fop_on_bad_disk "$L6"
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^2$" ls_fop_on_bad_disk "$L6"
+
+TEST touch $M0/foo{1..100}
+TEST $CLI volume remove-brick $V0 replica 3 $H0:$L4 $H0:$L5 $H0:$L6 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$L4 $H0:$L5 $H0:$L6";
+
+#Check that the remove-brick status does not report any failed or skipped files
+var=`$CLI volume remove-brick $V0 $H0:$L4 $H0:$L5 $H0:$L6 status | grep completed`
+TEST [ `echo $var | awk '{print $5}'` = "0" ]
+TEST [ `echo $var | awk '{print $6}'` = "0" ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
new file mode 100644
index 00000000000..14dfae89135
--- /dev/null
+++ b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function time_stamps_match {
+ path=$1
+ mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
+ atime_source_b0=$(get_atime $B0/${V0}0/$path)
+ mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
+ atime_source_b2=$(get_atime $B0/${V0}2/$path)
+ mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
+ atime_sink_b1=$(get_atime $B0/${V0}1/$path)
+
+ #The same brick must be the source of heal for both atime and mtime.
+ if [[ ( $mtime_source_b0 -eq $mtime_sink_b1 && $atime_source_b0 -eq $atime_sink_b1 ) || \
+ ( $mtime_source_b2 -eq $mtime_sink_b1 && $atime_source_b2 -eq $atime_sink_b1 ) ]]
+ then
+ echo "Y"
+ else
+ echo "Mtimes: $mtime_source_b0:$mtime_sink_b1:$mtime_source_b2 Atimes: $atime_source_b0:$atime_sink_b1:$atime_source_b2"
+ fi
+
+}
+
+function mtimes_match {
+ path=$1
+ mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
+ mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
+ mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
+
+ if [[ ( $mtime_source_b0 -eq $mtime_sink_b1) || \
+ ( $mtime_source_b2 -eq $mtime_sink_b1) ]]
+ then
+ echo "Y"
+ else
+ echo "Mtimes: $mtime_source_b0:$mtime_sink_b1:$mtime_source_b2"
+ fi
+
+}
+
+# Test that the parent dir's timestamps are restored during entry-heal.
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
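+# (the get_mtime/get_atime/get_ctime helpers used below read these timestamps
+# via this utility from the trusted.glusterfs.mdata xattr)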
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+###############################################################################
+TEST mkdir $M0/DIR
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/DIR/FILE
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+EXPECT "Y" time_stamps_match DIR
+ctime_source1=$(get_ctime $B0/${V0}0/$path)
+ctime_source2=$(get_ctime $B0/${V0}2/$path)
+ctime_sink=$(get_ctime $B0/${V0}1/$path)
+TEST [ $ctime_source1 -eq $ctime_sink ]
+TEST [ $ctime_source2 -eq $ctime_sink ]
+
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+TEST mkdir $M0/DIR2
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/DIR2/FILE
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+#Parallel heals may change the atime after the heal completes, so test
+#only the mtime.
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+EXPECT "Y" mtimes_match DIR2
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+cleanup;
diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
new file mode 100644
index 00000000000..011535066f9
--- /dev/null
+++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+function get_cumulative_opendir_count {
+#The sed command prints the content between the 'Cumulative' and 'Interval' headers, keeping only the Cumulative stats
+ $CLI volume profile $V0 info |sed -n '/^Cumulative/,/^Interval/p'|grep OPENDIR| awk '{print $8}'|tr -d '\n'
+}
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST ! $CLI volume heal $V0
+
+# Enable shd and verify that index crawl is triggered immediately.
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info clear
+TEST $CLI volume heal $V0 enable
+# Each brick does 4 opendirs, one each for the dirty, xattrop, entry-changes and anonymous-inode directories, so the concatenated per-brick counts read '444'.
+EXPECT_WITHIN 4 "^444$" get_cumulative_opendir_count
+
+# Check that a change in heal-timeout is honoured immediately.
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+sleep 10
+# Two more crawls (of 4 opendirs each) must have happened, taking each brick's count to 12.
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^121212$" get_cumulative_opendir_count
+
+# shd must not heal if it is disabled and heal-timeout is changed.
+TEST $CLI volume heal $V0 disable
+#Wait for configuration update and any opendir fops to complete
+sleep 10
+TEST $CLI volume profile $V0 info clear
+TEST $CLI volume set $V0 cluster.heal-timeout 6
+#Better to wait for more than 6 seconds to account for configuration updates
+sleep 10
+COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
+TEST [ -z $COUNT ]
+cleanup;
diff --git a/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
new file mode 100644
index 00000000000..96279084065
--- /dev/null
+++ b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
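+# Verify that the file has the same gfid xattr on all three bricks and that
+# each brick's copy has a link count of 2 (the file itself plus its .glusterfs
+# hard link).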
+function check_gfid_and_link_count
+{
+ local file=$1
+
+ file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
+ TEST [ ! -z $file_gfid_b0 ]
+ file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
+ file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
+ EXPECT $file_gfid_b0 echo $file_gfid_b1
+ EXPECT $file_gfid_b0 echo $file_gfid_b2
+
+ EXPECT "2" stat -c %h $B0/${V0}0/$file
+ EXPECT "2" stat -c %h $B0/${V0}1/$file
+ EXPECT "2" stat -c %h $B0/${V0}2/$file
+}
+TESTS_EXPECTED_IN_LOOP=18
+
+################################################################################
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST `echo "File 1 " > $M0/dir/file1`
+TEST touch $M0/dir/file{2..4}
+
+# Remove file2 from 1st & 3rd bricks
+TEST rm -f $B0/$V0"0"/dir/file2
+TEST rm -f $B0/$V0"2"/dir/file2
+
+# Remove file3 and the .glusterfs hardlink from 1st & 2nd bricks
+gfid_file3=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file3)
+gfid_str_file3=$(gf_gfid_xattr_to_str $gfid_file3)
+TEST rm $B0/$V0"0"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm $B0/$V0"1"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm -f $B0/$V0"0"/dir/file3
+TEST rm -f $B0/$V0"1"/dir/file3
+
+# Remove the .glusterfs hardlink and the gfid xattr of file4 on 3rd brick
+gfid_file4=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file4)
+gfid_str_file4=$(gf_gfid_xattr_to_str $gfid_file4)
+TEST rm $B0/$V0"2"/.glusterfs/${gfid_str_file4:0:2}/${gfid_str_file4:2:2}/$gfid_str_file4
+TEST setfattr -x trusted.gfid $B0/$V0"2"/dir/file4
+
+# B0 and B2 blame each other
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir on first brick.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# All the files must be present on all the bricks after conservative merge and
+# should have the gfid xattr and the .glusterfs hardlink.
+check_gfid_and_link_count dir/file1
+check_gfid_and_link_count dir/file2
+check_gfid_and_link_count dir/file3
+check_gfid_and_link_count dir/file4
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
new file mode 100644
index 00000000000..c1bdf34ee6d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard enable
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Create split-brain by setting afr xattrs/gfids manually.
+#file1 is non-sharded and will be in data split-brain.
+#file2 will have one shard which will be in data split-brain.
+#file3 will have one shard which will be in gfid split-brain.
+#file4 will have one shard which will be in data & metadata split-brain.
+TEST dd if=/dev/zero of=$M0/file1 bs=1024 count=1024 oflag=direct
+TEST dd if=/dev/zero of=$M0/file2 bs=1M count=6 oflag=direct
+TEST dd if=/dev/zero of=$M0/file3 bs=1M count=6 oflag=direct
+TEST dd if=/dev/zero of=$M0/file4 bs=1M count=6 oflag=direct
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#-------------------------------------------------------------------------------
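+# Each trusted.afr.* value encodes the pending data/metadata/entry counts as
+# three 4-byte counters; 0x000000010000000000000000 marks one pending data
+# operation blaming the named client.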
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/file1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/file1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/file1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/file1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/file1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/file1
+
+#-------------------------------------------------------------------------------
+gfid_f2=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file2))
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
+
+#-------------------------------------------------------------------------------
+TESTS_EXPECTED_IN_LOOP=5
+function assign_new_gfid {
+ brickpath=$1
+ filename=$2
+ gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/$filename))
+ gfid_shard=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/.shard/$gfid.1))
+
+ TEST rm $brickpath/.glusterfs/${gfid_shard:0:2}/${gfid_shard:2:2}/$gfid_shard
+ TEST setfattr -x trusted.gfid $brickpath/.shard/$gfid.1
+ new_gfid=$(get_random_gfid)
+ new_gfid_str=$(gf_gfid_xattr_to_str $new_gfid)
+ TEST setfattr -n trusted.gfid -v $new_gfid $brickpath/.shard/$gfid.1
+ TEST mkdir -p $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}
+ TEST ln $brickpath/.shard/$gfid.1 $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}/$new_gfid_str
+}
+assign_new_gfid $B0/$V0"1" file3
+assign_new_gfid $B0/$V0"2" file3
+
+#-------------------------------------------------------------------------------
+gfid_f4=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file4))
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
+
+#-------------------------------------------------------------------------------
+#Add entry to xattrop dir on first brick and check for split-brain.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+
+gfid_f1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f1
+
+gfid_f2_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f2.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f2_shard1
+
+gfid_f3=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file3))
+gfid_f3_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f3.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f3_shard1
+
+gfid_f4_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f4.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f4_shard1
+
+#-------------------------------------------------------------------------------
+#gfid split-brain won't show up in split-brain count.
+EXPECT "3" afr_get_split_brain_count $V0
+EXPECT_NOT "^0$" get_pending_heal_count $V0
+
+#Resolve split-brains
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /file1
+GFIDSTR="gfid:$gfid_f2_shard1"
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 $GFIDSTR
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f3.1
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f4.1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
new file mode 100644
index 00000000000..7e24eaec03d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0..2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch $M0/a
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/a
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime0=$(get_mtime $B0/brick0/a)
+mtime1=$(get_mtime $B0/brick1/a)
+TEST [ $mtime0 -eq $mtime1 ]
+
+ctime0=$(get_ctime $B0/brick0/a)
+ctime1=$(get_ctime $B0/brick1/a)
+TEST [ $ctime0 -eq $ctime1 ]
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+
+TEST touch $M0/b
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/b
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime2=$(get_mtime $B0/brick0/b)
+mtime3=$(get_mtime $B0/brick1/b)
+TEST [ $mtime2 -eq $mtime3 ]
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1801624-entry-heal.t b/tests/bugs/replicate/bug-1801624-entry-heal.t
new file mode 100644
index 00000000000..94b465181fa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1801624-entry-heal.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+TEST $CLI volume set $V0 heal-timeout 5
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Re-create the file when a brick is down.
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST rm $M0/FILE
+echo "New Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
+
+# Launching index heal must not reset parent dir afr xattrs or remove granular entry indices.
+$CLI volume heal $V0 # CLI will fail but heal is launched anyway.
+TEST sleep 5 # give index heal a chance to do one run.
+brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick0/)
+brick2_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick2/)
+TEST [ $brick0_pending -eq "000000000000000000000002" ]
+TEST [ $brick2_pending -eq "000000000000000000000002" ]
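+# 00000000-0000-0000-0000-000000000001 is the gfid of the root directory; its
+# granular entry-changes index must still list FILE as needing heal.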
+EXPECT "FILE" ls $B0/brick0/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+EXPECT "FILE" ls $B0/brick2/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+$CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# No gfid-split-brain (i.e. EIO) must be seen. Try on fresh mount to avoid cached values.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST cat $M0/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-765564.t b/tests/bugs/replicate/bug-765564.t
new file mode 100644
index 00000000000..098d225018f
--- /dev/null
+++ b/tests/bugs/replicate/bug-765564.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+TEST $CLI volume start $V0;
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+#returns success if 'olddir' is absent
+#'olddir' must be absent in both replicas
+function rm_succeeded () {
+ local dir1=$1
+ [[ -d $H0:$B0/${V0}-0/$dir1 || -d $H0:$B0/${V0}-1/$dir1 ]] && return 0
+ return 1
+}
+
+# returns successes if 'newdir' is present
+#'newdir' must be present in both replicas
+function mv_succeeded () {
+ local dir1=$1
+ [[ -d $H0:$B0/${V0}-0/$dir1 && -d $H0:$B0/${V0}-1/$dir1 ]] && return 1
+ return 0
+}
+
+# returns zero on success
+# Only one of rm and mv can succeed. This is captured by the XOR below
+
+function chk_backend_consistency(){
+ local dir1=$1
+ local dir2=$2
+ local rm_status=rm_succeeded $dir1
+ local mv_status=mv_succeeded $dir2
+ [[ ( $rm_status && ! $mv_status ) || ( ! $rm_status && $mv_status ) ]] && return 0
+ return 1
+}
+
+#concurrent removal/rename of dirs
+function rm_mv_correctness () {
+ ret=0
+ for i in {1..100}; do
+ mkdir $M0/"dir"$i
+ rmdir $M0/"dir"$i &
+ mv $M0/"dir"$i $M0/"adir"$i &
+ wait
+ tmp_ret=$(chk_backend_consistency "dir"$i "adir"$i)
+ (( ret += tmp_ret ))
+ rm -rf $M0/"dir"$i
+ rm -rf $M0/"adir"$i
+ done
+ return $ret
+}
+
+TEST touch $M0/a;
+TEST mv $M0/a $M0/b;
+
+#test rename fop when one of the bricks is down
+kill_brick ${V0} ${H0} ${B0}/${V0}-1;
+TEST touch $M0/h;
+TEST mv $M0/h $M0/1;
+
+TEST $CLI volume start $V0 force;
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
+find $M0 2>/dev/null 1>/dev/null;
+find $M0 | xargs stat 2>/dev/null 1>/dev/null;
+
+TEST rm_mv_correctness;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
+
diff --git a/tests/bugs/replicate/bug-767585-gfid.t b/tests/bugs/replicate/bug-767585-gfid.t
new file mode 100755
index 00000000000..4176aabb544
--- /dev/null
+++ b/tests/bugs/replicate/bug-767585-gfid.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#Test cases to perform gfid-self-heal
+#file 'a' should be assigned a fresh gfid
+#file 'b' should be healed with gfid1 from brick1
+#file 'c' should be healed with gfid2 from brick2
+
+gfid1="0x8428b7193a764bf8be8046fb860b8993"
+gfid2="0x85ad91afa2f74694bf52c3326d048209"
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+touch $B0/${V0}0/a $B0/${V0}1/a
+touch $B0/${V0}0/b $B0/${V0}1/b
+touch $B0/${V0}0/c $B0/${V0}1/c
+
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/b
+TEST setfattr -n trusted.gfid -v $gfid2 $B0/${V0}1/c
+
+sleep 2
+
+TEST stat $M0/a
+TEST stat $M0/b
+TEST stat $M0/c
+
+TEST gf_get_gfid_xattr $B0/${V0}0/a
+TEST gf_get_gfid_xattr $B0/${V0}1/a
+
+EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}0/b
+EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}1/b
+
+EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}0/c
+EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}1/c
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-802417.t b/tests/bugs/replicate/bug-802417.t
new file mode 100755
index 00000000000..f213439401e
--- /dev/null
+++ b/tests/bugs/replicate/bug-802417.t
@@ -0,0 +1,120 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function write_file()
+{
+ path="$1"; shift
+ echo "$*" > "$path"
+}
+
+cleanup;
+
+#####################################################
+# We currently do not trigger data heal unless all bricks of the replica are
+# up. We will need to modify this .t once the fix for preventing stale reads
+# from being served to clients for files in spurious split-brain is done. A
+# spurious split-brain here means the afr xattrs indicate split-brain but the
+# file actually is not in split-brain. Self-heal heals such files automatically,
+# but reads can be served before the heal completes, which needs fixing.
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+######################################################
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure io-cache and write-behind don't interfere.
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 cluster.quorum-type none
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Create a file with some recognizably stale data.
+TEST write_file $M0/a_file "old_data"
+
+## Kill two of the bricks and write some newer data.
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-1
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
+TEST write_file $M0/a_file "new_data"
+
+## Bring all the bricks up and kill one so we do a partial self-heal.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
+TEST dd if=${M0}/a_file of=/dev/null
+
+
+obs_path_0=${B0}/${V0}-0/a_file
+obs_path_1=${B0}/${V0}-1/a_file
+obs_path_2=${B0}/${V0}-2/a_file
+
+tgt_xattr_0="trusted.afr.${V0}-client-0"
+tgt_xattr_1="trusted.afr.${V0}-client-1"
+tgt_xattr_2="trusted.afr.${V0}-client-2"
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+EXPECT_WITHIN $HEAL_TIMEOUT "0x000000000000000000000000" \
+afr_get_changelog_xattr $obs_path_0 $tgt_xattr_1
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_2)
+EXPECT "0x000000030000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_2)
+EXPECT "0x000000010000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_2)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-821056.t b/tests/bugs/replicate/bug-821056.t
new file mode 100644
index 00000000000..81186d86309
--- /dev/null
+++ b/tests/bugs/replicate/bug-821056.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+touch $M0/a
+
+#Open file with fd as 5
+exec 5>$M0/a
+realpath=$(gf_get_gfid_backend_file_path $B0/${V0}0 "a")
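+# gf_get_gfid_backend_file_path (from volume.rc) resolves a file to its gfid
+# handle under the brick, roughly <brick>/.glusterfs/<xx>/<yy>/<gfid>, where
+# <xx> and <yy> are the first two byte-pairs of the gfid string; the checks
+# below look for this path among the brick process's open fds. A rough sketch
+# of that construction, assuming the gfid helpers used elsewhere in this file:
+function gfid_handle_of {
+        local gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $1/$2))
+        echo "$1/.glusterfs/${gfid:0:2}/${gfid:2:2}/$gfid"
+}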
+
+kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+EXPECT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+
+kill_brick $V0 $H0 $B0/${V0}0
+TEST gf_rm_file_and_gfid_link $B0/${V0}0 "a"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+ls -l $M0/a > /dev/null 2>&1 #Make sure the file is re-created
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
+
+for i in {1..1024}; do
+ echo "open sesame" >&5
+done
+
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+#close the fd
+exec 5>&-
+
+#Check that anon-fd based file is not leaking.
+EXPECT_WITHIN $REOPEN_TIMEOUT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+cleanup;
diff --git a/tests/bugs/replicate/bug-830665.t b/tests/bugs/replicate/bug-830665.t
new file mode 100755
index 00000000000..68180424803
--- /dev/null
+++ b/tests/bugs/replicate/bug-830665.t
@@ -0,0 +1,127 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+function recreate {
+ rm -rf $1 && mkdir -p $1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+recreate ${B0}/${V0}-0
+recreate ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+TEST $CLI volume set $V0 nfs.disable false
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+#EXPECT_WITHIN fails the test if the command it executes fails. This function
+#returns "" when the file doesn't exist
+function friendly_cat {
+ if [ ! -f $1 ];
+ then
+ echo "";
+ else
+ cat $1;
+ fi
+}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure stat-prefetch doesn't prevent self-heal checks.
+TEST $CLI volume set $V0 performance.stat-prefetch off;
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+## Mount NFS
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+## Create some files and directories
+echo "test_data" > $N0/a_file;
+mkdir $N0/a_dir;
+echo "more_test_data" > $N0/a_dir/another_file;
+
+## Unmount and stop the volume.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+TEST $CLI volume stop $V0;
+
+# Recreate the brick. Note that because of http://review.gluster.org/#change,4202
+# we need to preserve and restore the volume ID or else the brick (and thus the
+# entire not-very-HA-any-more volume) won't start. When that bug is fixed, we can
+# remove the [gs]etxattr calls.
+volid=$(getfattr -e hex -n trusted.glusterfs.volume-id $B0/${V0}-0 2> /dev/null \
+ | grep = | cut -d= -f2)
+rm -rf $B0/${V0}-0;
+mkdir $B0/${V0}-0;
+#Ideally, disk replacement is done using reset-brick or replace-brick gluster CLI
+#which will create .glusterfs folder.
+mkdir $B0/${V0}-0/.glusterfs && chmod 600 $B0/${V0}-0/.glusterfs
+
+setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0
+
+## Restart and remount. Note that we use actimeo=0 so that the stat calls
+## we need for self-heal don't get blocked by the NFS client.
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock,actimeo=0;
+
+## The Linux NFS client has a really charming habit of caching stuff right
+## after mount, even though we set actimeo=0 above. Life would be much easier
+## if NFS developers cared as much about correctness as they do about shaving
+## a few seconds off of benchmarks.
+ls -l $N0 &> /dev/null;
+sleep 5;
+
+## Force entry self-heal.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST gluster volume heal $V0 full
+#ls -lR $N0 > /dev/null;
+
+## Do NOT check through the NFS mount here. That will force a new self-heal
+## check, but we want to test whether self-heal already happened.
+
+## Make sure everything's in order on the recreated brick.
+EXPECT_WITHIN $HEAL_TIMEOUT 'test_data' friendly_cat $B0/${V0}-0/a_file;
+EXPECT_WITHIN $HEAL_TIMEOUT 'more_test_data' friendly_cat $B0/${V0}-0/a_dir/another_file;
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-859581.t b/tests/bugs/replicate/bug-859581.t
new file mode 100755
index 00000000000..d8b45a257a1
--- /dev/null
+++ b/tests/bugs/replicate/bug-859581.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --direct-io-mode=yes --use-readdirp=no --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+mkdir -p $M0/dir1/dir2
+
+TEST rm -f $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1")
+TEST rmdir $B0/${V0}1/dir1/dir2
+
+TEST stat $M0/dir1/dir2
+
+TEST [ -d $B0/${V0}1/dir1/dir2 ]
+TEST [ ! -d $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ]
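+# Note: for directories the .glusterfs gfid handle is a symlink (back into the
+# parent's gfid directory) rather than a hardlink, which is why the final
+# check in this test uses 'test -h' on the recreated handle.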
+
+# Stop the volume to flush caches and force symlink recreation
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+# Till now, protocol/server was not doing inode linking as part of readdirp.
+# But as part of the user-serviceable snapshots patch, changes were introduced
+# to do inode linking in protocol/server during readdirp. So now, to make sure
+# the gfid handle of dir1 is healed, an explicit lookup has to be sent on it.
+# Otherwise, when ls -l is done just on the mount point $M0, lookup is not
+# sent on the entries received as part of readdirp, because the inodes for
+# those entries were linked as part of readdirp itself. i.e. instead of doing
+# "ls -l $M0", it has to be the command below.
+ls -l $M0/dir1;
+
+TEST [ -h $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
+
diff --git a/tests/bugs/replicate/bug-865825.t b/tests/bugs/replicate/bug-865825.t
new file mode 100755
index 00000000000..ffb2e0f6437
--- /dev/null
+++ b/tests/bugs/replicate/bug-865825.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure io-cache and write-behind don't interfere.
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.quick-read off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Create a file with some recognizable contents.
+echo "test_data" > $M0/a_file;
+
+## Unmount.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+## Mess with the flags as though brick-0 accuses brick-2 while brick-1 is
+## missing its brick-2 changelog altogether.
+value=0x000000010000000000000000
+setfattr -n trusted.afr.${V0}-client-2 -v $value $B0/${V0}-0/a_file
+setfattr -x trusted.afr.${V0}-client-2 $B0/${V0}-1/a_file
+echo "wrong_data" > $B0/${V0}-2/a_file
+
+gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+gluster volume heal $V0 full
+
+## Make sure brick 2 now has the correct contents.
+EXPECT_WITHIN $HEAL_TIMEOUT "test_data" cat $B0/${V0}-2/a_file
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-880898.t b/tests/bugs/replicate/bug-880898.t
new file mode 100644
index 00000000000..660d34ca25f
--- /dev/null
+++ b/tests/bugs/replicate/bug-880898.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+pkill glusterfs
+uuid=""
+for line in $(cat $GLUSTERD_WORKDIR/glusterd.info)
+do
+ if [[ $line == UUID* ]]
+ then
+ uuid=`echo $line | sed -r 's/^.{5}//'`
+ fi
+done
+
+#Command execution should fail reporting that the bricks are not running.
+TEST ! $CLI volume heal $V0 info
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-884328.t b/tests/bugs/replicate/bug-884328.t
new file mode 100644
index 00000000000..acc8e542240
--- /dev/null
+++ b/tests/bugs/replicate/bug-884328.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST check_option_help_presence "cluster.quorum-type"
+TEST check_option_help_presence "cluster.quorum-count"
+cleanup;
diff --git a/tests/bugs/replicate/bug-886998.t b/tests/bugs/replicate/bug-886998.t
new file mode 100644
index 00000000000..bcac235ff09
--- /dev/null
+++ b/tests/bugs/replicate/bug-886998.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# This tests that the replicate trash directory (.landfill) has the following
+# properties.
+# Note: This is for backward compatibility with glusterfs 3.3.
+# In the latest releases this directory is present inside .glusterfs of the brick.
+# 1) lookup of the trash dir fails
+# 2) readdir does not show this directory
+# 3) Self-heal does not heal this directory.
+gfid1="0xc2e75dde97f346e7842d1076a8e699f8"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+
+TEST mkdir $B0/${V0}1/.landfill
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}1/.landfill
+TEST mkdir $B0/${V0}0/.landfill
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/.landfill
+
+TEST ! stat $M0/.landfill
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+
+TEST rmdir $B0/${V0}0/.landfill
+#Force a conservative merge and it should not create .landfill
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/
+
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/
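+# In the values above only the last 4 bytes (the entry counter) are non-zero,
+# so each brick's root directory blames the other for pending entry
+# operations -- the conservative-merge case, where afr would normally create
+# the union of entries on both bricks. Since .landfill is the special trash
+# directory, the merge must skip it, hence the checks below.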
+
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+TEST ! stat $B0/${V0}0/.landfill
+TEST stat $B0/${V0}1/.landfill
+
+#TEST that the dir is not deleted even when xattrs suggest to delete
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/
+
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/
+
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+TEST ! stat $B0/${V0}0/.landfill
+TEST stat $B0/${V0}1/.landfill
+cleanup;
diff --git a/tests/bugs/replicate/bug-888174.t b/tests/bugs/replicate/bug-888174.t
new file mode 100644
index 00000000000..8c70265513d
--- /dev/null
+++ b/tests/bugs/replicate/bug-888174.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This tests whether flush and fsync wake up the delayed post-op or not.
+#If they do not, the INODELK from the next command waits for
+#post-op-delay-secs. There would be a pending changelog even after the
+#command completes.
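+# The checks below read trusted.afr.dirty, which uses the same 12-byte
+# data/metadata/entry counter layout as the per-client changelog xattrs; an
+# all-zero value means the delayed post-op ran and no transaction was left
+# dirty on either brick.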
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
+
+TEST $CLI volume set $V0 cluster.eager-lock on
+
+TEST $CLI volume set $V0 performance.flush-behind off
+EXPECT "off" volume_option $V0 performance.flush-behind
+
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 3
+EXPECT "3" volume_option $V0 cluster.post-op-delay-secs
+
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0
+
+#Check that INODELK MAX latency is not in the order of seconds
+TEST gluster volume profile $V0 start
+for i in {1..5}
+do
+ echo hi > $M0/a
+done
+#Extract the MAX INODELK fop latency only if it is of the order of seconds
+#(profile latencies are in microseconds, so 7 or more digits means >= 1s).
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+
+TEST [ -z $inodelk_max_latency ]
+
+TEST dd of=$M0/a if=/dev/urandom bs=1024k count=10 conv=fsync
+#Check for no trace of pending changelog. Flush should make sure of it.
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty
+
+
+dd of=$M0/a if=/dev/urandom bs=1024k count=1024 2>/dev/null &
+p=$!
+#trigger graph switches, tests for fsync not leaving any pending flags
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+kill -TERM $p
+#wait for dd to exit
+wait > /dev/null 2>&1
+
+#Goal is to check that no permanent FOOL (i.e. dirty) changelog is left behind
+sleep 5
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-913051.t b/tests/bugs/replicate/bug-913051.t
new file mode 100644
index 00000000000..6794995e6fe
--- /dev/null
+++ b/tests/bugs/replicate/bug-913051.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup;
+
+#Test that afr re-opens the file on the bricks that were offline at the time
+#of the open, once those bricks come back online. This tests writev and readv
+#triggering the open-fd-fix in afr.
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST $GFS --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/a
+TEST touch $M0/dir/b
+echo abc > $M0/dir/b
+
+TEST wfd=`fd_available`
+TEST fd_open $wfd "w" $M0/dir/a
+TEST rfd=`fd_available`
+TEST fd_open $rfd "r" $M0/dir/b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#attempt self-heal so that the files are created on brick-0
+
+TEST dd if=$M0/dir/a of=/dev/null bs=1024k
+TEST dd if=$M0/dir/b of=/dev/null bs=1024k
+
+#The gfid handles exist on brick-0 only after the heal above, so resolve the
+#backend paths used by the reopen checks below.
+realpatha=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/a")
+realpathb=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/b")
+
+#trigger writev for attempting open-fd-fix in afr
+TEST fd_write $wfd "open sesame"
+
+#trigger readv for attempting open-fd-fix in afr
+TEST fd_cat $rfd
+
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha"
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb"
+
+TEST fd_close $wfd
+TEST fd_close $rfd
+cleanup;
diff --git a/tests/bugs/replicate/bug-916226.t b/tests/bugs/replicate/bug-916226.t
new file mode 100644
index 00000000000..893905f9a47
--- /dev/null
+++ b/tests/bugs/replicate/bug-916226.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir{1..10};
+TEST touch $M0/dir{1..10}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-918437-sh-mtime.t b/tests/bugs/replicate/bug-918437-sh-mtime.t
new file mode 100644
index 00000000000..6a194b14a9b
--- /dev/null
+++ b/tests/bugs/replicate/bug-918437-sh-mtime.t
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function get_mtime {
+ local f=$1
+ stat $f | grep Modify | awk '{print $2 $3}' | cut -f1 -d'.'
+}
+
+function file_exists {
+ if [ -f $1 ]; then echo "Y"; else echo "N"; fi
+}
+cleanup;
+
+## Tests if mtime is correct after self-heal.
+TEST glusterd
+TEST pidof glusterd
+TEST mkdir -p $B0/gfs0/brick0{1,2}
+TEST $CLI volume create $V0 replica 2 transport tcp $H0:$B0/gfs0/brick01 $H0:$B0/gfs0/brick02
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --direct-io-mode=enable
+# File 'a' is healed from brick02 to brick01, whereas file 'b' is healed from
+# brick01 to brick02.
+
+TEST cp -p /etc/passwd $M0/a
+TEST cp -p /etc/passwd $M0/b
+
+#Store mtimes before self-heals
+TEST modify_atstamp=$(get_mtime $B0/gfs0/brick02/a)
+TEST modify_btstamp=$(get_mtime $B0/gfs0/brick02/b)
+
+TEST $CLI volume stop $V0
+TEST gf_rm_file_and_gfid_link $B0/gfs0/brick01 a
+TEST gf_rm_file_and_gfid_link $B0/gfs0/brick02 b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+#TODO remove these 2 lines once heal-full is fixed in v2.
+TEST stat $M0/a
+TEST stat $M0/b
+
+TEST gluster volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick01/a
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick02/b
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+
+size=`stat -c '%s' /etc/passwd`
+EXPECT $size stat -c '%s' $B0/gfs0/brick01/a
+
+TEST modify_atstamp1=$(get_mtime $B0/gfs0/brick01/a)
+TEST modify_atstamp2=$(get_mtime $B0/gfs0/brick02/a)
+EXPECT $modify_atstamp echo $modify_atstamp1
+EXPECT $modify_atstamp echo $modify_atstamp2
+
+TEST modify_btstamp1=$(get_mtime $B0/gfs0/brick01/b)
+TEST modify_btstamp2=$(get_mtime $B0/gfs0/brick02/b)
+EXPECT $modify_btstamp echo $modify_btstamp1
+EXPECT $modify_btstamp echo $modify_btstamp2
+cleanup;
diff --git a/tests/bugs/replicate/bug-921231.t b/tests/bugs/replicate/bug-921231.t
new file mode 100755
index 00000000000..81504612f63
--- /dev/null
+++ b/tests/bugs/replicate/bug-921231.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test writes to the same file with 2 fds and checks that
+# cluster.eager-lock does not cause extra delay because of post-op-delay-secs
+cleanup;
+
+function write_to_file {
+ dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append 2>&1 >/dev/null
+}
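+# Two concurrent writers on the same file force the eager lock to be handed
+# over between the fds; with post-op-delay-secs set to 3, a regression here
+# would show up as multi-second [F]INODELK latencies in the profile output,
+# which is what the check at the end of this test looks for.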
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume set $V0 post-op-delay-secs 3
+TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 ensure-durability off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+write_to_file &
+write_to_file &
+wait
+#Extract the MAX [F]INODELK fop latency only if it is of the order of seconds
+#(profile latencies are in microseconds, so 7 or more digits means >= 1s).
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+TEST [ -z $inodelk_max_latency ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-957877.t b/tests/bugs/replicate/bug-957877.t
new file mode 100644
index 00000000000..bcce7e3c9e7
--- /dev/null
+++ b/tests/bugs/replicate/bug-957877.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/f1
+TEST setfattr -n "user.foo" -v "test" $M0/f1
+
+BRICK=$B0"/${V0}1"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+# Wait for self-heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $BRICK;
+
+TEST getfattr -n "user.foo" $B0/${V0}0/f1;
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-976800.t b/tests/bugs/replicate/bug-976800.t
new file mode 100644
index 00000000000..27f8b27619e
--- /dev/null
+++ b/tests/bugs/replicate/bug-976800.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test checks that there are no open fds left on the brick
+# after the file is closed on the mount. This particular test
+# uses dd with conv=fsync to exercise afr's fsync codepath.
+cleanup;
+
+function is_fd_open {
+ local v=$1
+ local h=$2
+ local b=$3
+ local bpid=$(get_brick_pid $v $h $b)
+ ls -l /proc/$bpid/fd | grep -w "\-> $b/1"
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 flush-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST dd of=$M0/1 if=/dev/zero bs=1k count=1 conv=fsync
+TEST ! is_fd_open $V0 $H0 $B0/${V0}0
+cleanup;
diff --git a/tests/bugs/replicate/bug-977797.t b/tests/bugs/replicate/bug-977797.t
new file mode 100755
index 00000000000..9a8f36c956c
--- /dev/null
+++ b/tests/bugs/replicate/bug-977797.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 open-behind off
+TEST $CLI volume set $V0 quick-read off
+TEST $CLI volume set $V0 read-ahead off
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 io-cache off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+
+TEST mkdir -p $M0/a
+TEST `echo "GLUSTERFS" > $M0/a/file`
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+TEST chown root $M0/a
+TEST chown root $M0/a/file
+TEST `echo "GLUSTER-FILE-SYSTEM" > $M0/a/file`
+TEST mkdir $M0/a/b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0;
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"2"
+
+TEST chmod 757 $M0/a
+TEST chmod 757 $M0/a/file
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
+
+#Trigger entry heal of $M0/a
+getfattr -n user.nosuchattr $M0/a
+dd if=$M0/a/file of=/dev/null bs=1024k
+#read fails, but heal is triggered.
+TEST [ $? -ne 0 ]
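+# afr_get_specific_changelog_xattr (a test-harness helper) extracts the
+# requested 8-hex-digit counter ("data" or "entry" here) from the 24-digit
+# changelog value, so the "00000000" expectations below assert that no
+# pending data or entry operations remain once self-heal has caught up.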
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a/file trusted.afr.$V0-client-0 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a/file trusted.afr.$V0-client-1 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a/file trusted.afr.$V0-client-0 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a/file trusted.afr.$V0-client-1 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a trusted.afr.$V0-client-0 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a trusted.afr.$V0-client-1 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a trusted.afr.$V0-client-0 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a trusted.afr.$V0-client-1 "entry"
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-978794.t b/tests/bugs/replicate/bug-978794.t
new file mode 100644
index 00000000000..8e43e74bf79
--- /dev/null
+++ b/tests/bugs/replicate/bug-978794.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+
+# This test opens 100 fds and triggers graph switches to check whether the
+# fsync done as part of the graph switch causes a crash.
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/{1..100}
+for i in {1..100}; do fd[$i]=`fd_available`; fd_open ${fd[$i]} 'w' $M0/$i; done
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{2,3}
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+TEST cat $M0/{1..100}
+for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5}
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done
+TEST cat $M0/{1..100}
+cleanup
diff --git a/tests/bugs/replicate/bug-979365.t b/tests/bugs/replicate/bug-979365.t
new file mode 100755
index 00000000000..c09c7d51772
--- /dev/null
+++ b/tests/bugs/replicate/bug-979365.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script checks that the ensure-durability option enables/disables afr
+#sending fsyncs
+cleanup;
+
+function num_fsyncs {
+ $CLI volume profile $V0 info | grep -w FSYNC | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability on
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd of=$M0/a if=/dev/zero bs=1024k count=10
+#fsyncs take a while to complete.
+sleep 5
+
+# There can be zero or more fsyncs, depending on the order
+# in which the writes reached the server, in turn deciding
+# whether they were treated as "appending" writes or not.
+
+TEST [[ $(num_fsyncs) -ge 0 ]]
+#Stop the volume to erase the profile info of old operations
+TEST $CLI volume profile $V0 stop
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+#Disable ensure-durability now to disable fsyncs in afr.
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume start $V0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST $CLI volume profile $V0 start
+TEST dd of=$M0/a if=/dev/zero bs=1024k count=10
+#fsyncs take a while to complete.
+sleep 5
+TEST [[ $(num_fsyncs) -eq 0 ]]
+
+cleanup
diff --git a/tests/bugs/replicate/bug-986905.t b/tests/bugs/replicate/bug-986905.t
new file mode 100755
index 00000000000..f4f7386ebc4
--- /dev/null
+++ b/tests/bugs/replicate/bug-986905.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script checks if hardlinks that are created while a brick is down are
+#healed properly.
+
+cleanup;
+function get_inum {
+ ls -i $1 | awk '{print $1}'
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/a
+TEST ln $M0/a $M0/link_a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST ls -l $M0
+inum=$(get_inum $B0/${V0}0/a)
+EXPECT "$inum" get_inum $B0/${V0}0/link_a
+cleanup
diff --git a/tests/bugs/replicate/issue-1254-prioritize-enospc.t b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
new file mode 100644
index 00000000000..fab94b71b27
--- /dev/null
+++ b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function create_bricks {
+ TEST truncate -s 100M $B0/brick0
+ TEST truncate -s 100M $B0/brick1
+ TEST truncate -s 20M $B0/brick2
+ LO1=`SETUP_LOOP $B0/brick0`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO1
+ LO2=`SETUP_LOOP $B0/brick1`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO2
+ LO3=`SETUP_LOOP $B0/brick2`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO3
+ TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+ TEST MOUNT_LOOP $LO1 $B0/${V0}0
+ TEST MOUNT_LOOP $LO2 $B0/${V0}1
+ TEST MOUNT_LOOP $LO3 $B0/${V0}2
+}
+
+function create_files {
+ local i=1
+ while (true)
+ do
+ touch $M0/file$i
+ if [ -e $B0/${V0}2/file$i ];
+ then
+ ((i++))
+ else
+ break
+ fi
+ done
+}
+
+TESTS_EXPECTED_IN_LOOP=13
+
+#Arbiter volume: Check for ENOSPC when the arbiter brick becomes full#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((echo "Test" > $M0/file-3) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
+
+#Replica-3 volume: Check for ENOSPC when one of the bricks becomes full#
+#Keeping the third brick of lower size to simulate disk full scenario#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((cat /dev/zero > $M0/file1) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
diff --git a/tests/bugs/replicate/mdata-heal-no-xattrs.t b/tests/bugs/replicate/mdata-heal-no-xattrs.t
new file mode 100644
index 00000000000..d3b0c504c80
--- /dev/null
+++ b/tests/bugs/replicate/mdata-heal-no-xattrs.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Change permission on brick-0: simulates the case where there is metadata
+# mismatch but no pending xattrs. This brick will become the source for heal.
+TEST chmod +x $B0/$V0"0"/FILE
+
+# Add gfid to xattrop
+xattrop_b0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_b0`
+gfid_str_FILE=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/FILE))
+TEST ln $xattrop_b0/$base_entry_b0 $xattrop_b0/$gfid_str_FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Brick-0 should contain xattrs blaming the other 2 bricks.
+# The values will be zero because heal is over.
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/FILE
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}0/FILE
+
+# Brick-1 and Brick-2 must not contain any afr xattrs.
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+# check permission bits.
+EXPECT '755' stat -c %a $B0/${V0}0/FILE
+EXPECT '755' stat -c %a $B0/${V0}1/FILE
+EXPECT '755' stat -c %a $B0/${V0}2/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/ta-inode-refresh-read.t b/tests/bugs/replicate/ta-inode-refresh-read.t
new file mode 100644
index 00000000000..6dd6ff7f163
--- /dev/null
+++ b/tests/bugs/replicate/ta-inode-refresh-read.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Test read transaction inode refresh logic for thin-arbiter.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../thin-arbiter.rc
+cleanup;
+TEST ta_create_brick_and_volfile brick0
+TEST ta_create_brick_and_volfile brick1
+TEST ta_create_ta_and_volfile ta
+TEST ta_start_brick_process brick0
+TEST ta_start_brick_process brick1
+TEST ta_start_ta_process ta
+
+TEST ta_create_mount_volfile brick0 brick1 ta
+# Set afr xlator options to choose brick0 as read-subvol.
+sed -i '/iam-self-heal-daemon/a \ option read-subvolume-index 0' $B0/mount.vol
+TEST [ $? -eq 0 ]
+sed -i '/iam-self-heal-daemon/a \ option choose-local false' $B0/mount.vol
+TEST [ $? -eq 0 ]
+
+TEST ta_start_mount_process $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "trusted.afr.patchy-ta-2" ls $B0/ta
+
+TEST touch $M0/FILE
+TEST ls $B0/brick0/FILE
+TEST ls $B0/brick1/FILE
+TEST ! ls $B0/ta/FILE
+TEST setfattr -n user.name -v ravi $M0/FILE
+
+# Remove the gfid hardlink from brick0, which is the read-subvol for FILE.
+# This triggers an inode refresh upon getfattr and eventually calls
+# afr_ta_read_txn(). Without this patch, afr_ta_read_txn() would again query
+# brick0, causing getfattr to fail.
+TEST rm -f $(gf_get_gfid_backend_file_path $B0/brick0 FILE)
+TEST getfattr -n user.name $M0/FILE
+
+cleanup;