Diffstat (limited to 'tests/basic/afr/granular-esh')
-rw-r--r--  tests/basic/afr/granular-esh/add-brick.t                               |  80
-rw-r--r--  tests/basic/afr/granular-esh/cli.t                                     | 114
-rw-r--r--  tests/basic/afr/granular-esh/conservative-merge.t                      | 138
-rw-r--r--  tests/basic/afr/granular-esh/granular-esh.t                            | 168
-rw-r--r--  tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t  |  76
-rw-r--r--  tests/basic/afr/granular-esh/replace-brick.t                           |  76
6 files changed, 652 insertions(+), 0 deletions(-)
diff --git a/tests/basic/afr/granular-esh/add-brick.t b/tests/basic/afr/granular-esh/add-brick.t
new file mode 100644
index 00000000000..270cf1d32a6
--- /dev/null
+++ b/tests/basic/afr/granular-esh/add-brick.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Metadata changes
+TEST setfattr -n user.test -v qwerty $M0/file5.txt
+
+# Add a third brick
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
+
+# The new brick should accuse the old bricks (simulating a data-loss case)
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}2/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}2/
+
+# Check that the pending data, metadata and entry xattrs are set for the newly added brick
+EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+
+# Also ensure we are not mistakenly tampering with the new-brick's changelog xattrs
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+# Check that the dirty xattr is set on the newly added brick
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}2 | sort)
+TEST diff <(ls $B0/${V0}1 | sort) <(ls $B0/${V0}2 | sort)
+
+# Test if data was healed
+TEST diff $B0/${V0}0/file1.txt $B0/${V0}2/file1.txt
+
+# Test if metadata was healed and exists on both the bricks
+EXPECT "qwerty" get_text_xattr user.test $B0/${V0}2/file5.txt
+EXPECT "qwerty" get_text_xattr user.test $B0/${V0}0/file5.txt
+
+# Ensure all changelog xattrs are now back to zero.
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}2
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+cleanup
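
The xattr checks above go through the get_hex_xattr helper sourced by the test. As a rough hand-run equivalent (the brick path argument is a placeholder), the same changelog values can be dumped directly with getfattr:

    #!/bin/bash
    # Illustrative sketch: dump the trusted.afr.* changelog xattrs of a brick
    # root in hex. Each 24-hex-digit value packs three 32-bit counters:
    # pending data, metadata and entry operations, in that order.
    brick_path="$1"    # placeholder: a brick root such as "$B0/${V0}2" above
    getfattr -d -m. -e hex "$brick_path" 2>/dev/null | grep '^trusted.afr'
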
diff --git a/tests/basic/afr/granular-esh/cli.t b/tests/basic/afr/granular-esh/cli.t
new file mode 100644
index 00000000000..10b6c6398da
--- /dev/null
+++ b/tests/basic/afr/granular-esh/cli.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+# Test that enabling the option works on a newly created volume
+TEST $CLI volume set $V0 cluster.granular-entry-heal on
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+
+#########################
+##### DISPERSE TEST #####
+#########################
+# Execute the same command on a disperse volume and make sure it fails.
+TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+TEST $CLI volume start $V1
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+TEST ! $CLI volume heal $V1 granular-entry-heal disable
+
+######################
+### REPLICATE TEST ###
+######################
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+# Test that the volume-set way of enabling the option is disallowed
+TEST ! $CLI volume set $V0 granular-entry-heal on
+# Test that the volume-heal way of enabling the option is allowed
+TEST $CLI volume heal $V0 granular-entry-heal enable
+# Volume-reset of the option should be allowed
+TEST $CLI volume reset $V0 granular-entry-heal
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+EXPECT "enable" volume_option $V0 cluster.granular-entry-heal
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Disabling the option should work even when one or more bricks are down
+TEST $CLI volume heal $V0 granular-entry-heal disable
+# When a brick is down, an 'enable' attempt should fail
+TEST ! $CLI volume heal $V0 granular-entry-heal enable
+
+# Restart the killed brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# When all bricks are up, it should be possible to enable the option
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+# Kill brick-0 again
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+ echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Perform a volume-reset-all-options operation
+TEST $CLI volume reset $V0
+# Ensure that granular entry heal is also disabled
+EXPECT "no" volume_get_field $V0 cluster.granular-entry-heal
+EXPECT "off" volume_get_field $V0 cluster.entry-self-heal
+
+cleanup
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1399038
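
The name indices checked above live under .glusterfs/indices/entry-changes on each brick: one subdirectory per parent-directory GFID, containing one entry per file name that still needs an entry heal. A small illustrative listing (the brick path is a placeholder):

    #!/bin/bash
    # Illustrative sketch: list the granular entry-heal name indices on a brick.
    # Layout, as exercised by the test above:
    #   .glusterfs/indices/entry-changes/<parent-gfid>/<entry-name>
    brick_path="$1"    # placeholder: a brick root such as "$B0/${V0}1" above
    find "$brick_path/.glusterfs/indices/entry-changes" -mindepth 1 2>/dev/null | sort
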
diff --git a/tests/basic/afr/granular-esh/conservative-merge.t b/tests/basic/afr/granular-esh/conservative-merge.t
new file mode 100644
index 00000000000..b170e47e0cb
--- /dev/null
+++ b/tests/basic/afr/granular-esh/conservative-merge.t
@@ -0,0 +1,138 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST $GFS --volfile-id=$V0 -s $H0 $M0
+
+TEST mkdir $M0/dir
+gfid_dir=$(get_gfid_string $M0/dir)
+
+echo "1" > $M0/f1
+echo "2" > $M0/f2
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST mkdir $M0/dir2
+gfid_dir2=$(get_gfid_string $M0/dir2)
+TEST unlink $M0/f1
+echo "3" > $M0/f3
+TEST mkdir $M0/dir/subdir
+gfid_subdir=$(get_gfid_string $M0/dir/subdir)
+echo "dir2-1" > $M0/dir2/f1
+echo "subdir-1" > $M0/dir/subdir/f1
+
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/dir2
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f3
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir2
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir2/f1
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/subdir
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_subdir
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_subdir/f1
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+TEST mkdir $M0/dir3
+gfid_dir3=$(get_gfid_string $M0/dir3)
+# Root is now in split-brain.
+TEST mkdir $M0/dir/subdir2
+gfid_subdir2=$(get_gfid_string $M0/dir/subdir2)
+# /dir is now in split-brain.
+echo "4" > $M0/f4
+TEST unlink $M0/f2
+echo "dir3-1" > $M0/dir3/f1
+echo "subdir2-1" > $M0/dir/subdir2/f1
+
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID/dir3
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID/f4
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$gfid_dir
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$gfid_dir/subdir2
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$gfid_dir3
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$gfid_dir3/f1
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$gfid_subdir2
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$gfid_subdir2/f1
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Make sure entry self-heal handled the split-brain correctly: files deleted on
+# only one brick while its peer was down are re-created (impunged) by the merge.
+TEST stat $M0/f1
+TEST stat $M0/f2
+
+# Test if data was healed
+for i in {1..4}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+TEST diff $B0/${V0}0/dir2/f1 $B0/${V0}1/dir2/f1
+EXPECT "dir2-1" cat $M0/dir2/f1
+
+TEST diff $B0/${V0}0/dir/subdir/f1 $B0/${V0}1/dir/subdir/f1
+EXPECT "subdir-1" cat $M0/dir/subdir/f1
+
+TEST diff $B0/${V0}0/dir3/f1 $B0/${V0}1/dir3/f1
+EXPECT "dir3-1" cat $M0/dir3/f1
+
+TEST diff $B0/${V0}0/dir/subdir2/f1 $B0/${V0}1/dir/subdir2/f1
+EXPECT "subdir2-1" cat $M0/dir/subdir2/f1
+
+# Verify that all name indices have been removed after a successful heal.
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/dir2
+TEST ! stat $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID/dir3
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f3
+TEST ! stat $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID/f4
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir2/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/subdir
+TEST ! stat $B0/${V0}0/.glusterfs/indices/entry-changes/$gfid_dir/subdir2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_subdir/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir3/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_subdir2/f1
+
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+TEST ! stat $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir
+TEST ! stat $B0/${V0}0/.glusterfs/indices/entry-changes/$gfid_dir
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_subdir
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir3
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_subdir2
+
+cleanup
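
The test above drives '/' and /dir into entry split-brain and expects the conservative merge to keep the union of entries from both bricks. A quick manual cross-check is the same listing comparison used in add-brick.t (brick paths are placeholders):

    #!/bin/bash
    # Illustrative sketch: after a conservative merge, both bricks should hold
    # the union of the entries created on either side while its peer was down.
    b0="$1"; b1="$2"    # placeholders: the two brick roots
    diff <(ls "$b0" | sort) <(ls "$b1" | sort) && echo "entry listings match"
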
diff --git a/tests/basic/afr/granular-esh/granular-esh.t b/tests/basic/afr/granular-esh/granular-esh.t
new file mode 100644
index 00000000000..de0e8f4290b
--- /dev/null
+++ b/tests/basic/afr/granular-esh/granular-esh.t
@@ -0,0 +1,168 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=12
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Create files under root
+for i in {1..4}
+do
+ echo $i > $M0/f$i
+done
+
+# Create a directory and a few files under it
+TEST mkdir $M0/dir
+gfid_dir=$(get_gfid_string $M0/dir)
+
+for i in {1..3}
+do
+ echo $i > $M0/dir/f$i
+done
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create more files
+for i in {5..6}
+do
+ echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {5..6}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+# Delete an existing file
+TEST unlink $M0/f1
+
+# Rename an existing file
+TEST mv $M0/f2 $M0/f2_renamed
+
+# Create a hard link on f3
+TEST ln $M0/f3 $M0/link
+
+# Create a symlink on f4
+TEST ln -s $M0/f4 $M0/symlink
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2_renamed
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/link
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/symlink
+
+# Create a file and also delete it. This is to test deletion of stale indices during heal.
+TEST touch $M0/file_stale
+TEST unlink $M0/file_stale
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/file_stale
+
+# Create a directory and create its subdirs and files while a brick is down
+TEST mkdir -p $M0/newdir/newsubdir
+
+for i in {1..3}
+do
+ echo $i > $M0/newdir/f$i
+ echo $i > $M0/newdir/newsubdir/f$i
+done
+
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/newdir
+gfid_newdir=$(get_gfid_string $M0/newdir)
+gfid_newsubdir=$(get_gfid_string $M0/newdir/newsubdir)
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir/newsubdir
+# Check if 'data' segment of the changelog is set for the newly created directories 'newdir' and 'newsubdir'
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}1/newdir trusted.afr.$V0-client-0 data
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}1/newdir/newsubdir trusted.afr.$V0-client-0 data
+
+# Test that removal of an entire sub-tree in the hierarchy works.
+TEST rm -rf $M0/dir
+
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/dir
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f1
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f2
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f3
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {5..6}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+for i in {1..3}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/newdir/f$i $B0/${V0}1/newdir/f$i
+ TEST_IN_LOOP diff $B0/${V0}0/newdir/newsubdir/f$i $B0/${V0}1/newdir/newsubdir/f$i
+done
+
+# Verify that self-heal also removed all the deleted names on the sink brick.
+TEST ! stat $B0/${V0}0/f1
+TEST ! stat $B0/${V0}0/f2
+TEST stat $B0/${V0}0/f2_renamed
+TEST stat $B0/${V0}0/symlink
+EXPECT "3" get_hard_link_count $B0/${V0}0/f3
+EXPECT "f4" readlink $B0/${V0}0/symlink
+TEST ! stat $B0/${V0}0/dir
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2_renamed
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/link
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/symlink
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/dir
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/newdir
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/file_stale
+
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f3
+
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir/f3
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newsubdir/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newsubdir/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newsubdir/f3
+
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newsubdir
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir
+
+cleanup
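
The per-segment check on 'newdir' above goes through the afr_get_specific_changelog_xattr helper sourced by the test; conceptually it slices the 24-hex-digit changelog value into its data, metadata and entry fields, roughly as sketched here (both arguments are placeholders):

    #!/bin/bash
    # Illustrative sketch: split an AFR changelog xattr into its three segments.
    path="$1"     # placeholder, e.g. "$B0/${V0}1/newdir" above
    xattr="$2"    # placeholder, e.g. "trusted.afr.$V0-client-0"
    val=$(getfattr -n "$xattr" -e hex "$path" 2>/dev/null | awk -F'0x' '/=0x/ {print $2}')
    echo "data=${val:0:8} metadata=${val:8:8} entry=${val:16:8}"
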
diff --git a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
new file mode 100644
index 00000000000..1b5421bf4b6
--- /dev/null
+++ b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+ echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+# Now disable granular-entry-heal
+TEST $CLI volume heal $V0 granular-entry-heal disable
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# The indices already recorded are granular, but the heal that follows will be
+# of the normal (full entry-heal) kind. Verify that the heal still completes
+# and that the stale granular indices are deleted.
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+cleanup
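
get_pending_heal_count, used above to wait for the heal to finish, is a harness helper; roughly the same number can be read off the heal-info CLI output, e.g.:

    #!/bin/bash
    # Illustrative sketch: approximate the pending-heal count by summing the
    # per-brick "Number of entries" lines from heal info.
    vol="$1"    # placeholder volume name, e.g. "$V0"
    gluster volume heal "$vol" info | awk '/Number of entries/ {sum += $NF} END {print sum + 0}'
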
diff --git a/tests/basic/afr/granular-esh/replace-brick.t b/tests/basic/afr/granular-esh/replace-brick.t
new file mode 100644
index 00000000000..5fc7811a8d8
--- /dev/null
+++ b/tests/basic/afr/granular-esh/replace-brick.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Metadata changes
+TEST setfattr -n user.test -v qwerty $M0/file5.txt
+
+# Replace brick1
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
+
+# The replaced brick should accuse the non-replaced brick (simulating a data-loss case)
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1_new/
+
+# Check that the data, metadata and entry segments of the changelog are set for the replaced brick
+EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+
+# Also ensure we don't mistakenly tamper with the new brick's changelog xattrs
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1_new
+
+# Ensure the dirty xattr is set on the new brick.
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}1_new
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1_new | sort)
+
+# To make sure that files were not lost from brick0
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+
+# Test if data was healed
+TEST diff $B0/${V0}0/file1.txt $B0/${V0}1_new/file1.txt
+# To make sure that data was not lost from brick0
+TEST diff $B0/${V0}0/file1.txt $B0/${V0}1/file1.txt
+
+# Test if metadata was healed and exists on both the bricks
+EXPECT "qwerty" get_text_xattr user.test $B0/${V0}1_new/file5.txt
+EXPECT "qwerty" get_text_xattr user.test $B0/${V0}0/file5.txt
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1_new
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}1_new
+
+cleanup
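
The data-loss simulation above marks the replaced brick as accusing its peer by writing the entry segment of the changelog xattr directly on the brick root. A minimal stand-alone sketch of that set-and-verify step (path and xattr name are placeholders):

    #!/bin/bash
    # Illustrative sketch: mark a brick root as accusing its peer (entry count 1)
    # and read the value back, mirroring the setfattr / get_hex_xattr pair above.
    brick="$1"    # placeholder, e.g. "$B0/${V0}1_new" above
    xattr="$2"    # placeholder, e.g. "trusted.afr.$V0-client-0"
    setfattr -n "$xattr" -v 0x000000000000000000000001 "$brick"
    getfattr -n "$xattr" -e hex "$brick" 2>/dev/null | grep '=0x'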