author     Richard Wareing <rwareing@fb.com>   2015-12-17 12:25:01 -0800
committer  Jeff Darcy <jeff@pl.atyp.us>        2017-09-11 20:41:06 +0000
commit     60b35dbfa42a65d81a18efda2776c0e733c4e769 (patch)
tree       2eecf587a8cf8e1912a790167593ef73364ea801
parent     20ca645344e4af89a53b8d304063a6dd6dc5100a (diff)
cluster/afr: Add additional test coverage for unsplit flows
Summary:
- Adds test coverage for unsplitting via SHD

Test Plan:
- Run prove -v tests/bugs/fb2506544* (https://phabricator.fb.com/P56056659)

Reviewers: moox, dld, dph, sshreyas

Reviewed By: sshreyas

Differential Revision: https://phabricator.fb.com/D2770524

Porting note: also added fb*.t tests to test_env.

Change-Id: Iac28b595194925a45e62b6438611c9bade58b30b
Signed-off-by: Jeff Darcy <jdarcy@fb.com>
Reviewed-on: https://review.gluster.org/18261
Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
Tested-by: Jeff Darcy <jeff@pl.atyp.us>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
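The new tests can be run exactly as the test plan describes, typically from
the top of a glusterfs source tree:

    # Run only the new unsplit tests (see Test Plan above)
    prove -v tests/bugs/fb2506544*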
-rw-r--r--   test_env                          1
-rwxr-xr-x   tests/bugs/fb2506544_ctime.t    150
-rwxr-xr-x   tests/bugs/fb2506544_majority.t 158
-rwxr-xr-x   tests/bugs/fb2506544_mtime.t    153
-rwxr-xr-x   tests/bugs/fb2506544_size.t     153
5 files changed, 615 insertions, 0 deletions
diff --git a/test_env b/test_env
index 1e64217945e..b868a6afb06 100644
--- a/test_env
+++ b/test_env
@@ -4,6 +4,7 @@ DESIRED_TESTS="\
tests/basic/*.t\
tests/basic/afr/*.t\
tests/basic/distribute/*.t\
+ tests/bugs/fb*.t\
tests/features/brick-min-free-space.t\
"
diff --git a/tests/bugs/fb2506544_ctime.t b/tests/bugs/fb2506544_ctime.t
new file mode 100755
index 00000000000..8c7ab02cc8e
--- /dev/null
+++ b/tests/bugs/fb2506544_ctime.t
@@ -0,0 +1,150 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+# Set up a cluster with 3 replicas; the ctime favorite-child policy is enabled later
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+#EST $CLI volume set $V0 cluster.favorite-child-by-ctime off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+# Write some random data into a file
+dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 2>/dev/null
+
+# Create a split-brain by downing a brick, writing some data
+# then downing the other two, write some more data and bring
+# everything back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+TEST dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+CTIME_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+
+# Bring all bricks back up.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+# First, prove that the file is split-brained without
+# favorite-child support.
+umount $M0
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+#EST ! md5sum $M0/splitfile
+
+# Now turn on the favorite-child option and we should be able to read it.
+# The MD5 should be that of the copy with the latest ctime.
+umount $M0
+#EST $CLI volume set $V0 cluster.favorite-child-by-ctime on
+TEST $CLI volume set $V0 cluster.favorite-child-policy ctime
+sleep 1
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+
+HEALED_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+TEST [ "$CTIME_MD5" == "$HEALED_MD5" ]
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+# Set up a cluster with 3 replicas; the ctime favorite-child policy is enabled later
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+#EST $CLI volume set $V0 cluster.favorite-child-by-ctime off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+mkdir $M0/d
+# Write some random data into a file
+dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 2>/dev/null
+
+# Create a split-brain by downing a brick, writing some data
+# then downing the other two, write some more data and bring
+# everything back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+TEST dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+CTIME_MD5=$(md5sum $M0/d/splitfile | cut -d\ -f1)
+
+# Bring all bricks back up.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+# First, prove that the file is split-brained without
+# favorite-child support.
+umount $M0
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+#EST ! md5sum $M0/d/splitfile
+
+# Now turn on the favorite-child option and we should be able to read it.
+# The MD5 should be that of the copy with the latest ctime.
+umount $M0
+#EST $CLI volume set $V0 cluster.favorite-child-by-ctime on
+TEST $CLI volume set $V0 cluster.favorite-child-policy ctime
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+sleep 1
+/etc/init.d/glusterd restart_shd
+EXPECT_WITHIN 60 "0" get_pending_heal_count $V0
+sleep 1
+
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+
+HEALED_MD5=$(md5sum $M0/d/splitfile | cut -d\ -f1)
+TEST [ "$CTIME_MD5" == "$HEALED_MD5" ]
+
+cleanup
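Condensed from the second half of the test above, the unsplit-via-SHD flow
being exercised is (a sketch assembled from the test's own lines, not a
separate API):

    # With a split-brained file already on the volume:
    TEST $CLI volume set $V0 cluster.favorite-child-policy ctime  # newest ctime wins
    TEST $CLI volume set $V0 cluster.self-heal-daemon on          # let SHD do the unsplit
    EXPECT_WITHIN 60 "0" get_pending_heal_count $V0               # wait for heals to drain
    # ...then remount and verify the healed MD5 matches the expected copy.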
diff --git a/tests/bugs/fb2506544_majority.t b/tests/bugs/fb2506544_majority.t
new file mode 100755
index 00000000000..c38a6d59947
--- /dev/null
+++ b/tests/bugs/fb2506544_majority.t
@@ -0,0 +1,158 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+# Set up a cluster with 3 replicas, with the majority favorite-child policy on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority off
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+# This would normally be a toxic combination because it allows us to create a
+# split brain by writing to 1/3 replicas ... but for testing that's exactly
+# what we want.
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+# Write some random data into a file
+dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 2>/dev/null
+
+# Create a split-brain by downing a brick, writing some data
+# then downing the other two, write some more data and bring
+# everything back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+MAJORITY_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+
+# Bring bricks back up
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+TEST dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+
+# Bring all bricks up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+# First, prove that the file is split-brained without
+# favorite-child support.
+umount $M0
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+#EST ! md5sum $M0/splitfile
+
+# Now turn on the favorite-child option and we should be able to read it.
+# Compare MD5s: the healed file should match the copy present
+# on 2/3 of the bricks.
+umount $M0
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+sleep 1
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+
+HEALED_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+TEST [ "$MAJORITY_MD5" == "$HEALED_MD5" ]
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+# Set up a cluster with 3 replicas; the majority favorite-child policy is enabled later
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+mkdir $M0/d
+# Write some random data into a file
+dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 2>/dev/null
+
+# Create a split-brain by downing a brick, writing some data
+# then downing the other two, write some more data and bring
+# everything back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+MAJORITY_MD5=$(md5sum $M0/d/splitfile | cut -d\ -f1)
+
+# Bring bricks back up
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+TEST dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+
+# Bring all bricks up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+# First, prove that the file is split-brained without
+# favorite-child support.
+umount $M0
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+#EST ! md5sum $M0/d/splitfile
+
+# Now turn on the favorite-child option and we should be able to read it.
+# Compare MD5s: the healed file should match the copy present
+# on 2/3 of the bricks.
+umount $M0
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+sleep 1
+/etc/init.d/glusterd restart_shd
+EXPECT_WITHIN 60 "0" get_pending_heal_count $V0
+
+sleep 1
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+
+HEALED_MD5=$(md5sum $M0/d/splitfile | cut -d\ -f1)
+TEST [ "$MAJORITY_MD5" == "$HEALED_MD5" ]
+
+cleanup
diff --git a/tests/bugs/fb2506544_mtime.t b/tests/bugs/fb2506544_mtime.t
new file mode 100755
index 00000000000..b908fdaddd5
--- /dev/null
+++ b/tests/bugs/fb2506544_mtime.t
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+# Set up a cluster with 3 replicas; the mtime favorite-child policy is enabled later
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+# Write some random data into a file
+dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 2>/dev/null
+
+# Create a split-brain by downing a brick, writing some data
+# then downing the other two, write some more data and bring
+# everything back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+TEST dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+MTIME_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+
+# Bring all bricks back up and compare the MD5's, healed MD5 should
+# equal the most recently modified file.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+# First, prove that the file is split-brained without
+# favorite-child support.
+umount $M0
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+#EST ! md5sum $M0/splitfile
+
+# Now turn on the favorite-child option and we should be able to read it.
+# Compare MD5s: the result should match the copy we modified last.
+umount $M0
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+sleep 1
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+
+HEALED_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+TEST [ "$MTIME_MD5" == "$HEALED_MD5" ]
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+# Set up a cluster with 3 replicas; the mtime favorite-child policy is enabled later
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+mkdir $M0/d
+
+# Write some random data into a file
+dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 2>/dev/null
+
+# Create a split-brain by downing a brick, writing some data
+# then downing the other two, write some more data and bring
+# everything back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+TEST dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+MTIME_MD5=$(md5sum $M0/d/splitfile | cut -d\ -f1)
+
+# Bring all bricks back up and compare the MD5's, healed MD5 should
+# equal the most recently modified file.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+# First, prove that the file is split-brained without
+# favorite-child support.
+umount $M0
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+#EST ! md5sum $M0/d/splitfile
+
+# Now turn on the favorite-child option and we should be able to read it.
+# Compare MD5s: the result should match the copy we modified last.
+umount $M0
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+sleep 1
+/etc/init.d/glusterd restart_shd
+EXPECT_WITHIN 60 "0" get_pending_heal_count $V0
+sleep 1
+
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+
+HEALED_MD5=$(md5sum $M0/d/splitfile | cut -d\ -f1)
+TEST [ "$MTIME_MD5" == "$HEALED_MD5" ]
+
+cleanup
diff --git a/tests/bugs/fb2506544_size.t b/tests/bugs/fb2506544_size.t
new file mode 100755
index 00000000000..593c1053853
--- /dev/null
+++ b/tests/bugs/fb2506544_size.t
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+# Set up a cluster with 3 replicas; the size favorite-child policy is enabled later
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+#EST $CLI volume set $V0 cluster.favorite-child-by-size off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+# Write some random data into a file
+dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 2>/dev/null
+
+# Create a split-brain by downing a brick, writing some data
+# then downing the other two, write some more data and bring
+# everything back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+TEST dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+SIZE_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+
+# Bring the bricks back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+# First, prove that the file is split-brained without
+# favorite-child support.
+umount $M0
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+#EST ! md5sum $M0/splitfile
+
+# Now turn on the favorite-child option and we should be able to read it.
+# Compare MD5s: the result should match the largest copy.
+umount $M0
+#EST $CLI volume set $V0 cluster.favorite-child-by-size on
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+sleep 1
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+HEALED_MD5=$(md5sum $M0/splitfile | cut -d\ -f1)
+TEST [ "$SIZE_MD5" == "$HEALED_MD5" ]
+
+cleanup
+
+##########################
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info 2> /dev/null;
+
+
+# Set up a cluster with 3 replicas; the size favorite-child policy is enabled later
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+#EST $CLI volume set $V0 cluster.favorite-child-by-size off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume start $V0
+sleep 5
+
+# Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+
+mkdir $M0/d
+
+# Write some random data into a file
+dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 2>/dev/null
+
+# Create a split-brain by downing a brick, writing some data
+# then downing the other two, write some more data and bring
+# everything back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+
+TEST dd if=/dev/urandom of=$M0/d/splitfile bs=128k count=5 oflag=append conv=notrunc 2>/dev/null
+SIZE_MD5=$(md5sum $M0/d/splitfile | cut -d\ -f1)
+
+# Bring the bricks back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+
+# First, prove that the file is split-brained without
+# favorite-child support.
+umount $M0
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+#EST ! md5sum $M0/d/splitfile
+
+# Now turn on the favorite-child option and we should be able to read it.
+# Compare MD5s: the result should match the largest copy.
+umount $M0
+#EST $CLI volume set $V0 cluster.favorite-child-by-size on
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+sleep 1
+/etc/init.d/glusterd restart_shd
+EXPECT_WITHIN 60 "0" get_pending_heal_count $V0
+sleep 1
+# Mount the volume
+TEST glusterfs --log-level DEBUG --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 1
+
+HEALED_MD5=$(md5sum $M0/d/splitfile | cut -d\ -f1)
+TEST [ "$SIZE_MD5" == "$HEALED_MD5" ]
+
+cleanup
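Outside the harness, the same knobs are ordinary volume options; for
instance, to let SHD resolve future split-brains by file size on a real
volume (myvol is a placeholder name):

    gluster volume set myvol cluster.favorite-child-policy size
    gluster volume heal myvol    # kick off an index heal via SHD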