author     Ravishankar N <ravishankar@redhat.com>    2018-01-28 13:50:47 +0530
committer  Ravishankar N <ravishankar@redhat.com>    2018-02-01 14:17:50 +0000
commit     0e6e8216823c2d9dafb81aae0f6ee3497c23d140 (patch)
tree       06481a13e40b18796cbef6a8248f539e6a739951 /tests
parent     d663b9a323f34919da3f35bfc221a0aa91d9ab94 (diff)
afr: don't treat all cases of all bricks being blamed as split-brain
Problem:
We currently don't have a roll-back/undoing of post-ops if quorum is not
met. Though the FOP is still unwound with failure, the xattrs remain on
the disk. Due to these partial post-ops and partial heals (healing only
when 2 bricks are up), we can end up in split-brain purely from the afr
xattrs' point of view, i.e. each brick is blamed by at least one of the
others. These scenarios are hit when there are frequent
connects/disconnects of the client/shd to the bricks while I/O or heal
is in progress.
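
For illustration, this xattr state can be inspected directly on the bricks
with getfattr (the brick paths and volume name "patchy" below are
hypothetical, not part of this commit). trusted.afr.<vol>-client-<N> encodes
pending data/metadata/entry counts held against brick N; "split-brain from
the xattrs' point of view" means every brick carries a non-zero blame for at
least one of the others:

# Hypothetical replica-3 volume "patchy"; dump afr changelog xattrs per brick.
getfattr -d -m trusted.afr -e hex /bricks/patchy0/file
# trusted.afr.patchy-client-1=0x000000010000000000000000  -> B0 blames B1
getfattr -d -m trusted.afr -e hex /bricks/patchy1/file
# trusted.afr.patchy-client-0=0x000000010000000000000000  -> B1 blames B0
getfattr -d -m trusted.afr -e hex /bricks/patchy2/file
# trusted.afr.patchy-client-1=0x000000010000000000000000  -> B2 blames B1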
Fix:
Instead of undoing the post-op, pick a source based on the xattr values.
If 2 bricks blame one, the blamed one must be treated as a sink.
If there is no majority, all are sources. Once we pick a source,
self-heal will then do the heal instead of erroring out due to
split-brain.
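
A minimal sketch of this source-picking rule, in the same shell idiom as the
tests (the blame matrix and brick count here are hypothetical; the actual
implementation lives in afr's C code): a brick blamed by a majority of its
peers is demoted to a sink, and if no brick attracts a majority of blame,
every brick stays a source so heal proceeds instead of returning a
split-brain error.

#!/bin/bash
# Illustrative only: blame[i,j]=1 means brick i blames brick j
# (i.e. brick i holds a non-zero trusted.afr pending xattr for brick j).
declare -A blame=( [0,1]=1 [2,1]=1 [1,0]=1 )  # B0 and B2 blame B1; B1 blames B0
bricks=3
for ((j = 0; j < bricks; j++)); do
    count=0
    for ((i = 0; i < bricks; i++)); do
        if [[ $i -ne $j && ${blame[$i,$j]:-0} -eq 1 ]]; then
            count=$((count + 1))
        fi
    done
    # Majority of the (bricks - 1) peers blame brick j => treat it as a sink.
    if (( count * 2 > bricks - 1 )); then
        echo "brick $j: sink (blamed by $count of $((bricks - 1)) peers)"
    else
        echo "brick $j: source"
    fi
done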
Change-Id: I3d0224b883eb0945785ade0e9697a1c828aec0ae
BUG: 1539358
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Diffstat (limited to 'tests')
-rw-r--r--   tests/basic/afr/arbiter-add-brick.t                        16
-rwxr-xr-x   tests/bugs/replicate/bug-1539358-split-brain-detection.t   89
-rwxr-xr-x   tests/bugs/replicate/bug-802417.t                          12
3 files changed, 117 insertions, 0 deletions
diff --git a/tests/basic/afr/arbiter-add-brick.t b/tests/basic/afr/arbiter-add-brick.t
index fe919de0ab4..77b93d9a210 100644
--- a/tests/basic/afr/arbiter-add-brick.t
+++ b/tests/basic/afr/arbiter-add-brick.t
@@ -12,6 +12,8 @@ TEST $CLI volume set $V0 performance.stat-prefetch off
 TEST $CLI volume start $V0
 TEST $CLI volume set $V0 self-heal-daemon off
 TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 
 TEST mkdir $M0/dir1
 TEST dd if=/dev/urandom of=$M0/file1 bs=1024 count=1
@@ -24,6 +26,7 @@ TEST dd if=/dev/urandom of=$M0/file1 bs=1024 count=1024
 #convert replica 2 to arbiter volume
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 
 #syntax check for add-brick.
 TEST ! $CLI volume add-brick $V0 replica 2 arbiter 1 $H0:$B0/${V0}2
@@ -31,6 +34,19 @@ TEST ! $CLI volume add-brick $V0 replica 3 arbiter 2 $H0:$B0/${V0}2
 TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Trigger name heals from client. If we just rely on index heal, the first index
+#crawl on B0 fails for /, dir2 and /file either due to lock collision or files
+#not being present on the other 2 bricks yet. It is getting healed only in the
+#next crawl after priv->shd.timeout (600 seconds) or by manually launching
+#index heal again.
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST stat $M0/dir1
+TEST stat $M0/dir2
+TEST stat $M0/file1
 
 #Heal files
 TEST $CLI volume set $V0 self-heal-daemon on
diff --git a/tests/bugs/replicate/bug-1539358-split-brain-detection.t b/tests/bugs/replicate/bug-1539358-split-brain-detection.t
new file mode 100755
index 00000000000..7b71a7a9e7d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1539358-split-brain-detection.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+###############################################################################yy
+# Case of 2 bricks blaming the third and the third blaming the other two.
+
+TEST `echo "hello" >> $M0/file`
+
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST `echo "append" >> $M0/file`
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/file trusted.afr.$V0-client-1 data
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-1 data
+CLIENT_MD5=$(md5sum $M0/file | cut -d\ -f1)
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$CLIENT_MD5" == "$B0_MD5" ]
+TEST [ "$CLIENT_MD5" == "$B1_MD5" ]
+TEST [ "$CLIENT_MD5" == "$B2_MD5" ]
+
+TEST rm $M0/file
+
+###############################################################################yy
+# Case of each brick blaming the next one in a cyclic manner
+
+TEST `echo "hello" >> $M0/file`
+# Mark cyclic xattrs and modify file content directly on the bricks.
+TEST $CLI volume set $V0 self-heal-daemon off
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/$V0"0"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"0"/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"2"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"2"/file
+
+TEST `echo "ab" >> $B0/$V0"0"/file`
+TEST `echo "cdef" >> $B0/$V0"1"/file`
+TEST `echo "ghi" >> $B0/$V0"2"/file`
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$B0_MD5" == "$B1_MD5" ]
+TEST [ "$B0_MD5" == "$B2_MD5" ]
+###############################################################################yy
+cleanup
diff --git a/tests/bugs/replicate/bug-802417.t b/tests/bugs/replicate/bug-802417.t
index c5ba98b65fd..f213439401e 100755
--- a/tests/bugs/replicate/bug-802417.t
+++ b/tests/bugs/replicate/bug-802417.t
@@ -10,6 +10,18 @@ function write_file()
 }
 
 cleanup;
+
+#####################################################
+# We are currently not triggering data heal unless all bricks of the replica are
+# up. We will need to modify this .t once the fix for preventing stale reads
+# being served to clients for files in spurious split-brains is done. Spurious
+# split-brains here means afr xattrs indicates sbrain but it is actually not.
+# Self-heal will heal such files automatically but before the heal completes,
+# reads can be served which needs fixing.
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+######################################################
+
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume info;
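
As a usage note (the volume name "patchy" is again hypothetical), the
behaviour these tests assert can also be spot-checked by hand: with this
patch, files whose xattrs blame every brick should show up as entries
pending heal rather than in the split-brain listing.

gluster volume heal patchy info               # entries pending heal
gluster volume heal patchy info split-brain   # should report no entries
gluster volume heal patchy                    # launch an index heal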