From 28c306026982d559827c9dfa6ac3066dd5cd4ca1 Mon Sep 17 00:00:00 2001 From: Ravishankar N Date: Sun, 28 Jan 2018 13:50:47 +0530 Subject: afr: don't treat all cases all bricks being blamed as split-brain Problem: We currently don't have a roll-back/undoing of post-ops if quorum is not met. Though the FOP is still unwound with failure, the xattrs remain on the disk. Due to these partial post-ops and partial heals (healing only when 2 bricks are up), we can end up in split-brain purely from the afr xattrs point of view i.e each brick is blamed by atleast one of the others. These scenarios are hit when there is frequent connect/disconnect of the client/shd to the bricks while I/O or heal are in progress. Fix: Instead of undoing the post-op, pick a source based on the xattr values. If 2 bricks blame one, the blamed one must be treated as sink. If there is no majority, all are sources. Once we pick a source, self-heal will then do the heal instead of erroring out due to split-brain. Change-Id: I3d0224b883eb0945785ade0e9697a1c828aec0ae BUG: 1542380 Signed-off-by: Ravishankar N (cherry picked from commit 0e6e8216823c2d9dafb81aae0f6ee3497c23d140) --- tests/basic/afr/arbiter-add-brick.t | 16 ++++ .../replicate/bug-1539358-split-brain-detection.t | 89 ++++++++++++++++++++++ tests/bugs/replicate/bug-802417.t | 12 +++ xlators/cluster/afr/src/afr-self-heal-common.c | 51 +++++++++++-- xlators/cluster/afr/src/afr-self-heal-data.c | 6 +- 5 files changed, 165 insertions(+), 9 deletions(-) create mode 100755 tests/bugs/replicate/bug-1539358-split-brain-detection.t diff --git a/tests/basic/afr/arbiter-add-brick.t b/tests/basic/afr/arbiter-add-brick.t index fe919de0ab4..77b93d9a210 100644 --- a/tests/basic/afr/arbiter-add-brick.t +++ b/tests/basic/afr/arbiter-add-brick.t @@ -12,6 +12,8 @@ TEST $CLI volume set $V0 performance.stat-prefetch off TEST $CLI volume start $V0 TEST $CLI volume set $V0 self-heal-daemon off TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 TEST mkdir $M0/dir1 TEST dd if=/dev/urandom of=$M0/file1 bs=1024 count=1 @@ -24,6 +26,7 @@ TEST dd if=/dev/urandom of=$M0/file1 bs=1024 count=1024 #convert replica 2 to arbiter volume TEST $CLI volume start $V0 force EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 #syntax check for add-brick. TEST ! $CLI volume add-brick $V0 replica 2 arbiter 1 $H0:$B0/${V0}2 @@ -31,6 +34,19 @@ TEST ! $CLI volume add-brick $V0 replica 3 arbiter 2 $H0:$B0/${V0}2 TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +#Trigger name heals from client. If we just rely on index heal, the first index +#crawl on B0 fails for /, dir2 and /file either due to lock collision or files +#not being present on the other 2 bricks yet. It is getting healed only in the +#next crawl after priv->shd.timeout (600 seconds) or by manually launching +#index heal again. +TEST $CLI volume set $V0 data-self-heal off +TEST $CLI volume set $V0 metadata-self-heal off +TEST $CLI volume set $V0 entry-self-heal off +TEST stat $M0/dir1 +TEST stat $M0/dir2 +TEST stat $M0/file1 #Heal files TEST $CLI volume set $V0 self-heal-daemon on diff --git a/tests/bugs/replicate/bug-1539358-split-brain-detection.t b/tests/bugs/replicate/bug-1539358-split-brain-detection.t new file mode 100755 index 00000000000..7b71a7a9e7d --- /dev/null +++ b/tests/bugs/replicate/bug-1539358-split-brain-detection.t @@ -0,0 +1,89 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc + +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}; +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; +TEST $CLI volume set $V0 self-heal-daemon off +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + +###############################################################################yy +# Case of 2 bricks blaming the third and the third blaming the other two. + +TEST `echo "hello" >> $M0/file` + +# B0 and B2 must blame B1 +TEST kill_brick $V0 $H0 $B0/$V0"1" +TEST `echo "append" >> $M0/file` +EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/file trusted.afr.$V0-client-1 data +EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-1 data +CLIENT_MD5=$(md5sum $M0/file | cut -d\ -f1) + +# B1 must blame B0 and B2 +setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"1"/file +setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file + +# Launch heal +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1 +TEST $CLI volume set $V0 self-heal-daemon on +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2 +TEST $CLI volume heal $V0 +EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 +B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1) +B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1) +B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1) +TEST [ "$CLIENT_MD5" == "$B0_MD5" ] +TEST [ "$CLIENT_MD5" == "$B1_MD5" ] +TEST [ "$CLIENT_MD5" == "$B2_MD5" ] + +TEST rm $M0/file + +###############################################################################yy +# Case of each brick blaming the next one in a cyclic manner + +TEST `echo "hello" >> $M0/file` +# Mark cyclic xattrs and modify file content directly on the bricks. +TEST $CLI volume set $V0 self-heal-daemon off +setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/$V0"0"/file +setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"0"/file +setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file +setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"1"/file +setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"2"/file +setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"2"/file + +TEST `echo "ab" >> $B0/$V0"0"/file` +TEST `echo "cdef" >> $B0/$V0"1"/file` +TEST `echo "ghi" >> $B0/$V0"2"/file` + +# Add entry to xattrop dir to trigger index heal. +xattrop_dir0=$(afr_get_index_path $B0/$V0"0") +base_entry_b0=`ls $xattrop_dir0` +gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file)) +ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str +EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0 + +# Launch heal +TEST $CLI volume set $V0 self-heal-daemon on +TEST $CLI volume heal $V0 +EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 +B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1) +B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1) +B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1) +TEST [ "$B0_MD5" == "$B1_MD5" ] +TEST [ "$B0_MD5" == "$B2_MD5" ] +###############################################################################yy +cleanup diff --git a/tests/bugs/replicate/bug-802417.t b/tests/bugs/replicate/bug-802417.t index c5ba98b65fd..f213439401e 100755 --- a/tests/bugs/replicate/bug-802417.t +++ b/tests/bugs/replicate/bug-802417.t @@ -10,6 +10,18 @@ function write_file() } cleanup; + +##################################################### +# We are currently not triggering data heal unless all bricks of the replica are +# up. We will need to modify this .t once the fix for preventing stale reads +# being served to clients for files in spurious split-brains is done. Spurious +# split-brains here means afr xattrs indicates sbrain but it is actually not. +# Self-heal will heal such files automatically but before the heal completes, +# reads can be served which needs fixing. +#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000 +#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000 +###################################################### + TEST glusterd TEST pidof glusterd TEST $CLI volume info; diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c index 311da68e8ee..7e2a6027126 100644 --- a/xlators/cluster/afr/src/afr-self-heal-common.c +++ b/xlators/cluster/afr/src/afr-self-heal-common.c @@ -1455,6 +1455,36 @@ afr_does_witness_exist (xlator_t *this, uint64_t *witness) return _gf_false; } +unsigned int +afr_get_quorum_count (afr_private_t *priv) +{ + if (priv->quorum_count == AFR_QUORUM_AUTO) { + return priv->child_count/2 + 1; + } else { + return priv->quorum_count; + } +} + +void +afr_selfheal_post_op_failure_accounting (afr_private_t *priv, char *accused, + unsigned char *sources, + unsigned char *locked_on) +{ + int i = 0; + unsigned int quorum_count = 0; + + if (AFR_COUNT (sources, priv->child_count) != 0) + return; + + quorum_count = afr_get_quorum_count (priv); + for (i = 0; i < priv->child_count; i++) { + if ((accused[i] < quorum_count) && locked_on[i]) { + sources[i] = 1; + } + } + return; +} + /* * This function determines if a self-heal is required for a given inode, * and if needed, in what direction. @@ -1490,6 +1520,7 @@ afr_selfheal_find_direction (call_frame_t *frame, xlator_t *this, char *accused = NULL;/* Accused others without any self-accusal */ char *pending = NULL;/* Have pending operations on others */ char *self_accused = NULL; /* Accused itself */ + int min_participants = -1; priv = this->private; @@ -1513,8 +1544,13 @@ afr_selfheal_find_direction (call_frame_t *frame, xlator_t *this, } } + if (type == AFR_DATA_TRANSACTION) { + min_participants = priv->child_count; + } else { + min_participants = AFR_SH_MIN_PARTICIPANTS; + } if (afr_success_count (replies, - priv->child_count) < AFR_SH_MIN_PARTICIPANTS) { + priv->child_count) < min_participants) { /* Treat this just like locks not being acquired */ return -ENOTCONN; } @@ -1530,11 +1566,10 @@ afr_selfheal_find_direction (call_frame_t *frame, xlator_t *this, for (i = 0; i < priv->child_count; i++) { for (j = 0; j < priv->child_count; j++) { if (matrix[i][j]) { - if (!self_accused[i]) - accused[j] = 1; - - if (i != j) - pending[i] = 1; + if (!self_accused[i]) + accused[j] += 1; + if (i != j) + pending[i] += 1; } } } @@ -1575,6 +1610,10 @@ afr_selfheal_find_direction (call_frame_t *frame, xlator_t *this, } } + if (type == AFR_DATA_TRANSACTION) + afr_selfheal_post_op_failure_accounting (priv, accused, + sources, locked_on); + /* If no sources, all locked nodes are sinks - split brain */ if (AFR_COUNT (sources, priv->child_count) == 0) { for (i = 0; i < priv->child_count; i++) { diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c index 7880a611592..c033a8afc07 100644 --- a/xlators/cluster/afr/src/afr-self-heal-data.c +++ b/xlators/cluster/afr/src/afr-self-heal-data.c @@ -684,7 +684,7 @@ __afr_selfheal_data (call_frame_t *frame, xlator_t *this, fd_t *fd, ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name, 0, 0, data_lock); { - if (ret < AFR_SH_MIN_PARTICIPANTS) { + if (ret < priv->child_count) { gf_msg_debug (this->name, 0, "%s: Skipping " "self-heal as only %d number " "of subvolumes " @@ -749,7 +749,7 @@ restore_time: if (!is_arbiter_the_only_sink) { ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name, 0, 0, data_lock); - if (ret < AFR_SH_MIN_PARTICIPANTS) { + if (ret < priv->child_count) { ret = -ENOTCONN; did_sh = _gf_false; goto skip_undo_pending; @@ -878,7 +878,7 @@ afr_selfheal_data (call_frame_t *frame, xlator_t *this, inode_t *inode) priv->sh_domain, 0, 0, locked_on); { - if (ret < AFR_SH_MIN_PARTICIPANTS) { + if (ret < priv->child_count) { gf_msg_debug (this->name, 0, "%s: Skipping " "self-heal as only %d number of " "subvolumes could be locked", -- cgit