diff options
-rw-r--r--  tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t  76
-rw-r--r--  xlators/features/index/src/index.c                                     23
2 files changed, 97 insertions(+), 2 deletions(-)
diff --git a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t new file mode 100644 index 00000000000..2da90a98c76 --- /dev/null +++ b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t @@ -0,0 +1,76 @@ +#!/bin/bash + +. $(dirname $0)/../../../include.rc +. $(dirname $0)/../../../volume.rc +. $(dirname $0)/../../../afr.rc + +cleanup + +TESTS_EXPECTED_IN_LOOP=4 + +TEST glusterd +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +TEST $CLI volume set $V0 cluster.data-self-heal off +TEST $CLI volume set $V0 cluster.metadata-self-heal off +TEST $CLI volume set $V0 cluster.entry-self-heal off +TEST $CLI volume set $V0 self-heal-daemon off +TEST $CLI volume set $V0 granular-entry-heal on + +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 + +# Kill brick-0. +TEST kill_brick $V0 $H0 $B0/${V0}0 + +# Create files under root +for i in {1..2} +do + echo $i > $M0/f$i +done + +# Test that the index associated with '/' is created on B1. +TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID + +# Check for successful creation of granular entry indices +for i in {1..2} +do + TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i +done + +# Now disable granular-entry-heal +TEST $CLI volume set $V0 granular-entry-heal off + +# Start the brick that was down +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1 + +# Enable shd +TEST gluster volume set $V0 cluster.self-heal-daemon on +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1 + +# Now the indices created are granular but the heal is going to be of the +# normal kind. 
We test to make sure that heal still completes fine and that +# the stale granular indices are going to be deleted + +TEST $CLI volume heal $V0 + +# Wait for heal to complete +EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 + +# Test if data was healed +for i in {1..2} +do + TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i +done + +# Now verify that there are no name indices left after self-heal +TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1 +TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2 +TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID + +cleanup diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c index 67f5c6fdfa8..975d5f998bd 100644 --- a/xlators/features/index/src/index.c +++ b/xlators/features/index/src/index.c @@ -654,6 +654,8 @@ index_del (xlator_t *this, uuid_t gfid, const char *subdir, int type) index_priv_t *priv = NULL; int ret = 0; char gfid_path[PATH_MAX] = {0}; + char rename_dst[PATH_MAX] = {0,}; + uuid_t uuid; priv = this->private; GF_ASSERT_AND_GOTO_WITH_ERROR (this->name, !gf_uuid_is_null (gfid), @@ -661,10 +663,27 @@ index_del (xlator_t *this, uuid_t gfid, const char *subdir, int type) make_gfid_path (priv->index_basepath, subdir, gfid, gfid_path, sizeof (gfid_path)); - if ((strcmp (subdir, ENTRY_CHANGES_SUBDIR)) == 0) + if ((strcmp (subdir, ENTRY_CHANGES_SUBDIR)) == 0) { ret = sys_rmdir (gfid_path); - else + /* rmdir above could fail with ENOTEMPTY if the indices under + * it were created when granular-entry-heal was enabled, whereas + * the actual heal that happened was non-granular (or full) in + * nature, resulting in name indices getting left out. To + * clean up this directory without it affecting the IO path perf, + * the directory is renamed to a unique name under + * indices/entry-changes. 
Self-heal will pick up this entry + * during crawl and on lookup into the file system figure that + * the index is stale and subsequently wipe it out using rmdir(). + */ + if ((ret) && (errno == ENOTEMPTY)) { + gf_uuid_generate (uuid); + make_gfid_path (priv->index_basepath, subdir, uuid, + rename_dst, sizeof (rename_dst)); + ret = sys_rename (gfid_path, rename_dst); + } + } else { + ret = sys_unlink (gfid_path); + } if (ret && (errno != ENOENT)) { gf_msg (this->name, GF_LOG_ERROR, errno,