From 7c422b6fd0028300f7b46a084bcd5123c2439cc9 Mon Sep 17 00:00:00 2001
From: Richard Wareing
Date: Tue, 6 Oct 2015 20:09:35 -0700
Subject: afr/cluster: PGFID heal support

Summary:
PGFID healing allows heals that would otherwise fail for lack of an
entry heal to succeed, by performing the entry heal within the same
heal flow. It does this by leveraging the PGFID tracking feature of
the POSIX xlator and examining lookup replies for the PGFID attribute.
If detected, the PGFID is decoded and stored for later use in case the
heal fails for any reason. Cascading heal failures are handled through
recursion.

This feature is critical for a few reasons:

1. General healing predictability - When the SHD attempts to heal a
   given GFID, it should be able to do so without having to wait for
   some other dependent heal to take place.
2. Reliability - In some cases the parent directory may require
   healing, but the required entry in the indices/xattrop directory
   may not exist (e.g. due to bugs, crashes, etc.). Prior to PGFID
   heal support, some sort of external script would be required to
   queue up these heals by using FS-specific utilities to look up the
   parent directory by hardlink, or worse, to do a costly full heal to
   clean them up.
3. Performance - In combination with the multi-threaded SHD, this
   feature will make SHD healing _much_ faster: directories with large
   numbers of files to be healed will no longer have to wait for an
   entry heal to come along. The first file in such a directory queued
   for healing will trigger an entry heal for the directory, allowing
   the other files in that directory to be healed in parallel
   immediately.

Test Plan:
- run prove tests/basic/afr/shd_pgfid_heal.t
- run prove tests/basic/afr/shd*.t
- run prove tests/basic/afr/gfid*.t

Differential Revision: https://phabricator.fb.com/D2546133

Change-Id: I25f586047f8bcafa900c0cc9ee8f0e2128688c73
Signed-off-by: Jeff Darcy
Reviewed-on: https://review.gluster.org/17929
Smoke: Gluster Build System
Tested-by: Jeff Darcy
CentOS-regression: Gluster Build System
Reviewed-by: Jeff Darcy
---
 tests/basic/afr/gfid-unsplit-shd.t           |  98 ++++++++++++++++++++++
 tests/basic/afr/gfid-unsplit-type-mismatch.t |  86 +++++++++++++++++++
 tests/basic/afr/gfid-unsplit.t               | 120 +++++++++++++++++++++++++++
 tests/basic/afr/shd-autofix-nogfid.t         |  68 +++++++++++++++
 tests/basic/afr/shd-force-inspect.t          |  61 ++++++++++++++
 tests/basic/afr/shd-pgfid-heal.t             |  81 ++++++++++++++++++
 tests/basic/gfid_unsplit.t                   | 118 --------------------------
 tests/basic/gfid_unsplit_shd.t               |  98 ----------------------
 tests/basic/gfid_unsplit_type_mismatch.t     |  85 -------------------
 tests/basic/shd_autofix_nogfid.t             |  68 ---------------
 tests/basic/shd_force_inspect.t              |  61 --------------
 11 files changed, 514 insertions(+), 430 deletions(-)
 create mode 100644 tests/basic/afr/gfid-unsplit-shd.t
 create mode 100644 tests/basic/afr/gfid-unsplit-type-mismatch.t
 create mode 100644 tests/basic/afr/gfid-unsplit.t
 create mode 100644 tests/basic/afr/shd-autofix-nogfid.t
 create mode 100644 tests/basic/afr/shd-force-inspect.t
 create mode 100644 tests/basic/afr/shd-pgfid-heal.t
 delete mode 100644 tests/basic/gfid_unsplit.t
 delete mode 100644 tests/basic/gfid_unsplit_shd.t
 delete mode 100644 tests/basic/gfid_unsplit_type_mismatch.t
 delete mode 100644 tests/basic/shd_autofix_nogfid.t
 delete mode 100644 tests/basic/shd_force_inspect.t
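Reviewer note: the tests below repeatedly convert raw trusted.gfid xattr
values into the canonical UUID form used under .glusterfs and in
indices/xattrop, using inline awk. A minimal sketch of the same conversion,
with illustrative helper names that are not part of this change:

    # Convert a raw hex xattr value (as printed by "getfattr -e hex")
    # into the canonical UUID string form.
    gfid_to_uuid() {
        local hex=${1#0x}
        echo "${hex:0:8}-${hex:8:4}-${hex:12:4}-${hex:16:4}-${hex:20:12}"
    }

    # Derive the .glusterfs hardlink path for a GFID on a brick:
    # <brick>/.glusterfs/<first 2 hex chars>/<next 2 hex chars>/<uuid>
    gfid_to_backend_path() {
        local brick=$1 uuid
        uuid=$(gfid_to_uuid "$2")
        echo "$brick/.glusterfs/${uuid:0:2}/${uuid:2:2}/$uuid"
    }

For example, gfid_to_backend_path "$B0/${V0}1" 0xfd551a5cfddd4c1aa4d096ef09ef5c08
yields $B0/${V0}1/.glusterfs/fd/55/fd551a5c-fddd-4c1a-a4d0-96ef09ef5c08, the
same path the tests assemble with awk.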
diff --git a/tests/basic/afr/gfid-unsplit-shd.t b/tests/basic/afr/gfid-unsplit-shd.t
new file mode 100644
index 00000000000..77da5243724
--- /dev/null
+++ b/tests/basic/afr/gfid-unsplit-shd.t
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by majority on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority off
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+#EST $CLI volume set $V0 cluster.favorite-child-by-size off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+# Part I: FUSE Test
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+        --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+mkdir foo
+dd if=/dev/urandom of=foo/splitfile bs=128k count=5 2>/dev/null
+
+MD5=$(md5sum foo/splitfile | cut -d\  -f1)
+
+sleep 1
+cd ~
+
+GFID_PARENT_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_PARENT_FORMATTED=$(echo "$GFID_PARENT_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo/splitfile 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_FORMATTED=$(echo "$GFID_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+
+# Create a split-brain by downing a brick, and flipping the
+# gfid on the down brick, then bring the brick back up.
+
+# For good measure kill the first brick so the inode cache is wiped; we
+# don't want any funny business.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume start $V0 force
+pkill -f gluster/glustershd
+
+rm -f $GFID_LINK_B1
+TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/foo/splitfile
+sleep 1
+TEST touch $B0/${V0}1/foo/splitfile
+
+mkdir -p $B0/${V0}1/.glusterfs/fd/55
+ln $B0/${V0}1/foo/splitfile $B0/${V0}1/.glusterfs/fd/55/fd551a5c-fddd-4c1a-a4d0-96ef09ef5c08
+cd ~
+
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_FORMATTED
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+sleep 5
+
+EXPECT_WITHIN 60 "0" get_pending_heal_count $V0
+
+TEST stat $B0/${V0}1/foo/splitfile
+
+cd $M0
+
+# Tickle the file to trigger the gfid unsplit
+TEST stat foo/splitfile
+sleep 1
+
+# Verify the file is readable
+TEST dd if=foo/splitfile of=/dev/null 2>/dev/null
+
+# Verify entry healing happened on the back-end regardless of the
+# gfid-splitbrain state of the directory.
+TEST stat $B0/${V0}1/foo/splitfile
+
+# Verify the MD5 signature of the file
+HEALED_MD5=$(md5sum foo/splitfile | cut -d\  -f1)
+TEST [ "$MD5" == "$HEALED_MD5" ]
+
+# Verify the file can be removed
+TEST rm -f foo/splitfile
+cd ~
+
+cleanup
diff --git a/tests/basic/afr/gfid-unsplit-type-mismatch.t b/tests/basic/afr/gfid-unsplit-type-mismatch.t
new file mode 100644
index 00000000000..9e205021a0d
--- /dev/null
+++ b/tests/basic/afr/gfid-unsplit-type-mismatch.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by mtime on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+pkill -f gluster/glustershd
+
+# Part I: FUSE Test
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+        --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+dd if=/dev/urandom of=splitfile bs=128k count=5 2>/dev/null
+
+MD5=$(md5sum splitfile | cut -d\  -f1)
+
+# Create a split-brain by downing a brick, and flipping the
+# gfid on the down brick, then bring the brick back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+rm -rf $GFID_DIR_B1
+rm -fv $B0/${V0}1/splitfile
+
+# Now really screw the file up by changing its type to a directory,
+# not a file... the so-called "type mismatch" situation. Our test
+# should prove we can un-mangle this situation using the same strategy.
+mkdir $B0/${V0}1/splitfile
+touch -t 199011011510 $B0/${V0}1/splitfile
+TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
+cd ~
+
+touch $M0/newfile
+
+# Synthetically force a conservative merge of the directory. We want
+# to ensure that conservative merges happen in spite of GFID mismatches;
+# since we can handle them, there's no sense in not doing these. In fact,
+# if we stop them it will block GFID split-brain resolution.
+setfattr -n trusted.afr.patchy-client-1 -v 0x000000000000000000000002 $B0/${V0}1
+setfattr -n trusted.afr.patchy-client-2 -v 0x000000000000000000000002 $B0/${V0}1
+
+# Restart the down brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+sleep 5
+cd $M0
+
+# Tickle the file to trigger the gfid unsplit
+TEST stat splitfile
+sleep 1
+
+# Verify the file is readable
+TEST dd if=splitfile of=/dev/null 2>/dev/null
+# Verify entry healing happened on the back-end regardless of the
+# gfid-splitbrain state of the directory.
+TEST stat $B0/${V0}1/splitfile
+
+# Verify the MD5 signature of the file
+HEALED_MD5=$(md5sum splitfile | cut -d\  -f1)
+TEST [ "$MD5" == "$HEALED_MD5" ]
+
+# Verify the file can be removed
+TEST rm -f splitfile
+cd ~
+
+cleanup
diff --git a/tests/basic/afr/gfid-unsplit.t b/tests/basic/afr/gfid-unsplit.t
new file mode 100644
index 00000000000..0b883ab658f
--- /dev/null
+++ b/tests/basic/afr/gfid-unsplit.t
@@ -0,0 +1,120 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by mtime on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 nfs.disable off
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+# Part I: FUSE Test
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+        --attribute-timeout=0 --entry-timeout=0
+
+dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 2>/dev/null
+
+MD5=$(md5sum $M0/splitfile | cut -d\  -f1)
+
+# Create a split-brain by downing a brick, and flipping the
+# gfid on the down brick, then bring the brick back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+rm -rf $GFID_DIR_B1
+mkdir -p $B0/${V0}1/.glusterfs/fd/55
+ln $B0/${V0}1/splitfile $B0/${V0}1/.glusterfs/fd/55/fd551a5c-fddd-4c1a-a4d0-96ef09ef5c08
+TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
+
+GFID_DIR_B3="$B0/${V0}3/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}3/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+#EST rm -f $B0/${V0}3/splitfile
+#m -rf $GFID_DIR_B3
+
+touch $M0/newfile
+
+# Synthetically force a conservative merge of the directory. We want
+# to ensure that conservative merges happen in spite of GFID mismatches;
+# since we can handle them, there's no sense in not doing these. In fact,
+# if we stop them it will block GFID split-brain resolution.
+setfattr -n trusted.afr.patchy-client-1 -v 0x000000000000000000000002 $B0/${V0}1
+setfattr -n trusted.afr.patchy-client-2 -v 0x000000000000000000000002 $B0/${V0}1
+
+# Restart the down brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+sleep 5
+
+# Tickle the file to trigger the gfid unsplit
+TEST stat $M0/splitfile
+sleep 1
+
+# Verify the file is readable
+TEST dd if=$M0/splitfile of=/dev/null 2>/dev/null
+
+# Verify entry healing happened on the back-end regardless of the
+# gfid-splitbrain state of the directory.
+TEST stat $B0/${V0}1/splitfile
+
+# Verify the MD5 signature of the file
+HEALED_MD5=$(md5sum $M0/splitfile | cut -d\  -f1)
+TEST [ "$MD5" == "$HEALED_MD5" ]
+
+# Verify the file can be removed
+TEST rm -f $M0/splitfile
+
+# Part II: NFS test
+TEST mount_nfs $H0:/$V0 $N0 nolock
+#EST mount -t nfs -o nolock,noatime,noacl,soft,intr $H0:/$V0 $N0;
+
+dd if=/dev/urandom of=$N0/splitfile bs=128k count=5 2>/dev/null
+
+MD5=$(md5sum $N0/splitfile | cut -d\  -f1)
+
+# Create a split-brain by downing a brick, and flipping the
+# gfid on the down brick, then bring the brick back up.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+rm -rf $GFID_DIR_B1
+TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
+
+GFID_DIR_B3="$B0/${V0}3/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}3/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
+#EST rm -f $B0/${V0}3/splitfile
+#m -rf $GFID_DIR_B3
+
+# Restart the down brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+sleep 5
+
+# Tickle the file to trigger the gfid unsplit
+TEST stat $N0/splitfile
+sleep 1
+
+# Verify the file is readable
+TEST dd if=$N0/splitfile of=/dev/null 2>/dev/null
+
+# Verify the MD5 signature of the file
+HEALED_MD5=$(md5sum $N0/splitfile | cut -d\  -f1)
+TEST [ "$MD5" == "$HEALED_MD5" ]
+
+# Verify the file can be removed
+TEST rm -f $N0/splitfile
+
+cleanup
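A note on the trusted.afr values set by the conservative-merge steps in the
tests above: the AFR changelog xattr is twelve bytes, i.e. three big-endian
32-bit counters for pending data, metadata, and entry operations. A small
sketch of the decoding (the helper name is illustrative only):

    # Decode an AFR changelog xattr value such as the ones set above.
    decode_afr_pending() {
        local hex=${1#0x}
        echo "data=$((16#${hex:0:8}))" \
             "metadata=$((16#${hex:8:8}))" \
             "entry=$((16#${hex:16:8}))"
    }

    # decode_afr_pending 0x000000000000000000000002
    # -> data=0 metadata=0 entry=2
    # Two pending entry operations against that client, which is what
    # forces the conservative merge of the directory.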
diff --git a/tests/basic/afr/shd-autofix-nogfid.t b/tests/basic/afr/shd-autofix-nogfid.t
new file mode 100644
index 00000000000..7c9026dce62
--- /dev/null
+++ b/tests/basic/afr/shd-autofix-nogfid.t
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by majority on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 cluster.quorum-type auto
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+        --attribute-timeout=0 --entry-timeout=0
+
+# Kill the SHD while we set up the test
+pkill -f gluster/glustershd
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+mkdir $M0/foo
+dd if=/dev/urandom of=$M0/foo/testfile bs=128k count=5 2>/dev/null
+MD5=$(md5sum $M0/foo/testfile | cut -d\  -f1)
+
+mkdir $B0/${V0}1/foo
+
+# Kick off the SHD and wait 30 seconds for healing to take place
+TEST gluster vol start $V0 force
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+
+# Verify the file was healed back to brick 1
+TEST stat $B0/${V0}1/foo/testfile
+
+# Part II: Test recovery for a file without a GFID
+# Kill the SHD while we set up the test
+pkill -f gluster/glustershd
+TEST kill_brick $V0 $H0 $B0/${V0}1
+rm -f $GFID_LINK_B1
+rm -f $B0/${V0}1/foo/testfile
+touch $B0/${V0}1/foo/testfile
+
+# Queue the directories for healing; don't bother queueing the file,
+# as this shouldn't be required.
+touch $B0/${V0}3/.glusterfs/indices/xattrop/00000000-0000-0000-0000-000000000001
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
+
+TEST gluster vol start $V0 force
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+TEST stat $B0/${V0}1/foo/testfile
+
+# Prove the directory and file are removable
+TEST rm -f $B0/${V0}1/foo/testfile
+TEST rmdir $B0/${V0}1/foo
+
+cleanup
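The manual index entries created above, and again in the next test, all
follow one pattern: touch a file named after the formatted GFID under
.glusterfs/indices/xattrop on a brick holding a good copy, and the SHD will
inspect that GFID on its next crawl. A sketch of the pattern as a reusable
helper (the helper name is illustrative, not part of the test framework):

    # Queue a GFID for inspection by the self-heal daemon.
    queue_index_heal() {
        local brick=$1 hex=${2#0x}    # $2 is a raw GFID such as $GFID_RAW
        local uuid="${hex:0:8}-${hex:8:4}-${hex:12:4}-${hex:16:4}-${hex:20:12}"
        touch "$brick/.glusterfs/indices/xattrop/$uuid"
    }

    # queue_index_heal "$B0/${V0}3" "$GFID_PARENT_RAW"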
diff --git a/tests/basic/afr/shd-force-inspect.t b/tests/basic/afr/shd-force-inspect.t
new file mode 100644
index 00000000000..caceb841322
--- /dev/null
+++ b/tests/basic/afr/shd-force-inspect.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by majority on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+# Part I: FUSE Test
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+        --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+mkdir foo
+dd if=/dev/urandom of=foo/testfile bs=128k count=5 2>/dev/null
+MD5=$(md5sum foo/testfile | cut -d\  -f1)
+
+# Kill the SHD while we set up the test
+pkill -f gluster/glustershd
+
+# Grab the GFID of the file and parent dir
+GFID_PARENT_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_PARENT_FORMATTED=$(echo "$GFID_PARENT_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo/testfile 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_FORMATTED=$(echo "$GFID_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+
+# Nuke the file from brick 1
+rm -f $GFID_LINK_B1
+rm -f $B0/${V0}1/foo/testfile
+
+# Now manually queue up the parent directory for healing
+touch $B0/${V0}2/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
+
+# Kick off the SHD and wait 30 seconds for healing to take place
+TEST gluster vol start $V0 force
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+
+# Verify the file was healed back to brick 1
+TEST stat $B0/${V0}1/foo/testfile
+
+cleanup
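The next test is the one that exercises the new PGFID heal path. It enables
storage.build-pgfid, which has the POSIX xlator maintain a
trusted.pgfid.<parent-uuid> xattr on each file, recording how many links the
file has inside that parent directory; this is the attribute AFR examines in
lookup replies to recover a missing parent GFID. A hedged way to see the
bookkeeping directly on a brick (the exact value encoding may vary by
version):

    # List the parent-GFID xattrs for a file, straight from the brick.
    getfattr -d -m trusted.pgfid -e hex $B0/${V0}1/a/b/c/testfile
    # e.g. trusted.pgfid.<uuid-of-a/b/c>=0x00000001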
diff --git a/tests/basic/afr/shd-pgfid-heal.t b/tests/basic/afr/shd-pgfid-heal.t
new file mode 100644
index 00000000000..d12d29e13ba
--- /dev/null
+++ b/tests/basic/afr/shd-pgfid-heal.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Setup a cluster with 3 replicas, and fav child by majority on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 cluster.quorum-type none
+#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
+#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+TEST $CLI volume set $V0 storage.build-pgfid on
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+sleep 5
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
+        --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+mkdir -p a/b/c
+dd if=/dev/urandom of=a/b/c/testfile bs=128k count=5 2>/dev/null
+MD5=$(md5sum a/b/c/testfile | cut -d\  -f1)
+
+# Kill the SHD while we set up the test
+pkill -f gluster/glustershd
+# Kill the brick as well so its backend state can be modified safely
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+# Grab the GFID of the file and parent dirs
+GFID_PARENT_B_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/a/b 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_PARENT_B_FORMATTED=$(echo "$GFID_PARENT_B_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_PARENT_B_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_PARENT_B_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+GFID_PARENT_C_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/a/b/c 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_PARENT_C_FORMATTED=$(echo "$GFID_PARENT_C_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_PARENT_C_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_PARENT_C_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+GFID_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/a/b/c/testfile 2>/dev/null | grep trusted.gfid | cut -d= -f2)
+GFID_FORMATTED=$(echo "$GFID_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
+GFID_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
+
+#
+# Here we are going to create a situation such that a file 3
+# levels deep into the FS requires healing, along with 2 levels
+# of parent directories. The only signal the SHD has is that the
+# file itself needs healing. The directory (entry) heals are
+# missing, simulating a crash or some sort of bug that we need
+# to be able to recover from.
+#
+
+# Nuke the file from brick 1, along with the parent directories
+# and all backend hard/symbolic links
+rm -f $B0/${V0}1/a/b/c/testfile
+rm -f $GFID_LINK_B1
+rmdir $B0/${V0}1/a/b/c
+rm -f $GFID_PARENT_C_LINK_B1
+rmdir $B0/${V0}1/a/b
+rm -f $GFID_PARENT_B_LINK_B1
+
+# Now manually queue up only the file itself for healing
+touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_FORMATTED
+
+# Kick off the SHD and wait 30 seconds for healing to take place
+TEST gluster vol start $V0 force
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+sleep 5
+
+# Verify the file was healed back to brick 1
+TEST stat $B0/${V0}1/a/b/c/testfile
+
+cleanup
diff --git a/tests/basic/gfid_unsplit.t b/tests/basic/gfid_unsplit.t
deleted file mode 100644
index 0df96bd5ed6..00000000000
--- a/tests/basic/gfid_unsplit.t
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-. $(dirname $0)/../nfs.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-# Setup a cluster with 3 replicas, and fav child by majority on
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
-TEST $CLI volume set $V0 performance.stat-prefetch off
-TEST $CLI volume set $V0 cluster.choose-local off
-TEST $CLI volume set $V0 cluster.quorum-type none
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-TEST $CLI volume set $V0 nfs.disable off
-#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
-#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
-TEST $CLI volume set $V0 cluster.favorite-child-policy majority
-TEST $CLI volume set $V0 cluster.metadata-self-heal off
-TEST $CLI volume set $V0 cluster.data-self-heal off
-TEST $CLI volume set $V0 cluster.entry-self-heal off
-TEST $CLI volume start $V0
-sleep 5
-
-# Part I: FUSE Test
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
-        --attribute-timeout=0 --entry-timeout=0
-
-dd if=/dev/urandom of=$M0/splitfile bs=128k count=5 2>/dev/null
-
-MD5=$(md5sum $M0/splitfile | cut -d\  -f1)
-
-# Create a split-brain by downing a brick, and flipping the
-# gfid on the down brick, then bring the brick back up.
-TEST kill_brick $V0 $H0 $B0/${V0}1
-GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
-rm -rf $GFID_DIR_B1
-TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
-
-GFID_DIR_B3="$B0/${V0}3/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}3/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
-#EST rm -f $B0/${V0}3/splitfile
-#m -rf $GFID_DIR_B3
-
-touch $M0/newfile
-
-# Synthetically force a conservative merge of the directory. We want
-# to ensure that conservative merges happen in-spite of GFID mis-matches,
-# since we can handle them there's no sense in not doing these. In fact,
-# if we stop them it will block GFID split-brain resolution.
-setfattr -n trusted.afr.patchy-client-1 -v 0x000000000000000000000002 $B0/${V0}1
-setfattr -n trusted.afr.patchy-client-2 -v 0x000000000000000000000002 $B0/${V0}1
-
-# Restart the down brick
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-sleep 5
-
-# Tickle the file to trigger the gfid unsplit
-TEST stat $M0/splitfile
-sleep 1
-
-# Verify the file is readable
-TEST dd if=$M0/splitfile of=/dev/null 2>/dev/null
-
-# Verify entry healing happened on the back-end regardless of the
-# gfid-splitbrain state of the directory.
-TEST stat $B0/${V0}1/splitfile
-
-# Verify the MD5 signature of the file
-HEALED_MD5=$(md5sum $M0/splitfile | cut -d\  -f1)
-TEST [ "$MD5" == "$HEALED_MD5" ]
-
-# Verify the file can be removed
-TEST rm -f $M0/splitfile
-
-# Part II: NFS test
-TEST mount_nfs $H0:/$V0 $N0 nolock
-#EST mount -t nfs -o nolock,noatime,noacl,soft,intr $H0:/$V0 $N0;
-
-dd if=/dev/urandom of=$N0/splitfile bs=128k count=5 2>/dev/null
-
-MD5=$(md5sum $N0/splitfile | cut -d\  -f1)
-
-# Create a split-brain by downing a brick, and flipping the
-# gfid on the down brick, then bring the brick back up.
-TEST kill_brick $V0 $H0 $B0/${V0}1
-GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
-rm -rf $GFID_DIR_B1
-TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
-
-GFID_DIR_B3="$B0/${V0}3/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}3/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
-#EST rm -f $B0/${V0}3/splitfile
-#m -rf $GFID_DIR_B3
-
-# Restart the down brick
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-sleep 5
-
-# Tickle the file to trigger the gfid unsplit
-TEST stat $N0/splitfile
-sleep 1
-
-# Verify the file is readable
-TEST dd if=$N0/splitfile of=/dev/null 2>/dev/null
-
-# Verify the MD5 signature of the file
-HEALED_MD5=$(md5sum $N0/splitfile | cut -d\  -f1)
-TEST [ "$MD5" == "$HEALED_MD5" ]
-
-# Verify the file can be removed
-TEST rm -f $N0/splitfile
-
-cleanup
diff --git a/tests/basic/gfid_unsplit_shd.t b/tests/basic/gfid_unsplit_shd.t
deleted file mode 100644
index 25fab290177..00000000000
--- a/tests/basic/gfid_unsplit_shd.t
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-# Setup a cluster with 3 replicas, and fav child by majority on
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
-TEST $CLI volume set $V0 cluster.choose-local off
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-TEST $CLI volume set $V0 nfs.disable off
-TEST $CLI volume set $V0 cluster.quorum-type none
-TEST $CLI volume set $V0 cluster.heal-timeout 5
-TEST $CLI volume set $V0 cluster.favorite-child-policy majority
-#EST $CLI volume set $V0 cluster.favorite-child-by-majority off
-#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
-#EST $CLI volume set $V0 cluster.favorite-child-by-size off
-TEST $CLI volume set $V0 cluster.metadata-self-heal off
-TEST $CLI volume set $V0 cluster.data-self-heal off
-TEST $CLI volume set $V0 cluster.entry-self-heal off
-TEST $CLI volume start $V0
-sleep 5
-
-# Part I: FUSE Test
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
-        --attribute-timeout=0 --entry-timeout=0
-
-cd $M0
-mkdir foo
-dd if=/dev/urandom of=foo/splitfile bs=128k count=5 2>/dev/null
-
-MD5=$(md5sum foo/splitfile | cut -d\  -f1)
-
-sleep 1
-cd ~
-
-GFID_PARENT_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo 2>/dev/null | grep trusted.gfid | cut -d= -f2)
-GFID_PARENT_FORMATTED=$(echo "$GFID_PARENT_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
-GFID_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo/splitfile 2>/dev/null | grep trusted.gfid | cut -d= -f2)
-GFID_FORMATTED=$(echo "$GFID_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
-GFID_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
-
-# Create a split-brain by downing a brick, and flipping the
-# gfid on the down brick, then bring the brick back up.
-
-# For good measure kill the first brick so the inode cache is wiped, we don't
-# want any funny business
-TEST kill_brick $V0 $H0 $B0/${V0}1
-TEST $CLI volume start $V0 force
-pkill -f gluster/glustershd
-
-rm -f $GFID_LINK_B1
-TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/foo/splitfile
-sleep 1
-TEST touch $B0/${V0}1/foo/splitfile
-
-mkdir -p $B0/${V0}1/.glusterfs/fd/55
-ln $B0/${V0}1/foo/splitfile $B0/${V0}1/.glusterfs/fd/55/fd551a5c-fddd-4c1a-a4d0-96ef09ef5c08
-cd ~
-
-touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_FORMATTED
-touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
-
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-sleep 5
-
-EXPECT_WITHIN 60 "0" get_pending_heal_count $V0
-
-TEST stat $B0/${V0}1/foo/splitfile
-
-cd $M0
-
-# Tickle the file to trigger the gfid unsplit
-TEST stat foo/splitfile
-sleep 1
-
-# Verify the file is readable
-TEST dd if=foo/splitfile of=/dev/null 2>/dev/null
-
-# Verify entry healing happened on the back-end regardless of the
-# gfid-splitbrain state of the directory.
-TEST stat $B0/${V0}1/foo/splitfile
-
-# Verify the MD5 signature of the file
-HEALED_MD5=$(md5sum foo/splitfile | cut -d\  -f1)
-TEST [ "$MD5" == "$HEALED_MD5" ]
-
-# Verify the file can be removed
-TEST rm -f foo/splitfile
-cd ~
-
-cleanup
diff --git a/tests/basic/gfid_unsplit_type_mismatch.t b/tests/basic/gfid_unsplit_type_mismatch.t
deleted file mode 100644
index 51e6a36445b..00000000000
--- a/tests/basic/gfid_unsplit_type_mismatch.t
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-# Setup a cluster with 3 replicas, and fav child by majority on
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
-TEST $CLI volume set $V0 cluster.choose-local off
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-TEST $CLI volume set $V0 nfs.disable off
-TEST $CLI volume set $V0 cluster.quorum-type none
-TEST $CLI volume set $V0 cluster.favorite-child-policy majority
-#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
-#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
-TEST $CLI volume set $V0 cluster.metadata-self-heal off
-TEST $CLI volume set $V0 cluster.data-self-heal off
-TEST $CLI volume set $V0 cluster.entry-self-heal off
-TEST $CLI volume start $V0
-sleep 5
-
-# Part I: FUSE Test
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
-        --attribute-timeout=0 --entry-timeout=0
-
-cd $M0
-dd if=/dev/urandom of=splitfile bs=128k count=5 2>/dev/null
-
-MD5=$(md5sum splitfile | cut -d\  -f1)
-
-# Create a split-brain by downing a brick, and flipping the
-# gfid on the down brick, then bring the brick back up.
-TEST kill_brick $V0 $H0 $B0/${V0}1
-GFID_DIR_B1="$B0/${V0}1/.glusterfs/$(getfattr -n trusted.gfid -e hex $B0/${V0}1/splitfile 2>/dev/null | grep ^trusted | cut -d= -f2 | awk '{print substr($0,3,2)}')"
-rm -rf $GFID_DIR_B1
-rm -fv $B0/${V0}1/splitfile
-
-# Now really screw the file up, by changing it's type to a directory
-# not a file...the so-called "type mismatch" situation. Our test
-# should prove we can un-mangle this situation using the same strategy.
-mkdir $B0/${V0}1/splitfile
-touch -t 199011011510 $B0/${V0}1/splitfile
-TEST setfattr -n "trusted.gfid" -v "0xfd551a5cfddd4c1aa4d096ef09ef5c08" $B0/${V0}1/splitfile
-cd ~
-
-touch $M0/newfile
-
-# Synthetically force a conservative merge of the directory. We want
-# to ensure that conservative merges happen in-spite of GFID mis-matches,
-# since we can handle them there's no sense in not doing these. In fact,
-# if we stop them it will block GFID split-brain resolution.
-setfattr -n trusted.afr.patchy-client-1 -v 0x000000000000000000000002 $B0/${V0}1
-setfattr -n trusted.afr.patchy-client-2 -v 0x000000000000000000000002 $B0/${V0}1
-
-# Restart the down brick
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-sleep 5
-cd $M0
-
-# Tickle the file to trigger the gfid unsplit
-TEST stat splitfile
-sleep 1
-
-# Verify the file is readable
-TEST dd if=splitfile of=/dev/null 2>/dev/null
-
-# Verify entry healing happened on the back-end regardless of the
-# gfid-splitbrain state of the directory.
-TEST stat $B0/${V0}1/splitfile
-
-# Verify the MD5 signature of the file
-HEALED_MD5=$(md5sum splitfile | cut -d\  -f1)
-TEST [ "$MD5" == "$HEALED_MD5" ]
-
-# Verify the file can be removed
-TEST rm -f splitfile
-cd ~
-
-cleanup
diff --git a/tests/basic/shd_autofix_nogfid.t b/tests/basic/shd_autofix_nogfid.t
deleted file mode 100644
index 5a6ed66f522..00000000000
--- a/tests/basic/shd_autofix_nogfid.t
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-# Setup a cluster with 3 replicas, and fav child by majority on
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
-TEST $CLI volume set $V0 cluster.choose-local off
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-TEST $CLI volume set $V0 nfs.disable on
-TEST $CLI volume set $V0 cluster.quorum-type auto
-TEST $CLI volume set $V0 cluster.favorite-child-policy majority
-#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
-#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
-TEST $CLI volume set $V0 cluster.metadata-self-heal off
-TEST $CLI volume set $V0 cluster.data-self-heal off
-TEST $CLI volume set $V0 cluster.entry-self-heal off
-TEST $CLI volume start $V0
-sleep 5
-
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
-        --attribute-timeout=0 --entry-timeout=0
-
-# Kill the SHD while we setup the test
-pkill -f gluster/glustershd
-TEST kill_brick $V0 $H0 $B0/${V0}1
-
-mkdir $M0/foo
-dd if=/dev/urandom of=$M0/foo/testfile bs=128k count=5 2>/dev/null
-MD5=$(md5sum $M0/foo/testfile | cut -d\  -f1)
-
-mkdir $B0/${V0}1/foo
-
-# Kick off the SHD and wait 30 seconds for healing to take place
-TEST gluster vol start $V0 force
-EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
-
-# Verify the file was healed back to brick 1
-TEST stat $B0/${V0}1/foo/testfile
-
-# Part II: Test recovery for a file without a GFID
-# Kill the SHD while we setup the test
-pkill -f gluster/glustershd
-TEST kill_brick $V0 $H0 $B0/${V0}1
-rm -f $GFID_LINK_B1
-rm -f $B0/${V0}1/foo/testfile
-touch $B0/${V0}1/foo/testfile
-
-# Queue the directories for healing, don't bother the queue the file
-# as this shouldn't be required.
-touch $B0/${V0}3/.glusterfs/indices/xattrop/00000000-0000-0000-0000-000000000001
-touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
-
-TEST gluster vol start $V0 force
-EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
-TEST stat $B0/${V0}1/foo/testfile
-
-# Prove the directory and file are removable
-TEST rm -f $B0/${V0}1/foo/testfile
-TEST rmdir $B0/${V0}1/foo
-
-cleanup
diff --git a/tests/basic/shd_force_inspect.t b/tests/basic/shd_force_inspect.t
deleted file mode 100644
index ebf3f7a17ad..00000000000
--- a/tests/basic/shd_force_inspect.t
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-# Setup a cluster with 3 replicas, and fav child by majority on
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3};
-TEST $CLI volume set $V0 cluster.choose-local off
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-TEST $CLI volume set $V0 nfs.disable on
-TEST $CLI volume set $V0 cluster.quorum-type none
-TEST $CLI volume set $V0 cluster.favorite-child-policy majority
-#EST $CLI volume set $V0 cluster.favorite-child-by-majority on
-#EST $CLI volume set $V0 cluster.favorite-child-by-mtime on
-TEST $CLI volume set $V0 cluster.metadata-self-heal off
-TEST $CLI volume set $V0 cluster.data-self-heal off
-TEST $CLI volume set $V0 cluster.entry-self-heal off
-TEST $CLI volume start $V0
-sleep 5
-
-# Part I: FUSE Test
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 \
-        --attribute-timeout=0 --entry-timeout=0
-
-cd $M0
-mkdir foo
-dd if=/dev/urandom of=foo/testfile bs=128k count=5 2>/dev/null
-MD5=$(md5sum foo/testfile | cut -d\  -f1)
-
-# Kill the SHD while we setup the test
-pkill -f gluster/glustershd
-
-# Grab the GFID of the file and parent dir
-GFID_PARENT_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo 2>/dev/null | grep trusted.gfid | cut -d= -f2)
-GFID_PARENT_FORMATTED=$(echo "$GFID_PARENT_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
-GFID_RAW=$(getfattr -n trusted.gfid -e hex $B0/${V0}1/foo/testfile 2>/dev/null | grep trusted.gfid | cut -d= -f2)
-GFID_FORMATTED=$(echo "$GFID_RAW" | awk '{print substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')
-GFID_LINK_B1="$B0/${V0}1/.glusterfs/$(echo $GFID_RAW | awk '{print substr($0,3,2)"/"substr($0,5,2)"/"substr($1,3,8)"-"substr($1,11,4)"-"substr($1,15,4)"-"substr($1,19,4)"-"substr($1,23,12)}')"
-
-# Nuke the file from brick 1
-rm -f $GFID_LINK_B1
-rm -f $B0/${V0}1/foo/testfile
-
-# Now manually queue up the parent directory for healing
-touch $B0/${V0}2/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
-touch $B0/${V0}3/.glusterfs/indices/xattrop/$GFID_PARENT_FORMATTED
-
-# Kick off the SHD and wait 30 seconds for healing to take place
-TEST gluster vol start patchy force
-EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
-
-# Verify the file was healed back to brick 1
-TEST stat $B0/${V0}1/foo/testfile
-
-cleanup
--