Diffstat (limited to 'tests/basic/afr/self-heald.t')
 tests/basic/afr/self-heald.t | 57 +++++++++++++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 24 deletions(-)
diff --git a/tests/basic/afr/self-heald.t b/tests/basic/afr/self-heald.t
index a2d101edb59..24c82777921 100644
--- a/tests/basic/afr/self-heald.t
+++ b/tests/basic/afr/self-heald.t
@@ -4,6 +4,7 @@
. $(dirname $0)/../../volume.rc
cleanup;
+START_TIMESTAMP=`date +%s`
function kill_multiple_bricks {
local vol=$1
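Note: START_TIMESTAMP feeds the log_newer assertion added at the end of this
patch, limiting the log scan to messages produced by this run. The real helper
ships with the test framework's include files; a minimal sketch of the idea,
hypothetical only, assuming GNU find and logs under /var/log/glusterfs:

    function log_newer {
        local ts=$1; shift
        # succeed iff some log modified after epoch $ts mentions the pattern
        find /var/log/glusterfs -type f -newermt "@${ts}" -print0 |
            xargs -0 -r grep -l -- "$*" | grep -q .
    }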
@@ -37,7 +38,8 @@ function check_bricks_up {
function disconnected_brick_count {
local vol=$1
- $CLI volume heal $vol info | grep -i transport | wc -l
+ $CLI volume heal $vol info | \
+ egrep -i '(transport|Socket is not connected)' | wc -l
}
TESTS_EXPECTED_IN_LOOP=20
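Note: the widened egrep above accounts for platform-specific strerror(ENOTCONN)
text: Linux prints "Transport endpoint is not connected" while BSD-derived
systems print "Socket is not connected". Illustrative heal-info output that the
function counts (exact wording varies by platform and version):

    Brick somehost:/bricks/patchy0
    Status: Transport endpoint is not connected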
@@ -46,8 +48,9 @@ TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5}
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
decide_kill=$((`date +"%j"|sed 's/^0*//'` % 2 ))
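Note: date +"%j" is the zero-padded day of the year; the sed strips leading
zeros because bash arithmetic would otherwise treat the value as octal. For
example:

    echo $(( 089 % 2 ))   # error: 089: value too great for base
    echo $((  89 % 2 ))   # prints 1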
@@ -56,7 +59,7 @@ cd $M0
HEAL_FILES=0
for i in {1..10}
do
- dd if=/dev/urandom of=f bs=1M count=10 2>/dev/null
+ dd if=/dev/urandom of=f bs=1024k count=10
HEAL_FILES=$(($HEAL_FILES+1)) #+1 for data/metadata self-heal of 'f'
mkdir a; cd a;
#+3 for metadata self-heal of 'a' one per subvolume of DHT
@@ -66,7 +69,7 @@ done
HEAL_FILES=$(($HEAL_FILES + 3))
cd ~
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
#When bricks are down, it says Transport End point Not connected for them
EXPECT "3" disconnected_brick_count $V0
@@ -76,12 +79,12 @@ EXPECT "3" disconnected_brick_count $V0
#replica pair.
for i in {11..20}; do echo abc > $M0/$i; done
HEAL_FILES=$(($HEAL_FILES + 10)) #count extra 10 files
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
#delete the files now, so that stale indices will remain.
for i in {11..20}; do rm -f $M0/$i; done
#After deleting files they should not appear in heal info
HEAL_FILES=$(($HEAL_FILES - 10))
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
TEST ! $CLI volume heal $V0
@@ -90,30 +93,30 @@ TEST ! $CLI volume heal $V0
TEST ! $CLI volume heal $V0 full
TEST $CLI volume start $V0 force
TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
check_bricks_up $V0
TEST $CLI volume heal $V0
sleep 5 #Until the heal-statistics command implementation
#check that this heals the contents partially
-TEST [ $HEAL_FILES -gt $(afr_get_pending_heal_count $V0) ]
+TEST [ $HEAL_FILES -gt $(get_pending_heal_count $V0) ]
TEST $CLI volume heal $V0 full
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#Test that ongoing IO is not considered as Pending heal
-(dd if=/dev/zero of=$M0/file1 bs=1K 2>/dev/null 1>/dev/null)&
+(dd if=/dev/zero of=$M0/file1 bs=1k 2>/dev/null 1>/dev/null)&
back_pid1=$!;
-(dd if=/dev/zero of=$M0/file2 bs=1K 2>/dev/null 1>/dev/null)&
+(dd if=/dev/zero of=$M0/file2 bs=1k 2>/dev/null 1>/dev/null)&
back_pid2=$!;
-(dd if=/dev/zero of=$M0/file3 bs=1K 2>/dev/null 1>/dev/null)&
+(dd if=/dev/zero of=$M0/file3 bs=1k 2>/dev/null 1>/dev/null)&
back_pid3=$!;
-(dd if=/dev/zero of=$M0/file4 bs=1K 2>/dev/null 1>/dev/null)&
+(dd if=/dev/zero of=$M0/file4 bs=1k 2>/dev/null 1>/dev/null)&
back_pid4=$!;
-(dd if=/dev/zero of=$M0/file5 bs=1K 2>/dev/null 1>/dev/null)&
+(dd if=/dev/zero of=$M0/file5 bs=1k 2>/dev/null 1>/dev/null)&
back_pid5=$!;
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
kill -SIGTERM $back_pid1;
kill -SIGTERM $back_pid2;
kill -SIGTERM $back_pid3;
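Note: the EXPECT 0 amid five live dd writers asserts that in-flight IO is never
reported as pending heal; disabling performance.flush-behind earlier in this
patch plausibly keeps that check deterministic by making writes reach the
bricks synchronously. The related "0" -> "^0$" changes anchor the expected
count so that values like 10 or 105 no longer match. To watch what AFR tracks
during such IO, one could inspect the changelog xattrs on a brick
(illustrative):

    getfattr -d -m . -e hex $B0/${V0}0/file1   # trusted.afr.* pending counters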
@@ -130,13 +133,13 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
EXPECT "off" volume_option $V0 cluster.data-self-heal
kill_multiple_bricks $V0 $H0 $B0
echo abc > $M0/f
-EXPECT 1 afr_get_pending_heal_count $V0
+EXPECT 1 get_pending_heal_count $V0
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
check_bricks_up $V0
TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
TEST $CLI volume set $V0 cluster.data-self-heal on
#METADATA
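Note: the DATA, METADATA, and ENTRY blocks follow one pattern: switch the
client-side self-heal option off, down one brick per pair, dirty a file,
verify the pending count, restart the bricks, and let "volume heal" drive the
counter back to zero. Only the dirtying operation differs:

    data     ->  echo abc > $M0/f
    metadata ->  chmod 777 $M0/f
    entry    ->  touch $M0/d/a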
@@ -145,13 +148,13 @@ EXPECT "off" volume_option $V0 cluster.metadata-self-heal
kill_multiple_bricks $V0 $H0 $B0
TEST chmod 777 $M0/f
-EXPECT 1 afr_get_pending_heal_count $V0
+EXPECT 1 get_pending_heal_count $V0
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
check_bricks_up $V0
TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
TEST $CLI volume set $V0 cluster.metadata-self-heal on
#ENTRY
@@ -159,12 +162,15 @@ TEST $CLI volume set $V0 cluster.entry-self-heal off
EXPECT "off" volume_option $V0 cluster.entry-self-heal
kill_multiple_bricks $V0 $H0 $B0
TEST touch $M0/d/a
-EXPECT 2 afr_get_pending_heal_count $V0
+# 4 if mtime/ctime is modified for d in bricks without a
+# 2 otherwise
+PENDING=$( get_pending_heal_count $V0 )
+TEST test $PENDING -eq 2 -o $PENDING -eq 4
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
check_bricks_up $V0
TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
TEST $CLI volume set $V0 cluster.entry-self-heal on
#Negative test cases
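Note: the 2-or-4 acceptance above encodes the timing detail spelled out in the
new comments: creating $M0/d/a always contributes a base count of 2, and when
the surviving bricks also record a changed mtime/ctime on d, 2 more metadata
pendings appear:

    PENDING == 2  ->  base pendings only
    PENDING == 4  ->  plus mtime/ctime (metadata) pendings on d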
@@ -181,4 +187,7 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{6}
TEST $CLI volume start $V0
TEST ! $CLI volume heal $V0 info
+# Check for non Linux systems that we did not mess with directory offsets
+TEST ! log_newer $START_TIMESTAMP "offset reused from another DIR"
+
cleanup
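Note: to run this test on its own from a glusterfs source tree (assuming the
standard prove-based harness that the .t suffix implies):

    prove -vf tests/basic/afr/self-heald.t
    # or via the repository wrapper:
    ./run-tests.sh tests/basic/afr/self-heald.t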