From 21c282ef311d3d7385bba37ddb0a26fb12178409 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K
Date: Tue, 25 Mar 2014 11:07:31 +0530
Subject: cluster/afr: Sparse file self-heal changes

- Fix boundary condition for offset
- Honour data-self-heal-algorithm option
- Added tests for sparse file self-healing

Change-Id: I14bb1c9d04118a3df4072f962fc8f2f197391d95
BUG: 1080707
Signed-off-by: Pranith Kumar K
Reviewed-on: http://review.gluster.org/7339
Tested-by: Gluster Build System
Reviewed-by: Anand Avati
---
 tests/basic/afr/sparse-file-self-heal.t | 121 ++++++++++++++++++++++++++++++++
 1 file changed, 121 insertions(+)
 create mode 100644 tests/basic/afr/sparse-file-self-heal.t

diff --git a/tests/basic/afr/sparse-file-self-heal.t b/tests/basic/afr/sparse-file-self-heal.t
new file mode 100644
index 000000000..9b795c331
--- /dev/null
+++ b/tests/basic/afr/sparse-file-self-heal.t
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+#This test checks that self-heal of files with holes works properly.
+#bigger is 2M, big is 1M, small is anything less
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 data-self-heal-algorithm full
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST dd if=/dev/urandom of=$M0/small count=1 bs=1M
+TEST dd if=/dev/urandom of=$M0/bigger2big count=1 bs=2M
+TEST dd if=/dev/urandom of=$M0/big2bigger count=1 bs=1M
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+#File with >128k size hole
+TEST truncate -s 1M $M0/big
+big_md5sum=$(md5sum $M0/big | awk '{print $1}')
+
+#File with <128k hole
+TEST truncate -s 0 $M0/small
+TEST truncate -s 64k $M0/small
+small_md5sum=$(md5sum $M0/small | awk '{print $1}')
+
+#Bigger file truncated to big size hole
+TEST truncate -s 0 $M0/bigger2big
+TEST truncate -s 1M $M0/bigger2big
+bigger2big_md5sum=$(md5sum $M0/bigger2big | awk '{print $1}')
+
+#Big file truncated to Bigger size hole
+TEST truncate -s 2M $M0/big2bigger
+big2bigger_md5sum=$(md5sum $M0/big2bigger | awk '{print $1}')
+
+$CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST gluster volume heal $V0 full
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
+small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
+bigger2big_md5sum_0=$(md5sum $B0/${V0}0/bigger2big | awk '{print $1}')
+big2bigger_md5sum_0=$(md5sum $B0/${V0}0/big2bigger | awk '{print $1}')
+
+EXPECT $big_md5sum echo $big_md5sum_0
+EXPECT $small_md5sum echo $small_md5sum_0
+EXPECT $big2bigger_md5sum echo $big2bigger_md5sum_0
+EXPECT $bigger2big_md5sum echo $bigger2big_md5sum_0
+
+
+EXPECT "1" has_holes $B0/${V0}0/big
+#Because self-heal writes the final chunk, the hole should not be present
+#for files < 128K
+EXPECT "0" has_holes $B0/${V0}0/small
+# Since the source is smaller than the sink, self-heal does a blind copy,
+# so no holes will be present
+EXPECT "0" has_holes $B0/${V0}0/bigger2big
+EXPECT "1" has_holes $B0/${V0}0/big2bigger
+
+TEST rm -f $M0/*
+
+#Repeat the same tests with the diff self-heal algorithm
+TEST $CLI volume set $V0 data-self-heal-algorithm diff
+
+TEST dd if=/dev/urandom of=$M0/small count=1 bs=1M
+TEST dd if=/dev/urandom of=$M0/big2bigger count=1 bs=1M
+TEST dd if=/dev/urandom of=$M0/bigger2big count=1 bs=2M
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+#File with >128k size hole
+TEST truncate -s 1M $M0/big
+big_md5sum=$(md5sum $M0/big | awk '{print $1}')
+
+#File with <128k hole
+TEST truncate -s 0 $M0/small
+TEST truncate -s 64k $M0/small
+small_md5sum=$(md5sum $M0/small | awk '{print $1}')
+
+#Bigger file truncated to big size hole
+TEST truncate -s 0 $M0/bigger2big
+TEST truncate -s 1M $M0/bigger2big
+bigger2big_md5sum=$(md5sum $M0/bigger2big | awk '{print $1}')
+
+#Big file truncated to Bigger size hole
+TEST truncate -s 2M $M0/big2bigger
+big2bigger_md5sum=$(md5sum $M0/big2bigger | awk '{print $1}')
+
+$CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST gluster volume heal $V0 full
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
+small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
+bigger2big_md5sum_0=$(md5sum $B0/${V0}0/bigger2big | awk '{print $1}')
+big2bigger_md5sum_0=$(md5sum $B0/${V0}0/big2bigger | awk '{print $1}')
+
+EXPECT $big_md5sum echo $big_md5sum_0
+EXPECT $small_md5sum echo $small_md5sum_0
+EXPECT $big2bigger_md5sum echo $big2bigger_md5sum_0
+EXPECT $bigger2big_md5sum echo $bigger2big_md5sum_0
+
+EXPECT "1" has_holes $B0/${V0}0/big
+EXPECT "1" has_holes $B0/${V0}0/big2bigger
+EXPECT "0" has_holes $B0/${V0}0/bigger2big
+EXPECT "0" has_holes $B0/${V0}0/small
+
+cleanup
--
cgit
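A note on the hole checks above: has_holes comes from tests/volume.rc and reports "1" when a file's allocated bytes fall short of its apparent size. A minimal stand-alone sketch of that idea, assuming GNU coreutils stat (the in-tree helper may be implemented differently):

#!/bin/bash
# Create a 1M file with no written data: apparent size 1M, zero data blocks.
truncate -s 1M /tmp/sparse_demo
# %b = allocated blocks, %B = bytes per block, %s = apparent size.
allocated=$(( $(stat -c '%b * %B' /tmp/sparse_demo) ))
apparent=$(stat -c '%s' /tmp/sparse_demo)
[ "$allocated" -lt "$apparent" ] && echo 1 || echo 0   # prints 1: file has holes
rm -f /tmp/sparse_demo

This is also why the expectations above differ per file: as the test's own comments note, self-heal always writes the final chunk, so files smaller than the 128K chunk size come out fully allocated on the sink.
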
From 283ae136d4974eefabd65880098449ae244b2d50 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K
Date: Wed, 26 Mar 2014 11:03:01 +0530
Subject: tests: Stale file lookup test

Change-Id: I6edfc5b7ee42677e92d9cff6a7180692d20e9310
BUG: 1080759
Signed-off-by: Pranith Kumar K
Reviewed-on: http://review.gluster.org/7341
Reviewed-by: Ravishankar N
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 tests/basic/afr/stale-file-lookup.t | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 tests/basic/afr/stale-file-lookup.t

diff --git a/tests/basic/afr/stale-file-lookup.t b/tests/basic/afr/stale-file-lookup.t
new file mode 100644
index 000000000..24a478d5c
--- /dev/null
+++ b/tests/basic/afr/stale-file-lookup.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+#This test checks that lookup of a stale file fails.
+#A file is deleted while a brick is down. If the file is accessed before
+#self-heal can happen to it, the access should fail.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST touch $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST rm -f $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+TEST stat $B0/${V0}0/a
+TEST ! stat $B0/${V0}1/a
+TEST ! ls -l $M0/a
+
+cleanup
--
cgit
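What the last three assertions establish: the deleted file's backend copy survives only on the brick that was down, and AFR must not serve it to clients. A quick manual check along the same lines, assuming the same $B0/$V0/$M0 layout (the test asserts only that the lookup fails, not which errno comes back):

stat $B0/${V0}0/a      # succeeds: brick0 was down during the rm and kept a stale copy
stat $B0/${V0}1/a      # fails: brick1 processed the rm
ls -l $M0/a            # fails on the mount: AFR recognises the surviving copy as stale
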
From 36c7f8341540a1c93b5b0aa84688e58ed93422f8 Mon Sep 17 00:00:00 2001
From: Ravishankar N
Date: Wed, 26 Mar 2014 11:41:37 +0530
Subject: tests/afr: select correct read-child for data OPs.

Change-Id: If84bc489b6c45bde3bdb858da5f1600cea78c8a5
BUG: 1080759
Signed-off-by: Ravishankar N
Reviewed-on: http://review.gluster.org/7345
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 tests/basic/afr/read-subvol-data.t | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)
 create mode 100644 tests/basic/afr/read-subvol-data.t

diff --git a/tests/basic/afr/read-subvol-data.t b/tests/basic/afr/read-subvol-data.t
new file mode 100644
index 000000000..7db4988fa
--- /dev/null
+++ b/tests/basic/afr/read-subvol-data.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+#Test if the read source is selected based on the data transaction for a regular file.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+#Init
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+#Test
+TEST $CLI volume set $V0 cluster.read-subvolume $V0-client-1
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST dd if=/dev/urandom of=$M0/afr_success_5.txt bs=1M count=1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST dd if=/dev/urandom of=$M0/afr_success_5.txt bs=1M count=10
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 5 "10485760" echo `ls -l $M0/afr_success_5.txt | awk '{ print $5}'`
+
+#Cleanup
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/*
--
cgit

From e7dcc7f8240ef3f54f39b2f243c1eb0eb1cd3844 Mon Sep 17 00:00:00 2001
From: Ravishankar N
Date: Wed, 26 Mar 2014 11:09:17 +0530
Subject: tests/afr: gfid mismatch test

Change-Id: I12bae9c4035d5b28292e8085a5b600a3e22abaf4
BUG: 1080759
Signed-off-by: Ravishankar N
Reviewed-on: http://review.gluster.org/7342
Reviewed-by: Vijay Bellur
Tested-by: Gluster Build System
---
 tests/basic/afr/gfid-mismatch.t | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)
 create mode 100644 tests/basic/afr/gfid-mismatch.t

diff --git a/tests/basic/afr/gfid-mismatch.t b/tests/basic/afr/gfid-mismatch.t
new file mode 100644
index 000000000..05f48d43a
--- /dev/null
+++ b/tests/basic/afr/gfid-mismatch.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+#Test that GFID mismatches result in EIO
+
+. $(dirname $0)/../../include.rc
+cleanup;
+
+#Init
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+#Test
+TEST touch $M0/file
+TEST setfattr -n trusted.gfid -v 0sBfz5vAdHTEK1GZ99qjqTIg== $B0/brick0/file
+TEST ! "find $M0/file | xargs stat"
+
+#Cleanup
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/*
--
cgit
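The setfattr line in gfid-mismatch.t plants a different GFID on one replica (the 0s prefix is setfattr's base64 notation). To see the mismatch that makes AFR return EIO, the trusted.gfid xattr of each backend copy can be compared directly; a sketch assuming getfattr from the attr package, run against the brick backends:

# Each replica stores its GFID in the trusted.gfid xattr; after the test's
# setfattr, the two values differ, which is the condition AFR flags as EIO.
getfattr -n trusted.gfid -e hex $B0/brick0/file
getfattr -n trusted.gfid -e hex $B0/brick1/file
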
From 49fbc578ef96b7952d4d77993fb8a7212ae486dd Mon Sep 17 00:00:00 2001
From: Ravishankar N
Date: Wed, 26 Mar 2014 11:22:12 +0530
Subject: tests/afr: select correct read-child for entry OPs.

Change-Id: If375c937579a18d603ed70232130a4664060e9d6
BUG: 1080759
Signed-off-by: Ravishankar N
Reviewed-on: http://review.gluster.org/7344
Reviewed-by: Pranith Kumar Karampuri
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 tests/basic/afr/read-subvol-entry.t | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 tests/basic/afr/read-subvol-entry.t

diff --git a/tests/basic/afr/read-subvol-entry.t b/tests/basic/afr/read-subvol-entry.t
new file mode 100644
index 000000000..91110b8cd
--- /dev/null
+++ b/tests/basic/afr/read-subvol-entry.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+#Test if the read child is selected based on the entry transaction for a directory
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+#Init
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+#Test
+TEST mkdir -p $M0/abc/def
+
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
+TEST kill_brick $V0 $H0 $B0/brick0
+
+TEST touch $M0/abc/def/ghi
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 5 "ghi" echo `ls $M0/abc/def/`
+
+#Cleanup
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/*
--
cgit
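Both read-subvol tests depend on AFR's changelog xattrs: each brick holds per-client pending counters that mark which replica is suspect, so reads are steered to the clean copy. A way to inspect those markers by hand, assuming the trusted.afr.<volname>-client-<N> naming convention AFR uses for its changelog xattrs:

# Run against the brick backends after the touch above (while brick0 is down);
# a non-zero counter against client-0 on brick1's copy of the directory marks
# brick0 as stale for that entry, so the readdir is served from brick1.
getfattr -d -m 'trusted.afr.' -e hex $B0/brick1/abc/def
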
From 0c20b17c09b2eca82f3c79013fd3fe1c72a957fd Mon Sep 17 00:00:00 2001
From: Ravishankar N
Date: Thu, 27 Mar 2014 15:04:40 +0530
Subject: tests/afr: self-heal

Basic functional tests related to self-heal.

arequal-checksum.c is taken from
https://github.com/raghavendrabhat/arequal after consent from all
authors.

Change-Id: I43facc31c61375f4dbe58bbb46238e15df5c9011
BUG: 1080759
Signed-off-by: Ravishankar N
Reviewed-on: http://review.gluster.org/7357
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 tests/basic/afr/self-heal.t | 237 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 237 insertions(+)
 create mode 100644 tests/basic/afr/self-heal.t

diff --git a/tests/basic/afr/self-heal.t b/tests/basic/afr/self-heal.t
new file mode 100644
index 000000000..df9526bcf
--- /dev/null
+++ b/tests/basic/afr/self-heal.t
@@ -0,0 +1,237 @@
+#!/bin/bash
+#Self-heal tests
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+#Init
+AREQUAL_PATH=$(dirname $0)/../../utils
+build_tester $AREQUAL_PATH/arequal-checksum.c
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+###############################################################################
+#1.Test successful data, metadata and entry self-heal
+
+#Test
+TEST mkdir -p $M0/abc/def $M0/abc/ghi
+TEST dd if=/dev/urandom of=$M0/abc/file_abc.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/def/file_abc_def_1.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/def/file_abc_def_2.txt bs=1M count=3 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/ghi/file_abc_ghi.txt bs=1M count=4 2>/dev/null
+
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST truncate -s 0 $M0/abc/def/file_abc_def_1.txt
+NEW_UID=36
+NEW_GID=36
+TEST chown $NEW_UID:$NEW_GID $M0/abc/def/file_abc_def_2.txt
+TEST rm -rf $M0/abc/ghi
+TEST mkdir -p $M0/def/ghi $M0/jkl/mno
+TEST dd if=/dev/urandom of=$M0/def/ghi/file1.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/def/ghi/file2.txt bs=1M count=3 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/jkl/mno/file.txt bs=1M count=4 2>/dev/null
+TEST chown $NEW_UID:$NEW_GID $M0/def/ghi/file2.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check all files created/deleted on brick1 are also replicated on brick0
+#(i.e. no reverse heal has happened)
+TEST ls $B0/brick0/def/ghi/file1.txt
+TEST ls $B0/brick0/def/ghi/file2.txt
+TEST ls $B0/brick0/jkl/mno/file.txt
+TEST ! ls $B0/brick0/abc/ghi
+EXPECT "$NEW_UID$NEW_GID" stat --printf=%u%g $B0/brick0/abc/def/file_abc_def_2.txt
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#2.Test successful self-heal of different file types.
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/file
+TEST mkdir $M0/file
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+TEST test -d $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#3.Test successful self-heal of file permissions.
+
+#Test
+TEST touch $M0/file
+TEST chmod 666 $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST chmod 777 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "777" stat --printf=%a $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#4.Test successful self-heal of file ownership
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+NEW_UID=36
+NEW_GID=36
+TEST chown $NEW_UID:$NEW_GID $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "$NEW_UID$NEW_GID" stat --printf=%u%g $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#5.File size test
+
+#Test
+TEST touch $M0/file
+TEST `echo "write1">$M0/file`
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST `echo "write2">>$M0/file`
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST truncate -s 0 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT 0 stat --printf=%s $B0/brick1/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#6.GFID heal
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/file
+TEST touch $M0/file
+GFID=$(gf_get_gfid_xattr $B0/brick1/file)
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "$GFID" gf_get_gfid_xattr $B0/brick0/file
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#7.Link/symlink heal
+
+#Test
+TEST touch $M0/file
+TEST ln $M0/file $M0/link_to_file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/link_to_file
+TEST ln -s $M0/file $M0/link_to_file
+TEST ln $M0/file $M0/hard_link_to_file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+TEST test -f $B0/brick0/hard_link_to_file
+TEST test -h $B0/brick0/link_to_file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#8. Heal xattrs set by application
+
+#Test
+TEST touch $M0/file
+TEST setfattr -n user.myattr_1 -v My_attribute_1 $M0/file
+TEST setfattr -n user.myattr_2 -v "My_attribute_2" $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST setfattr -n user.myattr_1 -v "My_attribute_1_modified" $M0/file
+TEST setfattr -n user.myattr_3 -v "My_attribute_3" $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+TEST diff <(echo "user.myattr_1=\"My_attribute_1_modified\"") <(getfattr -n user.myattr_1 $B0/brick1/file|grep user.myattr_1)
+TEST diff <(echo "user.myattr_3=\"My_attribute_3\"") <(getfattr -n user.myattr_3 $B0/brick1/file|grep user.myattr_3)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+TEST rm -rf $AREQUAL_PATH/arequal-checksum
+cleanup;
--
cgit
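The self-heal assertions above all reduce to the same arequal-checksum comparison: identical output for the two bricks means file data, metadata and directory structure converged. A sketch of running it by hand, assuming the source builds with a plain cc (as build_tester does) and using placeholder brick paths:

# Build the in-tree checksum tool.
cc -o /tmp/arequal-checksum tests/utils/arequal-checksum.c
# Checksum each brick backend, ignoring gluster's internal .glusterfs directory.
/tmp/arequal-checksum -p /bricks/brick0 -i .glusterfs
/tmp/arequal-checksum -p /bricks/brick1 -i .glusterfs
# diff the two outputs; no difference means the replicas are in sync.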