summaryrefslogtreecommitdiffstats
path: root/tests/basic
diff options
context:
space:
mode:
authorRavishankar N <ravishankar@redhat.com>2015-05-30 10:23:33 +0530
committerPranith Kumar Karampuri <pkarampu@redhat.com>2015-06-03 03:50:47 -0700
commitda111ae21429d33179cd11409bc171fae9d55194 (patch)
tree75b00dccac53a17255c6e856fb39fa1c8dc7a5da /tests/basic
parent9798a24febba9bbf28e97656b81b8a01a1325f68 (diff)
afr: honour selfheal enable/disable volume set options
afr-v1 had the following volume set options that are used to enable/disable self-heals from happening in AFR xlator when loaded in the client graph: cluster.metadata-self-heal cluster.data-self-heal cluster.entry-self-heal In afr-v2, these 3 heals can happen from the client if there is an inode refresh. This patch allows such heals to proceed only if the corresponding volume set options are set to true. Change-Id: I8d97d6020611152e73a269f3fdb607652c66cc86 BUG: 1226507 Signed-off-by: Ravishankar N <ravishankar@redhat.com> Reviewed-on: http://review.gluster.org/11012 Tested-by: NetBSD Build System <jenkins@build.gluster.org> Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Diffstat (limited to 'tests/basic')
-rw-r--r--tests/basic/afr/client-side-heal.t86
1 files changed, 86 insertions, 0 deletions
diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
new file mode 100644
index 00000000000..c9b3e355802
--- /dev/null
+++ b/tests/basic/afr/client-side-heal.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+echo "some data" > $M0/datafile
+EXPECT 0 echo $?
+TEST touch $M0/mdatafile
+TEST mkdir $M0/dir
+
+#Kill a brick and perform I/O to have pending heals.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" afr_child_up_status $V0 0
+
+#pending data heal
+echo "some more data" >> $M0/datafile
+EXPECT 0 echo $?
+
+#pending metadata heal
+TEST chmod +x $M0/mdatafile
+
+#pending entry heal. Also causes pending metadata/data heals on file{1..5}
+TEST touch $M0/dir/file{1..5}
+
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#After brick comes back up, access from client should not trigger heals
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Medatada heal via explicit lookup must not happen
+TEST ls $M0/mdatafile
+
+#Inode refresh must not trigger data and entry heals.
+#To trigger inode refresh for sure, the volume is unmounted and mounted each time.
+#Check that data heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+#Check that entry heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+
+#No heal must have happened
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#Enable heal client side heal options and trigger heals
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+#Metadata heal is triggered by lookup without need for inode refresh.
+TEST ls $M0/mdatafile
+EXPECT 7 afr_get_pending_heal_count $V0
+
+#Inode refresh must trigger data and entry heals.
+#To trigger inode refresh for sure, the volume is unmounted and mounted each time.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+EXPECT 5 afr_get_pending_heal_count $V0
+
+TEST cat $M0/dir/file1
+TEST cat $M0/dir/file2
+TEST cat $M0/dir/file3
+TEST cat $M0/dir/file4
+TEST cat $M0/dir/file5
+
+EXPECT 0 afr_get_pending_heal_count $V0
+cleanup;