From 918b3aeae03b3aecc64fbc202f00a7c1955f6db7 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K
Date: Tue, 30 Jun 2015 23:01:36 +0530
Subject: cluster/ec: Make background healing optional behavior

Provide options to control number of active background heal count and qlen.

>Change-Id: Idc2419219d881f47e7d2e9bbc1dcdd999b372033
>BUG: 1237381
>Signed-off-by: Pranith Kumar K
>Reviewed-on: http://review.gluster.org/11473
>Reviewed-by: Xavier Hernandez
>Tested-by: Gluster Build System

BUG: 1238476
Change-Id: I22ba902d9911195656db9e458c01b54cf0afcd7a
Signed-off-by: Pranith Kumar K
Reviewed-on: http://review.gluster.org/11680
Tested-by: Gluster Build System
Reviewed-by: Xavier Hernandez
---
 tests/basic/afr/client-side-heal.t | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
index c9b3e355802..18f76265b29 100644
--- a/tests/basic/afr/client-side-heal.t
+++ b/tests/basic/afr/client-side-heal.t
@@ -33,7 +33,7 @@ TEST chmod +x $M0/mdatafile
 #pending entry heal. Also causes pending metadata/data heals on file{1..5}
 TEST touch $M0/dir/file{1..5}
 
-EXPECT 8 afr_get_pending_heal_count $V0
+EXPECT 8 get_pending_heal_count $V0
 
 #After brick comes back up, access from client should not trigger heals
 TEST $CLI volume start $V0 force
@@ -54,7 +54,7 @@ TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
 TEST ls $M0/dir
 
 #No heal must have happened
-EXPECT 8 afr_get_pending_heal_count $V0
+EXPECT 8 get_pending_heal_count $V0
 
 #Enable heal client side heal options and trigger heals
 TEST $CLI volume set $V0 cluster.data-self-heal on
@@ -63,7 +63,7 @@ TEST $CLI volume set $V0 cluster.entry-self-heal on
 
 #Metadata heal is triggered by lookup without need for inode refresh.
 TEST ls $M0/mdatafile
-EXPECT 7 afr_get_pending_heal_count $V0
+EXPECT 7 get_pending_heal_count $V0
 
 #Inode refresh must trigger data and entry heals.
 #To trigger inode refresh for sure, the volume is unmounted and mounted each time.
@@ -74,7 +74,7 @@ TEST cat $M0/datafile
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
 TEST ls $M0/dir
-EXPECT 5 afr_get_pending_heal_count $V0
+EXPECT 5 get_pending_heal_count $V0
 
 TEST cat $M0/dir/file1
 TEST cat $M0/dir/file2
@@ -82,5 +82,5 @@ TEST cat $M0/dir/file3
 TEST cat $M0/dir/file4
 TEST cat $M0/dir/file5
 
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 cleanup;
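
Note: this patch only touches the test script, renaming afr_get_pending_heal_count to the generalized get_pending_heal_count helper; the behavior change itself lives in the upstream review referenced above (http://review.gluster.org/11473), which makes background healing in the disperse (EC) translator tunable. As a rough illustration of how those tunables might be exercised in the same test-script style, a minimal sketch follows; the option names (disperse.background-heals, disperse.heal-wait-qlength), their values, and the disperse volume layout are assumptions based on that review, not part of this diff.

#!/bin/bash
# Sketch only: option names and values below are assumptions taken from the
# upstream review, not from this patch.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;

TEST glusterd
TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume start $V0

# Assumed tunables: cap the number of concurrent background heals and the
# length of the queue of heals waiting to run.
TEST $CLI volume set $V0 disperse.background-heals 8
TEST $CLI volume set $V0 disperse.heal-wait-qlength 128

TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0

# The renamed helper from tests/volume.rc is generic, so the same check
# works for replicate and disperse volumes alike.
EXPECT 0 get_pending_heal_count $V0

cleanup;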