From a97ad9b69bb17f2351c59512fa9c6cb25d82b4da Mon Sep 17 00:00:00 2001
From: Xavier Hernandez
Date: Thu, 18 Sep 2014 18:42:34 +0200
Subject: test/ec: Fix spurious failures caused by self-heal

Computing the sha1sum of a file may update the access time of that
file. If this happens while a brick is down, as the test deliberately
forces, that brick misses the update and falls out of sync. When the
brick is restarted, self-heal repairs the file, but the test must not
access brick contents until self-heal finishes.

If this is combined with killing another brick before self-heal has
finished repairing the file, the volume could become inaccessible.

Since the purpose of these tests is only to check ec functionality
(there is another test that checks self-heal), the test that corrupts
the file has been removed.

Additional checks validating the state of the volume have been added
to avoid timing issues.

BUG: 1144108
Change-Id: Ibd9288de519914663998a1fbc4321ec92ed6082c
Signed-off-by: Xavier Hernandez
Reviewed-on: http://review.gluster.org/8892
Reviewed-by: Emmanuel Dreyfus
Tested-by: Emmanuel Dreyfus
Tested-by: Gluster Build System
Reviewed-by: Dan Lambright
Reviewed-by: Vijay Bellur
---
 tests/basic/ec/ec.t | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

(limited to 'tests/basic/ec/ec.t')

diff --git a/tests/basic/ec/ec.t b/tests/basic/ec/ec.t
index 864f9f72bad..c12aba3afe6 100644
--- a/tests/basic/ec/ec.t
+++ b/tests/basic/ec/ec.t
@@ -137,10 +137,11 @@ EXPECT 'Created' volinfo_field $V0 'Status'
 EXPECT '10' brick_count $V0
 
 TEST $CLI volume start $V0
-EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'
 
 # Mount FUSE with caching disabled
 TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "10" ec_child_up_count $V0 0
 
 # Create local files for comparisons etc.
 tmpdir=$(mktemp -d -t ${0##*/}.XXXXXX)
@@ -186,6 +187,7 @@ TEST setup_perm_file $M0
 # Unmount/remount so that create/write and truncate don't see cached data.
 TEST umount $M0
 TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "8" ec_child_up_count $V0 0
 
 # Test create/write and truncate *before* the bricks are brought back.
 TEST check_create_write $M0
@@ -193,11 +195,13 @@ TEST check_truncate $M0
 
 # Restart the bricks and allow repair to occur.
 TEST $CLI volume start $V0 force
-sleep 10
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "10" ec_child_up_count $V0 0
 
 # Unmount/remount again, same reason as before.
 TEST umount $M0
 TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "10" ec_child_up_count $V0 0
 
 # Make sure everything is as it should be. Most tests check for consistency
 # between the bricks and the front end. This is not valid for disperse, so we
@@ -217,14 +221,14 @@ TEST stat $M0/removexattr
 TEST stat $M0/perm_dir
 TEST stat $M0/perm_dir/perm_file
 
-EXPECT_WITHIN 5 "Y" check_hard_link $B0/${V0}{0..9}
-EXPECT_WITHIN 5 "Y" check_soft_link $B0/${V0}{0..9}
-EXPECT_WITHIN 5 "Y" check_unlink $B0/${V0}{0..9}
-EXPECT_WITHIN 5 "Y" check_rmdir $B0/${V0}{0..9}
-EXPECT_WITHIN 5 "Y" check_mkdir $B0/${V0}{0..9}
-EXPECT_WITHIN 5 "Y" check_setxattr $B0/${V0}{0..9}
-EXPECT_WITHIN 5 "Y" check_removexattr $B0/${V0}{0..9}
-EXPECT_WITHIN 5 "Y" check_perm_file $B0/${V0}{0..9}
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" check_hard_link $B0/${V0}{0..9}
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" check_soft_link $B0/${V0}{0..9}
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" check_unlink $B0/${V0}{0..9}
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" check_rmdir $B0/${V0}{0..9}
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" check_mkdir $B0/${V0}{0..9}
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" check_setxattr $B0/${V0}{0..9}
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" check_removexattr $B0/${V0}{0..9}
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" check_perm_file $B0/${V0}{0..9}
 
 TEST rm -rf $tmpdir
 TEST userdel --force ${TEST_USER}
-- 
cgit
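
Note: the heart of this fix is replacing a fixed "sleep 10" with
EXPECT_WITHIN, which polls a condition until it holds or an upper time
bound expires. As a rough illustration of that pattern only (not the
actual helper from Gluster's tests/include.rc, whose argument handling
and failure reporting are more involved), a minimal shell version might
look like this; the function name expect_within is hypothetical:

    # Sketch: re-run a command until its output matches the expected
    # value, or give up after the timeout. Stand-in for EXPECT_WITHIN.
    expect_within() {
        local timeout="$1" expected="$2"
        shift 2
        local end=$(( $(date +%s) + timeout ))
        while [ "$(date +%s)" -lt "$end" ]; do
            # Run the remaining arguments as a command and compare output.
            if [ "$("$@" 2>/dev/null)" = "$expected" ]; then
                return 0
            fi
            sleep 1
        done
        echo "expected '$expected' from '$*' within ${timeout}s" >&2
        return 1
    }

    # Usage mirroring the patch: wait for all 10 ec children to come up
    # instead of sleeping for a fixed 10 seconds.
    #   expect_within "$CHILD_UP_TIMEOUT" "10" ec_child_up_count "$V0" 0

Polling keeps the wait bounded while letting the test proceed as soon
as the bricks and ec children are actually up, avoiding both failure
modes of a fixed sleep: too short (spurious failures) and too long
(wasted test time).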