author    Jeff Darcy <jdarcy@redhat.com>    2014-04-22 15:37:09 +0000
committer Jeff Darcy <jdarcy@redhat.com>    2014-04-22 15:37:09 +0000
commit    a827c5eab32a43ade5551259ea56a6a1af7e861b (patch)
tree      e6707df68f72baa8645210ba931272285116ad85 /tests/basic/afr
parent    46d333783a968ab39e0beade9c7a1eec8035f8b1 (diff)
parent    99bfc2a2a1689da1e173cb2f8ef54d2b09ef3a5d (diff)
Merge branch 'upstream'

Conflicts:
    glusterfs.spec.in
    xlators/mgmt/glusterd/src/Makefile.am
    xlators/mgmt/glusterd/src/glusterd-utils.c
    xlators/mgmt/glusterd/src/glusterd.h

Change-Id: I27bdcf42b003cfc42d6ad981bd2bf8180176806d
Diffstat (limited to 'tests/basic/afr')
 -rw-r--r--  tests/basic/afr/gfid-mismatch.t          |  26
 -rw-r--r--  tests/basic/afr/read-subvol-data.t       |  33
 -rw-r--r--  tests/basic/afr/read-subvol-entry.t      |  35
 -rw-r--r--  tests/basic/afr/self-heal.t              | 237
 -rw-r--r--  tests/basic/afr/sparse-file-self-heal.t  | 121
 -rw-r--r--  tests/basic/afr/stale-file-lookup.t      |  30
 6 files changed, 482 insertions(+), 0 deletions(-)
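These are TAP-style regression tests driven by the TEST/EXPECT/EXPECT_WITHIN
helpers from include.rc (volume.rc adds volume helpers such as kill_brick and
afr_child_up_status). As a sketch, a single test can usually be run from a
built source tree with the prove harness (assuming prove is installed):

    # Run one AFR test verbosely; each TEST/EXPECT line reports ok/not ok.
    prove -v tests/basic/afr/self-heal.t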
diff --git a/tests/basic/afr/gfid-mismatch.t b/tests/basic/afr/gfid-mismatch.t
new file mode 100644
index 000000000..05f48d43a
--- /dev/null
+++ b/tests/basic/afr/gfid-mismatch.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+#Test that GFID mismatches result in EIO
+
+. $(dirname $0)/../../include.rc
+cleanup;
+
+#Init
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+#Test
+TEST touch $M0/file
+TEST setfattr -n trusted.gfid -v 0sBfz5vAdHTEK1GZ99qjqTIg== $B0/brick0/file
+TEST ! "find $M0/file | xargs stat"
+
+#Cleanup
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/*
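The test flips trusted.gfid on one brick only (the 0s prefix tells setfattr
the value is base64-encoded), so the two copies of the file end up with
different GFIDs and AFR must fail the lookup with EIO rather than pick a
copy. A sketch of how to inspect the mismatch by hand, assuming the brick
layout used above and the getfattr utility:

    # After the setfattr above, the two on-disk GFIDs should differ.
    getfattr -n trusted.gfid -e hex $B0/brick0/file
    getfattr -n trusted.gfid -e hex $B0/brick1/file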
diff --git a/tests/basic/afr/read-subvol-data.t b/tests/basic/afr/read-subvol-data.t
new file mode 100644
index 000000000..7db4988fa
--- /dev/null
+++ b/tests/basic/afr/read-subvol-data.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+#Test that the source is selected based on the data transaction for a regular file.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+#Init
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+#Test
+TEST $CLI volume set $V0 cluster.read-subvolume $V0-client-1
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST dd if=/dev/urandom of=$M0/afr_success_5.txt bs=1M count=1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST dd if=/dev/urandom of=$M0/afr_success_5.txt bs=1M count=10
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 5 "10485760" echo `ls -l $M0/afr_success_5.txt | awk '{ print $5}'`
+
+#Cleanup
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/*
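The point of the final EXPECT_WITHIN is that brick0 comes back holding a
stale 1M copy while brick1 holds the full 10M written during the outage; the
mount must report the size from the source chosen by the data transaction,
not from the stale copy. A hand check of the two on-disk copies, assuming the
brick paths above:

    # brick1 should hold 10485760 bytes, brick0 the stale 1048576.
    stat -c '%n %s' $B0/brick0/afr_success_5.txt $B0/brick1/afr_success_5.txt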
diff --git a/tests/basic/afr/read-subvol-entry.t b/tests/basic/afr/read-subvol-entry.t
new file mode 100644
index 000000000..91110b8cd
--- /dev/null
+++ b/tests/basic/afr/read-subvol-entry.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+#Test that the read child is selected based on the entry transaction for a directory
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+#Init
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+#Test
+TEST mkdir -p $M0/abc/def
+
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
+TEST kill_brick $V0 $H0 $B0/brick0
+
+TEST touch $M0/abc/def/ghi
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 5 "ghi" echo `ls $M0/abc/def/`
+
+#Cleanup
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/*
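Here the entry ghi exists only on brick1, because it was created while
brick0 was down; the EXPECT_WITHIN asserts that the mount lists the directory
from the brick that completed the entry transaction rather than from the
stale one. The per-brick view can be checked directly, assuming the layout
above:

    # Until self-heal runs, only brick1 holds abc/def/ghi.
    ls $B0/brick0/abc/def/
    ls $B0/brick1/abc/def/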
diff --git a/tests/basic/afr/self-heal.t b/tests/basic/afr/self-heal.t
new file mode 100644
index 000000000..df9526bcf
--- /dev/null
+++ b/tests/basic/afr/self-heal.t
@@ -0,0 +1,237 @@
+#!/bin/bash
+#Self-heal tests
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+#Init
+AREQUAL_PATH=$(dirname $0)/../../utils
+build_tester $AREQUAL_PATH/arequal-checksum.c
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+###############################################################################
+#1. Test successful data, metadata and entry self-heal
+
+#Test
+TEST mkdir -p $M0/abc/def $M0/abc/ghi
+TEST dd if=/dev/urandom of=$M0/abc/file_abc.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/def/file_abc_def_1.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/def/file_abc_def_2.txt bs=1M count=3 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/ghi/file_abc_ghi.txt bs=1M count=4 2>/dev/null
+
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST truncate -s 0 $M0/abc/def/file_abc_def_1.txt
+NEW_UID=36
+NEW_GID=36
+TEST chown $NEW_UID:$NEW_GID $M0/abc/def/file_abc_def_2.txt
+TEST rm -rf $M0/abc/ghi
+TEST mkdir -p $M0/def/ghi $M0/jkl/mno
+TEST dd if=/dev/urandom of=$M0/def/ghi/file1.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/def/ghi/file2.txt bs=1M count=3 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/jkl/mno/file.txt bs=1M count=4 2>/dev/null
+TEST chown $NEW_UID:$NEW_GID $M0/def/ghi/file2.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#Check that all files created/deleted on brick1 are also replicated on brick0
+#(i.e. no reverse heal has happened)
+TEST ls $B0/brick0/def/ghi/file1.txt
+TEST ls $B0/brick0/def/ghi/file2.txt
+TEST ls $B0/brick0/jkl/mno/file.txt
+TEST ! ls $B0/brick0/abc/ghi
+EXPECT "$NEW_UID$NEW_GID" stat --printf=%u%g $B0/brick0/abc/def/file_abc_def_2.txt
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#2. Test successful self-heal of different file types.
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/file
+TEST mkdir $M0/file
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+TEST test -d $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#3. Test successful self-heal of file permissions.
+
+#Test
+TEST touch $M0/file
+TEST chmod 666 $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST chmod 777 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "777" stat --printf=%a $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#4. Test successful self-heal of file ownership
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+NEW_UID=36
+NEW_GID=36
+TEST chown $NEW_UID:$NEW_GID $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "$NEW_UID$NEW_GID" stat --printf=%u%g $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#5. File size test
+
+#Test
+TEST touch $M0/file
+TEST "echo write1 > $M0/file"
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST "echo write2 >> $M0/file"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST truncate -s 0 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "0" stat --printf=%s $B0/brick1/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#6. GFID heal
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/file
+TEST touch $M0/file
+GFID=$(gf_get_gfid_xattr $B0/brick1/file)
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "$GFID" gf_get_gfid_xattr $B0/brick0/file
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#7. Link/symlink heal
+
+#Test
+TEST touch $M0/file
+TEST ln $M0/file $M0/link_to_file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/link_to_file
+TEST ln -s $M0/file $M0/link_to_file
+TEST ln $M0/file $M0/hard_link_to_file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+TEST test -f $B0/brick0/hard_link_to_file
+TEST test -h $B0/brick0/link_to_file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#8. Heal xattrs set by application
+
+#Test
+TEST touch $M0/file
+TEST setfattr -n user.myattr_1 -v My_attribute_1 $M0/file
+TEST setfattr -n user.myattr_2 -v "My_attribute_2" $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST setfattr -n user.myattr_1 -v "My_attribute_1_modified" $M0/file
+TEST setfattr -n user.myattr_3 -v "My_attribute_3" $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+TEST diff <(echo "user.myattr_1=\"My_attribute_1_modified\"") <(getfattr -n user.myattr_1 $B0/brick1/file|grep user.myattr_1)
+TEST diff <(echo "user.myattr_3=\"My_attribute_3\"") <(getfattr -n user.myattr_3 $B0/brick1/file|grep user.myattr_3)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+TEST rm -rf $AREQUAL_PATH/arequal-checksum
+cleanup;
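Every scenario above ends with the same arequal-checksum comparison of the
two bricks. A hedged refactor sketch; bricks_match is a hypothetical helper
name, not something defined by include.rc or volume.rc:

    # Succeed iff both bricks checksum identically (data, metadata and
    # directory structure), ignoring the internal .glusterfs directory.
    function bricks_match {
            diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) \
                 <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
    }

Each "TEST diff <(...) <(...)" line could then shrink to "TEST bricks_match".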
diff --git a/tests/basic/afr/sparse-file-self-heal.t b/tests/basic/afr/sparse-file-self-heal.t
new file mode 100644
index 000000000..9b795c331
--- /dev/null
+++ b/tests/basic/afr/sparse-file-self-heal.t
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+#Test that self-heal of files with holes works properly.
+#"bigger" is 2M, "big" is 1M, "small" is anything less
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 data-self-heal-algorithm full
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST dd if=/dev/urandom of=$M0/small count=1 bs=1M
+TEST dd if=/dev/urandom of=$M0/bigger2big count=1 bs=2M
+TEST dd if=/dev/urandom of=$M0/big2bigger count=1 bs=1M
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+#File with >128k size hole
+TEST truncate -s 1M $M0/big
+big_md5sum=$(md5sum $M0/big | awk '{print $1}')
+
+#File with <128k hole
+TEST truncate -s 0 $M0/small
+TEST truncate -s 64k $M0/small
+small_md5sum=$(md5sum $M0/small | awk '{print $1}')
+
+#Bigger file truncated to big size hole.
+TEST truncate -s 0 $M0/bigger2big
+TEST truncate -s 1M $M0/bigger2big
+bigger2big_md5sum=$(md5sum $M0/bigger2big | awk '{print $1}')
+
+#Big file truncated to Bigger size hole
+TEST truncate -s 2M $M0/big2bigger
+big2bigger_md5sum=$(md5sum $M0/big2bigger | awk '{print $1}')
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0 full
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
+small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
+bigger2big_md5sum_0=$(md5sum $B0/${V0}0/bigger2big | awk '{print $1}')
+big2bigger_md5sum_0=$(md5sum $B0/${V0}0/big2bigger | awk '{print $1}')
+
+EXPECT $big_md5sum echo $big_md5sum_0
+EXPECT $small_md5sum echo $small_md5sum_0
+EXPECT $big2bigger_md5sum echo $big2bigger_md5sum_0
+EXPECT $bigger2big_md5sum echo $bigger2big_md5sum_0
+
+
+EXPECT "1" has_holes $B0/${V0}0/big
+#Because self-heal writes the final chunk, files smaller than 128K should
+#not end up with holes
+EXPECT "0" has_holes $B0/${V0}0/small
+# Since the source is smaller than the sink, self-heal does a blind copy,
+# so no holes will be present
+EXPECT "0" has_holes $B0/${V0}0/bigger2big
+EXPECT "1" has_holes $B0/${V0}0/big2bigger
+
+TEST rm -f $M0/*
+
+#Repeat the same tests with the diff self-heal algorithm
+TEST $CLI volume set $V0 data-self-heal-algorithm diff
+
+TEST dd if=/dev/urandom of=$M0/small count=1 bs=1M
+TEST dd if=/dev/urandom of=$M0/big2bigger count=1 bs=1M
+TEST dd if=/dev/urandom of=$M0/bigger2big count=1 bs=2M
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+#File with >128k size hole
+TEST truncate -s 1M $M0/big
+big_md5sum=$(md5sum $M0/big | awk '{print $1}')
+
+#File with <128k hole
+TEST truncate -s 0 $M0/small
+TEST truncate -s 64k $M0/small
+small_md5sum=$(md5sum $M0/small | awk '{print $1}')
+
+#Bigger file truncated to big size hole
+TEST truncate -s 0 $M0/bigger2big
+TEST truncate -s 1M $M0/bigger2big
+bigger2big_md5sum=$(md5sum $M0/bigger2big | awk '{print $1}')
+
+#Big file truncated to Bigger size hole
+TEST truncate -s 2M $M0/big2bigger
+big2bigger_md5sum=$(md5sum $M0/big2bigger | awk '{print $1}')
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0 full
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
+small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
+bigger2big_md5sum_0=$(md5sum $B0/${V0}0/bigger2big | awk '{print $1}')
+big2bigger_md5sum_0=$(md5sum $B0/${V0}0/big2bigger | awk '{print $1}')
+
+EXPECT $big_md5sum echo $big_md5sum_0
+EXPECT $small_md5sum echo $small_md5sum_0
+EXPECT $big2bigger_md5sum echo $big2bigger_md5sum_0
+EXPECT $bigger2big_md5sum echo $bigger2big_md5sum_0
+
+EXPECT "1" has_holes $B0/${V0}0/big
+EXPECT "1" has_holes $B0/${V0}0/big2bigger
+EXPECT "0" has_holes $B0/${V0}0/bigger2big
+EXPECT "0" has_holes $B0/${V0}0/small
+
+cleanup
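has_holes is provided by volume.rc; conceptually a file "has holes" when the
blocks actually allocated for it cover less than its apparent size. A minimal
stand-in sketch (appears_sparse is a hypothetical name, not the harness
function):

    # Print 1 if allocated bytes < apparent size, i.e. the file is sparse.
    function appears_sparse {
            local blocks=$(stat -c %b "$1")  # number of allocated blocks
            local bsize=$(stat -c %B "$1")   # bytes per block reported by %b
            local size=$(stat -c %s "$1")    # apparent size in bytes
            if [ $((blocks * bsize)) -lt "$size" ]; then echo 1; else echo 0; fi
    }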
diff --git a/tests/basic/afr/stale-file-lookup.t b/tests/basic/afr/stale-file-lookup.t
new file mode 100644
index 000000000..24a478d5c
--- /dev/null
+++ b/tests/basic/afr/stale-file-lookup.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+#This file checks that a stale file lookup fails.
+#A file is deleted while one brick is down. Before self-heal can repair it,
+#the file is accessed from the mount; that lookup should fail.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST touch $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST rm -f $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+TEST stat $B0/${V0}0/a
+TEST ! stat $B0/${V0}1/a
+TEST ! ls -l $M0/a
+
+cleanup
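The last three assertions capture the whole point: the revived brick0 still
carries the stale copy of a (it missed the delete), brick1 does not, and the
client must fail the lookup rather than resurrect the file. The same state
can be checked by hand in one line, assuming the brick paths above:

    # Stale copy on brick0 only; the mount must refuse to serve it.
    test -e $B0/${V0}0/a && test ! -e $B0/${V0}1/a && ! stat $M0/a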