Diffstat (limited to 'tests/basic/quota-anon-fd-nfs.t')
-rwxr-xr-x  tests/basic/quota-anon-fd-nfs.t | 39
1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/tests/basic/quota-anon-fd-nfs.t b/tests/basic/quota-anon-fd-nfs.t
index be7bc35db9b..9e6675af6ec 100755
--- a/tests/basic/quota-anon-fd-nfs.t
+++ b/tests/basic/quota-anon-fd-nfs.t
@@ -1,10 +1,18 @@
#!/bin/bash
. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
. $(dirname $0)/../fileio.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
+QDD=$(dirname $0)/quota
+# compile the small test write program; it is invoked later to consume quota
+build_tester $(dirname $0)/quota.c -o $QDD
+
TESTS_EXPECTED_IN_LOOP=16
TEST glusterd
TEST pidof glusterd
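
The hunk above compiles tests/basic/quota.c into $QDD using the build_tester
helper from include.rc; the binary is invoked later to generate writes of a
known size. As a rough illustration only (the argument order
<file> <block-size-KB> <block-count> is an assumption here, not taken from
quota.c), a shell equivalent of such a writer would be:

    qdd_sketch () {
            local file=$1 bs_kb=$2 count=$3
            # write <count> blocks of <bs_kb> KB, then sync so quota
            # accounting sees the full size before the result is checked
            dd if=/dev/zero of="$file" bs=${bs_kb}k count=$count conv=fsync
    }
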
@@ -12,6 +20,7 @@ TEST $CLI volume info;
TEST $CLI volume create $V0 $H0:$B0/brick1;
EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 nfs.disable false
# The test makes use of inode-lru-limit to hit a scenario, where we
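
The added "nfs.disable false" matters because recent GlusterFS releases ship
with the built-in gNFS server turned off by default for new volumes; without
it the NFS mount further down would never see an export. When verifying by
hand, the effective value can be checked with (illustrative, not part of the
test):

    $CLI volume get $V0 nfs.disable    # the value column should read "off"
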
@@ -40,8 +49,11 @@ EXPECT 'Started' volinfo_field $V0 'Status';
TEST $CLI volume quota $V0 enable
TEST $CLI volume quota $V0 limit-usage / 1
+TEST $CLI volume quota $V0 soft-timeout 0
+TEST $CLI volume quota $V0 hard-timeout 0
-TEST mount -t nfs -o noac,soft,nolock,vers=3 $H0:/$V0 $N0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 noac,soft,nolock,vers=3;
deep=/0/1/2/3/4/5/6/7/8/9
TEST mkdir -p $N0/$deep
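
Setting soft-timeout and hard-timeout to 0 disables the quota enforcer's
usage-caching windows, so the limit is checked on every write rather than
against a cached value; without this the 1 MB limit might not trip
deterministically. is_nfs_export_available and mount_nfs are helpers from
tests/nfs.rc; approximately (a sketch for readers, not the real definitions):

    is_nfs_export_available () {
            # print 1 once the volume appears in the server's export list
            showmount -e $H0 2>/dev/null | grep -qw "/$V0" && echo 1 || echo 0
    }

    mount_nfs () {
            # mount_nfs <host>:/<volume> <mountpoint> [comma-separated options]
            local opts=${3:+"-o $3"}
            mount -t nfs $opts "$1" "$2"
    }
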
@@ -53,7 +65,14 @@ TEST fd_open 5 'w' "$N0/$deep/file3"
TEST fd_open 6 'w' "$N0/$deep/file4"
# consume all quota
-TEST ! dd if=/dev/zero of="$N0/$deep/file" bs=1MB count=1
+echo "Hello" > $N0/$deep/new_file_1
+echo "World" >> $N0/$deep/new_file_1
+echo 1 >> $N0/$deep/new_file_1
+echo 2 >> $N0/$deep/new_file_1
+
+# Try to create a 1MB file, which should fail because it exceeds the quota
+TEST ! $QDD $N0/$deep/new_file_2 256 4
+
# At the end of each fop on the server, the reference count of the
# inode associated with each of the files above drops to zero, and hence
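
The small echo appends stay well under the 1 MB limit and should succeed,
while the $QDD call must fail: per the comment its two numeric arguments size
the write at 1 MB, e.g. 256 blocks of 4 KB (the exact interpretation of the
arguments is quota.c's, assumed here). When a run is debugged by hand, the
configured limit and current usage can be inspected with (illustrative):

    $CLI volume quota $V0 list /
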
@@ -79,6 +98,20 @@ exec 6>&-
$CLI volume statedump $V0 all
-TEST umount -l $N0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# This is ugly, but there seems to be a latent race between other actions and
+# stopping the volume. The visible symptom is that "umount -l" (run from
+# gf_umount_lazy in glusterd) hangs. This happens pretty consistently with the
+# new mem-pool code, though it has nothing to do with memory pools as such -
+# just with changed timing. Adding the sleep here makes the test pass consistently.
+#
+# If anyone else wants to debug the race condition, feel free.
+sleep 3
+
+TEST $CLI volume stop $V0
+
+rm -f $QDD
cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
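
force_umount and EXPECT_WITHIN come from tests/include.rc: EXPECT_WITHIN
re-runs its command until the output matches the expected string or the
timeout expires, so force_umount needs to be safe to call repeatedly. An
illustrative approximation (not the real definition):

    force_umount () {
            # lazy-unmount, then report Y only once the mountpoint is gone,
            # so EXPECT_WITHIN can poll until the unmount really completes
            umount -l "$1" 2>/dev/null
            if grep -q " $1 " /proc/mounts; then echo N; else echo Y; fi
    }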