author    Richard Wareing <rwareing@fb.com>  2014-06-16 15:30:27 -0700
committer Kevin Vigor <kvigor@fb.com>  2016-12-17 07:03:12 -0800
commit    5ff0afc179c190dce16dab781bdc4a2d9b7b26a8 (patch)
tree      e8192fd91f3590e566cc8f9231a9a8af1189b8b6 /tests
parent    f97b4e6fba382345687d08020e541e8b1f38ef4f (diff)
Adding halo-enable option
Summary:
- Master option for halo geo-replication
- Added prove test for halo mode
- Updated options to values which should work "out of the box" for most
  use-cases: just enable halo mode and you are done; the options can be
  tweaked as needed from there.

Test Plan:
- Enabled the option and verified halo works; disabled it and verified the
  async behavior is disabled.
- Ran "prove -v tests/basic/halo.t" -> https://phabricator.fb.com/P12074204

Reviewers: jackl, dph, cjh
Reviewed By: cjh
Subscribers: meyering
Differential Revision: https://phabricator.fb.com/D1386675
Tasks: 4117827

Conflicts:
	xlators/cluster/afr/src/afr-self-heal-common.c
	xlators/cluster/afr/src/afr.h

Change-Id: Ib704528d438c3a42150e30974eb6bb01d9e795ae
Signed-off-by: Kevin Vigor <kvigor@fb.com>
Reviewed-on: http://review.gluster.org/16172
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Shreyas Siravara <sshreyas@fb.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
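For reference, the "out of the box" enablement the summary describes reduces to two volume options, mirroring what the test below sets (VOLNAME is a placeholder for your volume name):

    gluster volume set VOLNAME cluster.halo-enabled True
    gluster volume set VOLNAME cluster.halo-max-replicas 2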
Diffstat (limited to 'tests')
-rw-r--r--  tests/basic/halo.t | 50
1 file changed, 50 insertions, 0 deletions
diff --git a/tests/basic/halo.t b/tests/basic/halo.t
new file mode 100644
index 00000000000..03fc0f88a19
--- /dev/null
+++ b/tests/basic/halo.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Test for the Halo geo-replication feature
+#
+# 1. Create volume w/ 3x replication w/ max-replicas = 2 for clients,
+# heal daemon is off to start.
+# 2. Write some data
+# 3. Verify at least one of the bricks did not receive the writes.
+# 4. Turn the heal daemon on
+# 5. Within 30 seconds the SHD should async heal the data over
+# to the 3rd brick.
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
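+# Halo config per the plan above: clients write to at most 2 of the 3
+# replicas, and the self-heal daemon starts off so one brick stays cold.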
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume start $V0
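+# FUSE mount with attribute/entry caching disabled so checks see fresh state.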
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+
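+# Step 2: write some data -- one 1MB file and one new directory level per pass.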
+for i in {1..5}
+do
+ dd if=/dev/urandom of=f bs=1M count=1 2>/dev/null
+ mkdir a; cd a;
+done
+
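+# Step 3: count top-level entries on each brick; halo should have skipped one.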
+B0_CNT=$(ls $B0/${V0}0 | wc -l)
+B1_CNT=$(ls $B0/${V0}1 | wc -l)
+B2_CNT=$(ls $B0/${V0}2 | wc -l)
+
+# One of the brick dirs should be empty
+TEST "(($B0_CNT == 0 || $B1_CNT == 0 || $B2_CNT == 0))"
+
+# Ok, turn the heal daemon on and verify it heals it up
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+cleanup