author    Sanju Rakonde <srakonde@redhat.com>    2018-01-04 10:35:29 +0530
committer Atin Mukherjee <amukherj@redhat.com>   2018-02-10 16:25:01 +0000
commit    535fd517c6b188732f9d69c0301dd78c3dc3d09c (patch)
tree      1ca37c83a6f4dd299f7d74413fb9eafaa0cf6514 /tests
parent    446ddbf1b10ce835e0e40790bc997ec6ac53766a (diff)
glusterd: optimization of test cases
To reduce the overall time taken by every regression job for the glusterd test cases, avoid duplicate tests by clubbing similar test cases into one. The real time taken by all glusterd regression jobs without this patch is 1959 seconds; with this patch it is 1059 seconds. See the document below for reference:
https://docs.google.com/document/d/1u8o4-wocrsuPDI8BwuBU6yi_x4xA_pf2qSrFY6WEQpo/edit?usp=sharing

Change-Id: Ib14c61ace97e62c3abce47230dd40598640fe9cb
BUG: 1530905
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
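As an aside, the clubbing pattern is straightforward: tests that each paid the cost of starting glusterd and creating a volume are merged so the setup runs only once. A minimal, hypothetical sketch (TEST and cleanup come from the harness's include.rc/volume.rc, exactly as in the tests below; the two checks shown are illustrative, not a specific file from this patch):

#!/bin/bash
# Hypothetical consolidated test: one glusterd + one volume serve two checks
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

cleanup;

# Shared setup, previously duplicated in each .t file
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
TEST $CLI volume start $V0

# Check 1 (formerly its own file): volume top works; a bogus brick arg fails
TEST $CLI volume top $V0 open
TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick

# Check 2 (formerly its own file): barrier can only be toggled once each way
TEST $CLI volume barrier $V0 enable
TEST ! $CLI volume barrier $V0 enable

cleanup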
Diffstat (limited to 'tests')
-rw-r--r--  tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t | 110
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation.t (renamed from tests/bugs/glusterd/bug-1446172-brick-mux-reset-brick.t) | 51
-rw-r--r--  tests/bugs/glusterd/brick-mux.t | 81
-rwxr-xr-x  tests/bugs/glusterd/bug-000000.t | 9
-rwxr-xr-x  tests/bugs/glusterd/bug-1002556.t | 25
-rw-r--r--  tests/bugs/glusterd/bug-1004744.t | 46
-rwxr-xr-x  tests/bugs/glusterd/bug-1022055.t | 26
-rw-r--r--  tests/bugs/glusterd/bug-1027171.t | 53
-rw-r--r--  tests/bugs/glusterd/bug-1040408.t | 31
-rw-r--r--  tests/bugs/glusterd/bug-1046308.t | 19
-rw-r--r--  tests/bugs/glusterd/bug-1047955.t | 23
-rw-r--r--  tests/bugs/glusterd/bug-1075087.t | 33
-rw-r--r-- [-rwxr-xr-x]  tests/bugs/glusterd/bug-1085330-and-bug-916549.t (renamed from tests/bugs/glusterd/bug-1085330.t) | 17
-rwxr-xr-x  tests/bugs/glusterd/bug-1089668.t | 26
-rw-r--r--  tests/bugs/glusterd/bug-1092841.t | 24
-rw-r--r--  tests/bugs/glusterd/bug-1094119-remove-replace-brick-support-from-glusterd.t | 30
-rwxr-xr-x  tests/bugs/glusterd/bug-1095097.t | 19
-rw-r--r--  tests/bugs/glusterd/bug-1102656.t | 20
-rw-r--r--  tests/bugs/glusterd/bug-1104642.t | 47
-rw-r--r--  tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t | 50
-rw-r--r--  tests/bugs/glusterd/bug-1120647.t | 18
-rw-r--r--  tests/bugs/glusterd/bug-1121584-brick-existing-validation-for-remove-brick-status-stop.t | 34
-rw-r--r--  tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t | 37
-rwxr-xr-x  tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t | 34
-rw-r--r--  tests/bugs/glusterd/bug-1177132-quorum-validation.t | 82
-rw-r--r--  tests/bugs/glusterd/bug-1179175-uss-option-validation.t | 37
-rw-r--r--  tests/bugs/glusterd/bug-1199451-op-version-retrieving-fix.t | 22
-rw-r--r--  tests/bugs/glusterd/bug-1209329_daemon-svcs-on-reset-volume.t | 72
-rw-r--r--  tests/bugs/glusterd/bug-1213295-snapd-svc-uninitialized.t | 26
-rwxr-xr-x  tests/bugs/glusterd/bug-1223213-peerid-fix.t | 32
-rw-r--r--  tests/bugs/glusterd/bug-1225716-brick-online-validation-remove-brick.t | 35
-rw-r--r--  tests/bugs/glusterd/bug-1231437-rebalance-test-in-cluster.t | 31
-rw-r--r--  tests/bugs/glusterd/bug-1238135-lazy-daemon-initialization-on-demand.t | 16
-rw-r--r--  tests/bugs/glusterd/bug-1242543-replace-brick.t | 25
-rw-r--r--  tests/bugs/glusterd/bug-1245045-remove-brick-validation.t | 56
-rw-r--r--  tests/bugs/glusterd/bug-1265479-validate-replica-volume-options.t | 67
-rw-r--r--  tests/bugs/glusterd/bug-1266818-shared-storage-disable.t | 36
-rwxr-xr-x  tests/bugs/glusterd/bug-1293414-import-brickinfo-uuid.t | 31
-rw-r--r--  tests/bugs/glusterd/bug-1314649-group-virt.t | 14
-rw-r--r--  tests/bugs/glusterd/bug-1315186-reject-lowering-down-op-version.t | 22
-rw-r--r--  tests/bugs/glusterd/bug-1318591-skip-non-directories-inside-vols.t | 31
-rw-r--r--  tests/bugs/glusterd/bug-1321836-fix-opret-for-volume-info-xml-output.t | 24
-rw-r--r--  tests/bugs/glusterd/bug-1323287-real_path-handshake-test.t | 39
-rwxr-xr-x  tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t | 19
-rw-r--r--  tests/bugs/glusterd/bug-1345727-bricks-stop-on-no-quorum-validation.t | 63
-rwxr-xr-x  tests/bugs/glusterd/bug-1351021-rebalance-info-post-glusterd-restart.t | 59
-rw-r--r--  tests/bugs/glusterd/bug-1352277-spawn-daemons-on-two-node-setup.t | 37
-rw-r--r--  tests/bugs/glusterd/bug-1367478-volume-start-validation-after-glusterd-restart.t | 40
-rw-r--r--  tests/bugs/glusterd/bug-1406411-fail-add-brick-on-replica-count-change.t | 40
-rw-r--r--  tests/bugs/glusterd/bug-1420637-volume-sync-fix.t | 40
-rw-r--r--  tests/bugs/glusterd/bug-1433578-invalid-peer-glusterd-crash.t | 14
-rw-r--r--  tests/bugs/glusterd/bug-1444596_brick_mux_gd_status_restart.t | 68
-rw-r--r--  tests/bugs/glusterd/bug-1444596_brick_mux_posix_hlth_chk_status.t | 47
-rw-r--r--  tests/bugs/glusterd/bug-1451248-mux-reboot-node.t | 54
-rw-r--r--  tests/bugs/glusterd/bug-1454418-seg-fault.t | 25
-rw-r--r--  tests/bugs/glusterd/bug-1482344-volume-option-set-cluster-level.t | 25
-rw-r--r--  tests/bugs/glusterd/bug-1483058-replace-brick-quorum-validation.t | 58
-rw-r--r--  tests/bugs/glusterd/bug-1499509-disconnect-in-brick-mux.t | 27
-rw-r--r--  tests/bugs/glusterd/bug-1507466-reset-brick-commit-force.t | 24
-rwxr-xr-x  tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t | 62
-rwxr-xr-x  tests/bugs/glusterd/bug-782095.t | 48
-rw-r--r--  tests/bugs/glusterd/bug-839595.t | 31
-rwxr-xr-x  tests/bugs/glusterd/bug-859927.t | 70
-rwxr-xr-x  tests/bugs/glusterd/bug-862834.t | 46
-rw-r--r--  tests/bugs/glusterd/bug-878004.t | 29
-rw-r--r--  tests/bugs/glusterd/bug-888752.t | 24
-rwxr-xr-x  tests/bugs/glusterd/bug-889630.t | 56
-rw-r--r--  tests/bugs/glusterd/bug-905307.t | 36
-rw-r--r--  tests/bugs/glusterd/bug-913487.t | 14
-rwxr-xr-x  tests/bugs/glusterd/bug-913555.t | 57
-rwxr-xr-x  tests/bugs/glusterd/bug-916549.t | 19
-rwxr-xr-x  tests/bugs/glusterd/bug-948686.t | 46
-rwxr-xr-x  tests/bugs/glusterd/bug-955588.t | 27
-rw-r--r--  tests/bugs/glusterd/bug-958790.t | 21
-rw-r--r--  tests/bugs/glusterd/bug-961669.t | 48
-rwxr-xr-x  tests/bugs/glusterd/bug-963541.t | 36
-rwxr-xr-x  tests/bugs/glusterd/bug-964059.t | 30
-rw-r--r--  tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t | 82
-rw-r--r--  tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t | 59
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t | 97
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases.t | 273
-rw-r--r--  tests/bugs/glusterd/quorum-validation.t | 115
-rw-r--r--  tests/bugs/glusterd/rebalance-in-cluster.t (renamed from tests/bugs/glusterd/bug-1245142-rebalance_test.t) | 17
-rw-r--r--  tests/bugs/glusterd/rebalance-operations-in-single-node.t | 131
-rw-r--r--  tests/bugs/glusterd/remove-brick-in-cluster.t (renamed from tests/bugs/glusterd/bug-1230121-replica_subvol_count_correct_cal.t) | 30
-rw-r--r--  tests/bugs/glusterd/remove-brick-testcases.t | 119
-rw-r--r--  tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t (renamed from tests/bugs/glusterd/bug-974007.t) | 39
-rw-r--r--  tests/bugs/glusterd/replace-brick-operations.t | 48
-rw-r--r--  tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t (renamed from tests/bugs/glusterd/bug-1383893-daemons-to-follow-quorum.t) | 36
-rw-r--r--  tests/bugs/glusterd/snapshot-operations.t (renamed from tests/bugs/glusterd/bug-1322145-disallow-detatch-peer.t) | 16
-rw-r--r--  tests/bugs/glusterd/sync-post-glusterd-restart.t | 54
-rw-r--r--  tests/bugs/glusterd/validating-options-for-striped-replicated-volume.t | 144
-rw-r--r--  tests/bugs/glusterd/validating-server-quorum.t | 110
93 files changed, 1575 insertions, 2696 deletions
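A note on a helper used throughout the rewritten tests below: several files drop their private count_up_bricks function in favor of the shared online_brick_count from tests/volume.rc. Its authoritative definition lives in the harness and is not part of this diff; as a rough sketch modeled on the count_up_bricks helper this patch removes, it amounts to something like:

# Assumed approximation only -- see tests/volume.rc for the real definition.
# Counts bricks reported online (<status>1) in the XML status of all volumes.
function online_brick_count {
    $CLI --xml volume status all | grep '<status>1' | wc -l
}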
diff --git a/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t b/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t
new file mode 100644
index 00000000000..95d0eb69ac1
--- /dev/null
+++ b/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t
@@ -0,0 +1,110 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+
+#bug-1102656 - validating volume top command
+
+TEST $CLI volume top $V0 open
+TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
+TEST $CLI volume top $V0 read
+
+TEST $CLI volume status
+
+#bug-1002556 - validating 'Number of Bricks' after replica count change
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}3 force
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST killall glusterd
+TEST glusterd
+
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+#bug-1406411 - fail add-brick when replica count changes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#add-brick should fail
+TEST ! $CLI_NO_FORCE volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+TEST kill_brick $V1 $H0 $B0/${V1}1
+
+#add-brick should fail
+TEST ! $CLI_NO_FORCE volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
+
+TEST $CLI volume start $V1 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+
+TEST $CLI volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
+
+#bug-905307 - validate cluster.post-op-delay-secs option
+
+#Strings should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
+
+#-ve ints should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
+
+#INT_MAX+1 should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
+
+#floats should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
+
+#min val 0 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
+EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
+
+#max val 2147483647 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
+EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
+
+#some middle val in range 2147 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
+EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
+
+#bug-1265479 - validate-replica-volume-options
+
+#Setting data-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 data-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
+TEST $CLI volume set $V1 cluster.data-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
+
+#Setting metadata-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 metadata-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.metadata-self-heal';
+TEST $CLI volume set $V1 cluster.metadata-self-heal on
+
+#Setting entry-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 entry-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
+TEST $CLI volume set $V1 cluster.entry-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1446172-brick-mux-reset-brick.t b/tests/bugs/glusterd/brick-mux-validation.t
index e6aaaa4e87c..3c6ad49686e 100644
--- a/tests/bugs/glusterd/bug-1446172-brick-mux-reset-brick.t
+++ b/tests/bugs/glusterd/brick-mux-validation.t
@@ -4,30 +4,46 @@
. $(dirname $0)/../../traps.rc
. $(dirname $0)/../../volume.rc
-cleanup;
-
-function count_up_bricks {
- $CLI --xml volume status | grep '<status>1' | wc -l
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
}
-function count_brick_processes {
- pgrep glusterfsd | wc -l
+function count_brick_pids {
+ $CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
}
-TEST glusterd
+cleanup;
+
+#bug-1451248 - validate brick mux after glusterd reboot
+TEST glusterd
TEST $CLI volume set all cluster.brick-multiplex on
push_trapfunc "$CLI volume set all cluster.brick-multiplex off"
push_trapfunc "cleanup"
-TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
-TEST $CLI volume create $V1 $H0:$B0/${V1}{0,1}
-
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
TEST $CLI volume start $V0
-TEST $CLI volume start $V1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks
EXPECT 1 count_brick_processes
+EXPECT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
+
+pkill gluster
+TEST glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1..3}
+TEST $CLI volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+
+#bug-1446172 - reset brick with brick multiplexing enabled
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
# Create files
@@ -38,7 +54,7 @@ done
TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 start
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 3 count_up_bricks
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
EXPECT 1 count_brick_processes
# Negative case with brick killed but volume-id xattr present
@@ -47,9 +63,8 @@ TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
# reset-brick commit force should work and should bring up the brick
TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
EXPECT 1 count_brick_processes
-
TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 $M1;
# Create files
for i in {1..5}
@@ -58,8 +73,7 @@ do
done
TEST $CLI volume reset-brick $V1 $H0:$B0/${V1}1 start
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 3 count_up_bricks
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
EXPECT 1 count_brick_processes
# Simulate reset disk
@@ -75,5 +89,6 @@ setfattr -x trusted.gfid $B0/${V1}1
# the --wignore flag that essentially makes the command act like "commit force"
TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1 commit
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
EXPECT 1 count_brick_processes
+cleanup
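Aside: the count_brick_pids helper introduced above extracts every <pid> value from the XML status output, filters out the "N/A" entries, and counts distinct pids; with multiplexing on, all bricks share one process, so the expected count is 1. A standalone illustration of the same pipeline on hypothetical input (not actual harness output):

printf '<pid>123</pid>\n<pid>123</pid>\n<pid>N/A</pid>\n' \
    | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
    | grep -v "N/A" | sort | uniq | wc -l
# prints 1: the two live bricks share a single pid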
diff --git a/tests/bugs/glusterd/brick-mux.t b/tests/bugs/glusterd/brick-mux.t
new file mode 100644
index 00000000000..eeaa3ebfea8
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+cleanup
+
+#bug-1444596 - validating brick mux
+
+TEST glusterd -LDEBUG
+TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
+TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+#bug-1499509 - stop all the bricks when a brick process is killed
+kill -9 $(pgrep glusterfsd)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 online_brick_count
+
+TEST $CLI volume start $V0 force
+TEST $CLI volume start $V1 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+
+
+pkill glusterd
+TEST glusterd
+
+#Check brick status after restarting glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+TEST $CLI volume set $V1 performance.cache-size 32MB
+TEST $CLI volume stop $V1
+TEST $CLI volume start $V1
+
+#Check the number of brick processes after changing an option
+EXPECT 2 count_brick_processes
+
+pkill glusterd
+TEST glusterd
+
+#Brick status after restarting glusterd should not be N/A
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 2 count_brick_processes
+
+pkill glusterd
+TEST glusterd
+
+#Brick status after restarting glusterd should not be N/A
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 2 count_brick_processes
+
+#bug-1444596_brick_mux_posix_hlth_chk_status
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $H0:$B0/brick{0,1}
+
+#Check the number of brick processes after removing bricks from the back-end
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
+
+TEST glusterfs -s $H0 --volfile-id $V1 $M0
+TEST touch $M0/file{1..10}
+
+pkill glusterd
+TEST glusterd -LDEBUG
+sleep 5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
+
+cleanup
+
diff --git a/tests/bugs/glusterd/bug-000000.t b/tests/bugs/glusterd/bug-000000.t
deleted file mode 100755
index 55f7b11f598..00000000000
--- a/tests/bugs/glusterd/bug-000000.t
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1002556.t b/tests/bugs/glusterd/bug-1002556.t
deleted file mode 100755
index ac71d06d533..00000000000
--- a/tests/bugs/glusterd/bug-1002556.t
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume start $V0
-EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
-
-TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
-EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
-
-TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}1 force
-EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
-
-TEST killall glusterd
-TEST glusterd
-
-EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
-cleanup
diff --git a/tests/bugs/glusterd/bug-1004744.t b/tests/bugs/glusterd/bug-1004744.t
deleted file mode 100644
index 66a827daa74..00000000000
--- a/tests/bugs/glusterd/bug-1004744.t
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-#Test case: After a rebalance fix-layout, check if the rebalance status command
-#displays the appropriate message at the CLI.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a 2x1 distributed volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume start $V0
-
-# Mount FUSE and create file/directory
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-for i in `seq 1 10`;
-do
- mkdir $M0/dir_$i
- echo file>$M0/dir_$i/file_$i
- for j in `seq 1 100`;
- do
- mkdir $M0/dir_$i/dir_$j
- echo file>$M0/dir_$i/dir_$j/file_$j
- done
-done
-
-#add 2 bricks
-TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{3,4};
-
-#perform rebalance fix-layout
-TEST $CLI volume rebalance $V0 fix-layout start
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0;
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1022055.t b/tests/bugs/glusterd/bug-1022055.t
deleted file mode 100755
index 9f39c80b6b6..00000000000
--- a/tests/bugs/glusterd/bug-1022055.t
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-cleanup;
-
-TEST launch_cluster 2;
-
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0;
-
-TEST $CLI_1 volume start $V0;
-
-TEST $CLI_1 volume log rotate $V0;
-
-TEST $CLI_1 volume status;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1027171.t b/tests/bugs/glusterd/bug-1027171.t
deleted file mode 100644
index 1b457d8f660..00000000000
--- a/tests/bugs/glusterd/bug-1027171.t
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-
-#Test case: Do not allow commit if the bricks are not decommissioned
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a Distributed volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
-TEST $CLI volume start $V0
-
-#Remove bricks and commit without starting
-function remove_brick_commit_status {
- $CLI volume remove-brick $V0 \
- $H0:$B0/${V0}2 commit 2>&1 |grep -oE "success|decommissioned"
-}
-EXPECT "decommissioned" remove_brick_commit_status;
-
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0
-TEST ! $CLI volume info $V0
-
-#Create a Distributed-Replicate volume
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..4};
-TEST $CLI volume start $V0
-
-#Try to reduce replica count with start option
-function remove_brick_start_status {
- $CLI volume remove-brick $V0 replica 1 \
- $H0:$B0/${V0}1 $H0:$B0/${V0}3 start 2>&1 |grep -oE "success|failed"
-}
-EXPECT "failed" remove_brick_start_status;
-
-#Remove bricks with commit option
-function remove_brick_commit_status2 {
- $CLI volume remove-brick $V0 replica 1 \
- $H0:$B0/${V0}1 $H0:$B0/${V0}3 commit 2>&1 |
- grep -oE "success|decommissioned"
-}
-EXPECT "decommissioned" remove_brick_commit_status2;
-
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0
-TEST ! $CLI volume info $V0
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1040408.t b/tests/bugs/glusterd/bug-1040408.t
deleted file mode 100644
index c378000630b..00000000000
--- a/tests/bugs/glusterd/bug-1040408.t
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-#Test case: Create a distributed replicate volume, and reduce
-#replica count
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a 2X3 distributed-replicate volume
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
-TEST $CLI volume start $V0
-
-# Reduce to 2x2 volume by specifying bricks in reverse order
-function remove_brick_status {
- $CLI volume remove-brick $V0 replica 2 \
- $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
-}
-EXPECT "success" remove_brick_status;
-
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1046308.t b/tests/bugs/glusterd/bug-1046308.t
deleted file mode 100644
index 9c827c4a492..00000000000
--- a/tests/bugs/glusterd/bug-1046308.t
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-volname="StartMigrationDuringRebalanceTest"
-TEST glusterd
-TEST pidof glusterd;
-
-TEST $CLI volume info;
-TEST $CLI volume create $volname $H0:$B0/${volname}{1,2};
-TEST $CLI volume start $volname;
-TEST $CLI volume rebalance $volname start;
-
-cleanup;
-
-
-
diff --git a/tests/bugs/glusterd/bug-1047955.t b/tests/bugs/glusterd/bug-1047955.t
deleted file mode 100644
index a409d9f7195..00000000000
--- a/tests/bugs/glusterd/bug-1047955.t
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-cleanup;
-
-# Create a 2x2 dist-rep volume; peer probe a new node.
-# Performing remove-brick from this new node must succeed
-# without crashing its glusterd
-
-TEST launch_cluster 2;
-TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
-TEST $CLI_1 volume start $V0;
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
-TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
-TEST $CLI_2 volume info
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1075087.t b/tests/bugs/glusterd/bug-1075087.t
deleted file mode 100644
index 35155a0b8c9..00000000000
--- a/tests/bugs/glusterd/bug-1075087.t
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
- $H0:$B0/${V0}2 $H0:$B0/${V0}3
-TEST $CLI volume start $V0
-
-## Mount FUSE
-TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-
-TEST mkdir $M0/dir{1..10};
-TEST touch $M0/dir{1..10}/files{1..10};
-
-TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5
-
-TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
-
-TEST pkill gluster
-TEST glusterd
-TEST pidof glusterd
-
-# status should be "completed" immediately after glusterd has respawned.
-EXPECT_WITHIN 5 "completed" rebalance_status_field $V0
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1085330.t b/tests/bugs/glusterd/bug-1085330-and-bug-916549.t
index ffcfe9274eb..892a30d74ea 100755..100644
--- a/tests/bugs/glusterd/bug-1085330.t
+++ b/tests/bugs/glusterd/bug-1085330-and-bug-916549.t
@@ -11,6 +11,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
+#testcase: bug-1085330
# Construct volname string such that it is more than 256 characters
for i in {1..30}
@@ -73,8 +74,20 @@ TEST ! $CLI volume create $volname $H0:$B0/$brick;
TEST $CLI volume info;
# Positive test case
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
TEST $CLI volume info;
-cleanup;
+TEST $CLI volume start $V0;
+
+#testcase: bug-916549
+
+pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/);
+brick_pid=$(cat $GLUSTERD_PIDFILEDIR/vols/$V0/$pid_file);
+
+kill -SIGKILL $brick_pid;
+TEST $CLI volume start $V0 force;
+TEST process_leak_count $(pidof glusterd);
+
+cleanup
+
diff --git a/tests/bugs/glusterd/bug-1089668.t b/tests/bugs/glusterd/bug-1089668.t
deleted file mode 100755
index c8eb7c30055..00000000000
--- a/tests/bugs/glusterd/bug-1089668.t
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../dht.rc
-
-cleanup
-
-#This script checks that the command 'gluster volume rebalance <volname> status'
-#will not show any output when the user has done only remove-brick start, and that
-#'gluster volume remove-brick <volname> <brick_name> status' will not show
-#any output when the user has triggered only rebalance start.
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
-TEST $CLI volume start $V0
-
-TEST $CLI volume rebalance $V0 start
-TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 status
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
-
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
-TEST ! $CLI volume rebalance $V0 status
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-1092841.t b/tests/bugs/glusterd/bug-1092841.t
deleted file mode 100644
index d3dcf07fd02..00000000000
--- a/tests/bugs/glusterd/bug-1092841.t
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-
-TEST $CLI volume start $V0;
-
-TEST $CLI volume barrier $V0 enable;
-
-TEST ! $CLI volume barrier $V0 enable;
-
-TEST $CLI volume barrier $V0 disable;
-
-TEST ! $CLI volume barrier $V0 disable;
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-1094119-remove-replace-brick-support-from-glusterd.t b/tests/bugs/glusterd/bug-1094119-remove-replace-brick-support-from-glusterd.t
deleted file mode 100644
index 30d375a1eb0..00000000000
--- a/tests/bugs/glusterd/bug-1094119-remove-replace-brick-support-from-glusterd.t
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-## Test case for BZ: 1094119 Remove replace-brick support from gluster
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-# Start glusterd
-TEST glusterd
-TEST pidof glusterd
-
-## Lets create and start volume
-TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
-TEST $CLI volume start $V0
-
-## Now with this patch replace-brick only accepts the following command:
-## volume replace-brick <VOLNAME> <SOURCE-BRICK> <NEW-BRICK> {commit force}
-## Apart from this, the replace-brick command will fail.
-
-TEST ! $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick3 start
-TEST ! $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick3 status
-TEST ! $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick3 abort
-
-
-## replace-brick commit force command should succeed
-TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick3 commit force
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1095097.t b/tests/bugs/glusterd/bug-1095097.t
deleted file mode 100755
index 0a616f7831e..00000000000
--- a/tests/bugs/glusterd/bug-1095097.t
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B1/brick1;
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-TEST $CLI volume profile $V0 start
-TEST $CLI volume profile $V0 info
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1102656.t b/tests/bugs/glusterd/bug-1102656.t
deleted file mode 100644
index e80f4930a63..00000000000
--- a/tests/bugs/glusterd/bug-1102656.t
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
-TEST $CLI volume start $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
-
-TEST $CLI volume top $V0 open
-TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
-TEST $CLI volume top $V0 read
-
-TEST $CLI volume status
-TEST $CLI volume stop $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status';
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1104642.t b/tests/bugs/glusterd/bug-1104642.t
deleted file mode 100644
index 000093a8ae2..00000000000
--- a/tests/bugs/glusterd/bug-1104642.t
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-function get_value()
-{
- local key=$1
- local var="CLI_$2"
-
- eval cli_index=\$$var
-
- $cli_index volume info | grep "^$key"\
- | sed 's/.*: //'
-}
-
-cleanup
-
-TEST launch_cluster 2
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
-EXPECT "$V0" get_value 'Volume Name' 1
-EXPECT "Created" get_value 'Status' 1
-
-TEST $CLI_1 volume start $V0
-EXPECT "Started" get_value 'Status' 1
-
-#Bring down 2nd glusterd
-TEST kill_glusterd 2
-
-#set the volume all options from the 1st glusterd
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
-
-#Bring back the 2nd glusterd
-TEST $glusterd_2
-
-#Verify whether the value has been synced
-EXPECT '80' get_value 'cluster.server-quorum-ratio' 1
-EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
-EXPECT_WITHIN $PROBE_TIMEOUT '80' get_value 'cluster.server-quorum-ratio' 2
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t b/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t
deleted file mode 100644
index 561b90740fa..00000000000
--- a/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t
+++ /dev/null
@@ -1,50 +0,0 @@
-#! /bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-# The test will attempt to verify that management handshake requests to
-# GlusterD are authenticated before being allowed to change a GlusterD's
-# op-version
-#
-# 1. Launch 3 glusterds
-# 2. Probe 2 of them to form a cluster. This should succeed.
-# 3. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail.
-# 4. a. Reduce the op-version of 3rd GlusterD and restart it.
-# b. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail.
-# 5. Check current op-version of first two GlusterDs. It shouldn't have changed.
-# 6. Probe third GlusterD from the cluster. This should succeed.
-
-
-cleanup
-
-TEST launch_cluster 3
-
-TEST $CLI_1 peer probe $H2
-
-TEST ! $CLI_3 peer probe $H1
-
-GD1_WD=$($CLI_1 system getwd)
-OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
-
-TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
-
-GD3_WD=$($CLI_3 system getwd)
-TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
-
-TEST kill_glusterd 3
-TEST start_glusterd 3
-
-TEST ! $CLI_3 peer probe $H1
-
-OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
-TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
-
-TEST $CLI_1 peer probe $H3
-
-kill_node 1
-kill_node 2
-kill_node 3
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-1120647.t b/tests/bugs/glusterd/bug-1120647.t
deleted file mode 100644
index 90d069ca502..00000000000
--- a/tests/bugs/glusterd/bug-1120647.t
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{1..4}
-TEST $CLI volume start $V0
-TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} start
-EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/brick3"
-EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/brick4"
-TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} commit
-TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/brick2 force
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1121584-brick-existing-validation-for-remove-brick-status-stop.t b/tests/bugs/glusterd/bug-1121584-brick-existing-validation-for-remove-brick-status-stop.t
deleted file mode 100644
index de80afcc2eb..00000000000
--- a/tests/bugs/glusterd/bug-1121584-brick-existing-validation-for-remove-brick-status-stop.t
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-## Test case for BZ-1121584. Execution of the remove-brick status/stop command
-## should give an error for a brick which is not part of the volume.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../dht.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd
-TEST pidof glusterd
-
-## Lets Create and start volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
-TEST $CLI volume start $V0
-
-## Start remove-brick operation on the volume
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
-
-## Giving a non-existent brick to the remove-brick status/stop command should
-## give an error.
-TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD status
-TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD stop
-
-## Giving a brick which is part of the volume to the remove-brick status/stop
-## command should print the statistics of the remove-brick operation or stop the
-## remove-brick operation.
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 status
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 stop
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t b/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
deleted file mode 100644
index 9fc7ac3b845..00000000000
--- a/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-## Test case for cluster.min-free-disk option validation.
-
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd
-TEST pidof glusterd
-
-## Lets create and start volume
-TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
-TEST $CLI volume start $V0
-
-## Setting invalid value for option cluster.min-free-disk should fail
-TEST ! $CLI volume set $V0 min-free-disk ""
-TEST ! $CLI volume set $V0 min-free-disk 143.!/12
-TEST ! $CLI volume set $V0 min-free-disk 123%
-TEST ! $CLI volume set $V0 min-free-disk 194.34%
-
-## Setting fractional value as a size (unit is byte) for option
-## cluster.min-free-disk should fail
-TEST ! $CLI volume set $V0 min-free-disk 199.051
-TEST ! $CLI volume set $V0 min-free-disk 111.999
-
-## Setting valid value for option cluster.min-free-disk should pass
-TEST $CLI volume set $V0 min-free-disk 12%
-TEST $CLI volume set $V0 min-free-disk 56.7%
-TEST $CLI volume set $V0 min-free-disk 120
-TEST $CLI volume set $V0 min-free-disk 369.0000
-
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t b/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t
deleted file mode 100755
index 5a6cf81fd53..00000000000
--- a/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-cleanup;
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0
-TEST $CLI_1 volume create $V1 $H1:$B1/$V1
-TEST $CLI_1 volume start $V0
-TEST $CLI_1 volume start $V1
-
-for i in {1..20}
-do
- $CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
- $CLI_1 volume set $V1 barrier on
- $CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
- $CLI_2 volume set $V1 barrier on
-done
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-TEST $CLI_1 volume status
-TEST $CLI_2 volume status
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1177132-quorum-validation.t b/tests/bugs/glusterd/bug-1177132-quorum-validation.t
deleted file mode 100644
index f18b5a178d3..00000000000
--- a/tests/bugs/glusterd/bug-1177132-quorum-validation.t
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-# Test case for quorum validation in glusterd for syncop framework
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-cleanup;
-
-TEST launch_cluster 2
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Lets create the volume and set quorum type as a server
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-
-# Start the volume
-TEST $CLI_1 volume start $V0
-
-# Set the quorum ratio to 52, meaning 52% or more of the total available nodes
-# should be available for performing a volume operation.
-# i.e. Server-side quorum is met if the number of nodes that are available is
-# greater than or equal to 'quorum-ratio' times the number of nodes in the
-# cluster
-
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
-
-# Bring down 2nd glusterd
-TEST kill_glusterd 2
-
-# Now quorum is not met. Add-brick, remove-brick and volume-set commands
-# (commands based on the syncop framework) should fail
-TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1
-TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
-TEST ! $CLI_1 volume set $V0 barrier enable
-
-# Now execute a command which goes through op state machine and it should fail
-TEST ! $CLI_1 volume profile $V0 start
-
-# Volume set all command and volume reset all command should be successful
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
-TEST $CLI_1 volume reset all
-
-# Bring back 2nd glusterd
-TEST $glusterd_2
-
-# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
-
-# Now quorum is met.
-# Add-brick, remove-brick and volume-set commands should succeed
-TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
-TEST $CLI_1 volume set $V0 barrier enable
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
-
-## Stop the volume
-TEST $CLI_1 volume stop $V0
-
-## Bring down 2nd glusterd
-TEST kill_glusterd 2
-
-## Now quorum is not met. Starting the volume on the 1st node should not succeed
-TEST ! $CLI_1 volume start $V0
-
-## Bring back 2nd glusterd
-TEST $glusterd_2
-
-# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
-
-## Now quorum is met. Starting the volume on the 1st node should succeed.
-TEST $CLI_1 volume start $V0
-
-# Now re-execute the same profile command and this time it should succeed
-TEST $CLI_1 volume profile $V0 start
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1179175-uss-option-validation.t b/tests/bugs/glusterd/bug-1179175-uss-option-validation.t
deleted file mode 100644
index 6bbe3c9336f..00000000000
--- a/tests/bugs/glusterd/bug-1179175-uss-option-validation.t
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-## Test case for option features.uss validation.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd;
-TEST pidof glusterd;
-
-## Lets create and start volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume start $V0
-
-## Set the features.uss option with non-boolean values. These non-boolean values
-## for the features.uss option should fail.
-TEST ! $CLI volume set $V0 features.uss abcd
-TEST ! $CLI volume set $V0 features.uss #$#$
-TEST ! $CLI volume set $V0 features.uss 2324
-
-## Setting other options with valid value. These options should succeed.
-TEST $CLI volume set $V0 barrier enable
-TEST $CLI volume set $V0 ping-timeout 60
-
-## Set features.uss option with valid boolean value. It should succeed.
-TEST $CLI volume set $V0 features.uss enable
-TEST $CLI volume set $V0 features.uss disable
-
-
-## Setting other options with valid value. These options should succeed.
-TEST $CLI volume set $V0 barrier enable
-TEST $CLI volume set $V0 ping-timeout 60
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1199451-op-version-retrieving-fix.t b/tests/bugs/glusterd/bug-1199451-op-version-retrieving-fix.t
deleted file mode 100644
index 43661b67628..00000000000
--- a/tests/bugs/glusterd/bug-1199451-op-version-retrieving-fix.t
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-## Test case for BZ-1199451 (gluster command should retrieve current op-version
-## of the NODE)
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd
-TEST pidof glusterd
-
-## Lets create and start volume
-TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
-TEST $CLI volume start $V0
-
-## glusterd command should retrieve current op-version of the node
-TEST $CLI volume get $V0 cluster.op-version
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1209329_daemon-svcs-on-reset-volume.t b/tests/bugs/glusterd/bug-1209329_daemon-svcs-on-reset-volume.t
deleted file mode 100644
index f6ca953e40b..00000000000
--- a/tests/bugs/glusterd/bug-1209329_daemon-svcs-on-reset-volume.t
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../nfs.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd;
-TEST pidof glusterd;
-
-## Lets create volume
-TEST $CLI volume create $V0 $H0:$B0/${V0};
-
-## Verify volume is created
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-##enable bitrot and verify whether bitd is running
-TEST $CLI volume bitrot $V0 enable
-EXPECT 'on' volinfo_field $V0 'features.bitrot'
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
-
-##Do reset force, which sets the bitrot options back to default
-TEST $CLI volume reset $V0 force;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_bitd_count
-
-##enable the uss option and verify whether snapd is running
-TEST $CLI volume set $V0 features.uss on
-EXPECT 'on' volinfo_field $V0 'features.uss'
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
-
-##Do reset force, which sets the uss options back to default
-TEST $CLI volume reset $V0 force;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
-
-##verify nfs is initially disabled by default
-EXPECT "0" get_nfs_count
-
-##enable nfs and verify
-TEST $CLI volume set $V0 nfs.disable off
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
-EXPECT "1" get_nfs_count
-
-##Do reset force, which sets the nfs options back to default
-TEST $CLI volume reset $V0 force;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
-
-##enable the uss option and verify whether snapd is running
-TEST $CLI volume set $V0 features.uss on
-EXPECT 'on' volinfo_field $V0 'features.uss'
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
-
-##Disable the uss option using set command and verify snapd
-TEST $CLI volume set $V0 features.uss off
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
-
-##enable nfs.disable and verify
-TEST $CLI volume set $V0 nfs.disable on
-EXPECT 'on' volinfo_field $V0 'nfs.disable'
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
-
-## disable nfs.disable option using set command
-TEST $CLI volume set $V0 nfs.disable off
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1213295-snapd-svc-uninitialized.t b/tests/bugs/glusterd/bug-1213295-snapd-svc-uninitialized.t
deleted file mode 100644
index 1dbfdf8697b..00000000000
--- a/tests/bugs/glusterd/bug-1213295-snapd-svc-uninitialized.t
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-
-kill_glusterd 2
-TEST start_glusterd 2
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-#volume stop should not crash
-TEST $CLI_2 volume stop $V0
-
-# check whether glusterd instance is running on H2 as this is the node which
-# restored the volume configuration after a restart
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-cleanup
diff --git a/tests/bugs/glusterd/bug-1223213-peerid-fix.t b/tests/bugs/glusterd/bug-1223213-peerid-fix.t
deleted file mode 100755
index 8e7589c9c3b..00000000000
--- a/tests/bugs/glusterd/bug-1223213-peerid-fix.t
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-TEST launch_cluster 2;
-
-# Fool the cluster into operating with op-version 3.5 even though the binary's
-# op-version is > 3.5. This ensures the 3.5 code path is hit, to test that volume
-# status works when a node is upgraded from 3.5 to 3.7 or higher, as the mgmt_v3
-# lock was introduced in version 3.6 and onwards
-
-GD1_WD=$($CLI_1 system getwd)
-$CLI_1 system uuid get
-TEST sed -rnie "'s/(operating-version=)\w+/\130500/gip'" ${GD1_WD}/glusterd.info
-
-TEST kill_glusterd 1
-TEST start_glusterd 1
-
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-
-TEST $CLI_1 volume status $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1225716-brick-online-validation-remove-brick.t b/tests/bugs/glusterd/bug-1225716-brick-online-validation-remove-brick.t
deleted file mode 100644
index d168866ab63..00000000000
--- a/tests/bugs/glusterd/bug-1225716-brick-online-validation-remove-brick.t
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
-TEST $CLI volume start $V0
-
-#kill a brick process
-kill_brick $V0 $H0 $B0/${V0}1
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}1
-
-#remove-brick start should fail as the brick is down
-TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
-
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
-
-#remove-brick start should succeed as the brick is up
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}1"
-
-#kill a brick process
-kill_brick $V0 $H0 $B0/${V0}1
-
-#remove-brick commit should pass even if the brick is down
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1231437-rebalance-test-in-cluster.t b/tests/bugs/glusterd/bug-1231437-rebalance-test-in-cluster.t
deleted file mode 100644
index 3257f6994dd..00000000000
--- a/tests/bugs/glusterd/bug-1231437-rebalance-test-in-cluster.t
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-. $(dirname $0)/../../volume.rc
-
-
-cleanup;
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
-
-$CLI_1 volume start $V0
-EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
-
-#Mount FUSE
-TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
-
-TEST mkdir $M0/dir{1..4};
-TEST touch $M0/dir{1..4}/files{1..4};
-
-TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
-
-TEST $CLI_1 volume rebalance $V0 start
-EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1238135-lazy-daemon-initialization-on-demand.t b/tests/bugs/glusterd/bug-1238135-lazy-daemon-initialization-on-demand.t
deleted file mode 100644
index 54c3187cbdb..00000000000
--- a/tests/bugs/glusterd/bug-1238135-lazy-daemon-initialization-on-demand.t
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd;
-
-GDWD=$($CLI system getwd)
-
-# The glusterd.info file will be created on either the first peer probe or volume
-# creation, hence we expect the file to be absent in this case
-TEST ! -e $GDWD/glusterd.info
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1242543-replace-brick.t b/tests/bugs/glusterd/bug-1242543-replace-brick.t
deleted file mode 100644
index 0b1087f1d51..00000000000
--- a/tests/bugs/glusterd/bug-1242543-replace-brick.t
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume start $V0
-
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
-
-# Replace brick1 without killing the brick
-TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-
-TEST kill_brick $V0 $H0 $B0/${V0}1_new
-
-# Replace brick1 after killing the brick
-TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_newer commit force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1245045-remove-brick-validation.t b/tests/bugs/glusterd/bug-1245045-remove-brick-validation.t
deleted file mode 100644
index 597c40ca4ec..00000000000
--- a/tests/bugs/glusterd/bug-1245045-remove-brick-validation.t
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup
-
-TEST launch_cluster 3;
-TEST $CLI_1 peer probe $H2;
-TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-
-kill_glusterd 2
-
-#remove-brick should fail as the peer hosting the brick is down
-TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
-
-TEST start_glusterd 2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-#volume status should work
-TEST $CLI_2 volume status
-
-
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
-kill_glusterd 2
-
-#remove-brick commit should fail as the peer hosting the brick is down
-TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} commit
-
-TEST start_glusterd 2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-#volume status should work
-TEST $CLI_2 volume status
-
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} stop
-
-kill_glusterd 3
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
-
-TEST start_glusterd 3
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-TEST $CLI_3 volume status
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-1265479-validate-replica-volume-options.t b/tests/bugs/glusterd/bug-1265479-validate-replica-volume-options.t
deleted file mode 100644
index e2d43ca817b..00000000000
--- a/tests/bugs/glusterd/bug-1265479-validate-replica-volume-options.t
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a distributed volume
-TEST $CLI volume create $V0 $H0:$B0/${V00}{1..2};
-TEST $CLI volume start $V0
-
-#Setting data-self-heal option on for distribute volume
-TEST ! $CLI volume set $V0 data-self-heal on
-EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
-TEST ! $CLI volume set $V0 cluster.data-self-heal on
-EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
-
-#Setting metadata-self-heal option on for distribute volume
-TEST ! $CLI volume set $V0 metadata-self-heal on
-EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
-TEST ! $CLI volume set $V0 cluster.metadata-self-heal on
-EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
-
-#Setting entry-self-heal option on for distribute volume
-TEST ! $CLI volume set $V0 entry-self-heal on
-EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
-TEST ! $CLI volume set $V0 cluster.entry-self-heal on
-EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
-
-#Delete the volume
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-
-
-#Create a distribute-replicate volume
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
-TEST $CLI volume start $V0
-
-#Setting data-self-heal option on for distribute-replicate volume
-TEST $CLI volume set $V0 data-self-heal on
-EXPECT 'on' volinfo_field $V0 'cluster.data-self-heal';
-TEST $CLI volume set $V0 cluster.data-self-heal on
-EXPECT 'on' volinfo_field $V0 'cluster.data-self-heal';
-
-#Setting metadata-self-heal option on for distribute-replicate volume
-TEST $CLI volume set $V0 metadata-self-heal on
-EXPECT 'on' volinfo_field $V0 'cluster.metadata-self-heal';
-TEST $CLI volume set $V0 cluster.metadata-self-heal on
-EXPECT 'on' volinfo_field $V0 'cluster.metadata-self-heal';
-
-#Setting entry-self-heal option on for distribute-replicate volume
-TEST $CLI volume set $V0 entry-self-heal on
-EXPECT 'on' volinfo_field $V0 'cluster.entry-self-heal';
-TEST $CLI volume set $V0 cluster.entry-self-heal on
-EXPECT 'on' volinfo_field $V0 'cluster.entry-self-heal';
-
-#Delete the volume
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1266818-shared-storage-disable.t b/tests/bugs/glusterd/bug-1266818-shared-storage-disable.t
deleted file mode 100644
index a9ccf1b8954..00000000000
--- a/tests/bugs/glusterd/bug-1266818-shared-storage-disable.t
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-## Test case for BZ 1266818;
-## Disabling enable-shared-storage option should not delete user created
-## volume with name glusterd_shared_storage
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-## Start a 2 node virtual cluster
-TEST launch_cluster 2;
-
-## Peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-## Creating a volume with name glusterd_shared_storage
-TEST $CLI_1 volume create glusterd_shared_storage $H1:$B1/${V0}0 $H2:$B1/${V0}1
-
-## Disabling enable-shared-storage should not succeed and should not delete the
-## user created volume with name "glusterd_shared_storage"
-TEST ! $CLI_1 volume all enable-shared-storage disable
-
-## Volume with name should exist
-TEST $CLI_1 volume info glusterd_shared_storage
-
-cleanup;
-
-
-
-
-
diff --git a/tests/bugs/glusterd/bug-1293414-import-brickinfo-uuid.t b/tests/bugs/glusterd/bug-1293414-import-brickinfo-uuid.t
deleted file mode 100755
index 9f67e4ccfa0..00000000000
--- a/tests/bugs/glusterd/bug-1293414-import-brickinfo-uuid.t
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-TEST launch_cluster 4;
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-
-
-TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-TEST $CLI_1 peer probe $H4;
-EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
-
-# peers hosting bricks can't be detached
-TEST ! $CLI_3 peer detach $H1
-TEST ! $CLI_3 peer detach $H2
-
-
-# peer not hosting bricks should be detachable
-TEST $CLI_3 peer detach $H4
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1314649-group-virt.t b/tests/bugs/glusterd/bug-1314649-group-virt.t
deleted file mode 100644
index 257e7845611..00000000000
--- a/tests/bugs/glusterd/bug-1314649-group-virt.t
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-
-TEST $CLI volume set $V0 group virt;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1315186-reject-lowering-down-op-version.t b/tests/bugs/glusterd/bug-1315186-reject-lowering-down-op-version.t
deleted file mode 100644
index 4bd6eaac59f..00000000000
--- a/tests/bugs/glusterd/bug-1315186-reject-lowering-down-op-version.t
+++ /dev/null
@@ -1,22 +0,0 @@
-#! /bin/bash
-
-. $(dirname $0)/../../include.rc
-
-# The test validates that lowering the op-version must fail
-
-cleanup
-
-TEST glusterd
-TEST pidof glusterd
-
-#volume create is just to ensure glusterd.info file is created
-TEST $CLI volume create $V0 $H0:$B0/b1
-
-GDWD=$($CLI system getwd)
-OP_VERS_ORIG=$(grep 'operating-version' ${GDWD}/glusterd.info | cut -d '=' -f 2)
-OP_VERS_NEW=$(expr $OP_VERS_ORIG - 1)
-
-TEST ! $CLI volume set all cluster.op-version $OP_VERS_NEW
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-1318591-skip-non-directories-inside-vols.t b/tests/bugs/glusterd/bug-1318591-skip-non-directories-inside-vols.t
deleted file mode 100644
index c776b489957..00000000000
--- a/tests/bugs/glusterd/bug-1318591-skip-non-directories-inside-vols.t
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../env.rc
-. $(dirname $0)/../../snapshot.rc
-
-cleanup;
-
-TEST verify_lvm_version
-TEST glusterd
-TEST pidof glusterd
-
-TEST setup_lvm 1
-
-TEST $CLI volume create $V0 $H0:$L1
-TEST $CLI volume start $V0
-
-TEST $CLI volume status $V0;
-
-TEST touch $GLUSTERD_WORKDIR/vols/file
-
-TEST $CLI snapshot create snap1 $V0 no-timestamp
-
-TEST touch $GLUSTERD_WORKDIR/snaps/snap1/file
-
-TEST killall_gluster
-
-TEST glusterd
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1321836-fix-opret-for-volume-info-xml-output.t b/tests/bugs/glusterd/bug-1321836-fix-opret-for-volume-info-xml-output.t
deleted file mode 100644
index 48fccc621d8..00000000000
--- a/tests/bugs/glusterd/bug-1321836-fix-opret-for-volume-info-xml-output.t
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-## Check that opRet field has correct value assigned for non existent volumes
-## --------------------------------------------------------------------------
-
-function get_opret_value () {
- local VOL=$1
- $CLI volume info $VOL --xml | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'
-}
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 $H0:$B0/$V0;
-
-EXPECT 0 get_opret_value $V0
-EXPECT -1 get_opret_value "novol"
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1323287-real_path-handshake-test.t b/tests/bugs/glusterd/bug-1323287-real_path-handshake-test.t
deleted file mode 100644
index 12b722bae36..00000000000
--- a/tests/bugs/glusterd/bug-1323287-real_path-handshake-test.t
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-. $(dirname $0)/../../volume.rc
-
-function volume_get_field()
-{
- local vol=$1
- local field=$2
- $CLI_2 volume get $vol $field | tail -1 | awk '{print $2}'
-}
-
-cleanup;
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
-
-TEST $CLI_1 volume start $V0
-EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
-
-#kill glusterd2 and do a volume set command to change the version
-kill_glusterd 2
-
-TEST $CLI_1 volume set $V0 performance.write-behind off
-TEST start_glusterd 2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-#Check for handshake completion.
-EXPECT_WITHIN $PROBE_TIMEOUT 'off' volume_get_field $V0 'write-behind'
-
-#During hanndshake, if we failed to populate real_path,
-#then volume create will fail.
-TEST $CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
diff --git a/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t b/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t
deleted file mode 100755
index 5081c373e47..00000000000
--- a/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-TEST launch_cluster 2;
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0
-
-TEST kill_glusterd 2
-TEST ! $CLI_1 volume delete $V0
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1345727-bricks-stop-on-no-quorum-validation.t b/tests/bugs/glusterd/bug-1345727-bricks-stop-on-no-quorum-validation.t
deleted file mode 100644
index e3258b29277..00000000000
--- a/tests/bugs/glusterd/bug-1345727-bricks-stop-on-no-quorum-validation.t
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-
-# Test case to check if bricks are down when quorum is not met
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-TEST launch_cluster 3
-
-TEST $CLI_1 peer probe $H2;
-TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-# Let's create the volume and set quorum type as server
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}2 $H3:$B3/${V0}3
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-
-# Start the volume
-TEST $CLI_1 volume start $V0
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
-
-# Bring down 2nd and 3rd glusterd
-TEST kill_glusterd 2
-TEST kill_glusterd 3
-EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
-
-# Server quorum is not met. Brick on 1st node must be down
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
-
-# Set quorum-ratio to 95, i.e. at least 95% of the nodes in the cluster must
-# be available to perform any volume operation.
-# In other words, server-side quorum is met if the number of nodes that are
-# available is greater than or equal to 'quorum-ratio' times the number of
-# nodes in the cluster.
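-# A worked example under these settings (hedged; the exact rounding lives in
-# glusterd's quorum code): with 3 nodes and quorum-ratio 95%, roughly
-# ceil(0.95 * 3) = 3 nodes must be up. So 2 of 3 glusterds up (~66%) still
-# leaves quorum unmet, and only when all 3 are up (100% >= 95%) should the
-# bricks come back, which is exactly what the steps below verify.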
-
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
-
-# Bring back 2nd glusterd
-TEST $glusterd_2
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Server quorum is still not met. Bricks should be down on 1st and 2nd nodes
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H2 $B2/${V0}2
-
-# Bring back 3rd glusterd
-TEST $glusterd_3
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-# Server quorum is met now. Bricks should be up on all nodes
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-1351021-rebalance-info-post-glusterd-restart.t b/tests/bugs/glusterd/bug-1351021-rebalance-info-post-glusterd-restart.t
deleted file mode 100755
index cb3206f7d49..00000000000
--- a/tests/bugs/glusterd/bug-1351021-rebalance-info-post-glusterd-restart.t
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-function get_rebalanced_info()
-{
- local rebal_info_key=$2
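-    # $2 is the awk column to print: 2=files, 3=size, 4=scanned, 5=failures,
-    # 6=skipped (see the OLD_*/NEW_* captures below); 'sed -n 3p' picks the
-    # status row for this node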
- $CLI volume rebalance $1 status | awk '{print $'$rebal_info_key'}' |sed -n 3p| sed 's/ *$//g'
-}
-
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
-TEST $CLI volume start $V0;
-
-#Mount volume and create data
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
-TEST mkdir $M0/dir{1..10}
-TEST touch $M0/dir{1..10}/file{1..10}
-
-# Add-brick and start rebalance
-TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4
-TEST $CLI volume rebalance $V0 start
-EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
-
-#Rebalance info before glusterd restart
-OLD_REBAL_FILES=$(get_rebalanced_info $V0 2)
-OLD_SIZE=$(get_rebalanced_info $V0 3)
-OLD_SCANNED=$(get_rebalanced_info $V0 4)
-OLD_FAILURES=$(get_rebalanced_info $V0 5)
-OLD_SKIPPED=$(get_rebalanced_info $V0 6)
-
-
-pkill glusterd;
-pkill glusterfsd;
-TEST glusterd
-
-#Rebalance info after glusterd restart
-NEW_REBAL_FILES=$(get_rebalanced_info $V0 2)
-NEW_SIZE=$(get_rebalanced_info $V0 3)
-NEW_SCANNED=$(get_rebalanced_info $V0 4)
-NEW_FAILURES=$(get_rebalanced_info $V0 5)
-NEW_SKIPPED=$(get_rebalanced_info $V0 6)
-
-#Check rebalance info before and after glusterd restart
-TEST [ $OLD_REBAL_FILES == $NEW_REBAL_FILES ]
-TEST [ $OLD_SIZE == $NEW_SIZE ]
-TEST [ $OLD_SCANNED == $NEW_SCANNED ]
-TEST [ $OLD_FAILURES == $NEW_FAILURES ]
-TEST [ $OLD_SKIPPED == $NEW_SKIPPED ]
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-1352277-spawn-daemons-on-two-node-setup.t b/tests/bugs/glusterd/bug-1352277-spawn-daemons-on-two-node-setup.t
deleted file mode 100644
index 53d8d34160e..00000000000
--- a/tests/bugs/glusterd/bug-1352277-spawn-daemons-on-two-node-setup.t
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# Test case for checking whether the brick process(es) come up on a two node
-# cluster if one of them is already down and other is going through a restart
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-TEST launch_cluster 2
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Let's create the volume
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}2
-
-# Start the volume
-TEST $CLI_1 volume start $V0
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
-
-# Bring down all the gluster processes
-TEST killall_gluster
-
-#Bring back 1st glusterd and check whether the brick process comes back
-TEST $glusterd_1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
-
-#Enabling quorum should bring down the brick
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1367478-volume-start-validation-after-glusterd-restart.t b/tests/bugs/glusterd/bug-1367478-volume-start-validation-after-glusterd-restart.t
deleted file mode 100644
index 4329c66474f..00000000000
--- a/tests/bugs/glusterd/bug-1367478-volume-start-validation-after-glusterd-restart.t
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-# Test case to check for successful startup of volume bricks on glusterd restart
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-TEST launch_cluster 2
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Let's create two replica 2 volumes
-TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}1 $H2:$B2/${V0}2
-TEST $CLI_1 volume create $V1 replica 2 $H1:$B1/${V1}1 $H2:$B2/${V1}2
-
-# Start the volume
-TEST $CLI_1 volume start $V0
-TEST $CLI_1 volume start $V1
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H1 $B1/${V1}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H2 $B2/${V1}2
-
-# Restart 2nd glusterd
-TEST kill_glusterd 2
-TEST $glusterd_2
-
-# Check if all bricks are up
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H1 $B1/${V1}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H2 $B2/${V1}2
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-1406411-fail-add-brick-on-replica-count-change.t b/tests/bugs/glusterd/bug-1406411-fail-add-brick-on-replica-count-change.t
deleted file mode 100644
index a9dd2b7a811..00000000000
--- a/tests/bugs/glusterd/bug-1406411-fail-add-brick-on-replica-count-change.t
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
-TEST $CLI volume start $V0
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
-TEST kill_brick $V0 $H0 $B0/${V0}1
-
-#add-brick should fail
-TEST ! $CLI_NO_FORCE volume add-brick $V0 replica 3 $H0:$B0/${V0}3
-
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
-TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
-
-TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
-TEST $CLI volume start $V1
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
-TEST kill_brick $V1 $H0 $B0/${V1}1
-
-#add-brick should fail
-TEST ! $CLI_NO_FORCE volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
-
-TEST $CLI volume start $V1 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
-TEST $CLI volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t b/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t
deleted file mode 100644
index 0bd9988f6be..00000000000
--- a/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-# Test case for checking that when the server-quorum-ratio value is changed on
-# one glusterd while the other is down, the other changes done get synced back
-# properly when that glusterd is brought back up.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-TEST launch_cluster 2
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Let's create & start the volume
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
-
-# Start the volume
-TEST $CLI_1 volume start $V0
-TEST $CLI_1 volume set $V0 performance.readdir-ahead on
-
-# Bring down 2nd glusterd
-TEST kill_glusterd 2
-
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
-TEST $CLI_1 volume set $V0 performance.readdir-ahead off
-
-# Bring back 2nd glusterd
-TEST $glusterd_2
-
-# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
-
-EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
-EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1433578-invalid-peer-glusterd-crash.t b/tests/bugs/glusterd/bug-1433578-invalid-peer-glusterd-crash.t
deleted file mode 100644
index 1aea8bc134d..00000000000
--- a/tests/bugs/glusterd/bug-1433578-invalid-peer-glusterd-crash.t
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd;
-TEST pidof glusterd;
-
-TEST ! $CLI peer probe invalid-peer
-
-TEST pidof glusterd;
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1444596_brick_mux_gd_status_restart.t b/tests/bugs/glusterd/bug-1444596_brick_mux_gd_status_restart.t
deleted file mode 100644
index 950cb5f8046..00000000000
--- a/tests/bugs/glusterd/bug-1444596_brick_mux_gd_status_restart.t
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-function count_up_bricks {
- $CLI --xml volume status $1 | grep '<status>1' | wc -l
-}
-
-function count_brick_processes {
- pgrep glusterfsd | wc -l
-}
-
-cleanup
-TEST glusterd
-TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
-TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
-
-TEST $CLI volume set all cluster.brick-multiplex on
-
-TEST $CLI volume start $V0
-TEST $CLI volume start $V1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
-EXPECT 1 count_brick_processes
-
-pkill glusterd
-TEST glusterd
-
-#Check brick status after restart glusterd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
-
-
-TEST $CLI volume stop $V0
-TEST $CLI volume stop $V1
-
-cleanup
-
-TEST glusterd
-TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
-TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
-
-TEST $CLI volume set all cluster.brick-multiplex on
-
-TEST $CLI volume start $V0
-TEST $CLI volume start $V1
-
-EXPECT 1 count_brick_processes
-
-TEST $CLI volume set $V0 performance.cache-size 32MB
-TEST $CLI volume stop $V0
-TEST $CLI volume start $V0
-
-#Check No. of brick processes after change option
-EXPECT 2 count_brick_processes
-
-pkill glusterd
-TEST glusterd
-
-#Check brick status after restart glusterd should not be NA
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
-EXPECT 2 count_brick_processes
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-1444596_brick_mux_posix_hlth_chk_status.t b/tests/bugs/glusterd/bug-1444596_brick_mux_posix_hlth_chk_status.t
deleted file mode 100644
index e082ba12173..00000000000
--- a/tests/bugs/glusterd/bug-1444596_brick_mux_posix_hlth_chk_status.t
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-function count_up_bricks {
- $CLI --xml volume status $1 | grep '<status>1' | wc -l
-}
-
-function count_brick_processes {
- pgrep glusterfsd | wc -l
-}
-
-cleanup
-TEST glusterd -LDEBUG
-TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
-TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
-
-TEST $CLI volume set all cluster.brick-multiplex on
-
-TEST $CLI volume start $V0
-TEST $CLI volume start $V1
-
-EXPECT 1 count_brick_processes
-
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0
-TEST rm -rf $B0/brick{0,1}
-
-#Check No. of brick processes after remove brick from back-end
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
-
-EXPECT 1 count_brick_processes
-
-TEST glusterfs -s $H0 --volfile-id $V1 $M0
-TEST touch $M0/file{1..10}
-
-pkill glusterd
-TEST glusterd -LDEBUG
-sleep 5
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
-
-
-cleanup
-
diff --git a/tests/bugs/glusterd/bug-1451248-mux-reboot-node.t b/tests/bugs/glusterd/bug-1451248-mux-reboot-node.t
deleted file mode 100644
index 5d8ce6e75e6..00000000000
--- a/tests/bugs/glusterd/bug-1451248-mux-reboot-node.t
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../traps.rc
-. $(dirname $0)/../../volume.rc
-
-function count_up_bricks {
- $CLI --xml volume status all | grep '<status>1' | wc -l
-}
-
-function count_brick_processes {
- pgrep glusterfsd | wc -l
-}
-
-function count_brick_pids {
- $CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
- | grep -v "N/A" | sort | uniq | wc -l
-}
-
-cleanup;
-
-TEST glusterd
-TEST $CLI volume set all cluster.brick-multiplex on
-push_trapfunc "$CLI volume set all cluster.brick-multiplex off"
-push_trapfunc "cleanup"
-
-TEST $CLI volume create $V0 $H0:$B0/brick{0..2}
-TEST $CLI volume start $V0
-
-EXPECT 1 count_brick_processes
-EXPECT 1 count_brick_pids
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
-
-pkill gluster
-TEST glusterd
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
-
-pkill glusterd
-TEST glusterd
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
-
-TEST $CLI volume create $V1 $H0:$B0/brick{3..5}
-TEST $CLI volume start $V1
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 count_up_bricks
-
diff --git a/tests/bugs/glusterd/bug-1454418-seg-fault.t b/tests/bugs/glusterd/bug-1454418-seg-fault.t
deleted file mode 100644
index eafaa55ede8..00000000000
--- a/tests/bugs/glusterd/bug-1454418-seg-fault.t
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-
-cleanup;
-
-## Setting Port number in specific range
-sysctl net.ipv4.ip_local_reserved_ports="24007-24008,32765-32768,49152-49156"
-
-## Start a 2 node virtual cluster
-TEST launch_cluster 2;
-
-
-## Peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-sysctl net.ipv4.ip_local_reserved_ports="
-"
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-1482344-volume-option-set-cluster-level.t b/tests/bugs/glusterd/bug-1482344-volume-option-set-cluster-level.t
deleted file mode 100644
index 481dee186b8..00000000000
--- a/tests/bugs/glusterd/bug-1482344-volume-option-set-cluster-level.t
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-#Test case: glusterd should disallow a volume-level option from being set
-#cluster-wide, and glusterd should not crash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-
-#Create a 2x1 distributed volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume start $V0
-
-TEST ! $CLI volume set all transport.listen-backlog 128
-
-# Check the volume info output, if glusterd would have crashed then this command
-# will fail
-TEST $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1483058-replace-brick-quorum-validation.t b/tests/bugs/glusterd/bug-1483058-replace-brick-quorum-validation.t
deleted file mode 100644
index 2d9e5287818..00000000000
--- a/tests/bugs/glusterd/bug-1483058-replace-brick-quorum-validation.t
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-
-# Test case for quorum validation in glusterd for syncop framework
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-cleanup;
-
-TEST launch_cluster 3
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-# Let's create the volume and set quorum type as server
-TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}0 $H2:$B2/${V0}1 $H3:$B3/${V0}2
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-
-# Start the volume
-TEST $CLI_1 volume start $V0
-
-# Set quorum-ratio to 95, i.e. at least 95% of the nodes in the cluster must
-# be available to perform any volume operation.
-# In other words, server-side quorum is met if the number of nodes that are
-# available is greater than or equal to 'quorum-ratio' times the number of
-# nodes in the cluster.
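-# Concretely: killing one of the 3 glusterds below leaves 2 of 3 nodes up
-# (~66%), which is under the 95% ratio, so cluster-wide operations such as
-# replace-brick must fail until the third node is back.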
-
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
-# Bring down 2nd glusterd
-TEST kill_glusterd 2
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Quorum is not met now. Execute the replace-brick command;
-# this command should fail as the cluster is not in quorum
-TEST ! $CLI_1 volume replace-brick $V0 $H2:$B2/${V0}1 $H1:$B1/${V0}1_new commit force
-
-# Bring 2nd glusterd up
-TEST start_glusterd 2
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-# checking peer_count is not enough to conclude that quorum is regained, as
-# peer_count is based on peerinfo->connected whereas quorum is calculated
-# based on peerinfo->quorum_contrib. To avoid the spurious race where
-# replace-brick commit force executes and fails before quorum is regained,
-# run the command inside EXPECT_WITHIN to ensure that, with multiple attempts,
-# the command goes through once quorum is regained.
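-# A rough sketch of the retry this amounts to (the real EXPECT_WITHIN helper
-# lives in include.rc):
-#   end=$((SECONDS + PEER_SYNC_TIMEOUT))
-#   while [ $SECONDS -lt $end ]; do
-#       [ "$(attempt_replace_brick 1 $V0 $H2:$B2/${V0}1 $H1:$B1/${V0}1_new)" = "0" ] && break
-#       sleep 1
-#   done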
-
-# Now quorum is met. replace-brick will execute successfully
-EXPECT_WITHIN $PEER_SYNC_TIMEOUT 0 attempt_replace_brick 1 $V0 $H2:$B2/${V0}1 $H1:$B1/${V0}1_new
-
-#cleanup;
diff --git a/tests/bugs/glusterd/bug-1499509-disconnect-in-brick-mux.t b/tests/bugs/glusterd/bug-1499509-disconnect-in-brick-mux.t
deleted file mode 100644
index 3c5bebee0c7..00000000000
--- a/tests/bugs/glusterd/bug-1499509-disconnect-in-brick-mux.t
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup
-
-TEST glusterd
-TEST pidof glusterd
-
-## Enable brick multiplexing
-TEST $CLI volume set all cluster.brick-multiplex on
-
-## creating 1x3 replicated volumes
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}_{1..3}
-TEST $CLI volume create $V1 replica 3 $H0:$B1/${V1}_{1..3}
-
-## Start the volume
-TEST $CLI volume start $V0
-TEST $CLI volume start $V1
-
-kill -9 $(pgrep glusterfsd)
-
-EXPECT 0 online_brick_count
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-1507466-reset-brick-commit-force.t b/tests/bugs/glusterd/bug-1507466-reset-brick-commit-force.t
deleted file mode 100644
index 764399dfab9..00000000000
--- a/tests/bugs/glusterd/bug-1507466-reset-brick-commit-force.t
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-cleanup;
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-TEST launch_cluster 3
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-
-TEST $CLI_1 volume create $V0 replica 2 $H1:$B0/${V0} $H2:$B0/${V0}
-TEST $CLI_1 volume start $V0
-
-# Negative case with brick not killed && volume-id xattrs present
-TEST ! $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
-
-TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} start
-# Now test if reset-brick commit force works
-TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t b/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t
deleted file mode 100755
index d943dcf5780..00000000000
--- a/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start and create a volume
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-
-## Verify volume is created
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-## Setting soft-timeout as 20
-TEST $CLI volume set $V0 features.soft-timeout 20
-EXPECT '20' volinfo_field $V0 'features.soft-timeout';
-
-## Enabling features.quota-deem-statfs should fail while quota is disabled
-TEST ! $CLI volume set $V0 features.quota-deem-statfs on
-EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
-
-## Enabling quota
-TEST $CLI volume quota $V0 enable
-EXPECT 'on' volinfo_field $V0 'features.quota'
-
-## Setting soft-timeout as 20
-TEST $CLI volume set $V0 features.soft-timeout 20
-EXPECT '20' volinfo_field $V0 'features.soft-timeout';
-
-## Enabling features.quota-deem-statfs
-TEST $CLI volume set $V0 features.quota-deem-statfs on
-EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
-
-## Disabling quota
-TEST $CLI volume quota $V0 disable
-EXPECT 'off' volinfo_field $V0 'features.quota'
-EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
-EXPECT '' volinfo_field $V0 'features.soft-timeout'
-
-## Setting soft-timeout as 30
-TEST $CLI volume set $V0 features.soft-timeout 30
-EXPECT '30' volinfo_field $V0 'features.soft-timeout';
-
-## Toggling features.quota-deem-statfs should fail once quota is disabled
-TEST ! $CLI volume set $V0 features.quota-deem-statfs off
-EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
-
-## Finish up
-TEST $CLI volume stop $V0
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-782095.t b/tests/bugs/glusterd/bug-782095.t
deleted file mode 100755
index dd8a8dc3026..00000000000
--- a/tests/bugs/glusterd/bug-782095.t
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start and create a volume
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-
-## Verify volume is created
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-## Setting performance cache min size as 2MB
-TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
-EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
-
-## Setting performance cache max size as 20MB
-TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
-EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
-
-## Trying to set performance cache min size as 25MB
-TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
-EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
-
-## Able to set performance cache min size as long as its lesser than max size
-TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
-EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
-
-## Trying it out with only cache-max-file-size in CLI as 10MB
-TEST ! $CLI volume set $V0 cache-max-file-size 10MB
-EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
-
-## Finish up
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-839595.t b/tests/bugs/glusterd/bug-839595.t
deleted file mode 100644
index b2fe9789a8c..00000000000
--- a/tests/bugs/glusterd/bug-839595.t
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}1
-TEST $CLI volume set $V0 cluster.server-quorum-type server
-EXPECT "server" volume_option $V0 cluster.server-quorum-type
-TEST $CLI volume set $V0 cluster.server-quorum-type none
-EXPECT "none" volume_option $V0 cluster.server-quorum-type
-TEST $CLI volume reset $V0 cluster.server-quorum-type
-TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
-TEST ! $CLI volume set all cluster.server-quorum-type none
-TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
-
-TEST ! $CLI volume set all cluster.server-quorum-ratio abc
-TEST ! $CLI volume set all cluster.server-quorum-ratio -1
-TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
-TEST $CLI volume set all cluster.server-quorum-ratio 0
-EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
-TEST $CLI volume set all cluster.server-quorum-ratio 100
-EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
-TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
-EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
-TEST $CLI volume set all cluster.server-quorum-ratio 100%
-EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
-cleanup;
diff --git a/tests/bugs/glusterd/bug-859927.t b/tests/bugs/glusterd/bug-859927.t
deleted file mode 100755
index c30d2b852d4..00000000000
--- a/tests/bugs/glusterd/bug-859927.t
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-cleanup;
-
-TEST glusterd;
-
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-
-TEST ! $CLI volume set $V0 statedump-path ""
-TEST ! $CLI volume set $V0 statedump-path " "
-TEST $CLI volume set $V0 statedump-path "/home/"
-EXPECT "/home/" volume_option $V0 server.statedump-path
-
-TEST ! $CLI volume set $V0 background-self-heal-count ""
-TEST ! $CLI volume set $V0 background-self-heal-count " "
-TEST $CLI volume set $V0 background-self-heal-count 10
-EXPECT "10" volume_option $V0 cluster.background-self-heal-count
-
-TEST ! $CLI volume set $V0 cache-size ""
-TEST ! $CLI volume set $V0 cache-size " "
-TEST $CLI volume set $V0 cache-size 512MB
-EXPECT "512MB" volume_option $V0 performance.cache-size
-
-TEST ! $CLI volume set $V0 self-heal-daemon ""
-TEST ! $CLI volume set $V0 self-heal-daemon " "
-TEST $CLI volume set $V0 self-heal-daemon on
-EXPECT "on" volume_option $V0 cluster.self-heal-daemon
-
-TEST ! $CLI volume set $V0 read-subvolume ""
-TEST ! $CLI volume set $V0 read-subvolume " "
-TEST $CLI volume set $V0 read-subvolume $V0-client-0
-EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
-
-TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
-TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
-TEST ! $CLI volume set $V0 data-self-heal-algorithm on
-TEST $CLI volume set $V0 data-self-heal-algorithm full
-EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
-
-TEST ! $CLI volume set $V0 min-free-inodes ""
-TEST ! $CLI volume set $V0 min-free-inodes " "
-TEST $CLI volume set $V0 min-free-inodes 60%
-EXPECT "60%" volume_option $V0 cluster.min-free-inodes
-
-TEST ! $CLI volume set $V0 min-free-disk ""
-TEST ! $CLI volume set $V0 min-free-disk " "
-TEST $CLI volume set $V0 min-free-disk 60%
-EXPECT "60%" volume_option $V0 cluster.min-free-disk
-
-TEST $CLI volume set $V0 min-free-disk 120
-EXPECT "120" volume_option $V0 cluster.min-free-disk
-
-TEST ! $CLI volume set $V0 frame-timeout ""
-TEST ! $CLI volume set $V0 frame-timeout " "
-TEST $CLI volume set $V0 frame-timeout 0
-EXPECT "0" volume_option $V0 network.frame-timeout
-
-TEST ! $CLI volume set $V0 auth.allow ""
-TEST ! $CLI volume set $V0 auth.allow " "
-TEST $CLI volume set $V0 auth.allow 192.168.122.1
-EXPECT "192.168.122.1" volume_option $V0 auth.allow
-
-TEST ! $CLI volume set $V0 stripe-block-size ""
-TEST ! $CLI volume set $V0 stripe-block-size " "
-TEST $CLI volume set $V0 stripe-block-size 512MB
-EXPECT "512MB" volume_option $V0 cluster.stripe-block-size
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-862834.t b/tests/bugs/glusterd/bug-862834.t
deleted file mode 100755
index ac2f956a1ed..00000000000
--- a/tests/bugs/glusterd/bug-862834.t
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-V1="patchy2"
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-
-function check_brick()
-{
- vol=$1;
- num=$2
- $CLI volume info $V0 | grep "Brick$num" | awk '{print $2}';
-}
-
-function volinfo_field()
-{
- local vol=$1;
- local field=$2;
-
- $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
-}
-
-function brick_count()
-{
- local vol=$1;
-
- $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
-}
-
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-EXPECT '2' brick_count $V0
-
-
-EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
-EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
-
-TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-878004.t b/tests/bugs/glusterd/bug-878004.t
deleted file mode 100644
index 8abada3c3b3..00000000000
--- a/tests/bugs/glusterd/bug-878004.t
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3;
-
-function brick_count()
-{
- local vol=$1;
-
- $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
-}
-
-
-TEST $CLI volume start $V0
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
-EXPECT '2' brick_count $V0
-
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
-EXPECT '1' brick_count $V0
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-888752.t b/tests/bugs/glusterd/bug-888752.t
deleted file mode 100644
index ed0602e34e2..00000000000
--- a/tests/bugs/glusterd/bug-888752.t
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-# Check if xml output is generated correctly for volume status for a single brick
-# present on another peer and no async tasks are running.
-
-function get_peer_count {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-cleanup
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 get_peer_count
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-
-TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
-
-TEST $CLI_1 volume stop $V0
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-889630.t b/tests/bugs/glusterd/bug-889630.t
deleted file mode 100755
index 4fefd94d66f..00000000000
--- a/tests/bugs/glusterd/bug-889630.t
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function volume_count {
- local cli=$1;
- if [ $cli -eq '1' ] ; then
- $CLI_1 volume info | grep 'Volume Name' | wc -l;
- else
- $CLI_2 volume info | grep 'Volume Name' | wc -l;
- fi
-}
-
-cleanup;
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-
-b="B1";
-
-#Create an extra file in the originator's volume store
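-# (${!b} is bash indirect expansion: it yields the value of $B1, the
-# originator's brick root)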
-touch ${!b}/glusterd/vols/$V0/run/file
-
-TEST $CLI_1 volume stop $V0
-#Test for self-commit failure
-TEST $CLI_1 volume delete $V0
-
-#Check whether delete succeeded on both the nodes
-EXPECT "0" volume_count '1'
-EXPECT "0" volume_count '2'
-
-#Check whether the volume name can be reused after deletion
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
-TEST $CLI_1 volume start $V0
-
-#Create an extra file in the peer's volume store
-touch ${!b}/glusterd/vols/$V0/run/file
-
-TEST $CLI_1 volume stop $V0
-#Test for commit failure on the other node
-TEST $CLI_2 volume delete $V0
-
-EXPECT "0" volume_count '1';
-EXPECT "0" volume_count '2';
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-905307.t b/tests/bugs/glusterd/bug-905307.t
deleted file mode 100644
index dd1c1bc0795..00000000000
--- a/tests/bugs/glusterd/bug-905307.t
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-#test functionality of post-op-delay-secs
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-
-#Strings should not be accepted.
-TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
-
-#-ve ints should not be accepted.
-TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
-
-#INT_MAX+1 should not be accepted.
-TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
-
-#floats should not be accepted.
-TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
-
-#min val 0 should be accepted
-TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
-EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
-
-#max val 2147483647 should be accepted
-TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
-EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
-
-#some middle val in range 2147 should be accepted
-TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
-EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
-cleanup;
diff --git a/tests/bugs/glusterd/bug-913487.t b/tests/bugs/glusterd/bug-913487.t
deleted file mode 100644
index 9c616ea28fb..00000000000
--- a/tests/bugs/glusterd/bug-913487.t
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd;
-
-TEST ! $CLI volume set $V0 performance.open-behind off;
-
-TEST pidof glusterd;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-913555.t b/tests/bugs/glusterd/bug-913555.t
deleted file mode 100755
index 9bc875340d1..00000000000
--- a/tests/bugs/glusterd/bug-913555.t
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-# Test that a volume becomes unwritable when the cluster loses quorum.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-function check_fs {
- df $1 &> /dev/null
- echo $?
-}
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function online_brick_count {
- $CLI_1 --xml volume status | grep '<status>1' | wc -l
-}
-
-cleanup;
-
-TEST launch_cluster 3; # start 3-node virtual cluster
-TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-TEST $CLI_1 volume start $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count;
-
-TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
-
-# Kill one pseudo-node, make sure the others survive and volume stays up.
-TEST kill_node 3;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
-
-# Kill another pseudo-node, make sure the last one dies and volume goes down.
-TEST kill_node 2;
-EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
-# with quorum lost, the brick on the surviving node must also be dead
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 online_brick_count;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_fs $M0;
-
-TEST $glusterd_2;
-TEST $glusterd_3;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count; # restore quorum, all ok
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-916549.t b/tests/bugs/glusterd/bug-916549.t
deleted file mode 100755
index 6e3612dce94..00000000000
--- a/tests/bugs/glusterd/bug-916549.t
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd;
-TEST $CLI volume create $V0 $H0:$B0/${V0}1;
-TEST $CLI volume start $V0;
-
-pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/);
-brick_pid=$(cat $GLUSTERD_PIDFILEDIR/vols/$V0/$pid_file);
-
-
-kill -SIGKILL $brick_pid;
-TEST $CLI volume start $V0 force;
-TEST process_leak_count $(pidof glusterd);
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-948686.t b/tests/bugs/glusterd/bug-948686.t
deleted file mode 100755
index dfe11ff153f..00000000000
--- a/tests/bugs/glusterd/bug-948686.t
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-cleanup;
-#setup cluster and test volume
-TEST launch_cluster 3; # start 3-node virtual cluster
-TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
-
-TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
-TEST $CLI_1 volume start $V0
-TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
-
-#kill a node
-TEST kill_node 3
-
-#modify volume config to see change in volume-sync
-TEST $CLI_1 volume set $V0 write-behind off
-#add some files to the volume to see effect of volume-heal cmd
-TEST touch $M0/{1..100};
-TEST $CLI_1 volume stop $V0;
-TEST $glusterd_3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
-TEST $CLI_3 volume start $V0;
-TEST $CLI_2 volume stop $V0;
-TEST $CLI_2 volume delete $V0;
-
-cleanup;
-
-TEST glusterd;
-TEST $CLI volume create $V0 $H0:$B0/$V0
-TEST $CLI volume start $V0
-pkill glusterd;
-pkill glusterfsd;
-TEST glusterd
-TEST $CLI volume status $V0
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-955588.t b/tests/bugs/glusterd/bug-955588.t
deleted file mode 100755
index 028a34edd7d..00000000000
--- a/tests/bugs/glusterd/bug-955588.t
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-function get_brick_host_uuid()
-{
- local vol=$1;
- local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
- local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
-
- echo $host_uuid_list | awk '{print $1}'
-}
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-
-uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
-EXPECT $uuid get_brick_host_uuid $V0
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-958790.t b/tests/bugs/glusterd/bug-958790.t
deleted file mode 100644
index 39be0a19137..00000000000
--- a/tests/bugs/glusterd/bug-958790.t
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume info;
-
-touch $GLUSTERD_WORKDIR/groups/test
-echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
-echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume set $V0 group test
-EXPECT "off" volume_option $V0 performance.read-ahead
-EXPECT "off" volume_option $V0 performance.open-behind
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-961669.t b/tests/bugs/glusterd/bug-961669.t
deleted file mode 100644
index b02f2f50af1..00000000000
--- a/tests/bugs/glusterd/bug-961669.t
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-#Test case: Fail remove-brick 'start' variant when reducing the replica count of a volume.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a 3x3 dist-rep volume
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8};
-TEST $CLI volume start $V0
-
-# Mount FUSE and create file/directory
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-TEST touch $M0/zerobytefile.txt
-TEST mkdir $M0/test_dir
-TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
-
-function remove_brick_start {
- $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} start 2>&1|grep -oE 'success|failed'
-}
-
-function remove_brick {
- $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} force 2>&1|grep -oE 'success|failed'
-}
-
-#remove-brick start variant
-#Actual message displayed at cli is:
-#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
-EXPECT "failed" remove_brick_start;
-
-#remove-brick commit-force
-#Actual message displayed at cli is:
-#"volume remove-brick commit force: success"
-EXPECT "success" remove_brick
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-963541.t b/tests/bugs/glusterd/bug-963541.t
deleted file mode 100755
index ff94db3e6ef..00000000000
--- a/tests/bugs/glusterd/bug-963541.t
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
-TEST $CLI volume start $V0;
-
-# Start a remove-brick and try to start a rebalance/remove-brick without committing
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
-
-TEST ! $CLI volume rebalance $V0 start
-TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field \
-"$V0" "$H0:$B0/${V0}1"
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
-
-gluster volume status
-
-TEST $CLI volume rebalance $V0 start
-EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
-TEST $CLI volume rebalance $V0 stop
-
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
-
-TEST $CLI volume stop $V0
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-964059.t b/tests/bugs/glusterd/bug-964059.t
deleted file mode 100755
index 7b4f60454b8..00000000000
--- a/tests/bugs/glusterd/bug-964059.t
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function volume_count {
- local cli=$1;
- if [ $cli -eq '1' ] ; then
- $CLI_1 volume info | grep 'Volume Name' | wc -l;
- else
- $CLI_2 volume info | grep 'Volume Name' | wc -l;
- fi
-}
-
-cleanup;
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/$V0 start
-TEST $CLI_1 volume status
-cleanup;
diff --git a/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t b/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t
new file mode 100644
index 00000000000..9e05e8150d6
--- /dev/null
+++ b/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
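+# Count peers in 'Peer in Cluster (Connected)' state as seen from node $1;
+# eval resolves \$CLI_$1 to the matching $CLI_<n> handle from cluster.rc.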
+function peer_count {
+    eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+## start a 3 node virtual cluster
+TEST launch_cluster 3;
+
+## peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+#test case for bug 1266818 - disabling enable-shared-storage option
+##should not delete user created volume with name glusterd_shared_storage
+
+## creating a volume with name glusterd_shared_storage
+TEST $CLI_1 volume create glusterd_shared_storage $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume start glusterd_shared_storage
+
+## disabling enable-shared-storage should not succeed and should not delete the
+## user created volume with name "glusterd_shared_storage"
+TEST ! $CLI_1 volume all enable-shared-storage disable
+
+## volume with name should exist
+TEST $CLI_1 volume info glusterd_shared_storage
+
+#testcase: bug-1245045-remove-brick-validation
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+kill_glusterd 2
+
+#remove-brick should fail as the peer hosting the brick is down
+TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#volume status should work
+TEST $CLI_2 volume status
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+kill_glusterd 2
+
+#remove-brick commit should fail as the peer hosting the brick is down
+TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} commit
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#volume status should work
+TEST $CLI_2 volume status
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} stop
+
+kill_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+
+TEST start_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+TEST $CLI_3 volume status
+
+cleanup
diff --git a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..fdc0a73f60c
--- /dev/null
+++ b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
@@ -0,0 +1,59 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+    $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2
+
+#bug-1109741 - validate mgmt handshake
+
+TEST ! $CLI_3 peer probe $H1
+
+GD1_WD=$($CLI_1 system getwd)
+OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+
+TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
+
+GD3_WD=$($CLI_3 system getwd)
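+# Rewrite the operating-version recorded in node 3's glusterd.info to 30600
+# (the 3.6.0 op-version) before restarting it.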
+TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
+
+TEST kill_glusterd 3
+TEST start_glusterd 3
+
+TEST ! $CLI_3 peer probe $H1
+
+OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
+
+#bug-948686 - volume sync after bringing up the killed node
+
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+#kill a node
+TEST kill_node 3
+
+#modify volume config to see change in volume-sync
+TEST $CLI_1 volume set $V0 write-behind off
+#add some files to the volume to see effect of volume-heal cmd
+TEST touch $M0/{1..100};
+TEST $CLI_1 volume stop $V0;
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
+TEST $CLI_3 volume start $V0;
+TEST $CLI_2 volume stop $V0;
+TEST $CLI_2 volume delete $V0;
+
+cleanup
diff --git a/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t b/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t
new file mode 100644
index 00000000000..a55e7cb7c46
--- /dev/null
+++ b/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function peer_count {
+    eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+#bug-1454418 - Setting Port number in specific range
+sysctl net.ipv4.ip_local_reserved_ports="24007-24008,32765-32768,49152-49156"
+
+TEST launch_cluster 3;
+
+#bug-1223213
+
+# Fool the cluster into operating with the 3.5 op-version even though the
+# binary's op-version is > 3.5. This ensures the 3.5 code path is hit, to test
+# that volume status works when a node is upgraded from 3.5 to 3.7 or higher,
+# since the mgmt_v3 lock was only introduced in 3.6.
+
+GD1_WD=$($CLI_1 system getwd)
+$CLI_1 system uuid get
+Old_op_version=$(grep operating-version ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+
+TEST sed -rnie "'s/(operating-version=)\w+/\130500/gip'" ${GD1_WD}/glusterd.info
+
+TEST kill_glusterd 1
+TEST start_glusterd 1
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+TEST sed -i "s/30500/${Old_op_version}/g" ${GD1_WD}/glusterd.info
+
+TEST kill_glusterd 1
+TEST start_glusterd 1
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+#bug-1454418
+sysctl net.ipv4.ip_local_reserved_ports=""
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+#bug-888752 - volume status --xml from peer in the cluster
+
+TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume start $V1
+
+#bug-1173414 - validate mgmt-v3-remote-lock-failure
+
+for i in {1..20}
+do
+$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_1 volume set $V1 barrier on
+$CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_2 volume set $V1 barrier on
+done
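+# The backgrounded set commands race against the barrier sets from both CLIs
+# to exercise concurrent mgmt-v3 locking; the commands themselves are not
+# TESTed, we only require that both glusterds stay responsive (checked below).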
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+TEST $CLI_1 volume status
+TEST $CLI_2 volume status
+
+#bug-1293414 - validate peer detach
+
+# peers hosting bricks cannot be detached
+TEST ! $CLI_2 peer detach $H1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+# peer not hosting bricks should be detachable
+TEST $CLI_2 peer detach $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+#bug-1344407 - deleting a volume when peer is down should fail
+
+TEST kill_glusterd 2
+TEST ! $CLI_1 volume delete $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
new file mode 100644
index 00000000000..b9fba4cdefb
--- /dev/null
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
@@ -0,0 +1,273 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+function get_opret_value () {
+ local VOL=$1
+ $CLI volume info $VOL --xml | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'
+}
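+# The --xml output carries the operation's return code, e.g. <opRet>0</opRet>
+# on success and <opRet>-1</opRet> for a non-existent volume.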
+
+function check_brick()
+{
+ vol=$1;
+ num=$2
+    $CLI volume info $vol | grep "Brick$num" | awk '{print $2}';
+}
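+# e.g. "check_brick $V0 1" prints the brick path listed against "Brick1" in
+# the volume info output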
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+function get_brick_host_uuid()
+{
+ local vol=$1;
+ local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+ local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
+
+ echo $host_uuid_list | awk '{print $1}'
+}
+
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+#bug-1238135-lazy-daemon-initialization-on-demand
+
+GDWD=$($CLI system getwd)
+
+# glusterd.info is created on either the first peer probe or volume creation,
+# hence we expect the file to be absent at this point
+TEST ! -e $GDWD/glusterd.info
+
+#bug-913487 - setting volume options before creation of volume should fail
+
+TEST ! $CLI volume set $V0 performance.open-behind off;
+TEST pidof glusterd;
+
+#bug-1433578 - glusterd should not crash after probing an invalid peer
+
+TEST ! $CLI peer probe invalid-peer
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+#bug-955588 - uuid validation
+
+uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
+EXPECT $uuid get_brick_host_uuid $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+#bug-958790 - set options from file
+
+touch $GLUSTERD_WORKDIR/groups/test
+echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
+echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 group test
+EXPECT "off" volume_option $V0 performance.read-ahead
+EXPECT "off" volume_option $V0 performance.open-behind
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#bug-1321836 - validate opret value for non existing volume
+
+EXPECT 0 get_opret_value $V0
+EXPECT -1 get_opret_value "novol"
+
+EXPECT '2' brick_count $V0
+
+#bug-862834 - validate brick status
+
+EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
+EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
+
+TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
+
+#bug-1482344 - setting volume-option-at-cluster-level should not result in glusterd crash
+
+TEST ! $CLI volume set all transport.listen-backlog 128
+
+# Check the volume info output; if glusterd had crashed, this command
+# would fail
+TEST $CLI volume info $V0;
+
+#bug-1002556 and bug-1199451 - command should retrieve current op-version of the node
+TEST $CLI volume get all cluster.op-version
+
+#bug-1315186 - reject-lowering-down-op-version
+
+OP_VERS_ORIG=$(grep 'operating-version' ${GDWD}/glusterd.info | cut -d '=' -f 2)
+OP_VERS_NEW=$(expr $OP_VERS_ORIG - 1)
+
+TEST ! $CLI volume set all cluster.op-version $OP_VERS_NEW
+
+#bug-1022055 - validate log rotate command
+
+TEST $CLI volume log rotate $V0;
+
+#bug-1092841 - validating barrier enable/disable
+
+TEST $CLI volume barrier $V0 enable;
+TEST ! $CLI volume barrier $V0 enable;
+
+TEST $CLI volume barrier $V0 disable;
+TEST ! $CLI volume barrier $V0 disable;
+
+#bug-1095097 - validate volume profile command
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info
+
+#bug-839595 - validate server-quorum options
+
+TEST $CLI volume set $V0 cluster.server-quorum-type server
+EXPECT "server" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume set $V0 cluster.server-quorum-type none
+EXPECT "none" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume reset $V0 cluster.server-quorum-type
+TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
+TEST ! $CLI volume set all cluster.server-quorum-type none
+TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
+
+TEST ! $CLI volume set all cluster.server-quorum-ratio abc
+TEST ! $CLI volume set all cluster.server-quorum-ratio -1
+TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
+TEST $CLI volume set all cluster.server-quorum-ratio 0
+EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100
+EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
+EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100%
+EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
+
+#bug-1265479 - validate-distributed-volume-options
+
+#Setting the data-self-heal option on for a distribute volume should fail
+TEST ! $CLI volume set $V0 data-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
+TEST ! $CLI volume set $V0 cluster.data-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
+
+#Setting the metadata-self-heal option on for a distribute volume should fail
+TEST ! $CLI volume set $V0 metadata-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
+TEST ! $CLI volume set $V0 cluster.metadata-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
+
+#Setting the entry-self-heal option on for a distribute volume should fail
+TEST ! $CLI volume set $V0 entry-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.entry-self-heal';
+TEST ! $CLI volume set $V0 cluster.entry-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.entry-self-heal';
+
+#bug-1163108 - validate min-free-disk-option
+
+## Setting invalid value for option cluster.min-free-disk should fail
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk 143.!/12
+TEST ! $CLI volume set $V0 min-free-disk 123%
+TEST ! $CLI volume set $V0 min-free-disk 194.34%
+
+## Setting fractional value as a size (unit is byte) for option
+## cluster.min-free-disk should fail
+TEST ! $CLI volume set $V0 min-free-disk 199.051
+TEST ! $CLI volume set $V0 min-free-disk 111.999
+
+## Setting valid value for option cluster.min-free-disk should pass
+TEST $CLI volume set $V0 min-free-disk 12%
+TEST $CLI volume set $V0 min-free-disk 56.7%
+TEST $CLI volume set $V0 min-free-disk 120
+TEST $CLI volume set $V0 min-free-disk 369.0000
+
+#bug-1179175-uss-option-validation
+
+## Set the features.uss option with non-boolean values. These non-boolean
+## values for the features.uss option should fail.
+TEST ! $CLI volume set $V0 features.uss abcd
+TEST ! $CLI volume set $V0 features.uss '#$#$'
+TEST ! $CLI volume set $V0 features.uss 2324
+
+## Setting other options with valid value. These options should succeed.
+TEST $CLI volume set $V0 barrier enable
+TEST $CLI volume set $V0 ping-timeout 60
+
+## Set features.uss option with valid boolean value. It should succeed.
+TEST $CLI volume set $V0 features.uss enable
+TEST $CLI volume set $V0 features.uss disable
+
+#bug-1209329 - daemon-svcs-on-reset-volume
+
+## enable bitrot and verify whether bitd is running
+TEST $CLI volume bitrot $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.bitrot'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+## Do reset force, which sets the bitrot options back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_bitd_count
+
+## enable the uss option and verify whether snapd is running
+TEST $CLI volume set $V0 features.uss on
+EXPECT 'on' volinfo_field $V0 'features.uss'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
+
+## Do reset force, which sets the uss options back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
+
+## verify nfs is disabled by default
+EXPECT "0" get_nfs_count
+
+##enable nfs and verify
+TEST $CLI volume set $V0 nfs.disable off
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+EXPECT "1" get_nfs_count
+
+## Do reset force, which sets the nfs options back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
+
+## enable the uss option again and verify whether snapd is running
+TEST $CLI volume set $V0 features.uss on
+EXPECT 'on' volinfo_field $V0 'features.uss'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
+
+##Disable the uss option using set command and verify snapd
+TEST $CLI volume set $V0 features.uss off
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
+
+##enable nfs.disable and verify
+TEST $CLI volume set $V0 nfs.disable on
+EXPECT 'on' volinfo_field $V0 'nfs.disable'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
+
+## disable nfs.disable option using set command
+TEST $CLI volume set $V0 nfs.disable off
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
+
+TEST $CLI volume info;
+TEST $CLI volume create $V1 $H0:$B0/${V1}1
+TEST $CLI volume start $V1
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+TEST $CLI volume status $V1
+
+cleanup
diff --git a/tests/bugs/glusterd/quorum-validation.t b/tests/bugs/glusterd/quorum-validation.t
new file mode 100644
index 00000000000..ab7c1adc1c3
--- /dev/null
+++ b/tests/bugs/glusterd/quorum-validation.t
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+
+#bug-1177132 - sync server quorum options when a node is brought up
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
+
+#Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+#bug-1104642 - sync server quorum options when a node is brought up
+#set the volume all options from the 1st glusterd
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
+
+# Quorum is not met now. Add-brick, remove-brick and volume-set commands
+# (commands based on the syncop framework) should fail
+TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
+TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
+TEST ! $CLI_1 volume set $V0 barrier enable
+
+# Now execute a command which goes through the op state machine; it should fail
+TEST ! $CLI_1 volume profile $V0 start
+
+#Bring back the 2nd glusterd
+TEST $glusterd_2
+
+#verify whether the value has been synced
+EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_1 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_2 all cluster.server-quorum-ratio
+
+# Now quorum is met.
+# Add-brick, remove-brick and volume-set commands should succeed
+TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
+TEST $CLI_1 volume set $V0 barrier enable
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}1
+
+## Stop the volume
+TEST $CLI_1 volume stop $V0
+
+## Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+## Quorum is not met now. Starting the volume on the 1st node should not succeed
+TEST ! $CLI_1 volume start $V0
+
+## Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+## Now quorum is met. Starting the volume on the 1st node should succeed.
+TEST $CLI_1 volume start $V0
+
+# Now re-execute the same profile command and this time it should succeed
+TEST $CLI_1 volume profile $V0 start
+
+#bug-1352277
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}1
+
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type none
+
+# Bring down all the gluster processes
+TEST killall_gluster
+
+#bring back 1st glusterd and check whether the brick process comes back
+TEST $glusterd_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
+
+#enabling quorum should bring down the brick
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}0
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}1
+
+#bug-1367478 - brick processes should not be up when quorum is not met
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}1 $H2:$B2/${V1}2
+TEST $CLI_1 volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H1 $B1/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H2 $B2/${V1}2
+
+# Restart 2nd glusterd
+TEST kill_glusterd 2
+TEST $glusterd_2
+
+# Check if all bricks are up
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H1 $B1/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H2 $B2/${V1}2
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1245142-rebalance_test.t b/tests/bugs/glusterd/rebalance-in-cluster.t
index a28810ea71c..9565faef01d 100644
--- a/tests/bugs/glusterd/bug-1245142-rebalance_test.t
+++ b/tests/bugs/glusterd/rebalance-in-cluster.t
@@ -4,7 +4,6 @@
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
-
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
@@ -17,6 +16,21 @@ EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
$CLI_1 volume start $V0
EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+#bug-1231437
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
+
+TEST $CLI_1 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+
+#bug-1245142
+
$CLI_1 volume rebalance $V0 start &
#kill glusterd2 after requst sent, so that call back is called
#with rpc->status fail ,so roughly 1sec delay is introduced to get this scenario.
@@ -26,3 +40,4 @@ kill_glusterd 2
EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
cleanup;
+
diff --git a/tests/bugs/glusterd/rebalance-operations-in-single-node.t b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
new file mode 100644
index 00000000000..c0823afebb8
--- /dev/null
+++ b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function get_rebalanced_info()
+{
+ local rebal_info_key=$2
+ $CLI volume rebalance $1 status | awk '{print $'$rebal_info_key'}' |sed -n 3p| sed 's/ *$//g'
+}
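+# Column layout of the per-node "rebalance status" line, as consumed below:
+# 2 = rebalanced files, 3 = size, 4 = scanned, 5 = failures, 6 = skipped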
+
+volname="StartMigrationDuringRebalanceTest"
+TEST glusterd
+TEST pidof glusterd;
+
+TEST $CLI volume info;
+TEST $CLI volume create $volname $H0:$B0/${volname}{1..4};
+TEST $CLI volume start $volname;
+
+#bug-1046308 - validate rebalance on a specified volume name
+TEST $CLI volume rebalance $volname start;
+
+#bug-1089668 - validation of rebalance status and remove brick status
+#bug-963541 - after remove-brick start, rebalance/remove-brick start without committing should fail
+
+TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}1 status
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
+
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 start
+TEST ! $CLI volume rebalance $volname start
+TEST ! $CLI volume rebalance $volname status
+TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field \
+"$volname" "$H0:$B0/${volname}1"
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 commit
+
+TEST $CLI volume rebalance $volname start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
+TEST $CLI volume rebalance $volname stop
+
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 stop
+
+#bug-1351021-rebalance-info-post-glusterd-restart
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+TEST $CLI volume start $V0;
+
+#Mount volume and create data
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST mkdir $M0/dir{1..10}
+TEST touch $M0/dir{1..10}/file{1..10}
+
+# Add-brick and start rebalance
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4
+TEST $CLI volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Rebalance info before glusterd restart
+OLD_REBAL_FILES=$(get_rebalanced_info $V0 2)
+OLD_SIZE=$(get_rebalanced_info $V0 3)
+OLD_SCANNED=$(get_rebalanced_info $V0 4)
+OLD_FAILURES=$(get_rebalanced_info $V0 5)
+OLD_SKIPPED=$(get_rebalanced_info $V0 6)
+
+
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+
+#Rebalance info after glusterd restart
+NEW_REBAL_FILES=$(get_rebalanced_info $V0 2)
+NEW_SIZE=$(get_rebalanced_info $V0 3)
+NEW_SCANNED=$(get_rebalanced_info $V0 4)
+NEW_FAILURES=$(get_rebalanced_info $V0 5)
+NEW_SKIPPED=$(get_rebalanced_info $V0 6)
+#Check rebalance info before and after glusterd restart
+TEST [ $OLD_REBAL_FILES == $NEW_REBAL_FILES ]
+TEST [ $OLD_SIZE == $NEW_SIZE ]
+TEST [ $OLD_SCANNED == $NEW_SCANNED ]
+TEST [ $OLD_FAILURES == $NEW_FAILURES ]
+TEST [ $OLD_SKIPPED == $NEW_SKIPPED ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#bug-1004744 - validation of rebalance fix layout
+
+TEST $CLI volume start $V0 force
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+for i in `seq 11 20`;
+do
+ mkdir $M0/dir_$i
+ echo file>$M0/dir_$i/file_$i
+ for j in `seq 1 100`;
+ do
+ mkdir $M0/dir_$i/dir_$j
+ echo file>$M0/dir_$i/dir_$j/file_$j
+ done
+done
+
+#add 2 bricks
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{5,6};
+
+#perform rebalance fix-layout
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0;
+
+#bug-1075087 - rebalance post add brick
+TEST mkdir $M0/dir{21..30};
+TEST touch $M0/dir{21..30}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{7,8}
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
+
+TEST pkill gluster
+TEST glusterd
+TEST pidof glusterd
+
+# status should be "completed" immediately after glusterd has respawned.
+EXPECT_WITHIN 5 "completed" rebalance_status_field $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1230121-replica_subvol_count_correct_cal.t b/tests/bugs/glusterd/remove-brick-in-cluster.t
index 71d98e18491..de94220a906 100644
--- a/tests/bugs/glusterd/bug-1230121-replica_subvol_count_correct_cal.t
+++ b/tests/bugs/glusterd/remove-brick-in-cluster.t
@@ -1,23 +1,32 @@
#!/bin/bash
-## Test case for BZ:1230121 glusterd crashed while trying to remove a bricks
-## one selected from each replica set - after shrinking nX3 to nX2 to nX1
-
. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
-## Start a 2 node virtual cluster
TEST launch_cluster 2;
-TEST pidof glusterd
-## Peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H2;
+#bug-1047955 - remove brick from new peer in cluster
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
+TEST $CLI_1 volume start $V0;
+TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
+TEST $CLI_2 volume info
+
+#bug-964059 - volume status post remove brick start
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}0 $H2:$B2/${V1}1
+TEST $CLI_1 volume start $V1
+TEST $CLI_1 volume remove-brick $V1 $H2:$B2/${V1}1 start
+TEST $CLI_1 volume status
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+#bug-1230121 - decrease replica count by remove-brick and increase by add-brick
## Creating a 2x3 replicate volume
TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick2 \
$H1:$B1/brick3 $H2:$B2/brick4 \
@@ -26,7 +35,6 @@ TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick2 \
## Start the volume
TEST $CLI_1 volume start $V0
-
## Shrinking volume replica 2x3 to 2x2 by performing remove-brick operation.
TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick1 $H2:$B2/brick6 force
@@ -37,7 +45,6 @@ TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 forc
TEST $CLI_1 volume remove-brick $V0 replica 1 $H1:$B1/brick5 force
-
### Expanding volume replica by performing add-brick operation.
## Expand volume replica from 1x1 to 1x2 by performing add-brick operation
@@ -49,4 +56,5 @@ TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 force
## Expand volume replica from 2x2 to 2x3 by performing add-brick operation
TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick6 force
-cleanup;
+cleanup
+
diff --git a/tests/bugs/glusterd/remove-brick-testcases.t b/tests/bugs/glusterd/remove-brick-testcases.t
new file mode 100644
index 00000000000..2f982d5266f
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-testcases.t
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..5}
+TEST $CLI volume start $V0
+
+#bug-1225716 - remove-brick on a brick which is down should fail
+#kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}1
+
+#remove-brick start should fail as the brick is down
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+#remove-brick start should succeed as the brick is up
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}1"
+
+#kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+
+#remove-brick commit should pass even if the brick is down
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
+
+#bug-1121584 - brick-existing-validation-for-remove-brick-status-stop
+## Start remove-brick operation on the volume
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+
+## Giving a non-existent brick to the remove-brick status/stop command
+## should return an error.
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD status
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD stop
+
+## For a brick which is part of the volume, the remove-brick status/stop
+## command should print the statistics of the remove-brick operation or stop
+## the operation.
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 status
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+#bug-878004 - validate remove brick force
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
+EXPECT '3' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
+EXPECT '2' brick_count $V0
+
+#bug-1027171 - Do not allow commit if the bricks are not decommissioned
+#Remove bricks and commit without starting
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}4 commit 2>&1 |grep -oE "success|decommissioned"
+}
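+# the CLI error for committing a brick that was never decommissioned (no
+# remove-brick start) contains the word "decommissioned", hence the grep above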
+EXPECT "decommissioned" remove_brick_commit_status;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+
+#Create a 2X3 distributed-replicate volume
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+#Try to reduce replica count with start option
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}3 $H0:$B0/${V0}6 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "failed" remove_brick_start_status;
+
+#Remove bricks with commit option
+function remove_brick_commit_status2 {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}3 $H0:$B0/${V0}6 commit 2>&1 |
+ grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status2;
+TEST $CLI volume info $V0
+
+#bug-1040408 - reduce replica count of distributed replicate volume
+
+# Reduce to 2x2 volume by specifying bricks in reverse order
+function remove_brick_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_status;
+TEST $CLI volume info $V0
+
+#bug-1120647 - remove brick validation
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} start
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}5"
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}4"
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} commit
+TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/${V0}2 force
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-974007.t b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
index 5759adb583f..20c84d26b9c 100644
--- a/tests/bugs/glusterd/bug-974007.t
+++ b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
@@ -1,8 +1,5 @@
#!/bin/bash
-#Test case: Create a distributed replicate volume, and remove multiple
-#replica pairs in a single remove-brick command.
-
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
@@ -17,6 +14,7 @@ TEST $CLI volume info
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
TEST $CLI volume start $V0
+#bug-974007 - remove multiple replica pairs in a single remove-brick command
# Mount FUSE and create files
TEST glusterfs -s $H0 --volfile-id $V0 $M0
TEST touch $M0/file{1..10}
@@ -41,12 +39,41 @@ function remove_brick_commit_status {
}
EXPECT "success" remove_brick_commit_status;
+
# Check the volume type
EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'`
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#bug-961669 - remove brick start should fail when reducing the replica count
+
+#Create a 3x3 dist-rep volume
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{0,1,2,3,4,5,6,7,8};
+TEST $CLI volume start $V1
+
+# Mount FUSE and create file/directory
+TEST glusterfs -s $H0 --volfile-id $V1 $M0
+TEST touch $M0/zerobytefile.txt
+TEST mkdir $M0/test_dir
+TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
+
+function remove_brick_start {
+ $CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} start 2>&1|grep -oE 'success|failed'
+}
+
+function remove_brick {
+ $CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} force 2>&1|grep -oE 'success|failed'
+}
+
+#remove-brick start variant
+#Actual message displayed at cli is:
+#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
+EXPECT "failed" remove_brick_start;
+
+#remove-brick commit-force
+#Actual message displayed at cli is:
+#"volume remove-brick commit force: success"
+EXPECT "success" remove_brick
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
cleanup;
diff --git a/tests/bugs/glusterd/replace-brick-operations.t b/tests/bugs/glusterd/replace-brick-operations.t
new file mode 100644
index 00000000000..044aa3d6c6d
--- /dev/null
+++ b/tests/bugs/glusterd/replace-brick-operations.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+## Test case for BZ: 1094119 Remove replace-brick support from gluster
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd
+
+## Lets create and start volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0
+
+#bug-1094119-remove-replace-brick-support-from-glusterd
+
+## With this patch, replace-brick only accepts the following command:
+## volume replace-brick <VOLNAME> <SOURCE-BRICK> <NEW-BRICK> {commit force}
+## Any other form of the replace-brick command will fail.
+
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 start
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 status
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 abort
+
+
+## The replace-brick commit force command should succeed
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 commit force
+
+#bug-1242543-replace-brick validation
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Replace brick1 without killing
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST kill_brick $V0 $H0 $B0/${V0}1_new
+
+# Replace brick1 after killing the brick
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_newer commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1383893-daemons-to-follow-quorum.t b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
index 105292ab5bb..5d2d9590a0e 100644
--- a/tests/bugs/glusterd/bug-1383893-daemons-to-follow-quorum.t
+++ b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
@@ -1,9 +1,4 @@
#!/bin/bash
-
-# This test checks for if shd or any other daemons brought down (apart from
-# brick processes) is not brought up automatically when glusterd on the other
-# node is (re)started
-
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
@@ -19,24 +14,35 @@ function shd_up_status_2 {
function get_shd_pid_2 {
$CLI_2 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $8}'
}
+
cleanup;
-TEST launch_cluster 3
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+TEST launch_cluster 3
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B0/${V0} $H2:$B0/${V0}
+TEST $CLI_1 volume start $V0
-# Lets create the volume
-TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}1 $H2:$B2/${V0}2
+#testcase: bug-1507466 - validate reset-brick commit force
+# Negative case with brick not killed && volume-id xattrs present
+TEST ! $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
-# Start the volume
-TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} start
+# Now test if reset-brick commit force works
+TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
+
+#testcase: bug-1383893 - shd should not come up after restarting the peer glusterd
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B0/${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B0/${V0}
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
diff --git a/tests/bugs/glusterd/bug-1322145-disallow-detatch-peer.t b/tests/bugs/glusterd/snapshot-operations.t
index 60eceb4f44d..4705577d741 100644
--- a/tests/bugs/glusterd/bug-1322145-disallow-detatch-peer.t
+++ b/tests/bugs/glusterd/snapshot-operations.t
@@ -7,6 +7,7 @@
cleanup;
+
TEST verify_lvm_version
TEST launch_cluster 3;
TEST setup_lvm 3;
@@ -20,8 +21,17 @@ EXPECT 'Created' volinfo_field $V0 'Status'
TEST $CLI_1 volume start $V0
EXPECT 'Started' volinfo_field $V0 'Status'
+#bug-1318591 - skip-non-directories-inside-vols
+
+b="B1"
+TEST touch ${!b}/glusterd/vols/file
+
TEST $CLI_1 snapshot create snap1 $V0 no-timestamp;
+TEST touch ${!b}/glusterd/snaps/snap1/file
+
+#bug-1322145 - peer hosting snapshotted bricks should not be detachable
+
kill_glusterd 2
TEST $CLI_1 peer probe $H3;
@@ -29,8 +39,12 @@ EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume replace-brick $V0 $H2:$L2 $H3:$L3 commit force
-
# peer hosting snapshotted bricks should not be detachable
TEST ! $CLI_1 peer detach $H2
+
+TEST killall_gluster
+TEST $glusterd_1
+TEST $glusterd_2
+
cleanup;
diff --git a/tests/bugs/glusterd/sync-post-glusterd-restart.t b/tests/bugs/glusterd/sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..de3dff715ab
--- /dev/null
+++ b/tests/bugs/glusterd/sync-post-glusterd-restart.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function volume_get_field()
+{
+ local vol=$1
+ local field=$2
+ $CLI_2 volume get $vol $field | tail -1 | awk '{print $2}'
+}
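+# e.g. "volume_get_field $V0 write-behind" prints the value column of the
+# last line of "gluster volume get", i.e. the option value as seen by node 2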
+
+cleanup
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume set $V0 performance.readdir-ahead on
+
+# Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+##bug-1420637 and bug-1323287 - sync post glusterd restart
+
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
+TEST $CLI_1 volume set $V0 performance.readdir-ahead off
+TEST $CLI_1 volume set $V0 performance.write-behind off
+
+# Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After 2nd glusterd come back, there will be 2 nodes in a cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+#bug-1420637-volume sync post glusterd restart
+
+EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
+
+#bug-1323287
+EXPECT_WITHIN $PROBE_TIMEOUT 'off' volume_get_field $V0 'write-behind'
+
+#bug-1213295 - volume stop should not crash glusterd post glusterd restart
+
+TEST $CLI_2 volume stop $V0
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+
+cleanup
diff --git a/tests/bugs/glusterd/validating-options-for-striped-replicated-volume.t b/tests/bugs/glusterd/validating-options-for-striped-replicated-volume.t
new file mode 100644
index 00000000000..8a6772b402b
--- /dev/null
+++ b/tests/bugs/glusterd/validating-options-for-striped-replicated-volume.t
@@ -0,0 +1,144 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#bug-1314649 - validate group virt
+TEST $CLI volume set $V0 group virt;
+
+#bug-765230 - remove-quota-related-option-after-disabling-quota
+## setting soft-timeout as 20
+TEST $CLI volume set $V0 features.soft-timeout 20
+EXPECT '20' volinfo_field $V0 'features.soft-timeout';
+
+## enabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+## enabling quota
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+
+## setting soft-timeout as 20
+TEST $CLI volume set $V0 features.soft-timeout 20
+EXPECT '20' volinfo_field $V0 'features.soft-timeout';
+
+## enabling features.quota-deem-statfs
+TEST $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+## disabling quota
+TEST $CLI volume quota $V0 disable
+EXPECT 'off' volinfo_field $V0 'features.quota'
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+EXPECT '' volinfo_field $V0 'features.soft-timeout'
+
+## setting soft-timeout as 30
+TEST $CLI volume set $V0 features.soft-timeout 30
+EXPECT '30' volinfo_field $V0 'features.soft-timeout';
+
+## disabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs off
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+#bug-859927 - validate different options for striped replicated volume
+
+TEST ! $CLI volume set $V0 statedump-path ""
+TEST ! $CLI volume set $V0 statedump-path " "
+TEST $CLI volume set $V0 statedump-path "/home/"
+EXPECT "/home/" volume_option $V0 server.statedump-path
+
+TEST ! $CLI volume set $V0 background-self-heal-count ""
+TEST ! $CLI volume set $V0 background-self-heal-count " "
+TEST $CLI volume set $V0 background-self-heal-count 10
+EXPECT "10" volume_option $V0 cluster.background-self-heal-count
+
+TEST ! $CLI volume set $V0 cache-size ""
+TEST ! $CLI volume set $V0 cache-size " "
+TEST $CLI volume set $V0 cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.cache-size
+
+TEST ! $CLI volume set $V0 self-heal-daemon ""
+TEST ! $CLI volume set $V0 self-heal-daemon " "
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT "on" volume_option $V0 cluster.self-heal-daemon
+
+TEST ! $CLI volume set $V0 read-subvolume ""
+TEST ! $CLI volume set $V0 read-subvolume " "
+TEST $CLI volume set $V0 read-subvolume $V0-client-0
+EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
+
+TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
+TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
+TEST ! $CLI volume set $V0 data-self-heal-algorithm on
+TEST $CLI volume set $V0 data-self-heal-algorithm full
+EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
+
+TEST ! $CLI volume set $V0 min-free-inodes ""
+TEST ! $CLI volume set $V0 min-free-inodes " "
+TEST $CLI volume set $V0 min-free-inodes 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-inodes
+
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk " "
+TEST $CLI volume set $V0 min-free-disk 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-disk
+
+TEST $CLI volume set $V0 min-free-disk 120
+EXPECT "120" volume_option $V0 cluster.min-free-disk
+
+TEST ! $CLI volume set $V0 frame-timeout ""
+TEST ! $CLI volume set $V0 frame-timeout " "
+TEST $CLI volume set $V0 frame-timeout 0
+EXPECT "0" volume_option $V0 network.frame-timeout
+
+TEST ! $CLI volume set $V0 auth.allow ""
+TEST ! $CLI volume set $V0 auth.allow " "
+TEST $CLI volume set $V0 auth.allow 192.168.122.1
+EXPECT "192.168.122.1" volume_option $V0 auth.allow
+
+TEST ! $CLI volume set $V0 stripe-block-size ""
+TEST ! $CLI volume set $V0 stripe-block-size " "
+TEST $CLI volume set $V0 stripe-block-size 512MB
+EXPECT "512MB" volume_option $V0 cluster.stripe-block-size
+
+#bug-782095 - validate performance cache min/max size value
+
+## setting performance cache min size as 2MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## setting performance cache max size as 20MB
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## trying to set performance cache min size as 25MB (above the 20MB max) should fail
+TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## setting performance cache min size succeeds as long as it is less than the max size
+TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
+EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## trying to set cache-max-file-size to 10MB (below the 15MB min) should fail
+TEST ! $CLI volume set $V0 cache-max-file-size 10MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup
diff --git a/tests/bugs/glusterd/validating-server-quorum.t b/tests/bugs/glusterd/validating-server-quorum.t
new file mode 100644
index 00000000000..277bb4af993
--- /dev/null
+++ b/tests/bugs/glusterd/validating-server-quorum.t
@@ -0,0 +1,110 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_fs {
+ df $1 &> /dev/null
+ echo $?
+}
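+# check_fs echoes df's exit status for the mount point: 0 while the volume is
+# reachable, non-zero once quorum loss takes it down (used below)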
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Lets create the volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}1 $H2:$B2/${V0}2 $H3:$B3/${V0}3
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+
+# Start the volume
+TEST $CLI_1 volume start $V0
+
+#bug-1345727 - bricks should be down when quorum is not met
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
+
+# Bring down glusterd on 2nd node
+TEST kill_glusterd 2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST kill_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
+
+# Server quorum is not met. Brick on 1st node must be down
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
+
+# Set the quorum ratio to 95%, i.e. 95% or more of the nodes in the cluster
+# must be available to perform volume operations.
+# Server-side quorum is met if the number of available nodes is greater than
+# or equal to 'quorum-ratio' times the number of nodes in the cluster.
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
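+# Worked example: with 3 nodes and a 95% ratio, 0.95 * 3 = 2.85, so all 3
+# glusterds must be up; with only 2 of 3 running (~66%) quorum stays unmet
+# and the bricks remain down, as verified below.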
+
+#bug-1483058 - replace-brick should fail when quorum is not met
+TEST ! $CLI_1 volume replace-brick $V0 $H2:$B2/${V0}2 $H1:$B1/${V0}2_new commit force
+
+#Bring back 2nd glusterd
+TEST $glusterd_2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+# Server quorum is still not met. Bricks should be down on 1st and 2nd nodes
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H2 $B2/${V0}2
+
+# Bring back 3rd glusterd
+TEST $glusterd_3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Server quorum is met now. Bricks should be up on all nodes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
+
+# quorum is met. replace-brick will execute successfully
+EXPECT_WITHIN $PEER_SYNC_TIMEOUT 0 attempt_replace_brick 1 $V0 $H2:$B2/${V0}2 $H2:$B2/${V0}2_new
+
+TEST $CLI_1 volume reset all
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2_new
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
+
+
+#bug-913555 - volume should become unwritable when quorum is not met
+
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+# Kill one pseudo-node, make sure the others survive and volume stays up.
+TEST kill_node 3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2_new
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+# Kill another pseudo-node, make sure the last one dies and volume goes down.
+TEST kill_node 2;
+EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
+# with quorum lost, the brick process (glusterfsd) on the surviving node must be dead too
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_fs $M0;
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+cleanup