Diffstat (limited to 'tests/bugs/glusterd')
27 files changed, 613 insertions(+), 365 deletions(-)
diff --git a/tests/bugs/glusterd/859927/repl.t b/tests/bugs/glusterd/859927/repl.t
index 70143e2c193..6e7c23b5b1d 100755
--- a/tests/bugs/glusterd/859927/repl.t
+++ b/tests/bugs/glusterd/859927/repl.t
@@ -23,6 +23,9 @@ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
 TEST $CLI volume set $V0 cluster.self-heal-daemon off
 TEST $CLI volume set $V0 performance.stat-prefetch off
 TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
 TEST $CLI volume start $V0
 
 TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0;
diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
index 4e570381701..b6af487a791 100644
--- a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
@@ -7,6 +7,20 @@ function count_brick_processes {
         pgrep glusterfsd | wc -l
 }
 
+function count_brick_pids {
+        $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+        | grep -v "N/A" | sort | uniq | wc -l
+}
+
+function count_N/A_brick_pids {
+        $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+        | grep -- '\-1' | sort | uniq | wc -l
+}
+
+function check_peers {
+        $CLI_2 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
 cleanup;
 
 TEST launch_cluster 3
@@ -48,4 +62,47 @@ TEST $CLI_1 volume stop $V1
 
 EXPECT 3 count_brick_processes
 
-cleanup
+TEST $CLI_1 volume stop $META_VOL
+
+TEST $CLI_1 volume delete $META_VOL
+TEST $CLI_1 volume delete $V0
+TEST $CLI_1 volume delete $V1
+
+#bug-1773856 - Brick process fails to come up with brickmux on
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 $H3:$B3/${V0}1 force
+TEST $CLI_1 volume start $V0
+
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}2 $H2:$B2/${V1}2 $H3:$B3/${V1}2 force
+TEST $CLI_1 volume start $V1
+
+EXPECT 3 count_brick_processes
+
+V2=patchy2
+TEST $CLI_1 volume create $V2 $H1:$B1/${V2}3 $H2:$B2/${V2}3 $H3:$B3/${V2}3 force
+TEST $CLI_1 volume start $V2
+
+EXPECT 3 count_brick_processes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
+
+TEST kill_node 1
+
+sleep 10
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+$CLI_2 volume set $V0 performance.readdir-ahead on
+$CLI_2 volume set $V1 performance.readdir-ahead on
+
+TEST $glusterd_1;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_N/A_brick_pids
+
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux-validation.t b/tests/bugs/glusterd/brick-mux-validation.t
index 03a476823ca..61b0455f9a8 100644
--- a/tests/bugs/glusterd/brick-mux-validation.t
+++ b/tests/bugs/glusterd/brick-mux-validation.t
@@ -24,7 +24,7 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
 TEST $CLI volume start $V0
 
 EXPECT 1 count_brick_processes
-EXPECT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
 
 pkill gluster
@@ -101,4 +101,4 @@ TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
 EXPECT 1 count_brick_processes
 
-cleanup;
\ No newline at end of file
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux.t b/tests/bugs/glusterd/brick-mux.t
index eeaa3ebfea8..927940534c1 100644
--- a/tests/bugs/glusterd/brick-mux.t
+++ b/tests/bugs/glusterd/brick-mux.t
@@ -39,7 +39,7 @@ TEST glusterd
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
 EXPECT 1 count_brick_processes
 
-TEST $CLI volume set $V1 performance.cache-size 32MB
+TEST $CLI volume set $V1 performance.io-cache-size 32MB
 TEST $CLI volume stop $V1
 TEST $CLI volume start $V1
 
diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
new file mode 100644
index 00000000000..0be31dac768
--- /dev/null
+++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST verify_lvm_version;
+#Create cluster with 3 nodes
+TEST launch_cluster 3 -NO_DEBUG -NO_FORCE
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume create $V0 replica 3 $H1:$L1/$V0 $H2:$L2/$V0 $H3:$L3/$V0
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#add-brick with or without mentioning the replica count should not fail
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}_1 $H2:$L2/${V0}_1 $H3:$L3/${V0}_1
+EXPECT '2 x 3 = 6' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_2 $H2:$L2/${V0}_2 $H3:$L3/${V0}_2
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host with force should succeed
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
+EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1/${V0}1 $H2:$L2/${V0}1
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Add-brick with Increasing replica count
+TEST $CLI_1 volume add-brick $V0 replica 3 $H3:$L3/${V0}1
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+#Add-brick with Increasing replica count from same host should fail
+TEST ! $CLI_1 volume add-brick $V0 replica 5 $H1:$L1/${V0}2 $H1:$L1/${V0}3
+
+#adding multiple bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}{4..6} $H2:$L2/${V0}{7..9}
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1070734.t b/tests/bugs/glusterd/bug-1070734.t
index ea160d7ec6b..0afcb3b37b3 100755
--- a/tests/bugs/glusterd/bug-1070734.t
+++ b/tests/bugs/glusterd/bug-1070734.t
@@ -4,6 +4,8 @@
 . $(dirname $0)/../../volume.rc
 . $(dirname $0)/../../nfs.rc
 
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
 cleanup;
 
 ## Start glusterd
diff --git a/tests/bugs/glusterd/bug-1303028-Rebalance-glusterd-rpc-connection-issue.t b/tests/bugs/glusterd/bug-1303028-Rebalance-glusterd-rpc-connection-issue.t
deleted file mode 100644
index 3b62a4547b5..00000000000
--- a/tests/bugs/glusterd/bug-1303028-Rebalance-glusterd-rpc-connection-issue.t
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function create_dist_tier_vol () {
-        mkdir $B0/cold
-        mkdir $B0/hot
-        TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{1..3}
-        TEST $CLI volume set $V0 performance.quick-read off
-        TEST $CLI volume set $V0 performance.io-cache off
-        TEST $CLI volume start $V0
-        TEST $CLI volume tier $V0 attach $H0:$B0/hot/${V0}{1..2}
-        TEST $CLI volume set $V0 cluster.tier-mode test
-}
-
-function non_zero_check () {
-        if [ "$1" -ne 0 ]
-        then
-                echo "0"
-        else
-                echo "1"
-        fi
-}
-
-function num_bricks_up {
-        local b
-        local n_up=0
-
-        for b in $B0/hot/${V0}{1..2} $B0/cold/${V0}{1..3}; do
-                if [ x"$(brick_up_status $V0 $H0 $b)" = x"1" ]; then
-                        n_up=$((n_up+1))
-                fi
-        done
-
-        echo $n_up
-}
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume status
-
-
-#Create and start a tiered volume
-create_dist_tier_vol
-# Wait for the bricks to come up, *then* the tier daemon.
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 5 num_bricks_up
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 tier_daemon_check
-sleep 5 #wait for some time to run tier daemon
-time_before_restarting=$(rebalance_run_time $V0);
-
-#checking for elapsed time after sleeping for two seconds.
-EXPECT "0" non_zero_check $time_before_restarting;
-
-#Difference of elapsed time should be positive
-
-kill -9 $(pidof glusterd);
-TEST glusterd;
-sleep 2;
-# Wait for the bricks to come up, *then* the tier daemon.
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 5 num_bricks_up
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check;
-sleep 1;
-time1=$(rebalance_run_time $V0);
-EXPECT "0" non_zero_check $time1;
-sleep 2;
-time2=$(rebalance_run_time $V0);
-EXPECT "0" non_zero_check $time2;
-diff=`expr $time2 - $time1`
-EXPECT "0" non_zero_check $diff;
diff --git a/tests/bugs/glusterd/bug-1595320.t b/tests/bugs/glusterd/bug-1595320.t
index f41df9d0ffa..c10e11821a1 100644
--- a/tests/bugs/glusterd/bug-1595320.t
+++ b/tests/bugs/glusterd/bug-1595320.t
@@ -25,7 +25,7 @@ TEST pidof glusterd
 
 # Create volume and enable brick multiplexing
 TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3
-gluster v set all cluster.brick-multiplex on
+TEST $CLI v set all cluster.brick-multiplex on
 
 # Start the volume
 TEST $CLI volume start $V0
@@ -48,7 +48,7 @@ b2_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-2*.pid)
 b3_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-3*.pid)
 
 kill -9 $brick_pid
-EXPECT 0 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_brick_processes
 
 # Unmount 3rd brick root from node
 brick_root=$L3
diff --git a/tests/bugs/glusterd/bug-1696046.t b/tests/bugs/glusterd/bug-1696046.t
new file mode 100644
index 00000000000..e1c1eb2ceb9
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1696046.t
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function count_up_bricks {
+        $CLI --xml volume status $1 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+        pgrep glusterfsd | wc -l
+}
+
+logdir=`gluster --print-logdir`
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{1,2,3};
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume start $V1;
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V1
+
+EXPECT 1 count_brick_processes
+
+# Mount V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+function client-log-file-name()
+{
+    logfilename=$M0".log"
+    echo ${logfilename:1} | tr / -
+}
+
+function brick-log-file-name()
+{
+    logfilename=$B0"/"$V0"1.log"
+    echo ${logfilename:1} | tr / -
+}
+
+log_file=$logdir"/"`client-log-file-name`
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+
+# Do some operation
+touch $M0/file1
+
+# Check debug message debug message should be exist only for V0
+# Server xlator is common in brick_mux so after enabling DEBUG log
+# some debug message should be available for other xlators like posix
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep file1 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+#Check if any debug log exist in client-log file
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to INFO
+TEST $CLI volume set $V0 diagnostics.brick-log-level INFO
+
+## Set client-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+# Do some operation
+touch $M0/file2
+
+nofdlog=$(cat $brick_log_file | grep " D " | grep file2 | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+# Unmount V0
+TEST umount $M0
+
+#Mount V1
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#do some operation
+touch $M0/file3
+
+
+# DEBUG log level is enabled only for V0 so no debug message should be available
+# in log specific to file2 creation except for server xlator, server xlator is
+# common xlator in brick mulitplex
+nofdlog=$(cat $brick_log_file | grep file3 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+# Unmount V1
+TEST umount $M0
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1699339.t b/tests/bugs/glusterd/bug-1699339.t
new file mode 100644
index 00000000000..bb8d4f46eb8
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1699339.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+NUM_VOLS=15
+
+
+get_brick_base () {
+        printf "%s/vol%02d" $B0 $1
+}
+
+function count_up_bricks {
+        vol=$1;
+        $CLI_1 --xml volume status $vol | grep '<status>1' | wc -l
+}
+
+create_volume () {
+
+        local vol_name=$(printf "%s-vol%02d" $V0 $1)
+
+        TEST $CLI_1 volume create $vol_name replica 3 $H1:$B1/${vol_name} $H2:$B2/${vol_name} $H3:$B3/${vol_name}
+        TEST $CLI_1 volume start $vol_name
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+
+# The option accepts the value in the range from 5 to 200
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 210
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 4
+
+TEST $CLI_1 volume set all glusterd.vol_count_per_thread 5
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Our infrastructure can't handle an arithmetic expression here. The formula
+# is (NUM_VOLS-1)*5 because it sees each TEST/EXPECT once but needs the other
+# NUM_VOLS-1 and there are 5 such statements in each iteration.
+TESTS_EXPECTED_IN_LOOP=28
+for i in $(seq 1 $NUM_VOLS); do
+        starttime="$(date +%s)";
+        create_volume $i
+done
+
+TEST kill_glusterd 1
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+TEST $CLI_2 volume set $vol1 performance.readdir-ahead on
+done
+
+# Bring back 1st glusterd
+TEST $glusterd_1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol1 performance.readdir-ahead
+done
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1720566.t b/tests/bugs/glusterd/bug-1720566.t
new file mode 100644
index 00000000000..99bcf6ff785
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1720566.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+
+cleanup;
+V0="TestLongVolnamec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9abcd"
+V1="TestLongVolname3102bd28a16c49440bd5210e4ec4d5d93102bd28a16c49440bd5210e4ec4d5d933102bd28a16c49440bd5210e4ebbcd"
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+$CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+EXPECT 'Created' cluster_volinfo_field 1 $V1 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V1
+EXPECT 'Started' cluster_volinfo_field 1 $V1 'Status';
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V1 $M1;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST mkdir $M1/dir{1..4};
+TEST touch $M1/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}_1 $H2:$B2/${V0}_1
+TEST $CLI_1 volume add-brick $V1 $H1:$B1/${V1}_1 $H2:$B2/${V1}_1
+
+
+TEST $CLI_1 volume rebalance $V0 start
+TEST $CLI_1 volume rebalance $V1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V1
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-824753.t b/tests/bugs/glusterd/bug-824753.t
index 2ce4a07c5bd..b969e28f35e 100755
--- a/tests/bugs/glusterd/bug-824753.t
+++ b/tests/bugs/glusterd/bug-824753.t
@@ -9,7 +9,7 @@ TEST glusterd;
 TEST pidof glusterd;
 TEST $CLI volume info;
 
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
 
 function volinfo_field()
 {
diff --git a/tests/bugs/glusterd/bug-857330/common.rc b/tests/bugs/glusterd/bug-857330/common.rc
deleted file mode 100644
index d0aa4b1a640..00000000000
--- a/tests/bugs/glusterd/bug-857330/common.rc
+++ /dev/null
@@ -1,57 +0,0 @@
-. $(dirname $0)/../../../include.rc
-
-UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
-
-TASK_ID=""
-COMMAND=""
-PATTERN=""
-
-function check-and-store-task-id()
-{
-        TASK_ID=""
-
-        local task_id=$($CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX")
-
-        if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
-                return 1
-        fi
-
-        TASK_ID=$task_id
-        return 0;
-}
-
-function get-task-id()
-{
-        $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" | tail -n1
-
-}
-
-function check-and-store-task-id-xml()
-{
-        TASK_ID=""
-
-        local task_id=$($CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX")
-
-        if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
-                return 1
-        fi
-
-        TASK_ID=$task_id
-        return 0;
-}
-
-function get-task-id-xml()
-{
-        $CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX"
-}
-
-function get-task-status()
-{
-        pattern=$1
-        val=1
-        test=$(gluster $COMMAND | grep -o $pattern 2>&1)
-        if [ $? -eq 0 ]; then
-                val=0
-        fi
-        echo $val
-}
diff --git a/tests/bugs/glusterd/bug-857330/normal.t b/tests/bugs/glusterd/bug-857330/normal.t
deleted file mode 100755
index ad0c8844fae..00000000000
--- a/tests/bugs/glusterd/bug-857330/normal.t
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/common.rc
-. $(dirname $0)/../../../volume.rc
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}1 $H0:$B0/${V0}2;
-TEST $CLI volume info $V0;
-TEST $CLI volume start $V0;
-
-TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-
-TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
-    --multi -b 10 -d 10 -n 10 $M0;
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-###############
-## Rebalance ##
-###############
-TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}3 $H0:$B0/${V0}4;
-
-COMMAND="volume rebalance $V0 start"
-PATTERN="ID:"
-TEST check-and-store-task-id
-
-COMMAND="volume status $V0"
-PATTERN="ID"
-EXPECT $TASK_ID get-task-id
-
-COMMAND="volume rebalance $V0 status"
-PATTERN="completed"
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" get-task-status $PATTERN
-
-###################
-## Replace-brick ##
-###################
-REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}5"
-
-TEST $CLI volume replace-brick $V0 $REP_BRICK_PAIR commit force;
-
-##################
-## Remove-brick ##
-##################
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}5 start"
-PATTERN="ID:"
-TEST check-and-store-task-id
-
-COMMAND="volume status $V0"
-PATTERN="ID"
-EXPECT $TASK_ID get-task-id
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}5 status"
-PATTERN="completed"
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" get-task-status $PATTERN
-
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}5 commit
-
-TEST $CLI volume stop $V0;
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-857330/xml.t b/tests/bugs/glusterd/bug-857330/xml.t
deleted file mode 100755
index 8383d2a0711..00000000000
--- a/tests/bugs/glusterd/bug-857330/xml.t
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/common.rc
-. $(dirname $0)/../../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}1 $H0:$B0/${V0}2;
-TEST $CLI volume info $V0;
-TEST $CLI volume start $V0;
-
-TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-
-TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
-    --multi -b 10 -d 10 -n 10 $M0;
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-
-###############
-## Rebalance ##
-###############
-TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}3 $H0:$B0/${V0}4;
-
-COMMAND="volume rebalance $V0 start"
-PATTERN="task-id"
-TEST check-and-store-task-id-xml
-
-COMMAND="volume status $V0"
-PATTERN="id"
-EXPECT $TASK_ID get-task-id-xml
-
-COMMAND="volume rebalance $V0 status"
-PATTERN="task-id"
-EXPECT $TASK_ID get-task-id-xml
-
-## TODO: Add tests for rebalance stop
-
-COMMAND="volume rebalance $V0 status"
-PATTERN="completed"
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" get-task-status $PATTERN
-
-###################
-## Replace-brick ##
-###################
-TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}4 $H0:$B0/${V0}5 commit force
-
-##################
-## Remove-brick ##
-##################
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 $H0:$B0/${V0}5 start"
-PATTERN="task-id"
-TEST check-and-store-task-id-xml
-
-COMMAND="volume status $V0"
-PATTERN="id"
-EXPECT $TASK_ID get-task-id-xml
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 $H0:$B0/${V0}5 status"
-PATTERN="task-id"
-EXPECT $TASK_ID get-task-id-xml
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 $H0:$B0/${V0}5 status"
-PATTERN="completed"
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" get-task-status $PATTERN
-
-## TODO: Add tests for remove-brick stop
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 $H0:$B0/${V0}5 commit"
-PATTERN="task-id"
-EXPECT $TASK_ID get-task-id-xml
-
-TEST $CLI volume stop $V0;
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/check_elastic_server.t b/tests/bugs/glusterd/check_elastic_server.t
new file mode 100644
index 00000000000..41d2140aa2b
--- /dev/null
+++ b/tests/bugs/glusterd/check_elastic_server.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function cluster_rebalance_status {
+        local vol=$1
+        $CLI_2 volume status | grep -iw "Rebalance" -A 5 | grep "Status" | sed 's/.*: //'
+}
+
+cleanup;
+TEST launch_cluster 4;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+TEST $CLI_1 peer probe $H4;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#Mount invalid volume
+TEST ! glusterfs -s $H1 --volfile-id=$V0_NA $M0;
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_remove_brick_status_completed_field "$V0 $H1:$B1/$V0"
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 commit
+
+kill_glusterd 1
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H3:$B3/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H4:$B4/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+kill_glusterd 2
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/df-results-post-replace-brick-operations.t b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
new file mode 100644
index 00000000000..04f75889388
--- /dev/null
+++ b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+TEST glusterd
+
+#Create brick partitions
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 100M $B0/brick2
+TEST truncate -s 100M $B0/brick3
+TEST truncate -s 100M $B0/brick4
+TEST truncate -s 100M $B0/brick5
+
+LO1=`SETUP_LOOP $B0/brick1`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO1
+
+LO2=`SETUP_LOOP $B0/brick2`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO2
+
+LO3=`SETUP_LOOP $B0/brick3`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO3
+
+LO4=`SETUP_LOOP $B0/brick4`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO4
+
+LO5=`SETUP_LOOP $B0/brick5`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO5
+
+TEST mkdir -p $B0/${V0}1 $B0/${V0}2 $B0/${V0}3 $B0/${V0}4 $B0/${V0}5
+TEST MOUNT_LOOP $LO1 $B0/${V0}1
+TEST MOUNT_LOOP $LO2 $B0/${V0}2
+TEST MOUNT_LOOP $LO3 $B0/${V0}3
+TEST MOUNT_LOOP $LO4 $B0/${V0}4
+TEST MOUNT_LOOP $LO5 $B0/${V0}5
+
+# create a subdirectory in mount point and use it for volume creation
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}3/brick1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" online_brick_count
+
+# mount the volume and check the size at mount point
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+
+# perform replace brick operations
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}4/brick1 commit force
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}5/brick1 commit force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# check for the size at mount point, it should be same as previous
+total_space_new=$(df -P $M0 | tail -1 | awk '{ print $2}')
+TEST [ $total_space -eq $total_space_new ]
diff --git a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
index fdc0a73f60c..8001359e6b3 100644
--- a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
+++ b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
@@ -4,7 +4,7 @@
 . $(dirname $0)/../../cluster.rc
 
 function check_peers {
-$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
 }
 
 cleanup
@@ -36,23 +36,35 @@ TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
 
 #bug-948686 - volume sync after bringing up the killed node
 TEST $CLI_1 peer probe $H3
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
 
 TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
 TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
 TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
 
 #kill a node
 TEST kill_node 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 2
 
 #modify volume config to see change in volume-sync
 TEST $CLI_1 volume set $V0 write-behind off
 #add some files to the volume to see effect of volume-heal cmd
 TEST touch $M0/{1..100};
 TEST $CLI_1 volume stop $V0;
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'Stopped' volinfo_field_1 $V0 'Status'
+
 TEST $glusterd_3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+sleep 5
 TEST $CLI_3 volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
 
 TEST $CLI_2 volume stop $V0;
 TEST $CLI_2 volume delete $V0;
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
index dd98a65fa9a..b89ca22415e 100644
--- a/tests/bugs/glusterd/optimized-basic-testcases.t
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
@@ -32,6 +32,16 @@ function get_brick_host_uuid()
     echo $host_uuid_list | awk '{print $1}'
 }
 
+function generate_statedump_and_check_for_glusterd_info {
+        pid=`pidof glusterd`
+        #remove old stale statedumps
+        cleanup_statedump $pid
+        kill -USR1 $pid
+        #Wait till the statedump is generated
+        sleep 1
+        fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
+        cat $statedumpdir/$fname | grep "xlator.glusterd.priv" | wc -l
+}
 
 cleanup;
 
@@ -59,6 +69,11 @@ TEST pidof glusterd;
 TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
 EXPECT 'Created' volinfo_field $V0 'Status';
 
+#bug-1786478 - default volume option after volume reset
+addr_family=`volinfo_field $V0 'transport.address-family'`
+TEST $CLI volume reset $V0
+EXPECT $addr_family volinfo_field $V0 'transport.address-family'
+
 #bug-955588 - uuid validation
 
 uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
@@ -114,7 +129,8 @@ TEST ! $CLI volume set all $V0 cluster.op-version $OP_VERS_NEW
 
 #bug-1022055 - validate log rotate command
 
-TEST $CLI volume log rotate $V0;
+TEST ! $CLI volume log rotate $V0;
+TEST $CLI volume log $V0 rotate;
 
 #bug-1092841 - validating barrier enable/disable
 
@@ -276,7 +292,14 @@ TEST ! $CLI volume create "test" $H0:/var/lib/glusterd force
 TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc
 TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc force
 mkdir -p /xyz/var/lib/glusterd/abc
-TEST $CLI volume create "test" $H0:/xyz/var/lib/glusterd/abc
+
+#bug 1716812 - volfile should be created with transport type both
+TEST $CLI volume create "test" transport tcp,rdma $H0:/xyz/var/lib/glusterd/abc
 EXPECT 'Created' volinfo_field "test" 'Status';
 
+#While taking a statedump, there is a TRY_LOCK on call_frame, which might may cause
+#failure. So Adding a EXPECT_WITHIN
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info
+
+cleanup_statedump `pidof glusterd`
 cleanup
diff --git a/tests/bugs/glusterd/quorum-validation.t b/tests/bugs/glusterd/quorum-validation.t
index 05aef4edccb..3cc3351b43b 100644
--- a/tests/bugs/glusterd/quorum-validation.t
+++ b/tests/bugs/glusterd/quorum-validation.t
@@ -34,9 +34,13 @@ TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
 TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
 TEST ! $CLI_1 volume set $V0 barrier enable
 
-# Now execute a command which goes through op state machine and it should fail
+#quorum is not met, rebalance/profile start should fail
+TEST ! $CLI_1 volume rebalance $V0 start
 TEST ! $CLI_1 volume profile $V0 start
 
+#bug-1690753 - Volume stop when quorum not met is successful
+TEST ! $CLI_1 volume stop $V0
+
 #Bring back the 2nd glusterd
 TEST $glusterd_2
 
diff --git a/tests/bugs/glusterd/quorum-value-check.t b/tests/bugs/glusterd/quorum-value-check.t
deleted file mode 100755
index aaf636274b6..00000000000
--- a/tests/bugs/glusterd/quorum-value-check.t
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-function check_quorum_nfs() {
-    local qnfs="$(less /var/lib/glusterd/nfs/nfs-server.vol | grep "quorum-count"| awk '{print $3}')"
-    local qinfo="$($CLI volume info $V0| grep "cluster.quorum-count"| awk '{print $2}')"
-
-    if [ $qnfs = $qinfo ]; then
-        echo "Y"
-    else
-        echo "N"
-    fi
-}
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
-TEST $CLI volume set $V0 nfs.disable off
-TEST $CLI volume set $V0 performance.write-behind off
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-TEST $CLI volume set $V0 cluster.quorum-type fixed
-TEST $CLI volume start $V0
-
-TEST $CLI volume set $V0 cluster.quorum-count 1
-EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "Y" check_quorum_nfs
-TEST $CLI volume set $V0 cluster.quorum-count 2
-EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "Y" check_quorum_nfs
-TEST $CLI volume set $V0 cluster.quorum-count 3
-EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "Y" check_quorum_nfs
-
-cleanup;
diff --git a/tests/bugs/glusterd/rebalance-in-cluster.t b/tests/bugs/glusterd/rebalance-in-cluster.t
index 9565faef01d..469ec6cd48e 100644
--- a/tests/bugs/glusterd/rebalance-in-cluster.t
+++ b/tests/bugs/glusterd/rebalance-in-cluster.t
@@ -4,6 +4,10 @@
 . $(dirname $0)/../../cluster.rc
 . $(dirname $0)/../../volume.rc
 
+function rebalance_status_field_1 {
+        $CLI_1 volume rebalance $1 status | awk '{print $7}' | sed -n 3p
+}
+
 cleanup;
 TEST launch_cluster 2;
 TEST $CLI_1 peer probe $H2;
@@ -29,6 +33,11 @@ TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
 TEST $CLI_1 volume rebalance $V0 start
 EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
 
+#bug - 1764119 - rebalance status should display detailed info when any of the node is dowm
+TEST kill_glusterd 2
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field_1 $V0
+
+TEST start_glusterd 2
 #bug-1245142
 
 $CLI_1 volume rebalance $V0 start &
diff --git a/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t b/tests/bugs/glusterd/remove-brick-validation.t
index 11ed0d94d79..a0ff4ff6a24 100644
--- a/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t
+++ b/tests/bugs/glusterd/remove-brick-validation.t
@@ -18,20 +18,6 @@ TEST $CLI_1 peer probe $H2;
 EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
 
-#test case for bug 1266818 - disabling enable-shared-storage option
-##should not delete user created volume with name glusterd_shared_storage
-
-## creating a volume with name glusterd_shared_storage
-TEST $CLI_1 volume create glusterd_shared_storage $H1:$B1/${V0}0 $H2:$B2/${V0}1
-TEST $CLI_1 volume start glusterd_shared_storage
-
-## disabling enable-shared-storage should not succeed and should not delete the
-## user created volume with name "glusterd_shared_storage"
-TEST ! $CLI_1 volume all enable-shared-storage disable
-
-## volume with name should exist
-TEST $CLI_1 volume info glusterd_shared_storage
-
 #testcase: bug-1245045-remove-brick-validation
 
 TEST $CLI_1 peer probe $H3;
diff --git a/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
index 20c84d26b9c..00beab59137 100644
--- a/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
+++ b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
@@ -49,6 +49,7 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 #Create a 3x3 dist-rep volume
 TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{0,1,2,3,4,5,6,7,8};
 TEST $CLI volume start $V1
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "9" brick_count ${V1}
 
 # Mount FUSE and create file/directory
 TEST glusterfs -s $H0 --volfile-id $V1 $M0
diff --git a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
index cdb1a3399c9..e6e65c48456 100644
--- a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
+++ b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
@@ -55,9 +55,9 @@ TEST kill_glusterd 1
 #Bring back 1st glusterd
 TEST $glusterd_1
 
-# We need to wait till PROCESS_UP_TIMEOUT and then check shd service does not
-# come up on node 2
-sleep $PROCESS_UP_TIMEOUT
-EXPECT "N" shd_up_status_2
+# We need to wait till PROCESS_UP_TIMEOUT and then check shd service started
+#on node 2, because once glusterd regains quorum, it will restart all volume
+#level daemons
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
 
 cleanup;
diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
new file mode 100644
index 00000000000..a871e112d87
--- /dev/null
+++ b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
@@ -0,0 +1,54 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+count=`$CLI_3 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
+echo $count
+}
+
+function check_shd {
+ps aux | grep $1 | grep glustershd | wc -l
+}
+
+cleanup
+
+
+TEST launch_cluster 6
+
+TESTS_EXPECTED_IN_LOOP=25
+for i in $(seq 2 6); do
+    hostname="H$i"
+    TEST $CLI_1 peer probe ${!hostname}
+done
+
+
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers;
+for i in $(seq 1 5); do
+
+    TEST $CLI_1 volume create ${V0}_$i replica 3 $H1:$B1/${V0}_$i $H2:$B2/${V0}_$i $H3:$B3/${V0}_$i $H4:$B4/${V0}_$i $H5:$B5/${V0}_$i $H6:$B6/${V0}_$i
+    TEST $CLI_1 volume start ${V0}_$i force
+
+done
+
+#kill a node
+TEST kill_node 3
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_shd $H3
+
+for i in $(seq 1 5); do
+
+    TEST $CLI_1 volume stop ${V0}_$i
+    TEST $CLI_1 volume delete ${V0}_$i
+
+done
+
+for i in $(seq 1 6); do
+    hostname="H$i"
+    EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 0 check_shd ${!hostname}
+done
+cleanup
diff --git a/tests/bugs/glusterd/validating-options-for-striped-replicated-volume.t b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
index 8a6772b402b..ddc80b17870 100644
--- a/tests/bugs/glusterd/validating-options-for-striped-replicated-volume.t
+++ b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
@@ -7,7 +7,7 @@
 cleanup;
 
 TEST glusterd
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
 
 ## start volume and verify
 TEST $CLI volume start $V0;
@@ -51,8 +51,6 @@ EXPECT '30' volinfo_field $V0 'features.soft-timeout';
 TEST ! $CLI volume set $V0 features.quota-deem-statfs off
 EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
 
-#bug-859927 - validate different options for striped replicated volume
-
 TEST ! $CLI volume set $V0 statedump-path ""
 TEST ! $CLI volume set $V0 statedump-path " "
 TEST $CLI volume set $V0 statedump-path "/home/"
@@ -63,10 +61,15 @@ TEST ! $CLI volume set $V0 background-self-heal-count " "
 TEST $CLI volume set $V0 background-self-heal-count 10
 EXPECT "10" volume_option $V0 cluster.background-self-heal-count
 
-TEST ! $CLI volume set $V0 cache-size ""
-TEST ! $CLI volume set $V0 cache-size " "
-TEST $CLI volume set $V0 cache-size 512MB
-EXPECT "512MB" volume_option $V0 performance.cache-size
+TEST ! $CLI volume set $V0 io-cache-size ""
+TEST ! $CLI volume set $V0 io-cache-size " "
+TEST $CLI volume set $V0 io-cache-size 64MB
+EXPECT "64MB" volume_option $V0 performance.io-cache-size
+
+TEST ! $CLI volume set $V0 quick-read-cache-size ""
+TEST ! $CLI volume set $V0 quick-read-cache-size " "
+TEST $CLI volume set $V0 quick-read-cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.quick-read-cache-size
 
 TEST ! $CLI volume set $V0 self-heal-daemon ""
 TEST ! $CLI volume set $V0 self-heal-daemon " "
@@ -107,11 +110,6 @@ TEST ! $CLI volume set $V0 auth.allow " "
 TEST $CLI volume set $V0 auth.allow 192.168.122.1
 EXPECT "192.168.122.1" volume_option $V0 auth.allow
 
-TEST ! $CLI volume set $V0 stripe-block-size ""
-TEST ! $CLI volume set $V0 stripe-block-size " "
-TEST $CLI volume set $V0 stripe-block-size 512MB
-EXPECT "512MB" volume_option $V0 cluster.stripe-block-size
-
 #bug-782095 - validate performance cache min/max size value
 
 ## setting performance cache min size as 2MB
