| author | hari <hgowtham@redhat.com> | 2016-04-06 16:16:47 +0530 |
|---|---|---|
| committer | Kaushal M <kaushal@redhat.com> | 2016-04-19 03:13:17 -0700 |
| commit | 690715bb3e341f673a71d1dc8b80bb35aa29c75d (patch) | |
| tree | e41e670253cd635cfdff047f64174d25791fc817 | |
| parent | abd47f27848c9bb2bf5bc371367c3d41f526ad50 (diff) | |
Tier: tier command fails with an error message when any node is down
PROBLEM: the dict does not get populated for a node that is down,
so while printing the output on the CLI we get an ENOENT,
which results in a "tier command failed" error.
FIX: this patch skips any node that was not available
and carries on with the next node, for both tier status
and tier detach status.
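
For illustration only (not part of the patch): a minimal, self-contained C sketch of the control-flow change described above — on -ENOENT the loop skips the missing node and keeps printing instead of bailing out with "goto out". The node_status() helper below is a hypothetical stand-in for the per-node dict_get_int32() lookup.

```c
/* Minimal sketch of the control flow introduced by this patch; it is not
 * Gluster code. node_status() is a hypothetical stand-in for the per-node
 * dict_get_int32() lookup, which fails with -ENOENT when a node was down
 * and never populated its "status-<i>" entry. */
#include <errno.h>
#include <stdio.h>

static int node_status (int node, int *status)
{
        if (node == 1)            /* pretend node 1 is down */
                return -ENOENT;   /* entry never set in the dict */
        *status = 0;              /* dummy "completed" status */
        return 0;
}

int main (void)
{
        int status = 0;
        int ret    = 0;
        int i      = 0;

        for (i = 0; i < 3; i++) {
                ret = node_status (i, &status);
                if (ret == -ENOENT) {
                        /* Old behaviour: "goto out", failing the whole
                         * command. New behaviour: skip this node and keep
                         * printing the remaining ones. */
                        fprintf (stderr, "node %d down, skipping\n", i);
                        continue;
                } else if (ret) {
                        fprintf (stderr, "node %d: lookup failed\n", i);
                        continue;
                }
                printf ("node %d status: %d\n", i, status);
        }
        return 0;
}
```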
Change-Id: I718a034b18b109748ec67f3ace56540c50650d23
BUG: 1324439
Signed-off-by: hari <hgowtham@redhat.com>
Reviewed-on: http://review.gluster.org/13918
Smoke: Gluster Build System <jenkins@build.gluster.com>
Tested-by: hari gowtham <hari.gowtham005@gmail.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
-rw-r--r-- | cli/src/cli-rpc-ops.c | 23
-rw-r--r-- | tests/basic/tier/new-tier-cmds.t | 64
2 files changed, 63 insertions, 24 deletions
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 3bc21ff77d5..223ec4260aa 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1579,10 +1579,18 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
 
                 snprintf (key, 256, "status-%d", i);
                 ret = dict_get_int32 (dict, key, (int32_t *)&status_rcd);
-                if (ret) {
+                if (ret == -ENOENT) {
                         gf_log ("cli", GF_LOG_TRACE, "count %d %d", count, i);
                         gf_log ("cli", GF_LOG_TRACE, "failed to get status");
-                        goto out;
+                        gf_log ("cli", GF_LOG_ERROR, "node down and has failed"
+                                " to set dict");
+                        continue;
+                        /* skip this node if value not available*/
+                } else if (ret) {
+                        gf_log ("cli", GF_LOG_TRACE, "count %d %d", count, i);
+                        gf_log ("cli", GF_LOG_TRACE, "failed to get status");
+                        continue;
+                        /* skip this node if value not available*/
                 }
 
                 if (GF_DEFRAG_STATUS_NOT_STARTED == status_rcd)
@@ -1704,10 +1712,17 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
 
                 snprintf (key, 256, "status-%d", i);
                 ret = dict_get_int32 (dict, key, (int32_t *)&status_rcd);
-                if (ret) {
+                if (ret == -ENOENT) {
                         gf_log ("cli", GF_LOG_TRACE, "count: %d, %d,"
                                 "failed to get status", count, i);
-                        goto out;
+                        gf_log ("cli", GF_LOG_ERROR, "node down and has failed"
+                                " to set dict");
+                        continue;
+                        /*skipping this node as value unavailable*/
+                } else if (ret) {
+                        gf_log ("cli", GF_LOG_TRACE, "count: %d, %d,"
+                                "failed to get status", count, i);
+                        continue;
                 }
 
                 if (GF_DEFRAG_STATUS_NOT_STARTED == status_rcd)
diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index 6c78f139156..e4fef69d265 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -3,33 +3,42 @@
 . $(dirname $0)/../../include.rc
 . $(dirname $0)/../../volume.rc
 . $(dirname $0)/../../tier.rc
+. $(dirname $0)/../../cluster.rc
 
 # Creates a tiered volume with pure distribute hot and cold tiers
 # Both hot and cold tiers will have an equal number of bricks.
 
+function check_peers {
+        $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
 function create_dist_tier_vol () {
-        mkdir $B0/cold
-        mkdir $B0/hot
-        TEST $CLI volume create $V0 disperse 6 disperse-data 4 $H0:$B0/cold/${V0}{1..12}
-        TEST $CLI volume set $V0 performance.quick-read off
-        TEST $CLI volume set $V0 performance.io-cache off
-        TEST $CLI volume start $V0
-        TEST $CLI volume attach-tier $V0 replica 2 $H0:$B0/hot/${V0}{0..5}
-        TEST $CLI volume set $V0 cluster.tier-mode test
+        TEST $CLI_1 volume create $V0 $H1:$B1/${V0} $H2:$B2/${V0} $H3:$B3/${V0}
+        TEST $CLI_1 volume start $V0
+        TEST $CLI_1 volume attach-tier $V0 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3
 }
 
 function tier_detach_commit () {
-        $CLI volume tier $V0 detach commit | grep "success" | wc -l
+        $CLI_1 volume tier $V0 detach commit | grep "success" | wc -l
+}
+
+function tier_detach_status_node_down () {
+        $CLI_1 volume tier $V0 detach status | wc -l
+}
+
+function tier_status_node_down () {
+        $CLI_1 volume tier $V0 status | wc -l
 }
 
 cleanup;
 
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume status
+#setup cluster and test volume
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
 #Create and start a tiered volume
 create_dist_tier_vol
@@ -37,21 +46,36 @@ create_dist_tier_vol
 
 #Issue detach tier on the tiered volume
 #Will throw error saying detach tier not started
-EXPECT "Tier command failed" $CLI volume tier $V0 detach status
+EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status
 
 #after starting detach tier the detach tier status should display the status
-TEST $CLI volume tier $V0 detach start
+TEST $CLI_1 volume tier $V0 detach start
+
+TEST $CLI_1 volume tier $V0 detach status
+
+#kill a node
+TEST kill_node 2
+
+#check if we have the rest of the node available printed in the output of detach status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" tier_detach_status_node_down
+
+#check if we have the rest of the node available printed in the output of tier status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "5" tier_status_node_down
+
+TEST $glusterd_2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
-TEST $CLI volume tier $V0 detach status
+TEST $CLI_1 volume tier $V0 detach status
 
-TEST $CLI volume tier $V0 detach stop
+TEST $CLI_1 volume tier $V0 detach stop
 
 #If detach tier is stopped the detach tier command will fail
-EXPECT "Tier command failed" $CLI volume tier $V0 detach status
+EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status
 
-TEST $CLI volume tier $V0 detach start
+TEST $CLI_1 volume tier $V0 detach start
 
 #wait for the detach to complete
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_commit
@@ -59,7 +83,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_commit
 
 #If detach tier is committed then the detach status should fail throwing an error
 #saying its not a tiered volume
-EXPECT "Tier command failed" $CLI volume tier $V0 detach status
+EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status
 
 cleanup;