-rw-r--r--  tests/basic/tier/tierd_check.t             | 76 ++++++++++++-----
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c | 13 +++-
2 files changed, 67 insertions(+), 22 deletions(-)
diff --git a/tests/basic/tier/tierd_check.t b/tests/basic/tier/tierd_check.t
index 2b8ccbbbd96..1f88ea0b72e 100644
--- a/tests/basic/tier/tierd_check.t
+++ b/tests/basic/tier/tierd_check.t
@@ -3,20 +3,24 @@
 . $(dirname $0)/../../include.rc
 . $(dirname $0)/../../volume.rc
 . $(dirname $0)/../../tier.rc
+. $(dirname $0)/../../cluster.rc
 
 
 # Creates a tiered volume with pure distribute hot and cold tiers
 # Both hot and cold tiers will have an equal number of bricks.
 
+function check_peers {
+    $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
 function create_dist_tier_vol () {
-        mkdir $B0/cold
-        mkdir $B0/hot
-        TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{1..3}
-        TEST $CLI volume set $V0 performance.quick-read off
-        TEST $CLI volume set $V0 performance.io-cache off
-        TEST $CLI volume start $V0
-        TEST $CLI volume attach-tier $V0 $H0:$B0/hot/${V0}{1..2}
-        TEST $CLI volume set $V0 cluster.tier-mode test
+        TEST $CLI_1 volume create $V0 $H1:$B1/${V0} $H2:$B2/${V0}
+        TEST $CLI_1 volume start $V0
+        TEST $CLI_1 volume attach-tier $V0 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2
+}
+
+function tier_status () {
+        $CLI_1 volume tier $V0 status | grep progress | wc -l
 }
 
 function tier_deamon_kill () {
@@ -26,38 +30,74 @@ echo "$?"
 
 cleanup;
 
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume status
+#setup cluster and test volume
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
 #Create and start a tiered volume
 create_dist_tier_vol
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 tier_daemon_check
 
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 tier_deamon_kill
 
-TEST $CLI volume tier $V0 start
+TEST $CLI_1 volume tier $V0 start
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check
 
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_deamon_kill
 
-TEST $CLI volume tier $V0 start force
+TEST $CLI_3 volume tier $V0 start force
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check
 
-TEST $CLI volume tier $V0 start force
+#The pattern progress should occur twice only.
+#it shouldn't come up on the third node without tierd even
+#after the tier start force is issued on the node without
+#tierd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
+#kill the node on which tier is not supposed to run
+TEST kill_node 3
+
+#bring the node back, it should not have tierd running on it
+TEST $glusterd_3;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
+#after volume restart, check for tierd
+
+TEST $CLI_3 volume stop $V0
+
+TEST $CLI_3 volume start $V0
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
+#check for detach start and stop
+
+TEST $CLI_3 volume tier $V0 detach start
+
+TEST $CLI_3 volume tier $V0 detach stop
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
+TEST $CLI_1 volume tier $V0 start force
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check
 
-# To test fordetach start fail while the brick is down
+# To test for detach start fail while the brick is down
 
-TEST pkill -f "$B0/hot/$V0"
+TEST pkill -f "$B1/$V0"
 
-TEST ! $CLI volume tier $V0 detach start
+TEST ! $CLI_1 volume tier $V0 detach start
 
 cleanup
 
 #G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
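
An aside on the helper this test adds: "gluster volume tier <vol> status" prints one "progress" row per node on which tierd is running, so counting those rows counts tierd instances. Below is a minimal standalone sketch of the same check, outside the include.rc/cluster.rc harness; the volume name and direct CLI invocation are assumptions for illustration, not part of the patch.

#!/bin/bash
# Illustrative sketch only: count tierd instances by counting
# "progress" rows in the tier status output.
V0=${V0:-patchy}    # hypothetical volume name

count_tierd_nodes () {
        gluster volume tier "$V0" status | grep -c progress
}

# Only the two brick-hosting nodes should run tierd, so the expected
# count stays at 2 even after 'tier start force' on the third node.
[ "$(count_tierd_nodes)" -eq 2 ] && echo "tierd placement OK"

The test itself wraps the same grep in EXPECT_WITHIN, which retries the function until $PROCESS_UP_TIMEOUT expires instead of sampling once.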
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 43a4898536b..99b01e32915 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -7225,6 +7225,8 @@ glusterd_volume_defrag_restart (glusterd_volinfo_t *volinfo, char *op_errstr,
         case GF_DEFRAG_STATUS_NOT_STARTED:
                 ret = glusterd_handle_defrag_start (volinfo, op_errstr, len,
                                 cmd, cbk, volinfo->rebal.op);
+                if (ret)
+                        volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_FAILED;
                 break;
         default:
                 gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -7236,6 +7238,7 @@ glusterd_volume_defrag_restart (glusterd_volinfo_t *volinfo, char *op_errstr,
         }
 out:
         return ret;
+
 }
 
 void
@@ -7297,9 +7300,6 @@ glusterd_restart_rebalance_for_volume (glusterd_volinfo_t *volinfo)
         int             ret = -1;
         char          op_errstr[PATH_MAX];
 
-        if (!volinfo->rebal.defrag_cmd)
-                return -1;
-
         if (!gd_should_i_start_rebalance (volinfo)) {
 
                 /* Store the rebalance-id and rebalance command even if
@@ -7310,11 +7310,17 @@ glusterd_restart_rebalance_for_volume (glusterd_volinfo_t *volinfo)
                  * Storing this is needed for having 'volume status'
                  * work correctly.
                  */
+                volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
                 if (volinfo->type == GF_CLUSTER_TYPE_TIER)
                         glusterd_store_perform_node_state_store (volinfo);
 
                 return 0;
         }
+        if (!volinfo->rebal.defrag_cmd) {
+                volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_FAILED;
+                return -1;
+        }
+
         ret = glusterd_volume_defrag_restart (volinfo, op_errstr, PATH_MAX,
                                 volinfo->rebal.defrag_cmd,
                                 volinfo->rebal.op == GD_OP_REMOVE_BRICK ?
@@ -7329,7 +7335,6 @@ glusterd_restart_rebalance_for_volume (glusterd_volinfo_t *volinfo)
                         volinfo->decommission_in_progress = 1;
                 }
         }
-
         return ret;
 }
 int
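
For readers skimming the glusterd-utils.c hunks, here is a minimal standalone model of the reordered restart logic. The types and the should_start parameter are simplifications invented for this sketch, not glusterd's real structs or signatures (the op_errstr plumbing and glusterd_handle_defrag_start are omitted): the "should I run this daemon?" check now comes first, so a node that is not meant to host the daemon records NOT_STARTED and succeeds, while a node that should host it but has no stored defrag command records FAILED.

/* Standalone model of the restart decision; simplified, hypothetical
 * types -- not glusterd's real API. */
#include <stdio.h>
#include <stdbool.h>

typedef enum {
        DEFRAG_STATUS_NOT_STARTED,
        DEFRAG_STATUS_STARTED,
        DEFRAG_STATUS_FAILED,
} defrag_status_t;

typedef struct {
        int             defrag_cmd;    /* 0: no stored rebalance command */
        defrag_status_t defrag_status;
} rebal_info_t;

static int
restart_rebalance (rebal_info_t *rebal, bool should_start)
{
        if (!should_start) {
                /* Not this node's job: record NOT_STARTED and succeed,
                 * so 'volume status' stays truthful. */
                rebal->defrag_status = DEFRAG_STATUS_NOT_STARTED;
                return 0;
        }

        if (!rebal->defrag_cmd) {
                /* Supposed to run, but nothing to restart: FAILED. */
                rebal->defrag_status = DEFRAG_STATUS_FAILED;
                return -1;
        }

        rebal->defrag_status = DEFRAG_STATUS_STARTED;
        return 0;
}

int
main (void)
{
        rebal_info_t r = { 0, DEFRAG_STATUS_NOT_STARTED };

        /* A peer without bricks of this volume: clean no-op. */
        printf ("peer node: ret=%d status=%d\n",
                restart_rebalance (&r, false), r.defrag_status);

        /* A brick-hosting node with no stored command: explicit failure. */
        printf ("brick node, no cmd: ret=%d status=%d\n",
                restart_rebalance (&r, true), r.defrag_status);
        return 0;
}

The ordering is the point of the patch: previously the !defrag_cmd guard ran first and returned -1 even on peers that were never supposed to run the daemon, leaving a stale defrag status behind for 'volume status' to report.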
