-rw-r--r--   cli/src/cli-rpc-ops.c              30
-rw-r--r--   tests/basic/tier/new-tier-cmds.t   16
-rw-r--r--   tests/tier.rc                      13
3 files changed, 40 insertions, 19 deletions
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 360e4eb3a18..b768b0ef668 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1531,7 +1531,8 @@ out:
 }
 
 int
-gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
+gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type,
+                               gf_boolean_t is_tier)
 {
         int                ret          = -1;
         int                count        = 0;
@@ -1550,6 +1551,7 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
         int                hrs          = 0;
         int                min          = 0;
         int                sec          = 0;
+        gf_boolean_t       down         = _gf_false;
 
         ret = dict_get_int32 (dict, "count", &count);
         if (ret) {
@@ -1584,6 +1586,7 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
                         gf_log ("cli", GF_LOG_TRACE, "failed to get status");
                         gf_log ("cli", GF_LOG_ERROR, "node down and has failed"
                                 " to set dict");
+                        down = _gf_true;
                         continue;
                         /* skip this node if value not available*/
                 } else if (ret) {
@@ -1672,6 +1675,11 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
                 }
                 GF_FREE(size_str);
         }
+        if (is_tier && down)
+                cli_out ("WARNING: glusterd might be down on one or more nodes."
+                         " Please check the nodes that are down using \'gluster"
+                         " peer status\' and start the glusterd on those nodes,"
+                         " else tier detach commit might fail!");
 out:
         return ret;
 }
@@ -1689,6 +1697,7 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
         gf_defrag_status_t status_rcd   = GF_DEFRAG_STATUS_NOT_STARTED;
         char               *status_str  = NULL;
         char               *size_str    = NULL;
+        gf_boolean_t       down         = _gf_false;
 
         ret = dict_get_int32 (dict, "count", &count);
         if (ret) {
@@ -1717,6 +1726,7 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
                                 "failed to get status", count, i);
                         gf_log ("cli", GF_LOG_ERROR, "node down and has failed"
                                 " to set dict");
+                        down = _gf_true;
                         continue;
                         /*skipping this node as value unavailable*/
                 } else if (ret) {
@@ -1755,8 +1765,11 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
                 status_str = cli_vol_task_status_str[status_rcd];
                 cli_out ("%-20s %-20"PRIu64" %-20"PRIu64" %-20s",
                          node_name, promoted, demoted, status_str);
-
         }
+        if (down)
+                cli_out ("WARNING: glusterd might be down on one or more nodes."
+                         " Please check the nodes that are down using \'gluster"
+                         " peer status\' and start the glusterd on those nodes.");
 out:
         return ret;
 }
@@ -1914,9 +1927,14 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
 
         if (cmd == GF_DEFRAG_CMD_STATUS_TIER)
                 ret = gf_cli_print_tier_status (dict, GF_TASK_TYPE_REBALANCE);
+        else if (cmd == GF_DEFRAG_CMD_DETACH_STATUS)
+                ret = gf_cli_print_rebalance_status (dict,
+                                                     GF_TASK_TYPE_REBALANCE,
+                                                     _gf_true);
         else
                 ret = gf_cli_print_rebalance_status (dict,
-                                                     GF_TASK_TYPE_REBALANCE);
+                                                     GF_TASK_TYPE_REBALANCE,
+                                                     _gf_false);
 
         if (ret)
                 gf_log ("cli", GF_LOG_ERROR,
@@ -2533,7 +2551,8 @@ xml_output:
                 goto out;
         }
 
-        ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK);
+        ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK,
+                                             _gf_true);
         if (ret) {
                 gf_log ("cli", GF_LOG_ERROR, "Failed to print remove-brick "
                         "rebalance status");
@@ -2718,7 +2737,8 @@ xml_output:
                 goto out;
         }
 
-        ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK);
+        ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK,
+                                             _gf_false);
         if (ret) {
                 gf_log ("cli", GF_LOG_ERROR, "Failed to print remove-brick "
                         "rebalance status");
diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index e4fef69d265..dbfac54938e 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -19,18 +19,6 @@ function create_dist_tier_vol () {
         TEST $CLI_1 volume attach-tier $V0 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3
 }
 
-function tier_detach_commit () {
-	$CLI_1 volume tier $V0 detach commit | grep "success" | wc -l
-}
-
-function tier_detach_status_node_down () {
-        $CLI_1 volume tier $V0 detach status | wc -l
-}
-
-function tier_status_node_down () {
-	$CLI_1 volume tier $V0 status | wc -l
-}
-
 cleanup;
 
 #setup cluster and test volume
@@ -58,10 +46,10 @@ TEST $CLI_1 volume tier $V0 detach status
 
 TEST kill_node 2
 
 #check if we have the rest of the node available printed in the output of detach status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" tier_detach_status_node_down
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status_node_down
 
 #check if we have the rest of the node available printed in the output of tier status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "5" tier_status_node_down
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_status_node_down
 
 TEST $glusterd_2;
diff --git a/tests/tier.rc b/tests/tier.rc
index ee37e0704fa..69512c3fb4e 100644
--- a/tests/tier.rc
+++ b/tests/tier.rc
@@ -134,3 +134,16 @@ function rebalance_run_time () {
     echo $total;
 }
+
+function tier_detach_commit () {
+	$CLI_1 volume tier $V0 detach commit | grep "success" | wc -l
+}
+
+function tier_detach_status_node_down () {
+        $CLI_1 volume tier $V0 detach status | grep "WARNING" | wc -l
+}
+
+function tier_status_node_down () {
+	$CLI_1 volume tier $V0 status | grep "WARNING" | wc -l
+}
+
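
Note: the following is a minimal standalone C sketch, not GlusterFS code, that mirrors the control flow this patch adds to gf_cli_print_rebalance_status() and gf_cli_print_tier_status(): keep printing the nodes that did report, remember that at least one node failed to report (its glusterd is likely down), and emit a single WARNING line after the loop. The node_status struct, node names, and print_tier_status() helper below are invented for illustration only.

/* Standalone illustration of the "down flag + single WARNING" pattern. */
#include <stdbool.h>
#include <stdio.h>

struct node_status {
        const char *name;
        const char *status;   /* NULL means the node did not report */
};

static void
print_tier_status (const struct node_status *nodes, int count, bool is_tier)
{
        bool down = false;
        int  i    = 0;

        for (i = 0; i < count; i++) {
                if (nodes[i].status == NULL) {
                        /* same idea as "down = _gf_true; continue;" in the patch */
                        down = true;
                        continue;
                }
                printf ("%-20s %-20s\n", nodes[i].name, nodes[i].status);
        }

        /* one warning for the whole run, which is what the updated
         * tier_detach_status_node_down/tier_status_node_down helpers
         * count via grep "WARNING" | wc -l */
        if (is_tier && down)
                printf ("WARNING: glusterd might be down on one or more nodes.\n");
}

int
main (void)
{
        struct node_status nodes[] = {
                { "localhost", "in progress" },
                { "node2",     NULL },          /* simulated down node */
                { "node3",     "in progress" },
        };

        print_tier_status (nodes, 3, true);
        return 0;
}

With one node not reporting, this prints the two reporting nodes plus exactly one WARNING line, matching the EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" checks in new-tier-cmds.t above.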
