From 09198e203ece6925791a8a3a6121c5f808e4e873 Mon Sep 17 00:00:00 2001
From: Yaniv Kaul
Date: Mon, 3 Sep 2018 13:55:01 +0300
Subject: Some (mgmt) xlators: use dict_{setn|getn|deln|get_int32n|set_int32n|set_strn}

In a previous patch (https://review.gluster.org/20769) we added the key
length to be passed to the dict_* funcs, removing the need to strlen() the
key.

This patch moves some xlators to use it.
- It also adds dict_get_int32n, which was missing.
- It also reduces the size of some key variables.
  They were set to 1024 bytes or PATH_MAX, where sometimes 64 bytes were
  really enough.

Please review carefully:
1. That I did not reduce the size of any key variable too much.
2. That I did not mix up any keys.

Compile-tested only!

Change-Id: Ic729baf179f40e8d02bc2350491d4bb9b6934266
updates: bz#1193929
Signed-off-by: Yaniv Kaul
---
 xlators/mgmt/glusterd/src/glusterd-mgmt.c | 37 +++++++++++++++++++------------
 1 file changed, 23 insertions(+), 14 deletions(-)

(limited to 'xlators/mgmt/glusterd/src/glusterd-mgmt.c')

diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index a924dd3425b..2714478cda6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -396,7 +396,9 @@ gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
                                 "tier detach commit failed.");
                         goto out;
                 }
-                ret = dict_get_int32 (dict, "rebalance-command", &cmd);
+                ret = dict_get_int32n (dict, "rebalance-command",
+                                       SLEN ("rebalance-command"),
+                                       &cmd);
                 if (ret) {
                         gf_msg_debug (this->name, 0, "cmd not found");
                         goto out;
                 }
@@ -477,7 +479,8 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
         }
         case GD_OP_ADD_BRICK:
         {
-                ret = dict_get_str (dict, "volname", &volname);
+                ret = dict_get_strn (dict, "volname",
+                                     SLEN ("volname"), &volname);
                 if (ret) {
                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                 GD_MSG_DICT_GET_FAILED, "Unable to get"
@@ -505,7 +508,8 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
         }
         case GD_OP_START_VOLUME:
         {
-                ret = dict_get_str (dict, "volname", &volname);
+                ret = dict_get_strn (dict, "volname",
+                                     SLEN ("volname"), &volname);
                 if (ret) {
                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                 GD_MSG_DICT_GET_FAILED, "Unable to get"
@@ -532,7 +536,8 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
         }
         case GD_OP_STOP_VOLUME:
         {
-                ret = dict_get_str (dict, "volname", &volname);
+                ret = dict_get_strn (dict, "volname",
+                                     SLEN ("volname"), &volname);
                 if (ret) {
                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                 GD_MSG_DICT_GET_FAILED, "Unable to get"
@@ -551,7 +556,8 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
         }
         case GD_OP_ADD_TIER_BRICK:
         {
-                ret = dict_get_str (dict, "volname", &volname);
+                ret = dict_get_strn (dict, "volname",
+                                     SLEN ("volname"), &volname);
                 if (ret) {
                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                 GD_MSG_DICT_GET_FAILED, "Unable to get"
@@ -574,7 +580,8 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
                                               GLUSTERD_VOLINFO_VER_AC_INCREMENT);
                 if (ret)
                         goto out;
-                ret = dict_get_str (dict, "volname", &volname);
+                ret = dict_get_strn (dict, "volname",
+                                     SLEN ("volname"), &volname);
                 if (ret) {
                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                 GD_MSG_DICT_GET_FAILED, "Unable to get"
@@ -1196,7 +1203,8 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
         case GD_OP_RESET_BRICK:
         case GD_OP_ADD_TIER_BRICK:
         {
-                ret = dict_get_str (dict, "volname", &volname);
+                ret = dict_get_strn (dict, "volname",
+                                     SLEN ("volname"), &volname);
                 if (ret) {
                         gf_msg (this->name, GF_LOG_CRITICAL, errno,
                                 GD_MSG_DICT_GET_FAILED,
@@ -1684,15 +1692,15 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
                 if (!peerinfo->connected) {
                         if (op == GD_OP_TIER_STATUS || op ==
                                         GD_OP_DETACH_TIER_STATUS) {
-                                ret = dict_get_int32 (args.dict, "count",
-                                                      &count);
+                                ret = dict_get_int32n (args.dict, "count",
+                                                       SLEN ("count"), &count);
                                 if (ret)
                                         gf_msg (this->name, GF_LOG_ERROR, 0,
                                                 GD_MSG_DICT_GET_FAILED,
                                                 "failed to get index");
                                 count++;
-                                ret = dict_set_int32 (args.dict, "count",
-                                                      count);
+                                ret = dict_set_int32n (args.dict, "count",
+                                                       SLEN ("count"), count);
                                 if (ret)
                                         gf_msg (this->name, GF_LOG_ERROR, 0,
                                                 GD_MSG_DICT_GET_FAILED,
@@ -2330,7 +2338,7 @@ glusterd_set_barrier_value (dict_t *dict, char *option)
          * As of now only snapshot of single volume is supported,
          * Hence volname1 is directly fetched
          */
-        ret = dict_get_str (dict, "volname1", &volname);
+        ret = dict_get_strn (dict, "volname1", SLEN ("volname1"), &volname);
         if (ret) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,
                         GD_MSG_DICT_GET_FAILED, "Volname not present in "
@@ -2435,7 +2443,8 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
         }

         /* Marking the operation as complete synctasked */
-        ret = dict_set_int32 (dict, "is_synctasked", _gf_true);
+        ret = dict_set_int32n (dict, "is_synctasked",
+                               SLEN ("is_synctasked"), _gf_true);
         if (ret) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,
                         GD_MSG_DICT_SET_FAILED,
@@ -2530,7 +2539,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
            above and along with it the originator glusterd also goes down?
            Who will initiate the cleanup?
         */
-        ret = dict_set_int32 (req_dict, "cleanup", 1);
+        ret = dict_set_int32n (req_dict, "cleanup", SLEN ("cleanup"), 1);
         if (ret) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,
                         GD_MSG_DICT_SET_FAILED, "failed to set dict");
--
cgit
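
For review context, the sketch below illustrates the conversion pattern the
patch applies throughout: the literal key and its compile-time length are
passed together, so the dict code no longer has to strlen() the key on every
lookup or store. This is a minimal illustration and not part of the patch:
the helper fetch_volname_example() is hypothetical, the <glusterfs/dict.h>
include path is an assumption (in-tree code picks up dict.h via the
libglusterfs headers), and SLEN() is assumed to expand to a compile-time
sizeof (str) - 1 for a string literal. The same mechanical substitution
covers the dict_get_int32n and dict_set_int32n calls in the hunks above.

    /* Minimal sketch of the dict_* -> dict_*n conversion, under the
     * assumptions stated above. */
    #include <glusterfs/dict.h>   /* dict_t, dict_get_strn (), dict_set_int32n () */

    static int
    fetch_volname_example (dict_t *dict, char **volname)
    {
            int ret = -1;

            /* Before: dict_get_str () recomputes the key length internally:
             *     ret = dict_get_str (dict, "volname", volname);
             * After: the length of the literal key is passed explicitly. */
            ret = dict_get_strn (dict, "volname", SLEN ("volname"), volname);
            if (ret)
                    return ret;

            /* Setters follow the same pattern, e.g. the "cleanup" flag above. */
            return dict_set_int32n (dict, "cleanup", SLEN ("cleanup"), 1);
    }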