From 521626a952a0259b7cf4a1b43205b954d0252fbc Mon Sep 17 00:00:00 2001
From: Kaushal M
Date: Thu, 2 Nov 2017 13:26:01 +0530
Subject: cli: Fix coverity errors for cli-rpc-ops.c

Fixes issues 147, 168, 169, 219, 715, 718, 766, 768, 772, 774, 776,
782, 790 from the report at [1].

Also fixed some other possible static checker errors.

[1]: https://download.gluster.org/pub/gluster/glusterfs/static-analysis/master/glusterfs-coverity/2017-10-30-9aa574a5/html/

BUG: 789278
Change-Id: I985cea1ef787d239b2632d5a7f467070846f92e4
Signed-off-by: Kaushal M
---
 cli/src/cli-rpc-ops.c | 140 ++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 106 insertions(+), 34 deletions(-)

diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 0015a1198b7..a6efffc1207 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -294,6 +294,11 @@ gf_cli_output_peer_status (dict_t *dict, int count)
                         ret = gf_cli_output_peer_hostnames (dict,
                                                             hostname_count,
                                                             key);
+                        if (ret) {
+                                gf_log ("cli", GF_LOG_WARNING,
+                                        "error outputting peer other names");
+                                goto out;
+                        }
                 }
                 i++;
         }
@@ -413,8 +418,6 @@ gf_cli_list_friends_cbk (struct rpc_req *req, struct iovec *iov,
         gf_log ("cli", GF_LOG_DEBUG, "Received resp to list: %d",
                 rsp.op_ret);
 
-        ret = rsp.op_ret;
-
         if (!rsp.op_ret) {
 
                 if (!rsp.friends.friends_len) {
@@ -784,7 +787,6 @@ gf_cli_print_tier_info (dict_t *dict, int i, int brick_count)
                 goto out;
 
         cli_out ("Hot Tier :");
-        vol_type = hot_type;
         hot_dist_count = (hot_replica_count ?
                           hot_replica_count : 1);
 
@@ -1381,7 +1383,6 @@ gf_cli3_1_uuid_reset_cbk (struct rpc_req *req, struct iovec *iov,
         int                   ret   = -1;
         cli_local_t          *local = NULL;
         call_frame_t         *frame = NULL;
-        dict_t               *dict  = NULL;
 
         GF_ASSERT (myframe);
 
@@ -1407,7 +1408,7 @@ gf_cli3_1_uuid_reset_cbk (struct rpc_req *req, struct iovec *iov,
         gf_log ("cli", GF_LOG_INFO, "Received resp to uuid reset");
 
         if (global_state->mode & GLUSTER_MODE_XML) {
-                ret = cli_xml_output_dict ("uuidReset", dict, rsp.op_ret,
+                ret = cli_xml_output_dict ("uuidReset", NULL, rsp.op_ret,
                                            rsp.op_errno, rsp.op_errstr);
                 if (ret)
                         gf_log ("cli", GF_LOG_ERROR,
@@ -1427,8 +1428,6 @@ out:
         cli_local_wipe (local);
         if (rsp.dict.dict_val)
                 free (rsp.dict.dict_val);
-        if (dict)
-                dict_unref (dict);
 
         gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
         return ret;
@@ -2005,6 +2004,12 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
             (cmd == GF_DEFRAG_CMD_STATUS_TIER)) &&
             !(global_state->mode & GLUSTER_MODE_XML)) {
                 ret = dict_get_str (dict, GF_REBALANCE_TID_KEY, &task_id_str);
+                if (ret) {
+                        gf_log ("cli", GF_LOG_WARNING,
+                                "failed to get %s from dict",
+                                GF_REBALANCE_TID_KEY);
+                        goto out;
+                }
                 if (rsp.op_ret && strcmp (rsp.op_errstr, "")) {
                         snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
                 } else {
@@ -2318,6 +2323,11 @@ gf_cli_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
         }
 
         ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR,
+                        "failed to unserialize volume set response dict");
+                goto out;
+        }
 
         /* For brick processes graph change does not happen on the fly.
          * The process has to be restarted. So this is a check from the
@@ -3548,6 +3558,12 @@ print_quota_list_output (cli_local_t *local, char *path, char *default_sl,
         if (limit_set) {
                 if (limits->sl < 0) {
                         ret = gf_string2percent (default_sl, &sl_num);
+                        if (ret) {
+                                gf_log ("cli", GF_LOG_ERROR,
+                                        "could not convert default soft limit"
+                                        " to percent");
+                                goto out;
+                        }
                         sl_num = (sl_num * limits->hl) / 100;
                         sl_final = default_sl;
                 } else {
@@ -3584,7 +3600,7 @@ print_quota_list_output (cli_local_t *local, char *path, char *default_sl,
                                              sl_final, limits, used_space,
                                              sl, hl, sl_num, limit_set);
 
-
+out:
         return ret;
 }
 
@@ -3717,8 +3733,6 @@ gf_cli_print_limit_list_from_dict (cli_local_t *local, char *volname,
         char             key[1024]           = {0,};
         char             mountdir[PATH_MAX]  = {0,};
         char            *path                = NULL;
-        gf_boolean_t     xml_err_flag        = _gf_false;
-        char             err_str[NAME_MAX]   = {0,};
         int              type                = -1;
 
         if (!dict|| count <= 0)
@@ -3761,13 +3775,6 @@ gf_cli_print_limit_list_from_dict (cli_local_t *local, char *volname,
         }
 
 out:
-        if (xml_err_flag) {
-                ret = cli_xml_output_str ("volQuota", NULL, -1, 0, err_str);
-                if (ret) {
-                        gf_log ("cli", GF_LOG_ERROR, "Error outputting in xml "
-                                "format");
-                }
-        }
         return ret;
 }
 
@@ -4456,7 +4463,7 @@ gf_cli_list_friends (call_frame_t *frame, xlator_t *this,
                               (xdrproc_t) xdr_gf1_cli_peer_list_req);
 
 out:
-        if (ret) {
+        if (ret && frame) {
                 /*
                  * If everything goes fine, gf_cli_list_friends_cbk()
                  * [invoked through cli_cmd_submit()]resets the
@@ -4587,6 +4594,11 @@ gf_cli_get_volume (call_frame_t *frame, xlator_t *this,
 
         ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
                                            &req.dict.dict_len);
+        if (ret) {
+                gf_log (frame->this->name, GF_LOG_ERROR,
+                        "failed to serialize dict");
+                goto out;
+        }
 
         ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
                               GLUSTER_CLI_GET_VOLUME, NULL,
@@ -5602,7 +5614,7 @@ gf_cli_gsync_config_command (dict_t *dict)
                 op_name = NULL;
 
         ret = dict_get_str (dict, "conf_path", &confpath);
-        if (!confpath) {
+        if (ret || !confpath) {
                 ret = snprintf (conf_path, sizeof(conf_path) - 1,
                                 "%s/"GEOREP"/gsyncd_template.conf", gwd);
                 conf_path[ret] = '\0';
@@ -6179,8 +6191,6 @@ gf_cli_gsync_set_cbk (struct rpc_req *req, struct iovec *iov,
         ret = dict_get_str (dict, "gsync-status", &gsync_status);
         if (!ret)
                 cli_out ("%s", gsync_status);
-        else
-                ret = 0;
 
         ret = dict_get_int32 (dict, "type", &type);
         if (ret) {
@@ -6381,6 +6391,10 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval)
                 snprintf (key, sizeof (key), "%d-%d-read-%d", count,
                           interval, (1 << i));
                 ret = dict_get_uint64 (dict, key, &rb_counts[i]);
+                if (ret) {
+                        gf_log ("cli", GF_LOG_DEBUG,
+                                "failed to get %s from dict", key);
+                }
         }
 
         for (i = 0; i < 32; i++) {
@@ -6388,12 +6402,20 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval)
                 snprintf (key, sizeof (key), "%d-%d-write-%d", count,
                           interval, (1<