summaryrefslogtreecommitdiffstats
path: root/tox.ini
diff options
context:
space:
mode:
Diffstat (limited to 'tox.ini')
0 files changed, 0 insertions, 0 deletions
GF_LOG_INFO, "Received resp to probe"); if (rsp.op_errstr && (strlen(rsp.op_errstr) > 0)) { snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); if (rsp.op_ret) gf_log("cli", GF_LOG_ERROR, "%s", msg); } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_str(NULL, (rsp.op_ret) ? NULL : msg, rsp.op_ret, rsp.op_errno, (rsp.op_ret) ? msg : NULL); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (!rsp.op_ret) cli_out("peer probe: success. %s", msg); else cli_err("peer probe: failed: %s", msg); ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); gf_free_xdr_cli_rsp(rsp); return ret; } int gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; char msg[1024] = { 0, }; GF_ASSERT(myframe); if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); // rsp.op_ret = -1; // rsp.op_errno = EINVAL; goto out; } gf_log("cli", GF_LOG_INFO, "Received resp to deprobe"); if (rsp.op_ret) { if (strlen(rsp.op_errstr) > 0) { snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); gf_log("cli", GF_LOG_ERROR, "%s", rsp.op_errstr); } } else { snprintf(msg, sizeof(msg), "success"); } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_str(NULL, (rsp.op_ret) ? NULL : msg, rsp.op_ret, rsp.op_errno, (rsp.op_ret) ? msg : NULL); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (!rsp.op_ret) cli_out("peer detach: %s", msg); else cli_err("peer detach: failed: %s", msg); ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); gf_free_xdr_cli_rsp(rsp); return ret; } int gf_cli_output_peer_hostnames(dict_t *dict, int count, char *prefix) { int ret = -1; char key[256] = { 0, }; int i = 0; char *hostname = NULL; cli_out("Other names:"); /* Starting from friend.hostname1, as friend.hostname0 will be the same * as friend.hostname */ for (i = 1; i < count; i++) { snprintf(key, sizeof(key), "%s.hostname%d", prefix, i); ret = dict_get_str(dict, key, &hostname); if (ret) break; cli_out("%s", hostname); hostname = NULL; } return ret; } int gf_cli_output_peer_status(dict_t *dict, int count) { int ret = -1; char *uuid_buf = NULL; char *hostname_buf = NULL; int32_t i = 1; char key[256] = { 0, }; char *state = NULL; int32_t connected = 0; char *connected_str = NULL; int hostname_count = 0; cli_out("Number of Peers: %d", count); i = 1; while (i <= count) { snprintf(key, 256, "friend%d.uuid", i); ret = dict_get_str(dict, key, &uuid_buf); if (ret) goto out; snprintf(key, 256, "friend%d.hostname", i); ret = dict_get_str(dict, key, &hostname_buf); if (ret) goto out; snprintf(key, 256, "friend%d.connected", i); ret = dict_get_int32(dict, key, &connected); if (ret) goto out; if (connected) connected_str = "Connected"; else connected_str = "Disconnected"; snprintf(key, 256, "friend%d.state", i); ret = dict_get_str(dict, key, &state); if (ret) goto out; cli_out("\nHostname: %s\nUuid: %s\nState: %s (%s)", hostname_buf, uuid_buf, state, connected_str); snprintf(key, sizeof(key), "friend%d.hostname_count", i); ret = dict_get_int32(dict, key, &hostname_count); /* Print other addresses only if there are more than 1. */ if ((ret == 0) && (hostname_count > 1)) { snprintf(key, sizeof(key), "friend%d", i); ret = gf_cli_output_peer_hostnames(dict, hostname_count, key); if (ret) { gf_log("cli", GF_LOG_WARNING, "error outputting peer other names"); goto out; } } i++; } ret = 0; out: return ret; } int gf_cli_output_pool_list(dict_t *dict, int count) { int ret = -1; char *uuid_buf = NULL; char *hostname_buf = NULL; int32_t hostname_len = 8; /*min len 8 chars*/ int32_t i = 1; char key[256] = { 0, }; int32_t connected = 0; char *connected_str = NULL; if (count <= 0) goto out; while (i <= count) { snprintf(key, 256, "friend%d.hostname", i); ret = dict_get_str(dict, key, &hostname_buf); if (ret) goto out; ret = strlen(hostname_buf); if (ret > hostname_len) hostname_len = ret; i++; } cli_out("UUID\t\t\t\t\t%-*s\tState", hostname_len, "Hostname"); i = 1; while (i <= count) { snprintf(key, 256, "friend%d.uuid", i); ret = dict_get_str(dict, key, &uuid_buf); if (ret) goto out; snprintf(key, 256, "friend%d.hostname", i); ret = dict_get_str(dict, key, &hostname_buf); if (ret) goto out; snprintf(key, 256, "friend%d.connected", i); ret = dict_get_int32(dict, key, &connected); if (ret) goto out; if (connected) connected_str = "Connected"; else connected_str = "Disconnected"; cli_out("%s\t%-*s\t%s ", uuid_buf, hostname_len, hostname_buf, connected_str); i++; } ret = 0; out: return ret; } /* function pointer for gf_cli_output_{pool_list,peer_status} */ typedef int (*cli_friend_output_fn)(dict_t *, int); int gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf1_cli_peer_list_rsp rsp = { 0, }; int ret = -1; dict_t *dict = NULL; char msg[1024] = { 0, }; char *cmd = NULL; cli_friend_output_fn friend_output_fn; call_frame_t *frame = NULL; unsigned long flags = 0; GF_ASSERT(myframe); frame = myframe; flags = (long)frame->local; if (flags == GF_CLI_LIST_POOL_NODES) { cmd = "pool list"; friend_output_fn = &gf_cli_output_pool_list; } else { cmd = "peer status"; friend_output_fn = &gf_cli_output_peer_status; } /* 'free' the flags set by gf_cli_list_friends */ frame->local = NULL; if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_peer_list_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); // rsp.op_ret = -1; // rsp.op_errno = EINVAL; goto out; } gf_log("cli", GF_LOG_DEBUG, "Received resp to list: %d", rsp.op_ret); if (!rsp.op_ret) { if (!rsp.friends.friends_len) { snprintf(msg, sizeof(msg), "%s: No peers present", cmd); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno, msg); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } cli_err("%s", msg); ret = 0; goto out; } dict = dict_new(); if (!dict) { ret = -1; goto out; } ret = dict_unserialize(rsp.friends.friends_val, rsp.friends.friends_len, &dict); if (ret) { gf_log("", GF_LOG_ERROR, "Unable to allocate memory"); goto out; } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno, msg); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } ret = dict_get_int32(dict, "count", &count); if (ret) { goto out; } ret = friend_output_fn(dict, count); if (ret) { goto out; } } else { if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno, NULL); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); } else { ret = -1; } goto out; } ret = 0; out: if (ret) cli_err("%s: failed", cmd); cli_cmd_broadcast_response(ret); if (dict) dict_unref(dict); if (rsp.friends.friends_val) { free(rsp.friends.friends_val); } return ret; } int gf_cli_get_state_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; dict_t *dict = NULL; char *daemon_name = NULL; char *ofilepath = NULL; GF_VALIDATE_OR_GOTO("cli", myframe, out); if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } dict = dict_new(); if (!dict) { ret = -1; goto out; } ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret) goto out; if (rsp.op_ret) { if (strcmp(rsp.op_errstr, "")) cli_err("Failed to get daemon state: %s", rsp.op_errstr); else cli_err( "Failed to get daemon state. Check glusterd" " log file for more details"); } else { ret = dict_get_str(dict, "daemon", &daemon_name); if (ret) gf_log("cli", GF_LOG_ERROR, "Couldn't get daemon name"); ret = dict_get_str(dict, "ofilepath", &ofilepath); if (ret) gf_log("cli", GF_LOG_ERROR, "Couldn't get filepath"); if (daemon_name && ofilepath) cli_out("%s state dumped to %s", daemon_name, ofilepath); } ret = rsp.op_ret; out: if (dict) dict_unref(dict); cli_cmd_broadcast_response(ret); gf_free_xdr_cli_rsp(rsp); return ret; } void cli_out_options(char *substr, char *optstr, char *valstr) { char *ptr1 = NULL; char *ptr2 = NULL; ptr1 = substr; ptr2 = optstr; while (ptr1) { /* Avoiding segmentation fault. */ if (!ptr2) return; if (*ptr1 != *ptr2) break; ptr1++; ptr2++; } if (*ptr2 == '\0') return; cli_out("%s: %s", ptr2, valstr); } static int _gf_cli_output_volinfo_opts(dict_t *d, char *k, data_t *v, void *tmp) { int ret = 0; char *key = NULL; char *ptr = NULL; data_t *value = NULL; key = tmp; ptr = strstr(k, "option."); if (ptr) { value = v; if (!value) { ret = -1; goto out; } cli_out_options(key, k, v->data); } out: return ret; } static int print_brick_details(dict_t *dict, int volcount, int start_index, int end_index, int replica_count) { char key[1024] = { 0, }; int index = start_index; int isArbiter = 0; int ret = -1; char *brick = NULL; #ifdef HAVE_BD_XLATOR char *caps = NULL; #endif while (index <= end_index) { snprintf(key, 1024, "volume%d.brick%d", volcount, index); ret = dict_get_str(dict, key, &brick); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter", volcount, index); if (dict_get(dict, key)) isArbiter = 1; else isArbiter = 0; if (isArbiter) cli_out("Brick%d: %s (arbiter)", index, brick); else cli_out("Brick%d: %s", index, brick); #ifdef HAVE_BD_XLATOR snprintf(key, 1024, "volume%d.vg%d", volcount, index); ret = dict_get_str(dict, key, &caps); if (!ret) cli_out("Brick%d VG: %s", index, caps); #endif index++; } ret = 0; out: return ret; } void gf_cli_print_number_of_bricks(int type, int brick_count, int dist_count, int stripe_count, int replica_count, int disperse_count, int redundancy_count, int arbiter_count) { if (type == GF_CLUSTER_TYPE_NONE || type == GF_CLUSTER_TYPE_TIER) { cli_out("Number of Bricks: %d", brick_count); } else if (type == GF_CLUSTER_TYPE_DISPERSE) { cli_out("Number of Bricks: %d x (%d + %d) = %d", (brick_count / dist_count), disperse_count - redundancy_count, redundancy_count, brick_count); } else { /* For both replicate and stripe, dist_count is good enough */ if (arbiter_count == 0) { cli_out("Number of Bricks: %d x %d = %d", (brick_count / dist_count), dist_count, brick_count); } else { cli_out("Number of Bricks: %d x (%d + %d) = %d", (brick_count / dist_count), dist_count - arbiter_count, arbiter_count, brick_count); } } } int gf_cli_print_tier_info(dict_t *dict, int i, int brick_count) { int hot_brick_count = -1; int cold_type = 0; int cold_brick_count = 0; int cold_replica_count = 0; int cold_arbiter_count = 0; int cold_disperse_count = 0; int cold_redundancy_count = 0; int cold_dist_count = 0; int hot_type = 0; int hot_replica_count = 0; int hot_dist_count = 0; int ret = -1; int vol_type = -1; char key[256] = { 0, }; GF_ASSERT(dict); snprintf(key, sizeof(key), "volume%d.cold_brick_count", i); ret = dict_get_int32(dict, key, &cold_brick_count); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.cold_type", i); ret = dict_get_int32(dict, key, &cold_type); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.cold_dist_count", i); ret = dict_get_int32(dict, key, &cold_dist_count); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.cold_replica_count", i); ret = dict_get_int32(dict, key, &cold_replica_count); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.cold_arbiter_count", i); ret = dict_get_int32(dict, key, &cold_arbiter_count); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.cold_disperse_count", i); ret = dict_get_int32(dict, key, &cold_disperse_count); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.cold_redundancy_count", i); ret = dict_get_int32(dict, key, &cold_redundancy_count); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.hot_brick_count", i); ret = dict_get_int32(dict, key, &hot_brick_count); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.hot_type", i); ret = dict_get_int32(dict, key, &hot_type); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.hot_replica_count", i); ret = dict_get_int32(dict, key, &hot_replica_count); if (ret) goto out; cli_out("Hot Tier :"); hot_dist_count = (hot_replica_count ? hot_replica_count : 1); vol_type = get_vol_type(hot_type, hot_dist_count, hot_brick_count); cli_out("Hot Tier Type : %s", vol_type_str[vol_type]); gf_cli_print_number_of_bricks(hot_type, hot_brick_count, hot_dist_count, 0, hot_replica_count, 0, 0, 0); ret = print_brick_details(dict, i, 1, hot_brick_count, hot_replica_count); if (ret) goto out; cli_out("Cold Tier:"); vol_type = get_vol_type(cold_type, cold_dist_count, cold_brick_count); cli_out("Cold Tier Type : %s", vol_type_str[vol_type]); gf_cli_print_number_of_bricks(cold_type, cold_brick_count, cold_dist_count, 0, cold_replica_count, cold_disperse_count, cold_redundancy_count, cold_arbiter_count); ret = print_brick_details(dict, i, hot_brick_count + 1, brick_count, cold_replica_count); if (ret) goto out; out: return ret; } int gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; int opt_count = 0; int32_t i = 0; int32_t j = 1; int32_t status = 0; int32_t type = 0; int32_t brick_count = 0; int32_t dist_count = 0; int32_t stripe_count = 0; int32_t replica_count = 0; int32_t disperse_count = 0; int32_t redundancy_count = 0; int32_t arbiter_count = 0; int32_t snap_count = 0; int32_t vol_type = 0; int32_t transport = 0; char *volume_id_str = NULL; char *volname = NULL; dict_t *dict = NULL; cli_local_t *local = NULL; char key[1024] = {0}; char err_str[2048] = {0}; gf_cli_rsp rsp = {0}; char *caps __attribute__((unused)) = NULL; int k __attribute__((unused)) = 0; call_frame_t *frame = NULL; GF_ASSERT(myframe); if (-1 == req->rpc_status) goto out; frame = myframe; GF_ASSERT(frame->local); local = frame->local; ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log("cli", GF_LOG_INFO, "Received resp to get vol: %d", rsp.op_ret); if (!rsp.dict.dict_len) { if (global_state->mode & GLUSTER_MODE_XML) goto xml_output; cli_err("No volumes present"); ret = 0; goto out; } dict = dict_new(); if (!dict) { ret = -1; goto out; } ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret) { gf_log("cli", GF_LOG_ERROR, "Unable to allocate memory"); goto out; } ret = dict_get_int32(dict, "count", &count); if (ret) goto out; if (!count) { switch (local->get_vol.flags) { case GF_CLI_GET_NEXT_VOLUME: GF_FREE(local->get_vol.volname); local->get_vol.volname = NULL; ret = 0; goto out; case GF_CLI_GET_VOLUME: snprintf(err_str, sizeof(err_str), "Volume %s does not exist", local->get_vol.volname); ret = -1; if (!(global_state->mode & GLUSTER_MODE_XML)) goto out; } } if (rsp.op_ret) { if (global_state->mode & GLUSTER_MODE_XML) goto xml_output; ret = -1; goto out; } xml_output: if (global_state->mode & GLUSTER_MODE_XML) { /* For GET_NEXT_VOLUME output is already begun in * and will also end in gf_cli_get_next_volume() */ if (local->get_vol.flags == GF_CLI_GET_VOLUME) { ret = cli_xml_output_vol_info_begin(local, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) { gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } } if (dict) { ret = cli_xml_output_vol_info(local, dict); if (ret) { gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } } if (local->get_vol.flags == GF_CLI_GET_VOLUME) { ret = cli_xml_output_vol_info_end(local); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); } goto out; } while (i < count) { cli_out(" "); snprintf(key, 256, "volume%d.name", i); ret = dict_get_str(dict, key, &volname); if (ret) goto out; snprintf(key, 256, "volume%d.type", i); ret = dict_get_int32(dict, key, &type); if (ret) goto out; snprintf(key, 256, "volume%d.status", i); ret = dict_get_int32(dict, key, &status); if (ret) goto out; snprintf(key, 256, "volume%d.brick_count", i); ret = dict_get_int32(dict, key, &brick_count); if (ret) goto out; snprintf(key, 256, "volume%d.dist_count", i); ret = dict_get_int32(dict, key, &dist_count); if (ret) goto out; snprintf(key, 256, "volume%d.stripe_count", i); ret = dict_get_int32(dict, key, &stripe_count); if (ret) goto out; snprintf(key, 256, "volume%d.replica_count", i); ret = dict_get_int32(dict, key, &replica_count); if (ret) goto out; snprintf(key, 256, "volume%d.disperse_count", i); ret = dict_get_int32(dict, key, &disperse_count); if (ret) goto out; snprintf(key, 256, "volume%d.redundancy_count", i); ret = dict_get_int32(dict, key, &redundancy_count); if (ret) goto out; snprintf(key, sizeof(key), "volume%d.arbiter_count", i); ret = dict_get_int32(dict, key, &arbiter_count); if (ret) goto out; snprintf(key, 256, "volume%d.transport", i); ret = dict_get_int32(dict, key, &transport); if (ret) goto out; snprintf(key, 256, "volume%d.volume_id", i); ret = dict_get_str(dict, key, &volume_id_str); if (ret) goto out; snprintf(key, 256, "volume%d.snap_count", i); ret = dict_get_int32(dict, key, &snap_count); if (ret) goto out; // Distributed (stripe/replicate/stripe-replica) setups vol_type = get_vol_type(type, dist_count, brick_count); cli_out("Volume Name: %s", volname); cli_out("Type: %s", vol_type_str[vol_type]); cli_out("Volume ID: %s", volume_id_str); cli_out("Status: %s", cli_vol_status_str[status]); cli_out("Snapshot Count: %d", snap_count); #ifdef HAVE_BD_XLATOR k = 0; snprintf(key, sizeof(key), "volume%d.xlator%d", i, k); ret = dict_get_str(dict, key, &caps); if (ret) goto next; do { j = 0; cli_out("Xlator %d: %s", k + 1, caps); do { snprintf(key, sizeof(key), "volume%d.xlator%d.caps%d", i, k, j++); ret = dict_get_str(dict, key, &caps); if (ret) break; cli_out("Capability %d: %s", j, caps); } while (1); snprintf(key, sizeof(key), "volume%d.xlator%d", i, ++k); ret = dict_get_str(dict, key, &caps); if (ret) break; } while (1); next: #endif gf_cli_print_number_of_bricks( type, brick_count, dist_count, stripe_count, replica_count, disperse_count, redundancy_count, arbiter_count); cli_out("Transport-type: %s", ((transport == 0) ? "tcp" : (transport == 1) ? "rdma" : "tcp,rdma")); j = 1; GF_FREE(local->get_vol.volname); local->get_vol.volname = gf_strdup(volname); if (type == GF_CLUSTER_TYPE_TIER) { ret = gf_cli_print_tier_info(dict, i, brick_count); if (ret) goto out; } else { cli_out("Bricks:"); ret = print_brick_details(dict, i, j, brick_count, replica_count); if (ret) goto out; } snprintf(key, 256, "volume%d.opt_count", i); ret = dict_get_int32(dict, key, &opt_count); if (ret) goto out; if (!opt_count) goto out; cli_out("Options Reconfigured:"); snprintf(key, 256, "volume%d.option.", i); ret = dict_foreach(dict, _gf_cli_output_volinfo_opts, key); if (ret) goto out; i++; } ret = 0; out: if (ret) cli_err("%s", err_str); cli_cmd_broadcast_response(ret); if (dict) dict_unref(dict); gf_free_xdr_cli_rsp(rsp); gf_log("cli", GF_LOG_DEBUG, "Returning: %d", ret); return ret; } int gf_cli_create_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; cli_local_t *local = NULL; char *volname = NULL; dict_t *rsp_dict = NULL; call_frame_t *frame = NULL; GF_ASSERT(myframe); if (-1 == req->rpc_status) { goto out; } frame = myframe; GF_ASSERT(frame->local); local = frame->local; ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log("cli", GF_LOG_INFO, "Received resp to create volume"); ret = dict_get_str(local->dict, "volname", &volname); if (ret) goto out; if (global_state->mode & GLUSTER_MODE_XML) { if (rsp.op_ret == 0) { rsp_dict = dict_new(); ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization"); goto out; } } ret = cli_xml_output_vol_create(rsp_dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp(rsp.op_errstr, "")) cli_err("volume create: %s: failed: %s", volname, rsp.op_errstr); else if (rsp.op_ret) cli_err("volume create: %s: failed", volname); else cli_out( "volume create: %s: success: " "please start the volume to access data", volname); ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); gf_free_xdr_cli_rsp(rsp); if (rsp_dict) dict_unref(rsp_dict); return ret; } int gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; cli_local_t *local = NULL; char *volname = NULL; call_frame_t *frame = NULL; dict_t *rsp_dict = NULL; GF_ASSERT(myframe); if (-1 == req->rpc_status) { goto out; } frame = myframe; GF_ASSERT(frame->local); local = frame->local; ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } ret = dict_get_str(local->dict, "volname", &volname); if (ret) { gf_log(frame->this->name, GF_LOG_ERROR, "dict get failed"); goto out; } gf_log("cli", GF_LOG_INFO, "Received resp to delete volume"); if (global_state->mode & GLUSTER_MODE_XML) { if (rsp.op_ret == 0) { rsp_dict = dict_new(); ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization"); goto out; } } ret = cli_xml_output_generic_volume("volDelete", rsp_dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp(rsp.op_errstr, "")) cli_err("volume delete: %s: failed: %s", volname, rsp.op_errstr); else if (rsp.op_ret) cli_err("volume delete: %s: failed", volname); else cli_out("volume delete: %s: success", volname); ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); gf_free_xdr_cli_rsp(rsp); if (rsp_dict) dict_unref(rsp_dict); gf_log("", GF_LOG_DEBUG, "Returning with %d", ret); return ret; } int gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { char *uuid_str = NULL; gf_cli_rsp rsp = { 0, }; int ret = -1; cli_local_t *local = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; GF_ASSERT(myframe); if (-1 == req->rpc_status) goto out; frame = myframe; GF_ASSERT(frame->local); local = frame->local; ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } frame->local = NULL; gf_log("cli", GF_LOG_INFO, "Received resp to uuid get"); dict = dict_new(); if (!dict) { ret = -1; goto out; } ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret) { gf_log("cli", GF_LOG_ERROR, "Failed to unserialize " "response for uuid get"); goto out; } ret = dict_get_str(dict, "uuid", &uuid_str); if (ret) { gf_log("cli", GF_LOG_ERROR, "Failed to get uuid " "from dictionary"); goto out; } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_dict("uuidGenerate", dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret) { if (strcmp(rsp.op_errstr, "") == 0) cli_err("Get uuid was unsuccessful"); else cli_err("%s", rsp.op_errstr); } else { cli_out("UUID: %s", uuid_str); } ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); cli_local_wipe(local); gf_free_xdr_cli_rsp(rsp); if (dict) dict_unref(dict); gf_log("", GF_LOG_DEBUG, "Returning with %d", ret); return ret; } int gf_cli3_1_uuid_reset_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; cli_local_t *local = NULL; call_frame_t *frame = NULL; GF_ASSERT(myframe); if (-1 == req->rpc_status) { goto out; } frame = myframe; GF_ASSERT(frame->local); local = frame->local; ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } frame->local = NULL; gf_log("cli", GF_LOG_INFO, "Received resp to uuid reset"); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_dict("uuidReset", NULL, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp(rsp.op_errstr, "")) cli_err("%s", rsp.op_errstr); else cli_out("resetting the peer uuid has been %s", (rsp.op_ret) ? "unsuccessful" : "successful"); ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); cli_local_wipe(local); gf_free_xdr_cli_rsp(rsp); gf_log("", GF_LOG_DEBUG, "Returning with %d", ret); return ret; } int gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; cli_local_t *local = NULL; char *volname = NULL; call_frame_t *frame = NULL; dict_t *rsp_dict = NULL; GF_ASSERT(myframe); if (-1 == req->rpc_status) { goto out; } frame = myframe; GF_ASSERT(frame->local); local = frame->local; ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } ret = dict_get_str(local->dict, "volname", &volname); if (ret) { gf_log("cli", GF_LOG_ERROR, "dict get failed"); goto out; } gf_log("cli", GF_LOG_INFO, "Received resp to start volume"); if (global_state->mode & GLUSTER_MODE_XML) { if (rsp.op_ret == 0) { rsp_dict = dict_new(); ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization"); goto out; } } ret = cli_xml_output_generic_volume("volStart", rsp_dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp(rsp.op_errstr, "")) cli_err("volume start: %s: failed: %s", volname, rsp.op_errstr); else if (rsp.op_ret) cli_err("volume start: %s: failed", volname); else cli_out("volume start: %s: success", volname); ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); gf_free_xdr_cli_rsp(rsp); if (rsp_dict) dict_unref(rsp_dict); return ret; } int gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; cli_local_t *local = NULL; char *volname = NULL; call_frame_t *frame = NULL; dict_t *rsp_dict = NULL; GF_ASSERT(myframe); if (-1 == req->rpc_status) { goto out; } frame = myframe; GF_ASSERT(frame->local); local = frame->local; ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } ret = dict_get_str(local->dict, "volname", &volname); if (ret) { gf_log(frame->this->name, GF_LOG_ERROR, "Unable to get volname from dict"); goto out; } gf_log("cli", GF_LOG_INFO, "Received resp to stop volume"); if (global_state->mode & GLUSTER_MODE_XML) { if (rsp.op_ret == 0) { rsp_dict = dict_new(); ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization"); goto out; } } ret = cli_xml_output_generic_volume("volStop", rsp_dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp(rsp.op_errstr, "")) cli_err("volume stop: %s: failed: %s", volname, rsp.op_errstr); else if (rsp.op_ret) cli_err("volume stop: %s: failed", volname); else cli_out("volume stop: %s: success", volname); ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); gf_free_xdr_cli_rsp(rsp); if (rsp_dict) dict_unref(rsp_dict); return ret; } int gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type, gf_boolean_t is_tier) { int ret = -1; int count = 0; int i = 1; char key[256] = { 0, }; gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; uint64_t files = 0; uint64_t size = 0; uint64_t lookup = 0; char *node_name = NULL; uint64_t failures = 0; uint64_t skipped = 0; double elapsed = 0; char *status_str = NULL; char *size_str = NULL; int32_t hrs = 0; uint32_t min = 0; uint32_t sec = 0; gf_boolean_t down = _gf_false; gf_boolean_t fix_layout = _gf_false; uint64_t max_time = 0; uint64_t max_elapsed = 0; uint64_t time_left = 0; gf_boolean_t show_estimates = _gf_false; ret = dict_get_int32(dict, "count", &count); if (ret) { gf_log("cli", GF_LOG_ERROR, "count not set"); goto out; } snprintf(key, sizeof(key), "status-1"); ret = dict_get_int32(dict, key, (int32_t *)&status_rcd); if (ret) { gf_log("cli", GF_LOG_TRACE, "count %d %d", count, 1); gf_log("cli", GF_LOG_TRACE, "failed to get status"); goto out; } /* Fix layout will be sent to all nodes for the volume so every status should be of type GF_DEFRAG_STATUS_LAYOUT_FIX* */ if ((task_type == GF_TASK_TYPE_REBALANCE) && (status_rcd >= GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED)) { fix_layout = _gf_true; } if (fix_layout) { cli_out("%35s %41s %27s", "Node", "status", "run time in h:m:s"); cli_out("%35s %41s %27s", "---------", "-----------", "------------"); } else { cli_out("%40s %16s %13s %13s %13s %13s %20s %18s", "Node", "Rebalanced-files", "size", "scanned", "failures", "skipped", "status", "run time in" " h:m:s"); cli_out("%40s %16s %13s %13s %13s %13s %20s %18s", "---------", "-----------", "-----------", "-----------", "-----------", "-----------", "------------", "--------------"); } for (i = 1; i <= count; i++) { /* Reset the variables to prevent carryover of values */ node_name = NULL; files = 0; size = 0; lookup = 0; skipped = 0; status_str = NULL; elapsed = 0; time_left = 0; /* Check if status is NOT_STARTED, and continue early */ snprintf(key, sizeof(key), "status-%d", i); ret = dict_get_int32(dict, key, (int32_t *)&status_rcd); if (ret == -ENOENT) { gf_log("cli", GF_LOG_TRACE, "count %d %d", count, i); gf_log("cli", GF_LOG_TRACE, "failed to get status"); gf_log("cli", GF_LOG_ERROR, "node down and has failed" " to set dict"); down = _gf_true; continue; /* skip this node if value not available*/ } else if (ret) { gf_log("cli", GF_LOG_TRACE, "count %d %d", count, i); gf_log("cli", GF_LOG_TRACE, "failed to get status"); continue; /* skip this node if value not available*/ } if (GF_DEFRAG_STATUS_NOT_STARTED == status_rcd) continue; if (GF_DEFRAG_STATUS_STARTED == status_rcd) show_estimates = _gf_true; snprintf(key, 256, "node-name-%d", i); ret = dict_get_str(dict, key, &node_name); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get node-name"); snprintf(key, sizeof(key), "files-%d", i); ret = dict_get_uint64(dict, key, &files); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get file count"); snprintf(key, sizeof(key), "size-%d", i); ret = dict_get_uint64(dict, key, &size); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get size of xfer"); snprintf(key, sizeof(key), "lookups-%d", i); ret = dict_get_uint64(dict, key, &lookup); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get lookedup file count"); snprintf(key, sizeof(key), "failures-%d", i); ret = dict_get_uint64(dict, key, &failures); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get failures count"); snprintf(key, sizeof(key), "skipped-%d", i); ret = dict_get_uint64(dict, key, &skipped); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get skipped count"); /* For remove-brick include skipped count into failure count*/ if (task_type != GF_TASK_TYPE_REBALANCE) { failures += skipped; skipped = 0; } snprintf(key, sizeof(key), "run-time-%d", i); ret = dict_get_double(dict, key, &elapsed); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get run-time"); snprintf(key, sizeof(key), "time-left-%d", i); ret = dict_get_uint64(dict, key, &time_left); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get time left"); if (elapsed > max_elapsed) max_elapsed = elapsed; if (time_left > max_time) max_time = time_left; /* Check for array bound */ if (status_rcd >= GF_DEFRAG_STATUS_MAX) status_rcd = GF_DEFRAG_STATUS_MAX; status_str = cli_vol_task_status_str[status_rcd]; size_str = gf_uint64_2human_readable(size); hrs = elapsed / 3600; min = ((uint64_t)elapsed % 3600) / 60; sec = ((uint64_t)elapsed % 3600) % 60; if (fix_layout) { cli_out("%35s %50s %8d:%d:%d", node_name, status_str, hrs, min, sec); } else { if (size_str) { cli_out("%40s %16" PRIu64 " %13s" " %13" PRIu64 " %13" PRIu64 " %13" PRIu64 " %20s " "%8d:%02d:%02d", node_name, files, size_str, lookup, failures, skipped, status_str, hrs, min, sec); } else { cli_out("%40s %16" PRIu64 " %13" PRIu64 " %13" PRIu64 " %13" PRIu64 " %13" PRIu64 " %20s" " %8d:%02d:%02d", node_name, files, size, lookup, failures, skipped, status_str, hrs, min, sec); } } GF_FREE(size_str); } if (is_tier && down) cli_out( "WARNING: glusterd might be down on one or more nodes." " Please check the nodes that are down using \'gluster" " peer status\' and start the glusterd on those nodes," " else tier detach commit might fail!"); /* Max time will be non-zero if rebalance is still running */ if (max_time) { hrs = max_time / 3600; min = (max_time % 3600) / 60; sec = (max_time % 3600) % 60; if (hrs < REBAL_ESTIMATE_SEC_UPPER_LIMIT) { cli_out( "Estimated time left for rebalance to " "complete : %8d:%02d:%02d", hrs, min, sec); } else { cli_out( "Estimated time left for rebalance to " "complete : > 2 months. Please try again " "later."); } } else { /* Rebalance will return 0 if it could not calculate the * estimates or if it is complete. */ if (!show_estimates) { goto out; } if (max_elapsed <= REBAL_ESTIMATE_START_TIME) { cli_out( "The estimated time for rebalance to complete " "will be unavailable for the first 10 " "minutes."); } else { cli_out( "Rebalance estimated time unavailable. Please " "try again later."); } } out: return ret; } int gf_cli_print_tier_status(dict_t *dict, enum gf_task_types task_type) { int ret = -1; int count = 0; int i = 1; uint64_t promoted = 0; uint64_t demoted = 0; char key[256] = { 0, }; char *node_name = NULL; gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; char *status_str = NULL; gf_boolean_t down = _gf_false; double elapsed = 0; int hrs = 0; int min = 0; int sec = 0; ret = dict_get_int32(dict, "count", &count); if (ret) { gf_log("cli", GF_LOG_ERROR, "count not set"); goto out; } cli_out("%-20s %-20s %-20s %-20s %-20s", "Node", "Promoted files", "Demoted files", "Status", "run time in h:m:s"); cli_out("%-20s %-20s %-20s %-20s %-20s", "---------", "---------", "---------", "---------", "---------"); for (i = 1; i <= count; i++) { /* Reset the variables to prevent carryover of values */ node_name = NULL; promoted = 0; demoted = 0; /* Check if status is NOT_STARTED, and continue early */ snprintf(key, sizeof(key), "status-%d", i); ret = dict_get_int32(dict, key, (int32_t *)&status_rcd); if (ret == -ENOENT) { gf_log("cli", GF_LOG_TRACE, "count: %d, %d," "failed to get status", count, i); gf_log("cli", GF_LOG_ERROR, "node down and has failed" " to set dict"); down = _gf_true; continue; /*skipping this node as value unavailable*/ } else if (ret) { gf_log("cli", GF_LOG_TRACE, "count: %d, %d," "failed to get status", count, i); continue; } if (GF_DEFRAG_STATUS_NOT_STARTED == status_rcd) continue; snprintf(key, sizeof(key), "node-name-%d", i); ret = dict_get_str(dict, key, &node_name); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get node-name"); snprintf(key, sizeof(key), "promoted-%d", i); ret = dict_get_uint64(dict, key, &promoted); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get promoted count"); snprintf(key, sizeof(key), "demoted-%d", i); ret = dict_get_uint64(dict, key, &demoted); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get demoted count"); snprintf(key, sizeof(key), "run-time-%d", i); ret = dict_get_double(dict, key, &elapsed); if (ret) gf_log("cli", GF_LOG_TRACE, "failed to get run-time"); /* Check for array bound */ if (status_rcd >= GF_DEFRAG_STATUS_MAX) status_rcd = GF_DEFRAG_STATUS_MAX; hrs = elapsed / 3600; min = ((int)elapsed % 3600) / 60; sec = ((int)elapsed % 3600) % 60; status_str = cli_vol_task_status_str[status_rcd]; cli_out("%-20s %-20" PRIu64 " %-20" PRIu64 " %-20s" " %d:%d:%d", node_name, promoted, demoted, status_str, hrs, min, sec); } if (down) cli_out( "WARNING: glusterd might be down on one or more nodes." " Please check the nodes that are down using \'gluster" " peer status\' and start the glusterd on those nodes."); out: return ret; } int gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; cli_local_t *local = NULL; char *volname = NULL; call_frame_t *frame = NULL; int cmd = 0; int ret = -1; dict_t *dict = NULL; char msg[1024] = { 0, }; char *task_id_str = NULL; if (-1 == req->rpc_status) { goto out; } GF_ASSERT(myframe); frame = myframe; GF_ASSERT(frame->local); local = frame->local; ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } ret = dict_get_str(local->dict, "volname", &volname); if (ret) { gf_log(frame->this->name, GF_LOG_ERROR, "Failed to get volname"); goto out; } ret = dict_get_int32(local->dict, "rebalance-command", (int32_t *)&cmd); if (ret) { gf_log("cli", GF_LOG_ERROR, "Failed to get command"); goto out; } if (rsp.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new(); ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret < 0) { gf_log("glusterd", GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); goto out; } } if (!((cmd == GF_DEFRAG_CMD_STOP) || (cmd == GF_DEFRAG_CMD_STATUS) || (cmd == GF_DEFRAG_CMD_STATUS_TIER)) && !(global_state->mode & GLUSTER_MODE_XML)) { ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str); if (ret) { gf_log("cli", GF_LOG_WARNING, "failed to get %s from dict", GF_REBALANCE_TID_KEY); goto out; } if (rsp.op_ret && strcmp(rsp.op_errstr, "")) { snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); } else { if (!rsp.op_ret) { /* append errstr in the cli msg for successful * case since unlock failures can be highlighted * event though rebalance command was successful */ if (cmd == GF_DEFRAG_CMD_START_TIER) { snprintf(msg, sizeof(msg), "Tier " "start is successful on %s.", volname); } else if (cmd == GF_DEFRAG_CMD_STOP_TIER) { snprintf(msg, sizeof(msg), "Tier " "daemon stopped " "on %s.", volname); } else { snprintf(msg, sizeof(msg), "Rebalance on %s has been " "started successfully. Use " "rebalance status command to" " check status of the " "rebalance process.\nID: %s", volname, task_id_str); } } else { snprintf(msg, sizeof(msg), "Starting rebalance on volume %s has " "been unsuccessful.", volname); } } goto done; } if (cmd == GF_DEFRAG_CMD_STOP) { if (rsp.op_ret == -1) { if (strcmp(rsp.op_errstr, "")) snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); else snprintf(msg, sizeof(msg), "rebalance volume %s stop failed", volname); goto done; } else { /* append errstr in the cli msg for successful case * since unlock failures can be highlighted event though * rebalance command was successful */ snprintf(msg, sizeof(msg), "rebalance process may be in the middle of a " "file migration.\nThe process will be fully " "stopped once the migration of the file is " "complete.\nPlease check rebalance process " "for completion before doing any further " "brick related tasks on the volume.\n%s", rsp.op_errstr); } } if (cmd == GF_DEFRAG_CMD_STATUS || cmd == GF_DEFRAG_CMD_STATUS_TIER) { if (rsp.op_ret == -1) { if (strcmp(rsp.op_errstr, "")) snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); else snprintf(msg, sizeof(msg), "Failed to get the status of " "rebalance process"); goto done; } else { snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); } } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_rebalance(cmd, dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); goto out; } if (cmd == GF_DEFRAG_CMD_STATUS_TIER) ret = gf_cli_print_tier_status(dict, GF_TASK_TYPE_REBALANCE); else if (cmd == GF_DEFRAG_CMD_DETACH_STATUS) ret = gf_cli_print_rebalance_status(dict, GF_TASK_TYPE_REBALANCE, _gf_true); else ret = gf_cli_print_rebalance_status(dict, GF_TASK_TYPE_REBALANCE, _gf_false); if (ret) gf_log("cli", GF_LOG_ERROR, "Failed to print rebalance status"); done: if (global_state->mode & GLUSTER_MODE_XML) cli_xml_output_str("volRebalance", msg, rsp.op_ret, rsp.op_errno, rsp.op_errstr); else { if (rsp.op_ret) if (cmd == GF_DEFRAG_CMD_START_TIER || cmd == GF_DEFRAG_CMD_STATUS_TIER) { cli_err( "Tiering Migration Functionality: %s:" " failed%s%s", volname, strlen(msg) ? ": " : "", msg); } else cli_err("volume rebalance: %s: failed%s%s", volname, strlen(msg) ? ": " : "", msg); else if (cmd == GF_DEFRAG_CMD_START_TIER || cmd == GF_DEFRAG_CMD_STATUS_TIER) { cli_out( "Tiering Migration Functionality: %s:" " success%s%s", volname, strlen(msg) ? ": " : "", msg); } else cli_out("volume rebalance: %s: success%s%s", volname, strlen(msg) ? ": " : "", msg); } ret = rsp.op_ret; out: gf_free_xdr_cli_rsp(rsp); if (dict) dict_unref(dict); cli_cmd_broadcast_response(ret); return ret; } int gf_cli_rename_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; char msg[1024] = { 0, }; GF_ASSERT(myframe); if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log("cli", GF_LOG_INFO, "Received resp to probe"); snprintf(msg, sizeof(msg), "Rename volume %s", (rsp.op_ret) ? "unsuccessful" : "successful"); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_str("volRename", msg, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret) cli_err("volume rename: failed"); else cli_out("volume rename: success"); ret = rsp.op_ret; out: cli_cmd_broadcast_response(ret); gf_free_xdr_cli_rsp(rsp); return ret; } int gf_cli_reset_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = { 0, }; int ret = -1; char msg[1024] = { 0, }; GF_ASSERT(myframe); if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log("cli", GF_LOG_INFO, "Received resp to reset"); if (strc