summaryrefslogtreecommitdiffstats
path: root/cli/src/cli.c
diff options
context:
space:
mode:
Diffstat (limited to 'cli/src/cli.c')
-rw-r--r-- cli/src/cli.c | 16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/cli/src/cli.c b/cli/src/cli.c
index 9b1ed87c1e7..511b45b54ef 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -396,10 +396,11 @@ cli_opt_parse (char *opt, struct cli_state *state)
int
parse_cmdline (int argc, char *argv[], struct cli_state *state)
{
- int ret = 0;
- int i = 0;
- int j = 0;
- char *opt = NULL;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+ char *opt = NULL;
+ gf_boolean_t geo_rep_config = _gf_false;
state->argc=argc-1;
state->argv=&argv[1];
@@ -409,9 +410,14 @@ parse_cmdline (int argc, char *argv[], struct cli_state *state)
state->ctx->secure_mgmt = 1;
}
+ if (state->argc >= GEO_REP_CMD_CONFIG_INDEX &&
+ strtail (state->argv[GEO_REP_CMD_INDEX], "geo") &&
+ strtail (state->argv[GEO_REP_CMD_CONFIG_INDEX], "co"))
+ geo_rep_config = _gf_true;
+
for (i = 0; i < state->argc; i++) {
opt = strtail (state->argv[i], "--");
- if (opt) {
+ if (opt && !geo_rep_config) {
ret = cli_opt_parse (opt, state);
if (ret == -1) {
cli_out ("unrecognized option --%s", opt);
if (connected) connected_str = "Connected"; else connected_str = "Disconnected"; snprintf (key, 256, "friend%d.state", i); ret = dict_get_str (dict, key, &state); if (ret) goto out; cli_out ("\nHostname: %s\nUuid: %s\nState: %s (%s)", hostname_buf, uuid_buf, state, connected_str); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "friend%d.hostname_count", i); ret = dict_get_int32 (dict, key, &hostname_count); /* Print other addresses only if there are more than 1. */ if ((ret == 0) && (hostname_count > 1)) { memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "friend%d", i); ret = gf_cli_output_peer_hostnames (dict, hostname_count, key); } i++; } ret = 0; out: return ret; } int gf_cli_output_pool_list (dict_t *dict, int count) { int ret = -1; char *uuid_buf = NULL; char *hostname_buf = NULL; int32_t hostname_len = 8; /*min len 8 chars*/ int32_t i = 1; char key[256] = {0,}; int32_t connected = 0; char *connected_str = NULL; if (count <= 0) goto out; while (i <= count) { snprintf (key, 256, "friend%d.hostname", i); ret = dict_get_str (dict, key, &hostname_buf); if (ret) goto out; ret = strlen(hostname_buf); if (ret > hostname_len) hostname_len = ret; i++; } cli_out ("UUID\t\t\t\t\t%-*s\tState", hostname_len, "Hostname"); i = 1; while ( i <= count) { snprintf (key, 256, "friend%d.uuid", i); ret = dict_get_str (dict, key, &uuid_buf); if (ret) goto out; snprintf (key, 256, "friend%d.hostname", i); ret = dict_get_str (dict, key, &hostname_buf); if (ret) goto out; snprintf (key, 256, "friend%d.connected", i); ret = dict_get_int32 (dict, key, &connected); if (ret) goto out; if (connected) connected_str = "Connected"; else connected_str = "Disconnected"; cli_out ("%s\t%-*s\t%s ", uuid_buf, hostname_len, hostname_buf, connected_str); i++; } ret = 0; out: return ret; } /* function pointer for gf_cli_output_{pool_list,peer_status} */ typedef int (*cli_friend_output_fn) (dict_t*, int); int gf_cli_list_friends_cbk (struct rpc_req *req, struct iovec *iov, int 
count, void *myframe) { gf1_cli_peer_list_rsp rsp = {0,}; int ret = -1; dict_t *dict = NULL; char msg[1024] = {0,}; char *cmd = NULL; cli_friend_output_fn friend_output_fn; call_frame_t *frame = NULL; unsigned long flags = 0; frame = myframe; flags = (long)frame->local; if (flags == GF_CLI_LIST_POOL_NODES) { cmd = "pool list"; friend_output_fn = &gf_cli_output_pool_list; } else { cmd = "peer status"; friend_output_fn = &gf_cli_output_peer_status; } /* 'free' the flags set by gf_cli_list_friends */ frame->local = NULL; if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_peer_list_rsp); if (ret < 0) { gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); //rsp.op_ret = -1; //rsp.op_errno = EINVAL; goto out; } gf_log ("cli", GF_LOG_DEBUG, "Received resp to list: %d", rsp.op_ret); ret = rsp.op_ret; if (!rsp.op_ret) { if (!rsp.friends.friends_len) { snprintf (msg, sizeof (msg), "%s: No peers present", cmd); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_peer_status (dict, rsp.op_ret, rsp.op_errno, msg); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } cli_err ("%s", msg); ret = 0; goto out; } dict = dict_new (); if (!dict) { ret = -1; goto out; } ret = dict_unserialize (rsp.friends.friends_val, rsp.friends.friends_len, &dict); if (ret) { gf_log ("", GF_LOG_ERROR, "Unable to allocate memory"); goto out; } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_peer_status (dict, rsp.op_ret, rsp.op_errno, msg); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } ret = dict_get_int32 (dict, "count", &count); if (ret) { goto out; } ret = friend_output_fn (dict, count); if (ret) { goto out; } } else { if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_peer_status (dict, rsp.op_ret, rsp.op_errno, NULL); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); } else { ret = -1; } 
goto out; } ret = 0; out: if (ret) cli_err ("%s: failed", cmd); cli_cmd_broadcast_response (ret); if (dict) dict_destroy (dict); return ret; } void cli_out_options ( char *substr, char *optstr, char *valstr) { char *ptr1 = NULL; char *ptr2 = NULL; ptr1 = substr; ptr2 = optstr; while (ptr1) { /* Avoiding segmentation fault. */ if (!ptr2) return; if (*ptr1 != *ptr2) break; ptr1++; ptr2++; } if (*ptr2 == '\0') return; cli_out ("%s: %s",ptr2 , valstr); } static int _gf_cli_output_volinfo_opts (dict_t *d, char *k, data_t *v, void *tmp) { int ret = 0; char *key = NULL; char *ptr = NULL; data_t *value = NULL; key = tmp; ptr = strstr (k, "option."); if (ptr) { value = v; if (!value) { ret = -1; goto out; } cli_out_options (key, k, v->data); } out: return ret; } int gf_cli_get_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; int opt_count = 0; int32_t i = 0; int32_t j = 1; int32_t status = 0; int32_t type = 0; int32_t brick_count = 0; int32_t dist_count = 0; int32_t stripe_count = 0; int32_t replica_count = 0; int32_t disperse_count = 0; int32_t redundancy_count = 0; int32_t vol_type = 0; int32_t transport = 0; char *volume_id_str = NULL; char *brick = NULL; char *volname = NULL; dict_t *dict = NULL; cli_local_t *local = NULL; char key[1024] = {0}; char err_str[2048] = {0}; gf_cli_rsp rsp = {0}; char *caps = NULL; int k __attribute__((unused)) = 0; if (-1 == req->rpc_status) goto out; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to get vol: %d", rsp.op_ret); if (rsp.op_ret) { ret = -1; goto out; } if (!rsp.dict.dict_len) { if (global_state->mode & GLUSTER_MODE_XML) goto xml_output; cli_err ("No volumes present"); ret = 0; goto out; } dict = dict_new (); if (!dict) { ret = -1; goto out; } ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, 
&dict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Unable to allocate memory"); goto out; } ret = dict_get_int32 (dict, "count", &count); if (ret) goto out; local = ((call_frame_t *)myframe)->local; if (!count) { switch (local->get_vol.flags) { case GF_CLI_GET_NEXT_VOLUME: GF_FREE (local->get_vol.volname); local->get_vol.volname = NULL; ret = 0; goto out; case GF_CLI_GET_VOLUME: memset (err_str, 0, sizeof (err_str)); snprintf (err_str, sizeof (err_str), "Volume %s does not exist", local->get_vol.volname); ret = -1; if (!(global_state->mode & GLUSTER_MODE_XML)) goto out; } } xml_output: if (global_state->mode & GLUSTER_MODE_XML) { /* For GET_NEXT_VOLUME output is already begun in * and will also end in gf_cli_get_next_volume() */ if (local->get_vol.flags == GF_CLI_GET_VOLUME) { ret = cli_xml_output_vol_info_begin (local, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } } if (dict) { ret = cli_xml_output_vol_info (local, dict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } } if (local->get_vol.flags == GF_CLI_GET_VOLUME) { ret = cli_xml_output_vol_info_end (local); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); } goto out; } while ( i < count) { cli_out (" "); snprintf (key, 256, "volume%d.name", i); ret = dict_get_str (dict, key, &volname); if (ret) goto out; snprintf (key, 256, "volume%d.type", i); ret = dict_get_int32 (dict, key, &type); if (ret) goto out; snprintf (key, 256, "volume%d.status", i); ret = dict_get_int32 (dict, key, &status); if (ret) goto out; snprintf (key, 256, "volume%d.brick_count", i); ret = dict_get_int32 (dict, key, &brick_count); if (ret) goto out; snprintf (key, 256, "volume%d.dist_count", i); ret = dict_get_int32 (dict, key, &dist_count); if (ret) goto out; snprintf (key, 256, "volume%d.stripe_count", i); ret = dict_get_int32 (dict, key, &stripe_count); if (ret) goto out; snprintf (key, 256, "volume%d.replica_count", 
i); ret = dict_get_int32 (dict, key, &replica_count); if (ret) goto out; snprintf (key, 256, "volume%d.disperse_count", i); ret = dict_get_int32 (dict, key, &disperse_count); if (ret) goto out; snprintf (key, 256, "volume%d.redundancy_count", i); ret = dict_get_int32 (dict, key, &redundancy_count); if (ret) goto out; snprintf (key, 256, "volume%d.transport", i); ret = dict_get_int32 (dict, key, &transport); if (ret) goto out; snprintf (key, 256, "volume%d.volume_id", i); ret = dict_get_str (dict, key, &volume_id_str); if (ret) goto out; vol_type = type; // Distributed (stripe/replicate/stripe-replica) setups if ((type != GF_CLUSTER_TYPE_TIER) && (type > 0) && (dist_count < brick_count)) vol_type = type + GF_CLUSTER_TYPE_MAX - 1; cli_out ("Volume Name: %s", volname); cli_out ("Type: %s", cli_vol_type_str[vol_type]); cli_out ("Volume ID: %s", volume_id_str); cli_out ("Status: %s", cli_vol_status_str[status]); #ifdef HAVE_BD_XLATOR k = 0; memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "volume%d.xlator%d", i, k); ret = dict_get_str (dict, key, &caps); if (ret) goto next; do { j = 0; cli_out ("Xlator %d: %s", k + 1, caps); do { memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "volume%d.xlator%d.caps%d", i, k, j++); ret = dict_get_str (dict, key, &caps); if (ret) break; cli_out ("Capability %d: %s", j, caps); } while (1); memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "volume%d.xlator%d", i, ++k); ret = dict_get_str (dict, key, &caps); if (ret) break; } while (1); next: #else caps = 0; /* Avoid compiler warnings when BD not enabled */ #endif if (type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) { cli_out ("Number of Bricks: %d x %d x %d = %d", (brick_count / dist_count), stripe_count, replica_count, brick_count); } else if (type == GF_CLUSTER_TYPE_NONE) { cli_out ("Number of Bricks: %d", brick_count); } else if (type == GF_CLUSTER_TYPE_DISPERSE) { cli_out ("Number of Bricks: %d x (%d + %d) = %d", (brick_count / dist_count), disperse_count - 
redundancy_count, redundancy_count, brick_count); } else { /* For both replicate and stripe, dist_count is good enough */ cli_out ("Number of Bricks: %d x %d = %d", (brick_count / dist_count), dist_count, brick_count); } cli_out ("Transport-type: %s", ((transport == 0)?"tcp": (transport == 1)?"rdma": "tcp,rdma")); j = 1; GF_FREE (local->get_vol.volname); local->get_vol.volname = gf_strdup (volname); if (brick_count) cli_out ("Bricks:"); while (j <= brick_count) { snprintf (key, 1024, "volume%d.brick%d", i, j); ret = dict_get_str (dict, key, &brick); if (ret) goto out; cli_out ("Brick%d: %s", j, brick); #ifdef HAVE_BD_XLATOR snprintf (key, 256, "volume%d.vg%d", i, j); ret = dict_get_str (dict, key, &caps); if (!ret) cli_out ("Brick%d VG: %s", j, caps); #endif j++; } snprintf (key, 256, "volume%d.opt_count",i); ret = dict_get_int32 (dict, key, &opt_count); if (ret) goto out; if (!opt_count) goto out; cli_out ("Options Reconfigured:"); snprintf (key, 256, "volume%d.option.",i); ret = dict_foreach (dict, _gf_cli_output_volinfo_opts, key); if (ret) goto out; i++; } ret = 0; out: if (ret) cli_err ("%s", err_str); cli_cmd_broadcast_response (ret); if (dict) dict_destroy (dict); free (rsp.dict.dict_val); free (rsp.op_errstr); gf_log ("cli", GF_LOG_DEBUG, "Returning: %d", ret); return ret; } int gf_cli_create_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; cli_local_t *local = NULL; char *volname = NULL; dict_t *dict = NULL; dict_t *rsp_dict = NULL; if (-1 == req->rpc_status) { goto out; } local = ((call_frame_t *) (myframe))->local; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to create volume"); dict = local->dict; ret = dict_get_str (dict, "volname", &volname); if (ret) goto out; if (global_state->mode & GLUSTER_MODE_XML) { 
if (rsp.op_ret == 0) { rsp_dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization"); goto out; } } ret = cli_xml_output_vol_create (rsp_dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp (rsp.op_errstr, "")) cli_err ("volume create: %s: failed: %s", volname, rsp.op_errstr); else if (rsp.op_ret) cli_err ("volume create: %s: failed", volname); else cli_out ("volume create: %s: success: " "please start the volume to access data", volname); ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); free (rsp.dict.dict_val); free (rsp.op_errstr); return ret; } int gf_cli_delete_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; cli_local_t *local = NULL; char *volname = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; dict_t *rsp_dict = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } local = frame->local; if (local) dict = local->dict; ret = dict_get_str (dict, "volname", &volname); if (ret) { gf_log (frame->this->name, GF_LOG_ERROR, "dict get failed"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to delete volume"); if (global_state->mode & GLUSTER_MODE_XML) { if (rsp.op_ret == 0) { rsp_dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization"); goto out; } } ret = cli_xml_output_generic_volume ("volDelete", rsp_dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp (rsp.op_errstr, "")) cli_err ("volume 
delete: %s: failed: %s", volname, rsp.op_errstr); else if (rsp.op_ret) cli_err ("volume delete: %s: failed", volname); else cli_out ("volume delete: %s: success", volname); ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); free (rsp.dict.dict_val); gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret); return ret; } int gf_cli3_1_uuid_get_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { char *uuid_str = NULL; gf_cli_rsp rsp = {0,}; int ret = -1; cli_local_t *local = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; if (-1 == req->rpc_status) goto out; frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } local = frame->local; frame->local = NULL; gf_log ("cli", GF_LOG_INFO, "Received resp to uuid get"); dict = dict_new (); if (!dict) { ret = -1; goto out; } ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to unserialize " "response for uuid get"); goto out; } ret = dict_get_str (dict, "uuid", &uuid_str); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get uuid " "from dictionary"); goto out; } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_dict ("uuidGenerate", dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret) { if (strcmp (rsp.op_errstr, "") == 0) cli_err ("Get uuid was unsuccessful"); else cli_err ("%s", rsp.op_errstr); } else { cli_out ("UUID: %s", uuid_str); } ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); cli_local_wipe (local); if (rsp.dict.dict_val) free (rsp.dict.dict_val); gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret); return ret; } int gf_cli3_1_uuid_reset_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; cli_local_t *local = NULL; 
call_frame_t *frame = NULL; dict_t *dict = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } local = frame->local; frame->local = NULL; gf_log ("cli", GF_LOG_INFO, "Received resp to uuid reset"); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_dict ("uuidReset", dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp (rsp.op_errstr, "")) cli_err ("%s", rsp.op_errstr); else cli_out ("resetting the peer uuid has been %s", (rsp.op_ret) ? "unsuccessful": "successful"); ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); cli_local_wipe (local); if (rsp.dict.dict_val) free (rsp.dict.dict_val); if (dict) dict_unref (dict); gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret); return ret; } int gf_cli_start_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; cli_local_t *local = NULL; char *volname = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; dict_t *rsp_dict = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } if (frame) local = frame->local; if (local) dict = local->dict; ret = dict_get_str (dict, "volname", &volname); if (ret) { gf_log ("cli", GF_LOG_ERROR, "dict get failed"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to start volume"); if (global_state->mode & GLUSTER_MODE_XML) { if (rsp.op_ret == 0) { rsp_dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization"); goto out; } } ret = 
cli_xml_output_generic_volume ("volStart", rsp_dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp (rsp.op_errstr, "")) cli_err ("volume start: %s: failed: %s", volname, rsp.op_errstr); else if (rsp.op_ret) cli_err ("volume start: %s: failed", volname); else cli_out ("volume start: %s: success", volname); ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); free (rsp.dict.dict_val); free (rsp.op_errstr); return ret; } int gf_cli_stop_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; cli_local_t *local = NULL; char *volname = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; dict_t *rsp_dict = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } if (frame) local = frame->local; if (local) { dict = local->dict; ret = dict_get_str (dict, "volname", &volname); if (ret) { gf_log (frame->this->name, GF_LOG_ERROR, "Unable to get volname from dict"); goto out; } } gf_log ("cli", GF_LOG_INFO, "Received resp to stop volume"); if (global_state->mode & GLUSTER_MODE_XML) { if (rsp.op_ret == 0) { rsp_dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization"); goto out; } } ret = cli_xml_output_generic_volume ("volStop", rsp_dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret && strcmp (rsp.op_errstr, "")) cli_err ("volume stop: %s: failed: %s", volname, rsp.op_errstr); else if (rsp.op_ret) cli_err ("volume stop: %s: failed", volname); else cli_out ("volume stop: %s: success", volname); ret = rsp.op_ret; out: cli_cmd_broadcast_response 
(ret); free (rsp.op_errstr); free (rsp.dict.dict_val); return ret; } int gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type) { int ret = -1; int count = 0; int i = 1; char key[256] = {0,}; gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; uint64_t files = 0; uint64_t size = 0; uint64_t lookup = 0; char *node_name = NULL; uint64_t failures = 0; uint64_t skipped = 0; double elapsed = 0; char *status_str = NULL; char *size_str = NULL; ret = dict_get_int32 (dict, "count", &count); if (ret) { gf_log ("cli", GF_LOG_ERROR, "count not set"); goto out; } cli_out ("%40s %16s %13s %13s %13s %13s %20s %18s", "Node", "Rebalanced-files", "size", "scanned", "failures", "skipped", "status", "run time in secs"); cli_out ("%40s %16s %13s %13s %13s %13s %20s %18s", "---------", "-----------", "-----------", "-----------", "-----------", "-----------", "------------", "--------------"); for (i = 1; i <= count; i++) { /* Reset the variables to prevent carryover of values */ node_name = NULL; files = 0; size = 0; lookup = 0; skipped = 0; status_str = NULL; elapsed = 0; /* Check if status is NOT_STARTED, and continue early */ memset (key, 0, 256); snprintf (key, 256, "status-%d", i); ret = dict_get_int32 (dict, key, (int32_t *)&status_rcd); if (ret) { gf_log ("cli", GF_LOG_TRACE, "count %d %d", count, i); gf_log ("cli", GF_LOG_TRACE, "failed to get status"); goto out; } if (GF_DEFRAG_STATUS_NOT_STARTED == status_rcd) continue; snprintf (key, 256, "node-name-%d", i); ret = dict_get_str (dict, key, &node_name); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get node-name"); memset (key, 0, 256); snprintf (key, 256, "files-%d", i); ret = dict_get_uint64 (dict, key, &files); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get file count"); memset (key, 0, 256); snprintf (key, 256, "size-%d", i); ret = dict_get_uint64 (dict, key, &size); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get size of xfer"); memset (key, 0, 256); snprintf (key, 256, 
"lookups-%d", i); ret = dict_get_uint64 (dict, key, &lookup); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get lookedup file count"); memset (key, 0, 256); snprintf (key, 256, "failures-%d", i); ret = dict_get_uint64 (dict, key, &failures); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get failures count"); memset (key, 0, 256); snprintf (key, 256, "skipped-%d", i); ret = dict_get_uint64 (dict, key, &skipped); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get skipped count"); /* For remove-brick include skipped count into failure count*/ if (task_type != GF_TASK_TYPE_REBALANCE) { failures += skipped; skipped = 0; } memset (key, 0, 256); snprintf (key, 256, "run-time-%d", i); ret = dict_get_double (dict, key, &elapsed); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get run-time"); /* Check for array bound */ if (status_rcd >= GF_DEFRAG_STATUS_MAX) status_rcd = GF_DEFRAG_STATUS_MAX; status_str = cli_vol_task_status_str[status_rcd]; size_str = gf_uint64_2human_readable(size); if (size_str) { cli_out ("%40s %16"PRIu64 " %13s" " %13"PRIu64 " %13" PRIu64" %13"PRIu64 " %20s %18.2f", node_name, files, size_str, lookup, failures, skipped, status_str, elapsed); } else { cli_out ("%40s %16"PRIu64 " %13"PRIu64 " %13"PRIu64 " %13"PRIu64" %13"PRIu64 " %20s %18.2f", node_name, files, size, lookup, failures, skipped, status_str, elapsed); } GF_FREE(size_str); } out: return ret; } int gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type) { int ret = -1; int count = 0; int i = 1; char key[256] = {0,}; gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; uint64_t files = 0; uint64_t size = 0; uint64_t lookup = 0; char *node_name = NULL; uint64_t failures = 0; uint64_t skipped = 0; double elapsed = 0; char *status_str = NULL; char *size_str = NULL; ret = dict_get_int32 (dict, "count", &count); if (ret) { gf_log ("cli", GF_LOG_ERROR, "count not set"); goto out; } cli_out ("%40s %16s %13s %13s %13s %13s %20s %18s", "Node", "Rebalanced-files", 
"size", "scanned", "failures", "skipped", "status", "run time in secs"); cli_out ("%40s %16s %13s %13s %13s %13s %20s %18s", "---------", "-----------", "-----------", "-----------", "-----------", "-----------", "------------", "--------------"); for (i = 1; i <= count; i++) { /* Reset the variables to prevent carryover of values */ node_name = NULL; files = 0; size = 0; lookup = 0; skipped = 0; status_str = NULL; elapsed = 0; /* Check if status is NOT_STARTED, and continue early */ memset (key, 0, 256); snprintf (key, 256, "status-%d", i); ret = dict_get_int32 (dict, key, (int32_t *)&status_rcd); if (ret) { gf_log ("cli", GF_LOG_TRACE, "failed to get status"); goto out; } if (GF_DEFRAG_STATUS_NOT_STARTED == status_rcd) continue; snprintf (key, 256, "node-name-%d", i); ret = dict_get_str (dict, key, &node_name); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get node-name"); memset (key, 0, 256); snprintf (key, 256, "files-%d", i); ret = dict_get_uint64 (dict, key, &files); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get file count"); memset (key, 0, 256); snprintf (key, 256, "size-%d", i); ret = dict_get_uint64 (dict, key, &size); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get size of xfer"); memset (key, 0, 256); snprintf (key, 256, "lookups-%d", i); ret = dict_get_uint64 (dict, key, &lookup); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get lookedup file count"); memset (key, 0, 256); snprintf (key, 256, "failures-%d", i); ret = dict_get_uint64 (dict, key, &failures); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get failures count"); memset (key, 0, 256); snprintf (key, 256, "skipped-%d", i); ret = dict_get_uint64 (dict, key, &skipped); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get skipped count"); /* For remove-brick include skipped count into failure count*/ if (task_type != GF_TASK_TYPE_REBALANCE) { failures += skipped; skipped = 0; } memset (key, 0, 256); snprintf (key, 256, "run-time-%d", i); ret = dict_get_double (dict, key, 
&elapsed); if (ret) gf_log ("cli", GF_LOG_TRACE, "failed to get run-time"); /* Check for array bound */ if (status_rcd >= GF_DEFRAG_STATUS_MAX) status_rcd = GF_DEFRAG_STATUS_MAX; status_str = cli_vol_task_status_str[status_rcd]; size_str = gf_uint64_2human_readable(size); if (size_str) { cli_out ("%40s %16"PRIu64 " %13s" " %13"PRIu64 " %13" PRIu64" %13"PRIu64 " %20s %18.2f", node_name, files, size_str, lookup, failures, skipped, status_str, elapsed); } else { cli_out ("%40s %16"PRIu64 " %13"PRIu64 " %13"PRIu64 " %13"PRIu64" %13"PRIu64 " %20s %18.2f", node_name, files, size, lookup, failures, skipped, status_str, elapsed); } GF_FREE(size_str); } out: return ret; } int gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; cli_local_t *local = NULL; char *volname = NULL; call_frame_t *frame = NULL; int cmd = 0; int ret = -1; dict_t *dict = NULL; dict_t *local_dict = NULL; char msg[1024] = {0,}; char *task_id_str = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } if (frame) local = frame->local; if (local) local_dict = local->dict; ret = dict_get_str (local_dict, "volname", &volname); if (ret) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to get volname"); goto out; } ret = dict_get_int32 (local_dict, "rebalance-command", (int32_t*)&cmd); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get command"); goto out; } if (rsp.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret < 0) { gf_log ("glusterd", GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); goto out; } } if (!((cmd == GF_DEFRAG_CMD_STOP) || (cmd == GF_DEFRAG_CMD_STATUS) || (cmd == GF_DEFRAG_CMD_STATUS_TIER)) && !(global_state->mode & GLUSTER_MODE_XML)) { 
/* All other possibilites are about starting a rebalance */ ret = dict_get_str (dict, GF_REBALANCE_TID_KEY, &task_id_str); if (rsp.op_ret && strcmp (rsp.op_errstr, "")) { snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); } else { if (!rsp.op_ret) { /* append errstr in the cli msg for successful * case since unlock failures can be highlighted * event though rebalance command was successful */ snprintf (msg, sizeof (msg), "Rebalance on %s has been started " "successfully. Use rebalance status " "command to check status of the " "rebalance process.\nID: %s\n%s", volname, task_id_str, rsp.op_errstr); } else { snprintf (msg, sizeof (msg), "Starting rebalance on volume %s has " "been unsuccessful.", volname); } } goto done; } if (cmd == GF_DEFRAG_CMD_STOP) { if (rsp.op_ret == -1) { if (strcmp (rsp.op_errstr, "")) snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); else snprintf (msg, sizeof (msg), "rebalance volume %s stop failed", volname); goto done; } else { /* append errstr in the cli msg for successful case * since unlock failures can be highlighted event though * rebalance command was successful */ snprintf (msg, sizeof (msg), "rebalance process may be in the middle of a " "file migration.\nThe process will be fully " "stopped once the migration of the file is " "complete.\nPlease check rebalance process " "for completion before doing any further " "brick related tasks on the volume.\n%s", rsp.op_errstr); } } if (cmd == GF_DEFRAG_CMD_STATUS) { if (rsp.op_ret == -1) { if (strcmp (rsp.op_errstr, "")) snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); else snprintf (msg, sizeof (msg), "Failed to get the status of " "rebalance process"); goto done; } else { snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); } } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_rebalance (cmd, dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); goto out; } if (cmd == GF_DEFRAG_CMD_STATUS_TIER) ret = gf_cli_print_tier_status (dict, GF_TASK_TYPE_REBALANCE); else ret = 
gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REBALANCE); if (ret) gf_log ("cli", GF_LOG_ERROR, "Failed to print rebalance status"); done: if (global_state->mode & GLUSTER_MODE_XML) cli_xml_output_str ("volRebalance", msg, rsp.op_ret, rsp.op_errno, rsp.op_errstr); else { if (rsp.op_ret) cli_err ("volume rebalance: %s: failed: %s", volname, msg); else cli_out ("volume rebalance: %s: success: %s", volname, msg); } ret = rsp.op_ret; out: free (rsp.op_errstr); //malloced by xdr free (rsp.dict.dict_val); //malloced by xdr if (dict) dict_unref (dict); cli_cmd_broadcast_response (ret); return ret; } int gf_cli_rename_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; char msg[1024] = {0,}; if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to probe"); snprintf (msg, sizeof (msg), "Rename volume %s", (rsp.op_ret) ? 
                  "unsuccessful": "successful");

        /* XML mode: emit the result document and skip plain-text output. */
        if (global_state->mode & GLUSTER_MODE_XML) {
                ret = cli_xml_output_str ("volRename", msg, rsp.op_ret,
                                          rsp.op_errno, rsp.op_errstr);
                if (ret)
                        gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml");
                goto out;
        }

        if (rsp.op_ret)
                cli_err ("volume rename: failed");
        else
                cli_out ("volume rename: success");
        ret = rsp.op_ret;

out:
        /* Wake up the CLI thread waiting on this request. */
        cli_cmd_broadcast_response (ret);
        return ret;
}

/* Callback for GLUSTER_CLI_RESET_VOLUME: decodes the XDR response and
 * reports "volume reset" success/failure, in XML or plain-text mode.
 * Returns rsp.op_ret on a decoded response, -1 on decode/transport error. */
int
gf_cli_reset_volume_cbk (struct rpc_req *req, struct iovec *iov,
                         int count, void *myframe)
{
        gf_cli_rsp  rsp = {0,};
        int         ret = -1;
        char        msg[1024] = {0,};

        /* rpc_status == -1 means the transport failed; nothing to decode. */
        if (-1 == req->rpc_status) {
                goto out;
        }

        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
        if (ret < 0) {
                gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
                        "Failed to decode xdr response");
                goto out;
        }

        gf_log ("cli", GF_LOG_INFO, "Received resp to reset");

        /* Prefer the server-provided error string when one is present. */
        if (strcmp (rsp.op_errstr, ""))
                snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
        else
                snprintf (msg, sizeof (msg), "reset volume %s",
                          (rsp.op_ret) ? "unsuccessful": "successful");

        if (global_state->mode & GLUSTER_MODE_XML) {
                ret = cli_xml_output_str ("volReset", msg, rsp.op_ret,
                                          rsp.op_errno, rsp.op_errstr);
                if (ret)
                        gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml");
                goto out;
        }

        if (rsp.op_ret)
                cli_err ("volume reset: failed: %s", msg);
        else
                cli_out ("volume reset: success: %s", msg);
        ret = rsp.op_ret;

out:
        cli_cmd_broadcast_response (ret);
        return ret;
}

/* Callback for the nfs-ganesha command: decodes the response and prints
 * success or the server's error string.  (Continues past this chunk.) */
int
gf_cli_ganesha_cbk (struct rpc_req *req, struct iovec *iov,
                    int count, void *myframe)
{
        gf_cli_rsp  rsp = {0,};
        int         ret = -1;
        dict_t     *dict = NULL;
        /* NOTE(review): help_str/msg/tmp_str do not appear to be used in the
         * visible remainder of this function — confirm against full file. */
        char       *help_str = NULL;
        char        msg[1024] = {0,};
        char        tmp_str[512] = {0,};

        if (-1 == req->rpc_status) {
                goto out;
        }

        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
        if (ret < 0) {
                gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
                        "Failed to decode xdr response");
                goto out;
        }

        gf_log ("cli", GF_LOG_DEBUG, "Received resp to ganesha");

        dict = dict_new ();
        if (!dict) {
                ret = -1;
                goto out;
        }

        ret = dict_unserialize
(rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret) goto out; if (rsp.op_ret) { if (strcmp (rsp.op_errstr, "")) cli_err ("nfs-ganesha: failed: %s", rsp.op_errstr); else cli_err ("nfs-ganesha: failed"); } else { cli_out("ganesha enable : success "); } ret = rsp.op_ret; out: if (dict) dict_unref (dict); cli_cmd_broadcast_response (ret); return ret; } char * is_server_debug_xlator (void *myframe) { call_frame_t *frame = NULL; cli_local_t *local = NULL; char **words = NULL; char *key = NULL; char *value = NULL; char *debug_xlator = NULL; frame = myframe; local = frame->local; words = (char **)local->words; while (*words != NULL) { if (strstr (*words, "trace") == NULL && strstr (*words, "error-gen") == NULL) { words++; continue; } key = *words; words++; value = *words; if (value == NULL) break; if (strstr (value, "client")) { words++; continue; } else { if (!(strstr (value, "posix") || strstr (value, "acl") || strstr (value, "locks") || strstr (value, "io-threads") || strstr (value, "marker") || strstr (value, "index"))) { words++; continue; } else { debug_xlator = gf_strdup (key); break; } } } return debug_xlator; } int gf_cli_set_volume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; dict_t *dict = NULL; char *help_str = NULL; char msg[1024] = {0,}; char *debug_xlator = NULL; char tmp_str[512] = {0,}; if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to set"); dict = dict_new (); if (!dict) { ret = -1; goto out; } ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); /* For brick processes graph change does not happen on the fly. * The process has to be restarted. 
So this is a check from the * volume set option such that if debug xlators such as trace/errorgen * are provided in the set command, warn the user. */ debug_xlator = is_server_debug_xlator (myframe); if (dict_get_str (dict, "help-str", &help_str) && !msg[0]) snprintf (msg, sizeof (msg), "Set volume %s", (rsp.op_ret) ? "unsuccessful": "successful"); if (rsp.op_ret == 0 && debug_xlator) { snprintf (tmp_str, sizeof (tmp_str), "\n%s translator has been " "added to the server volume file. Please restart the" " volume for enabling the translator", debug_xlator); } if ((global_state->mode & GLUSTER_MODE_XML) && (help_str == NULL)) { ret = cli_xml_output_str ("volSet", msg, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret) { if (strcmp (rsp.op_errstr, "")) cli_err ("volume set: failed: %s", rsp.op_errstr); else cli_err ("volume set: failed"); } else { if (help_str == NULL) { if (debug_xlator == NULL) cli_out ("volume set: success"); else cli_out ("volume set: success%s", tmp_str); }else { cli_out ("%s", help_str); } } ret = rsp.op_ret; out: if (dict) dict_unref (dict); GF_FREE (debug_xlator); cli_cmd_broadcast_response (ret); return ret; } int gf_cli_add_brick_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; char msg[1024] = {0,}; if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to add brick"); if (rsp.op_ret && strcmp (rsp.op_errstr, "")) snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); else snprintf (msg, sizeof (msg), "Add Brick %s", (rsp.op_ret) ? 
"unsuccessful": "successful"); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_str ("volAddBrick", msg, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret) cli_err ("volume add-brick: failed: %s", rsp.op_errstr); else cli_out ("volume add-brick: success"); ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); free (rsp.dict.dict_val); free (rsp.op_errstr); return ret; } int gf_cli3_remove_brick_status_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; dict_t *dict = NULL; char msg[1024] = {0,}; int32_t command = 0; gf1_op_commands cmd = GF_OP_CMD_NONE; cli_local_t *local = NULL; call_frame_t *frame = NULL; char *cmd_str = "unknown"; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } if (frame) local = frame->local; ret = dict_get_int32 (local->dict, "command", &command); if (ret) goto out; cmd = command; switch (cmd) { case GF_OP_CMD_STOP: cmd_str = "stop"; break; case GF_OP_CMD_STATUS: cmd_str = "status"; break; default: break; } ret = rsp.op_ret; if (rsp.op_ret == -1) { if (strcmp (rsp.op_errstr, "")) snprintf (msg, sizeof (msg), "volume remove-brick %s: " "failed: %s", cmd_str, rsp.op_errstr); else snprintf (msg, sizeof (msg), "volume remove-brick %s: " "failed", cmd_str); if (global_state->mode & GLUSTER_MODE_XML) goto xml_output; cli_err ("%s", msg); goto out; } if (rsp.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret < 0) { strncpy (msg, "failed to unserialize req-buffer to " "dictionary", sizeof (msg)); if (global_state->mode & GLUSTER_MODE_XML) { rsp.op_ret = -1; goto xml_output; } gf_log ("cli", GF_LOG_ERROR, "%s", msg); goto out; 
} } xml_output: if (global_state->mode & GLUSTER_MODE_XML) { if (strcmp (rsp.op_errstr, "")) { ret = cli_xml_output_vol_remove_brick (_gf_true, dict, rsp.op_ret, rsp.op_errno, rsp.op_errstr); } else { ret = cli_xml_output_vol_remove_brick (_gf_true, dict, rsp.op_ret, rsp.op_errno, msg); } goto out; } ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to print remove-brick " "rebalance status"); goto out; } if ((cmd == GF_OP_CMD_STOP) && (rsp.op_ret == 0)) { cli_out ("'remove-brick' process may be in the middle of a " "file migration.\nThe process will be fully stopped " "once the migration of the file is complete.\nPlease " "check remove-brick process for completion before " "doing any further brick related tasks on the " "volume."); } out: free (rsp.dict.dict_val); //malloced by xdr if (dict) dict_unref (dict); cli_cmd_broadcast_response (ret); return ret; } int gf_cli_remove_brick_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; char msg[1024] = {0,}; gf1_op_commands cmd = GF_OP_CMD_NONE; char *cmd_str = "unknown"; cli_local_t *local = NULL; call_frame_t *frame = NULL; char *task_id_str = NULL; dict_t *rsp_dict = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; local = frame->local; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } ret = dict_get_int32 (local->dict, "command", (int32_t *)&cmd); if (ret) { gf_log ("", GF_LOG_ERROR, "failed to get command"); goto out; } if (rsp.dict.dict_len) { rsp_dict = dict_new (); if (!rsp_dict) { ret = -1; goto out; } ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to unserialize rsp_dict"); goto out; } } switch (cmd) { case GF_OP_CMD_DETACH_START: case GF_OP_CMD_START: cmd_str = "start"; ret = 
dict_get_str (rsp_dict, GF_REMOVE_BRICK_TID_KEY, &task_id_str); if (ret) { gf_log ("cli", GF_LOG_ERROR, "remove-brick-id is not present in dict"); } break; case GF_OP_CMD_COMMIT: cmd_str = "commit"; break; case GF_OP_CMD_COMMIT_FORCE: cmd_str = "commit force"; break; default: cmd_str = "unknown"; break; } gf_log ("cli", GF_LOG_INFO, "Received resp to remove brick"); if (rsp.op_ret && strcmp (rsp.op_errstr, "")) snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); else snprintf (msg, sizeof (msg), "Remove Brick %s %s", cmd_str, (rsp.op_ret) ? "unsuccessful": "successful"); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_remove_brick (_gf_false, rsp_dict, rsp.op_ret, rsp.op_errno, msg); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret) { cli_err ("volume remove-brick %s: failed: %s", cmd_str, msg); } else { cli_out ("volume remove-brick %s: success", cmd_str); if (GF_OP_CMD_START == cmd && task_id_str != NULL) cli_out ("ID: %s", task_id_str); if (GF_OP_CMD_COMMIT == cmd) cli_out ("Check the removed bricks to ensure all files " "are migrated.\nIf files with data are " "found on the brick path, copy them via a " "gluster mount point before re-purposing the " "removed brick. 
"); } ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); free (rsp.dict.dict_val); free (rsp.op_errstr); return ret; } int gf_cli_replace_brick_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; cli_local_t *local = NULL; call_frame_t *frame = NULL; dict_t *dict = NULL; char *src_brick = NULL; char *dst_brick = NULL; char *status_reply = NULL; char *rb_operation_str = NULL; dict_t *rsp_dict = NULL; char msg[1024] = {0,}; char *task_id_str = NULL; char *replace_op = 0; if (-1 == req->rpc_status) { goto out; } frame = (call_frame_t *) myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } local = frame->local; GF_ASSERT (local); dict = local->dict; ret = dict_get_str (dict, "operation", &replace_op); if (ret) { gf_log (frame->this->name, GF_LOG_ERROR, "dict_get on operation failed"); goto out; } if (rsp.dict.dict_len) { /* Unserialize the dictionary */ rsp_dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "failed to " "unserialize rsp buffer to dictionary"); goto out; } } if (!strcmp(replace_op, "GF_REPLACE_OP_COMMIT_FORCE")) { if (rsp.op_ret || ret) rb_operation_str = gf_strdup ("replace-brick commit " "force operation failed"); else rb_operation_str = gf_strdup ("replace-brick commit " "force operation " "successful"); } else { gf_log (frame->this->name, GF_LOG_DEBUG, "Unknown operation"); } if (rsp.op_ret && (strcmp (rsp.op_errstr, ""))) { rb_operation_str = gf_strdup (rsp.op_errstr); } gf_log ("cli", GF_LOG_INFO, "Received resp to replace brick"); snprintf (msg, sizeof (msg), "%s", rb_operation_str ? 
rb_operation_str : "Unknown operation"); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_replace_brick (replace_op, rsp_dict, rsp.op_ret, rsp.op_errno, msg); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (rsp.op_ret) cli_err ("volume replace-brick: failed: %s", msg); else cli_out ("volume replace-brick: success: %s", msg); ret = rsp.op_ret; out: if (frame) frame->local = NULL; if (local) { dict_unref (local->dict); cli_local_wipe (local); } if (rb_operation_str) GF_FREE (rb_operation_str); cli_cmd_broadcast_response (ret); free (rsp.dict.dict_val); if (rsp_dict) dict_unref (rsp_dict); return ret; } static int gf_cli_log_rotate_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; char msg[1024] = {0,}; if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } gf_log ("cli", GF_LOG_DEBUG, "Received resp to log rotate"); if (rsp.op_ret && strcmp (rsp.op_errstr, "")) snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); else snprintf (msg, sizeof (msg), "log rotate %s", (rsp.op_ret) ? 
                          "unsuccessful": "successful");

        if (global_state->mode & GLUSTER_MODE_XML) {
                ret = cli_xml_output_str ("volLogRotate", msg, rsp.op_ret,
                                          rsp.op_errno, rsp.op_errstr);
                if (ret)
                        gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml");
                goto out;
        }

        if (rsp.op_ret)
                cli_err ("volume log-rotate: failed: %s", msg);
        else
                cli_out ("volume log-rotate: success");
        ret = rsp.op_ret;

out:
        /* Wake the waiting CLI thread; dict_val was malloced by xdr. */
        cli_cmd_broadcast_response (ret);
        free (rsp.dict.dict_val);
        return ret;
}

/* Callback for GLUSTER_CLI_SYNC_VOLUME: decodes the XDR response and
 * reports "volume sync" success/failure (XML or plain text). */
static int
gf_cli_sync_volume_cbk (struct rpc_req *req, struct iovec *iov,
                        int count, void *myframe)
{
        gf_cli_rsp  rsp = {0,};
        int         ret = -1;
        char        msg[1024] = {0,};

        /* Transport-level failure: nothing to decode. */
        if (-1 == req->rpc_status) {
                goto out;
        }

        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
        if (ret < 0) {
                gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
                        "Failed to decode xdr response");
                goto out;
        }

        gf_log ("cli", GF_LOG_DEBUG, "Received resp to sync");

        /* Include the server's error string when the operation failed. */
        if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
                snprintf (msg, sizeof (msg), "volume sync: failed: %s",
                          rsp.op_errstr);
        else
                snprintf (msg, sizeof (msg), "volume sync: %s",
                          (rsp.op_ret) ? "failed": "success");

        if (global_state->mode & GLUSTER_MODE_XML) {
                ret = cli_xml_output_str ("volSync", msg, rsp.op_ret,
                                          rsp.op_errno, rsp.op_errstr);
                if (ret)
                        gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml");
                goto out;
        }

        if (rsp.op_ret)
                cli_err ("%s", msg);
        else
                cli_out ("%s", msg);
        ret = rsp.op_ret;

out:
        cli_cmd_broadcast_response (ret);
        return ret;
}

/* Print one quota "list" row (disk usage) for PATH: hard/soft limits,
 * used and available space in human-readable form, and whether the soft
 * (sl) / hard (hl) limits are exceeded.  XML mode delegates to
 * cli_quota_xml_output.  (Continues past this chunk.) */
static int
print_quota_list_usage_output (cli_local_t *local, char *path, int64_t avail,
                               char *sl_str, quota_limits_t *limits,
                               quota_meta_t *used_space, gf_boolean_t sl,
                               gf_boolean_t hl)
{
        int32_t  ret = -1;
        char    *used_str = NULL;
        char    *avail_str = NULL;
        char    *hl_str = NULL;

        /* Human-readable forms; each is heap-allocated and freed at out. */
        hl_str = gf_uint64_2human_readable (limits->hl);
        used_str = gf_uint64_2human_readable (used_space->size);
        avail_str = gf_uint64_2human_readable (avail);

        if (global_state->mode & GLUSTER_MODE_XML) {
                ret = cli_quota_xml_output (local, path, hl_str,
                                            sl_str, used_str,
                                            avail_str, sl ?
"Yes" : "No", hl ? "Yes" : "No"); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to " "output in xml format for quota " "list command"); } goto out; } if (!used_str) { cli_out ("%-40s %7s %9s %11"PRIu64 "%9"PRIu64" %15s %18s", path, hl_str, sl_str, used_space->size, avail, sl ? "Yes" : "No", hl ? "Yes" : "No"); } else { cli_out ("%-40s %7s %9s %11s %7s %15s %20s", path, hl_str, sl_str, used_str, avail_str, sl ? "Yes" : "No", hl ? "Yes" : "No"); } ret = 0; out: GF_FREE (hl_str); GF_FREE (used_str); GF_FREE (avail_str); return ret; } static int print_quota_list_object_output (cli_local_t *local, char *path, int64_t avail, char *sl_str, quota_limits_t *limits, quota_meta_t *used_space, gf_boolean_t sl, gf_boolean_t hl) { int32_t ret = -1; if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_quota_object_xml_output (local, path, sl_str, limits, used_space, avail, sl ? "Yes" : "No", hl ? "Yes" : "No"); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to " "output in xml format for quota " "list command"); } goto out; } cli_out ("%-40s %9"PRIu64" %9s %10"PRIu64" %10"PRIu64" %11"PRIu64 " %15s %20s", path, limits->hl, sl_str, used_space->file_count, used_space->dir_count, avail, sl ? "Yes" : "No", hl ? 
"Yes" : "No"); ret = 0; out: return ret; } static int print_quota_list_output (cli_local_t *local, char *path, char *default_sl, quota_limits_t *limits, quota_meta_t *used_space, int type) { int64_t avail = 0; char percent_str[20] = {0}; char *sl_final = NULL; int ret = -1; double sl_num = 0; gf_boolean_t sl = _gf_false; gf_boolean_t hl = _gf_false; int64_t used_size = 0; GF_ASSERT (local); GF_ASSERT (path); if (limits->sl < 0) { ret = gf_string2percent (default_sl, &sl_num); sl_num = (sl_num * limits->hl) / 100; sl_final = default_sl; } else { sl_num = (limits->sl * limits->hl) / 100; snprintf (percent_str, sizeof (percent_str), "%"PRIu64"%%", limits->sl); sl_final = percent_str; } if (type == GF_QUOTA_OPTION_TYPE_LIST) used_size = used_space->size; else used_size = used_space->file_count + used_space->dir_count; if (limits->hl > used_size) { avail = limits->hl - used_size; hl = _gf_false; if (used_size > sl_num) sl = _gf_true; else sl = _gf_false; } else { avail = 0; hl = sl = _gf_true; } if (type == GF_QUOTA_OPTION_TYPE_LIST) ret = print_quota_list_usage_output (local, path, avail, sl_final, limits, used_space, sl, hl); else ret = print_quota_list_object_output (local, path, avail, sl_final, limits, used_space, sl, hl); return ret; } static int print_quota_list_from_mountdir (cli_local_t *local, char *mountdir, char *default_sl, char *path, int type) { int ret = -1; ssize_t xattr_size = 0; quota_limits_t limits = {0,}; quota_meta_t used_space = {0,}; char *key = NULL; GF_ASSERT (local); GF_ASSERT (mountdir); GF_ASSERT (path); if (type == GF_QUOTA_OPTION_TYPE_LIST) key = "trusted.glusterfs.quota.limit-set"; else key = "trusted.glusterfs.quota.limit-objects"; ret = sys_lgetxattr (mountdir, key, (void *)&limits, sizeof (limits)); if (ret < 0) { gf_log ("cli", GF_LOG_ERROR, "Failed to get the xattr %s " "on %s. 
Reason : %s", key, mountdir, strerror (errno)); switch (errno) { #if defined(ENODATA) case ENODATA: #endif #if defined(ENOATTR) && (ENOATTR != ENODATA) case ENOATTR: #endif cli_err ("%-40s %s", path, "Limit not set"); break; default: cli_err ("%-40s %s", path, strerror (errno)); break; } goto out; } limits.hl = ntoh64 (limits.hl); limits.sl = ntoh64 (limits.sl); xattr_size = sys_lgetxattr (mountdir, "trusted.glusterfs.quota.size", NULL, 0); if (xattr_size < (sizeof (int64_t) * 2) && type == GF_QUOTA_OPTION_TYPE_LIST_OBJECTS) { ret = -1; /* This can happen when glusterfs is upgraded from 3.6 to 3.7 * and the xattr healing is not completed. */ } else if (xattr_size > (sizeof (int64_t) * 2)) { ret = sys_lgetxattr (mountdir, "trusted.glusterfs.quota.size", &used_space, sizeof (used_space)); } else if (xattr_size > 0) { /* This is for compatibility. * Older version had only file usage */ ret = sys_lgetxattr (mountdir, "trusted.glusterfs.quota.size", &(used_space.size), sizeof (used_space.size)); used_space.file_count = 0; used_space.dir_count = 0; } else { ret = -1; } if (ret < 0) { gf_log ("cli", GF_LOG_ERROR, "Failed to get quota size " "on path %s: %s", mountdir, strerror (errno)); print_quota_list_empty (path, type); goto out; } used_space.size = ntoh64 (used_space.size); used_space.file_count = ntoh64 (used_space.file_count); used_space.dir_count = ntoh64 (used_space.dir_count); ret = print_quota_list_output (local, path, default_sl, &limits, &used_space, type); out: return ret; } int gf_cli_print_limit_list_from_dict (cli_local_t *local, char *volname, dict_t *dict, char *default_sl, int count, int op_ret, int op_errno, char *op_errstr) { int ret = -1; int i = 0; char key[1024] = {0,}; char mountdir[PATH_MAX] = {0,}; char *path = NULL; gf_boolean_t xml_err_flag = _gf_false; char err_str[NAME_MAX] = {0,}; int type = -1; if (!dict|| count <= 0) goto out; ret = dict_get_int32 (dict, "type", &type); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get quota type"); 
goto out; } /* Need to check if any quota limits are set on the volume before trying * to list them */ if (!_limits_set_on_volume (volname, type)) { snprintf (err_str, sizeof (err_str), "No%s quota configured on " "volume %s", (type == GF_QUOTA_OPTION_TYPE_LIST) ? "" : " inode", volname); if (global_state->mode & GLUSTER_MODE_XML) { xml_err_flag = _gf_true; } else { cli_out ("quota: %s", err_str); } ret = 0; goto out; } /* Check if the mount is online before doing any listing */ if (!_quota_aux_mount_online (volname)) { ret = -1; goto out; } if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_quota_limit_list_begin (local, op_ret, op_errno, op_errstr); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Error outputting xml begin"); goto out; } } else { print_quota_list_header (type); } while (count--) { snprintf (key, sizeof (key), "path%d", i++); ret = dict_get_str (dict, key, &path); if (ret < 0) { gf_log ("cli", GF_LOG_DEBUG, "Path not present in limit" " list"); continue; } ret = gf_canonicalize_path (path); if (ret) goto out; GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (mountdir, volname, path); ret = print_quota_list_from_mountdir (local, mountdir, default_sl, path, type); } out: if (xml_err_flag) { ret = cli_xml_output_str ("volQuota", NULL, -1, 0, err_str); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Error outputting in xml " "format"); } } return ret; } int print_quota_list_from_quotad (call_frame_t *frame, dict_t *rsp_dict) { char *path = NULL; char *default_sl = NULL; int ret = -1; cli_local_t *local = NULL; dict_t *gd_rsp_dict = NULL; quota_meta_t used_space = {0, }; quota_limits_t limits = {0, }; quota_limits_t *size_limits = NULL; int32_t type = 0; local = frame->local; gd_rsp_dict = local->dict; GF_ASSERT (frame); ret = dict_get_int32 (rsp_dict, "type", &type); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get type"); goto out; } ret = dict_get_str (rsp_dict, GET_ANCESTRY_PATH_KEY, &path); if (ret) { gf_log ("cli", GF_LOG_WARNING, "path key is not 
present " "in dict"); goto out; } ret = dict_get_str (gd_rsp_dict, "default-soft-limit", &default_sl); if (ret) { gf_log (frame->this->name, GF_LOG_ERROR, "failed to " "get default soft limit"); goto out; } if (type == GF_QUOTA_OPTION_TYPE_LIST) { ret = dict_get_bin (rsp_dict, QUOTA_LIMIT_KEY, (void **)&size_limits); if (ret) { gf_log ("cli", GF_LOG_WARNING, "limit key not present in dict on %s", path); goto out; } } else { ret = dict_get_bin (rsp_dict, QUOTA_LIMIT_OBJECTS_KEY, (void **)&size_limits); if (ret) { gf_log ("cli", GF_LOG_WARNING, "object limit key not present in dict on %s", path); goto out; } } limits.hl = ntoh64 (size_limits->hl); limits.sl = ntoh64 (size_limits->sl); ret = quota_dict_get_meta (rsp_dict, QUOTA_SIZE_KEY, &used_space); if (ret == -2 && type == GF_QUOTA_OPTION_TYPE_LIST) { ret = 0; /* quota_dict_get_meta returns -2 if metadata for inode * quotas is missing. * This can happen when glusterfs is upgraded from 3.6 to 3.7 * and the xattr healing is not completed. 
* We can contiue as success if we are listing only file usage */ } if (ret < 0) { gf_log ("cli", GF_LOG_WARNING, "size key not present in dict"); print_quota_list_empty (path, type); goto out; } ret = print_quota_list_output (local, path, default_sl, &limits, &used_space, type); out: return ret; } int cli_quotad_getlimit_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { /*TODO: we need to gather the path, hard-limit, soft-limit and used space*/ gf_cli_rsp rsp = {0,}; int ret = -1; dict_t *dict = NULL; call_frame_t *frame = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } if (rsp.op_ret) { ret = -1; if (strcmp (rsp.op_errstr, "")) cli_err ("quota command failed : %s", rsp.op_errstr); else cli_err ("quota command : failed"); goto out; } if (rsp.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret < 0) { gf_log ("cli", GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); goto out; } print_quota_list_from_quotad (frame, dict); } out: cli_cmd_broadcast_response (ret); if (dict) dict_unref (dict); free (rsp.dict.dict_val); return ret; } int cli_quotad_getlimit (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = dict_allocate_and_serialize (dict, &req.dict.dict_val, &req.dict.dict_len); if (ret < 0) { gf_log (this->name, GF_LOG_ERROR, "failed to serialize the data"); goto out; } ret = cli_cmd_submit (global_quotad_rpc, &req, frame, &cli_quotad_clnt, GF_AGGREGATOR_GETLIMIT, NULL, this, cli_quotad_getlimit_cbk, (xdrproc_t) xdr_gf_cli_req); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } void gf_cli_quota_list 
(cli_local_t *local, char *volname, dict_t *dict, char *default_sl, int count, int op_ret, int op_errno, char *op_errstr) { GF_VALIDATE_OR_GOTO ("cli", volname, out); if (!connected) goto out; if (count > 0) gf_cli_print_limit_list_from_dict (local, volname, dict, default_sl, count, op_ret, op_errno, op_errstr); out: return; } int gf_cli_quota_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_cli_rsp rsp = {0,}; int ret = -1; dict_t *dict = NULL; char *volname = NULL; int32_t type = 0; call_frame_t *frame = NULL; char *default_sl = NULL; char *limit_list = NULL; cli_local_t *local = NULL; dict_t *aggr = NULL; char *default_sl_dup = NULL; int32_t entry_count = 0; if (-1 == req->rpc_status) { goto out; } frame = myframe; local = frame->local; aggr = local->dict; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } if (rsp.op_ret) { ret = -1; if (global_state->mode & GLUSTER_MODE_XML) goto xml_output; if (strcmp (rsp.op_errstr, "")) { cli_err ("quota command failed : %s", rsp.op_errstr); if (rsp.op_ret == -ENOENT) cli_err ("please enter the path relative to " "the volume"); } else { cli_err ("quota command : failed"); } goto out; } if (rsp.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new (); ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); if (ret < 0) { gf_log ("cli", GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); goto out; } } gf_log ("cli", GF_LOG_DEBUG, "Received resp to quota command"); ret = dict_get_str (dict, "volname", &volname); if (ret) gf_log (frame->this->name, GF_LOG_ERROR, "failed to get volname"); ret = dict_get_str (dict, "default-soft-limit", &default_sl); if (ret) gf_log (frame->this->name, GF_LOG_TRACE, "failed to get " "default soft limit"); // default-soft-limit is part of rsp_dict only iff we sent // GLUSTER_CLI_QUOTA with type being 
GF_QUOTA_OPTION_TYPE_LIST if (default_sl) { default_sl_dup = gf_strdup (default_sl); if (!default_sl_dup) { ret = -1; goto out; } ret = dict_set_dynstr (aggr, "default-soft-limit", default_sl_dup); if (ret) { gf_log (frame->this->name, GF_LOG_TRACE, "failed to set default soft limit"); GF_FREE (default_sl_dup); } } ret = dict_get_int32 (dict, "type", &type); if (ret) gf_log (frame->this->name, GF_LOG_TRACE, "failed to get type"); ret = dict_get_int32 (dict, "count", &entry_count); if (ret) gf_log (frame->this->name, GF_LOG_TRACE, "failed to get count"); if ((type == GF_QUOTA_OPTION_TYPE_LIST) || (type == GF_QUOTA_OPTION_TYPE_LIST_OBJECTS)) { gf_cli_quota_list (local, volname, dict, default_sl, entry_count, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_quota_limit_list_end (local); if (ret < 0) { ret = -1; gf_log ("cli", GF_LOG_ERROR, "Error in printing" " xml output"); } goto out; } } xml_output: if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_str ("volQuota", NULL, rsp.op_ret, rsp.op_errno, rsp.op_errstr); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } if (!rsp.op_ret && type != GF_QUOTA_OPTION_TYPE_LIST && type != GF_QUOTA_OPTION_TYPE_LIST_OBJECTS) cli_out ("volume quota : success"); ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); if (dict) dict_unref (dict); free (rsp.dict.dict_val); return ret; } int gf_cli_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf_getspec_rsp rsp = {0,}; int ret = -1; char *spec = NULL; call_frame_t *frame = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } if (rsp.op_ret == -1) { gf_log (frame->this->name, GF_LOG_ERROR, "getspec failed"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to 
getspec"); spec = GF_MALLOC (rsp.op_ret + 1, cli_mt_char); if (!spec) { gf_log("", GF_LOG_ERROR, "out of memory"); goto out; } memcpy (spec, rsp.spec, rsp.op_ret); spec[rsp.op_ret] = '\0'; cli_out ("%s", spec); GF_FREE (spec); ret = 0; out: cli_cmd_broadcast_response (ret); return ret; } int gf_cli_pmap_b2p_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { pmap_port_by_brick_rsp rsp = {0,}; int ret = -1; char *spec = NULL; call_frame_t *frame = NULL; if (-1 == req->rpc_status) { goto out; } frame = myframe; ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_port_by_brick_rsp); if (ret < 0) { gf_log (frame->this->name, GF_LOG_ERROR, "Failed to decode xdr response"); goto out; } if (rsp.op_ret == -1) { gf_log (frame->this->name, GF_LOG_ERROR, "pump_b2p failed"); goto out; } gf_log ("cli", GF_LOG_INFO, "Received resp to pmap b2p"); cli_out ("%d", rsp.port); GF_FREE (spec); ret = rsp.op_ret; out: cli_cmd_broadcast_response (ret); return ret; } int32_t gf_cli_probe (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,},}; int ret = 0; dict_t *dict = NULL; int port = 0; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = dict_get_int32 (dict, "port", &port); if (ret) { ret = dict_set_int32 (dict, "port", CLI_GLUSTERD_PORT); if (ret) goto out; } ret = cli_to_glusterd (&req, frame, gf_cli_probe_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_PROBE, this, cli_rpc_prog, NULL); out: GF_FREE (req.dict.dict_val); gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_deprobe (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,},}; int ret = 0; dict_t *dict = NULL; int port = 0; int flags = 0; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = dict_get_int32 (dict, "port", &port); if (ret) { ret = dict_set_int32 (dict, "port", CLI_GLUSTERD_PORT); if (ret) goto out; } ret = dict_get_int32 (dict, "flags", &flags); if (ret) { ret = dict_set_int32 
(dict, "flags", 0); if (ret) goto out; } ret = cli_to_glusterd (&req, frame, gf_cli_deprobe_cbk, (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_DEPROBE, this, cli_rpc_prog, NULL); out: GF_FREE (req.dict.dict_val); gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_list_friends (call_frame_t *frame, xlator_t *this, void *data) { gf1_cli_peer_list_req req = {0,}; int ret = 0; unsigned long flags = 0; if (!frame || !this) { ret = -1; goto out; } GF_ASSERT (frame->local == NULL); flags = (long)data; req.flags = flags; frame->local = (void*)flags; ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog, GLUSTER_CLI_LIST_FRIENDS, NULL, this, gf_cli_list_friends_cbk, (xdrproc_t) xdr_gf1_cli_peer_list_req); out: if (ret) { /* * If everything goes fine, gf_cli_list_friends_cbk() * [invoked through cli_cmd_submit()]resets the * frame->local to NULL. In case cli_cmd_submit() * fails in between, RESET frame->local here. */ frame->local = NULL; } gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_get_next_volume (call_frame_t *frame, xlator_t *this, void *data) { int ret = 0; cli_cmd_volume_get_ctx_t *ctx = NULL; cli_local_t *local = NULL; if (!frame || !this || !data) { ret = -1; goto out; } ctx = data; local = frame->local; if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_info_begin (local, 0, 0, ""); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); goto out; } } ret = gf_cli_get_volume (frame, this, data); if (!local || !local->get_vol.volname) { if ((global_state->mode & GLUSTER_MODE_XML)) goto end_xml; cli_err ("No volumes present"); goto out; } ctx->volname = local->get_vol.volname; while (ctx->volname) { ret = gf_cli_get_volume (frame, this, ctx); if (ret) goto out; ctx->volname = local->get_vol.volname; } end_xml: if (global_state->mode & GLUSTER_MODE_XML) { ret = cli_xml_output_vol_info_end (local); if (ret) gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); } out: 
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_get_volume (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; cli_cmd_volume_get_ctx_t *ctx = NULL; dict_t *dict = NULL; int32_t flags = 0; if (!frame || !this || !data) { ret = -1; goto out; } ctx = data; dict = dict_new (); if (!dict) goto out; if (ctx->volname) { ret = dict_set_str (dict, "volname", ctx->volname); if (ret) goto out; } flags = ctx->flags; ret = dict_set_int32 (dict, "flags", flags); if (ret) { gf_log (frame->this->name, GF_LOG_ERROR, "failed to set flags"); goto out; } ret = dict_allocate_and_serialize (dict, &req.dict.dict_val, &req.dict.dict_len); ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog, GLUSTER_CLI_GET_VOLUME, NULL, this, gf_cli_get_volume_cbk, (xdrproc_t) xdr_gf_cli_req); out: if (dict) dict_unref (dict); GF_FREE (req.dict.dict_val); gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli3_1_uuid_get (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli3_1_uuid_get_cbk, (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_UUID_GET, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli3_1_uuid_reset (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli3_1_uuid_reset_cbk, (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_UUID_RESET, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_create_volume (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } 
dict = data; ret = cli_to_glusterd (&req, frame, gf_cli_create_volume_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_CREATE_VOLUME, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); GF_FREE (req.dict.dict_val); return ret; } int32_t gf_cli_delete_volume (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli_delete_volume_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_DELETE_VOLUME, this, cli_rpc_prog, NULL); out: GF_FREE (req.dict.dict_val); gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_start_volume (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli_start_volume_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_START_VOLUME, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_stop_volume (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = data; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli_stop_volume_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_STOP_VOLUME, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_defrag_volume (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli_defrag_volume_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_DEFRAG_VOLUME, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t 
gf_cli_rename_volume (call_frame_t *frame, xlator_t *this, void *data)
{
        gf_cli_req  req  = {{0,}};
        int         ret  = 0;
        dict_t     *dict = NULL;

        if (!frame || !this || !data) {
                ret = -1;
                goto out;
        }

        dict = data;

        ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
                                           &req.dict.dict_len);
        if (ret < 0) {
                gf_log (this->name, GF_LOG_ERROR,
                        "failed to serialize the data");
                goto out;
        }

        ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
                              GLUSTER_CLI_RENAME_VOLUME, NULL, this,
                              gf_cli_rename_volume_cbk,
                              (xdrproc_t) xdr_gf_cli_req);
out:
        /* BUG FIX: the serialized dict buffer was leaked on every call */
        GF_FREE (req.dict.dict_val);
        gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}

/* Send a volume-reset request (reset volume options). */
int32_t
gf_cli_reset_volume (call_frame_t *frame, xlator_t *this, void *data)
{
        gf_cli_req  req  = {{0,} };
        int         ret  = 0;
        dict_t     *dict = NULL;

        if (!frame || !this || !data) {
                ret = -1;
                goto out;
        }

        dict = data;
        ret = cli_to_glusterd (&req, frame, gf_cli_reset_volume_cbk,
                               (xdrproc_t) xdr_gf_cli_req, dict,
                               GLUSTER_CLI_RESET_VOLUME, this,
                               cli_rpc_prog, NULL);
out:
        /* BUG FIX: the dict serialized by cli_to_glusterd() was leaked */
        GF_FREE (req.dict.dict_val);
        gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}

/* Send an NFS-Ganesha enable/disable request. */
int32_t
gf_cli_ganesha (call_frame_t *frame, xlator_t *this, void *data)
{
        gf_cli_req  req  = { {0,} } ;
        int         ret  = 0;
        dict_t     *dict = NULL;

        if (!frame || !this || !data) {
                ret = -1;
                goto out;
        }

        dict = data;
        ret = cli_to_glusterd (&req, frame, gf_cli_ganesha_cbk,
                               (xdrproc_t) xdr_gf_cli_req, dict,
                               GLUSTER_CLI_GANESHA, this, cli_rpc_prog,
                               NULL);
out:
        /* BUG FIX: the dict serialized by cli_to_glusterd() was leaked */
        GF_FREE (req.dict.dict_val);
        gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}

/* Send a volume-set request (set volume options). */
int32_t
gf_cli_set_volume (call_frame_t *frame, xlator_t *this, void *data)
{
        gf_cli_req  req  = {{0,} };
        int         ret  = 0;
        dict_t     *dict = NULL;

        if (!frame || !this || !data) {
                ret = -1;
                goto out;
        }

        dict = data;
        ret = cli_to_glusterd (&req, frame, gf_cli_set_volume_cbk,
                               (xdrproc_t) xdr_gf_cli_req, dict,
                               GLUSTER_CLI_SET_VOLUME, this, cli_rpc_prog,
                               NULL);
out:
        /* BUG FIX: the dict serialized by cli_to_glusterd() was leaked */
        GF_FREE (req.dict.dict_val);
        gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}

/* Send an add-brick request. */
int32_t
gf_cli_add_brick (call_frame_t *frame, xlator_t *this, void *data)
{
        gf_cli_req  req  = {{0,} };
        int         ret  = 0;
        dict_t     *dict = NULL;
        char
*volname = NULL; int32_t count = 0; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = dict_get_str (dict, "volname", &volname); if (ret) goto out; ret = dict_get_int32 (dict, "count", &count); if (ret) goto out; ret = cli_to_glusterd (&req, frame, gf_cli_add_brick_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_ADD_BRICK, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); GF_FREE (req.dict.dict_val); return ret; } int32_t gf_cli_attach_tier (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,} }; int ret = 0; dict_t *dict = NULL; dict_t *newdict = NULL; char *tierwords[] = {"volume", "rebalance", "", "tier", "start", NULL}; const char **words = (const char **)tierwords; int wordcount = 5; char *volname = NULL; cli_local_t *local = NULL; cli_local_t *oldlocal = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; if (ret) goto out; ret = cli_to_glusterd (&req, frame, gf_cli_add_brick_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_ATTACH_TIER, this, cli_rpc_prog, NULL); if (ret) goto out; ret = dict_get_str (dict, "volname", &volname); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name"); goto notify_cli; } words[2] = volname; ret = cli_cmd_volume_defrag_parse ((const char **)words, wordcount, &newdict); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to parse tier start " "command"); goto notify_cli; } gf_log ("cli", GF_LOG_DEBUG, "Sending tier start"); oldlocal = frame->local; CLI_LOCAL_INIT (local, words, frame, newdict); ret = gf_cli_defrag_volume (frame, this, newdict); frame->local = oldlocal; cli_local_wipe (local); notify_cli: if (ret) { cli_out ("Failed to run tier start. 
Please execute tier start " "command explictly"); cli_out ("Usage : gluster volume rebalance tier " "start"); } out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); GF_FREE (req.dict.dict_val); return ret; } int32_t gf_cli_detach_tier (call_frame_t *frame, xlator_t *this, void *data) { return gf_cli_remove_brick(frame, this, data); } int32_t gf_cli_remove_brick (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}};; gf_cli_req status_req = {{0,}};; int ret = 0; dict_t *dict = NULL; int32_t command = 0; char *volname = NULL; int32_t cmd = 0; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = dict_get_str (dict, "volname", &volname); if (ret) goto out; ret = dict_get_int32 (dict, "command", &command); if (ret) goto out; if ((command != GF_OP_CMD_STATUS) && (command != GF_OP_CMD_STOP)) { ret = cli_to_glusterd (&req, frame, gf_cli_remove_brick_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_REMOVE_BRICK, this, cli_rpc_prog, NULL); } else { /* Need rebalance status to be sent :-) */ if (command == GF_OP_CMD_STATUS) cmd |= GF_DEFRAG_CMD_STATUS; else cmd |= GF_DEFRAG_CMD_STOP; ret = dict_set_int32 (dict, "rebalance-command", (int32_t) cmd); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Failed to set dict"); goto out; } ret = cli_to_glusterd (&status_req, frame, gf_cli3_remove_brick_status_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_DEFRAG_VOLUME, this, cli_rpc_prog, NULL); } out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); GF_FREE (req.dict.dict_val); GF_FREE (status_req.dict.dict_val); return ret; } int32_t gf_cli_replace_brick (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; char *src_brick = NULL; char *dst_brick = NULL; char *volname = NULL; int32_t op = 0; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = dict_get_int32 (dict, "operation", &op); if (ret) { gf_log (this->name, GF_LOG_DEBUG, "dict_get on operation 
failed"); goto out; } ret = dict_get_str (dict, "volname", &volname); if (ret) { gf_log (this->name, GF_LOG_DEBUG, "dict_get on volname failed"); goto out; } ret = dict_get_str (dict, "src-brick", &src_brick); if (ret) { gf_log (this->name, GF_LOG_DEBUG, "dict_get on src-brick failed"); goto out; } ret = dict_get_str (dict, "dst-brick", &dst_brick); if (ret) { gf_log (this->name, GF_LOG_DEBUG, "dict_get on dst-brick failed"); goto out; } gf_log (this->name, GF_LOG_DEBUG, "Received command replace-brick %s with " "%s with operation=%d", src_brick, dst_brick, op); ret = cli_to_glusterd (&req, frame, gf_cli_replace_brick_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_REPLACE_BRICK, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); GF_FREE (req.dict.dict_val); return ret; } int32_t gf_cli_log_rotate (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli_log_rotate_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_LOG_ROTATE, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); GF_FREE (req.dict.dict_val); return ret; } int32_t gf_cli_sync_volume (call_frame_t *frame, xlator_t *this, void *data) { int ret = 0; gf_cli_req req = {{0,}}; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli_sync_volume_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_SYNC_VOLUME, this, cli_rpc_prog, NULL); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); GF_FREE (req.dict.dict_val); return ret; } int32_t gf_cli_getspec (call_frame_t *frame, xlator_t *this, void *data) { gf_getspec_req req = {0,}; int ret = 0; dict_t *dict = NULL; dict_t *op_dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = dict_get_str (dict, "volid", &req.key); if (ret) 
goto out; op_dict = dict_new (); if (!op_dict) { ret = -1; goto out; } // Set the supported min and max op-versions, so glusterd can make a // decision ret = dict_set_int32 (op_dict, "min-op-version", GD_OP_VERSION_MIN); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "Failed to set min-op-version" " in request dict"); goto out; } ret = dict_set_int32 (op_dict, "max-op-version", GD_OP_VERSION_MAX); if (ret) { gf_log (THIS->name, GF_LOG_ERROR, "Failed to set max-op-version" " in request dict"); goto out; } ret = dict_allocate_and_serialize (op_dict, &req.xdata.xdata_val, &req.xdata.xdata_len); if (ret < 0) { gf_log (THIS->name, GF_LOG_ERROR, "Failed to serialize dictionary"); goto out; } ret = cli_cmd_submit (NULL, &req, frame, &cli_handshake_prog, GF_HNDSK_GETSPEC, NULL, this, gf_cli_getspec_cbk, (xdrproc_t) xdr_gf_getspec_req); out: if (op_dict) { dict_unref(op_dict); } gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int32_t gf_cli_quota (call_frame_t *frame, xlator_t *this, void *data) { gf_cli_req req = {{0,}}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = cli_to_glusterd (&req, frame, gf_cli_quota_cbk, (xdrproc_t) xdr_gf_cli_req, dict, GLUSTER_CLI_QUOTA, this, cli_rpc_prog, NULL); out: GF_FREE (req.dict.dict_val); return ret; } int32_t gf_cli_pmap_b2p (call_frame_t *frame, xlator_t *this, void *data) { pmap_port_by_brick_req req = {0,}; int ret = 0; dict_t *dict = NULL; if (!frame || !this || !data) { ret = -1; goto out; } dict = data; ret = dict_get_str (dict, "brick", &req.brick); if (ret) goto out; ret = cli_cmd_submit (NULL, &req, frame, &cli_pmap_prog, GF_PMAP_PORTBYBRICK, NULL, this, gf_cli_pmap_b2p_cbk, (xdrproc_t) xdr_pmap_port_by_brick_req); out: gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); return ret; } static int gf_cli_fsm_log_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { gf1_cli_fsm_log_rsp rsp = {0,}; int ret = -1; dict_t *dict = NULL; int 
tr_count = 0; char key[256] = {0}; int i = 0; char *old_state = NULL; char *new_state = NULL; char *event = NULL; char *time = NULL; if (-1 == req->rpc_status) { goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_fsm_log_rsp); if (ret < 0) { gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, "Failed to decode xdr response");