From 214dccb317437dab5464456a4eb30c88444370e7 Mon Sep 17 00:00:00 2001
From: Krishnan Parthasarathi
Date: Thu, 6 Jun 2013 10:46:57 +0530
Subject: glusterd: Add a cmd for getting uuid of local node

    Usage: gluster system:: uuid get

    This is needed since we generate the uuid of a node lazily, i.e., we
    generate a uuid for the node only on the first volume or peer
    operation, when the node needs an external identity.

    With this command, we can force[1] the uuid generation without
    performing a volume or peer operation.

    [1]: Querying for the uuid (i.e., "uuid get") forces the uuid to come
    into existence.

    Change-Id: I62c8b6754117756aa4d773dd48af4ddeb1a1d878
    BUG: 971661
    Signed-off-by: Krishnan Parthasarathi
    Reviewed-on: http://review.gluster.org/5175
    Tested-by: Gluster Build System
    Reviewed-by: Kaushal M
---
 cli/src/cli-cmd-system.c |  51 +++++++++++++++++++++++
 cli/src/cli-rpc-ops.c    | 106 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 157 insertions(+)

(limited to 'cli')

diff --git a/cli/src/cli-cmd-system.c b/cli/src/cli-cmd-system.c
index b969c227..f73758ae 100644
--- a/cli/src/cli-cmd-system.c
+++ b/cli/src/cli-cmd-system.c
@@ -278,6 +278,53 @@ cli_cmd_umount_cbk (struct cli_state *state, struct cli_cmd_word *word,
         return ret;
 }
 
+int
+cli_cmd_uuid_get_cbk (struct cli_state *state, struct cli_cmd_word *word,
+                      const char **words, int wordcount)
+{
+        int                   ret = -1;
+        int                   sent = 0;
+        int                   parse_error = 0;
+        dict_t               *dict = NULL;
+        rpc_clnt_procedure_t *proc = NULL;
+        call_frame_t         *frame = NULL;
+        cli_local_t          *local = NULL;
+        xlator_t             *this = NULL;
+
+        this = THIS;
+        if (wordcount != 3) {
+                cli_usage_out (word->pattern);
+                parse_error = 1;
+                goto out;
+        }
+
+        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_UUID_GET];
+        frame = create_frame (this, this->ctx->pool);
+        if (!frame)
+                goto out;
+
+        dict = dict_new ();
+        if (!dict)
+                goto out;
+
+        CLI_LOCAL_INIT (local, words, frame, dict);
+        if (proc->fn)
+                ret = proc->fn (frame, this, dict);
+
+out:
+        if (ret) {
+                cli_cmd_sent_status_get (&sent);
+                if ((sent == 0) && (parse_error == 0))
+                        cli_out ("uuid get failed");
+        }
+
+        if (dict)
+                dict_unref (dict);
+
+        CLI_STACK_DESTROY (frame);
+        return ret;
+}
+
 int
 cli_cmd_uuid_reset_cbk (struct cli_state *state, struct cli_cmd_word *word,
                         const char **words, int wordcount)
@@ -364,6 +411,10 @@ struct cli_cmd cli_system_cmds[] = {
           cli_cmd_umount_cbk,
           "request an umount"},
 
+        { "system:: uuid get",
+          cli_cmd_uuid_get_cbk,
+          "get uuid of glusterd"},
+
         { "system:: uuid reset",
           cli_cmd_uuid_reset_cbk,
           "reset the uuid of glusterd"},

diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index a9f53bfd..a89e6b78 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -992,6 +992,88 @@ out:
         return ret;
 }
 
+int
+gf_cli3_1_uuid_get_cbk (struct rpc_req *req, struct iovec *iov,
+                        int count, void *myframe)
+{
+        char            *uuid_str = NULL;
+        gf_cli_rsp       rsp = {0,};
+        int              ret = -1;
+        cli_local_t     *local = NULL;
+        call_frame_t    *frame = NULL;
+        dict_t          *dict = NULL;
+
+        if (-1 == req->rpc_status)
+                goto out;
+
+        frame = myframe;
+
+        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+        if (ret < 0) {
+                gf_log (frame->this->name, GF_LOG_ERROR,
+                        "Failed to decode xdr response");
+                goto out;
+        }
+
+        local = frame->local;
+        frame->local = NULL;
+
+        gf_log ("cli", GF_LOG_INFO, "Received resp to uuid get");
+
+        dict = dict_new ();
+        if (!dict) {
+                ret = -1;
+                goto out;
+        }
+
+        ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len,
+                                &dict);
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR, "Failed to unserialize "
+                        "response for uuid get");
+                goto out;
+        }
+
+        ret = dict_get_str (dict, "uuid", &uuid_str);
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR, "Failed to get uuid "
+                        "from dictionary");
+                goto out;
+        }
+
+        if (global_state->mode & GLUSTER_MODE_XML) {
+                ret = cli_xml_output_dict ("uuidGenerate", dict, rsp.op_ret,
+                                           rsp.op_errno, rsp.op_errstr);
+                if (ret)
+                        gf_log ("cli", GF_LOG_ERROR,
+                                "Error outputting to xml");
+                goto out;
+        }
+
+        if (rsp.op_ret) {
+                if (strcmp (rsp.op_errstr, "") == 0)
+                        cli_err ("Get uuid was unsuccessful");
+                else
+                        cli_err ("%s", rsp.op_errstr);
+
+        } else {
+                cli_out ("UUID: %s", uuid_str);
+
+        }
+        ret = rsp.op_ret;
+
+out:
+        cli_cmd_broadcast_response (ret);
+        cli_local_wipe (local);
+        if (rsp.dict.dict_val)
+                free (rsp.dict.dict_val);
+        if (dict)
+                dict_unref (dict);
+
+        gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+        return ret;
+}
+
 int
 gf_cli3_1_uuid_reset_cbk (struct rpc_req *req, struct iovec *iov,
                           int count, void *myframe)
@@ -2890,6 +2972,29 @@ out:
         return ret;
 }
 
+int32_t
+gf_cli3_1_uuid_get (call_frame_t *frame, xlator_t *this,
+                    void *data)
+{
+        gf_cli_req  req = {{0,}};
+        int         ret = 0;
+        dict_t     *dict = NULL;
+
+        if (!frame || !this || !data) {
+                ret = -1;
+                goto out;
+        }
+
+        dict = data;
+        ret = cli_to_glusterd (&req, frame, gf_cli3_1_uuid_get_cbk,
+                               (xdrproc_t)xdr_gf_cli_req, dict,
+                               GLUSTER_CLI_UUID_GET, this, cli_rpc_prog,
+                               NULL);
+out:
+        gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
 int32_t
 gf_cli3_1_uuid_reset (call_frame_t *frame, xlator_t *this,
                       void *data)
@@ -6762,6 +6867,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
         [GLUSTER_CLI_DEPROBE]        = {"DEPROBE_QUERY", gf_cli_deprobe},
         [GLUSTER_CLI_LIST_FRIENDS]   = {"LIST_FRIENDS", gf_cli_list_friends},
         [GLUSTER_CLI_UUID_RESET]     = {"UUID_RESET", gf_cli3_1_uuid_reset},
+        [GLUSTER_CLI_UUID_GET]       = {"UUID_GET", gf_cli3_1_uuid_get},
         [GLUSTER_CLI_CREATE_VOLUME]  = {"CREATE_VOLUME", gf_cli_create_volume},
         [GLUSTER_CLI_DELETE_VOLUME]  = {"DELETE_VOLUME", gf_cli_delete_volume},
         [GLUSTER_CLI_START_VOLUME]   = {"START_VOLUME", gf_cli_start_volume},
-- 
cgit