| author | Raghavendra Bhat <raghavendra@redhat.com> | 2012-08-23 15:32:33 +0530 |
|---|---|---|
| committer | Anand Avati <avati@redhat.com> | 2012-11-28 16:28:55 -0800 |
| commit | fbfcb0ad2aac73c2b5ab8950770c1184352bbf24 (patch) | |
| tree | d23af3c8a539da97e8f146ab19e4996cad6419c5 /xlators/mgmt | |
| parent | fadc34e7ce82f9e7f98f20e995cb2bbf71a00b20 (diff) | |
glusterd, cli: implement gluster system uuid reset command
A commonly faced problem among glusterfs users is: after a fresh
installation of glusterfs in a virtual machine, the VM image is
cloned to make multiple instances of the server. This breaks
glusterd, because on the first boot after installation glusterd
creates the node UUID, and the clones inherit it. The result is
weird behavior at the time of peer probe, where glusterd does not
(yet) deal with UUID collisions in a user-friendly way.
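
To make the failure mode concrete, here is a minimal, self-contained sketch of the generate-once-and-persist pattern the commit message describes. This is not glusterd's actual code; the file path is made up for illustration (glusterd keeps its UUID in a file under its working directory). An identity written to disk on first boot travels with any byte-for-byte copy of the image:

```c
/* Sketch only: why cloning a disk image duplicates the node identity.
 * Build with: cc uuid_demo.c -luuid */
#include <stdio.h>
#include <uuid/uuid.h>   /* libuuid: uuid_generate, uuid_unparse */

int main (void)
{
        uuid_t uuid;
        char   str[37];   /* 36 chars + NUL */
        FILE  *fp;

        /* hypothetical identity file; glusterd's real path differs */
        fp = fopen ("/tmp/node-uuid.info", "r");
        if (fp) {         /* UUID already persisted: reuse it */
                if (fgets (str, sizeof (str), fp))
                        printf ("retrieved UUID: %s\n", str);
                fclose (fp);
                return 0;
        }

        uuid_generate (uuid);     /* first boot: mint a new identity */
        uuid_unparse (uuid, str);

        fp = fopen ("/tmp/node-uuid.info", "w");
        if (!fp)
                return 1;
        fprintf (fp, "%s", str);  /* a clone inherits this file as-is */
        fclose (fp);
        printf ("generated UUID: %s\n", str);
        return 0;
}
```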
To handle this, the gluster peer reset command is implemented;
upon execution it changes the UUID of the local glusterd.
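As the handler in the diff below shows, the reset is refused when any
volumes exist or when the node already belongs to a trusted storage
pool, and it reports an error if the freshly generated UUID happens to
equal the old one.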
Change-Id: If207dd2ad93ab94ef1a3253f409c21c442975f87
BUG: 811493
Signed-off-by: Raghavendra Bhat <raghavendra@redhat.com>
Reviewed-on: http://review.gluster.org/3637
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
Diffstat (limited to 'xlators/mgmt')
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-handler.c | 100 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd.c | 40 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd.h | 2 |

3 files changed, 131 insertions, 11 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 592fbbdb260..cec94f89f6c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -921,6 +921,105 @@ out:
 }
 
 int
+glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
+{
+        int              ret = -1;
+        dict_t          *dict = NULL;
+        xlator_t        *this = NULL;
+        glusterd_conf_t *priv = NULL;
+        uuid_t           uuid = {0};
+        gf_cli_rsp       rsp = {0,};
+        gf_cli_req       cli_req = {{0,}};
+        char             msg_str[2048] = {0,};
+
+        GF_ASSERT (req);
+
+        this = THIS;
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        if (!xdr_to_generic (req->msg[0], &cli_req,
+                             (xdrproc_t)xdr_gf_cli_req)) {
+                //failed to decode msg;
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid reset req");
+
+        if (cli_req.dict.dict_len) {
+                /* Unserialize the dictionary */
+                dict = dict_new ();
+
+                ret = dict_unserialize (cli_req.dict.dict_val,
+                                        cli_req.dict.dict_len,
+                                        &dict);
+                if (ret < 0) {
+                        gf_log ("glusterd", GF_LOG_ERROR,
+                                "failed to "
+                                "unserialize req-buffer to dictionary");
+                        snprintf (msg_str, sizeof (msg_str), "Unable to decode "
+                                  "the buffer");
+                        goto out;
+                } else {
+                        dict->extra_stdfree = cli_req.dict.dict_val;
+                }
+        }
+
+        /* In the above section if dict_unserialize is successful, ret is set
+         * to zero.
+         */
+        ret = -1;
+        // Do not allow peer reset if there are any volumes in the cluster
+        if (!list_empty (&priv->volumes)) {
+                snprintf (msg_str, sizeof (msg_str), "volumes are already "
+                          "present in the cluster. Resetting uuid is not "
+                          "allowed");
+                gf_log (this->name, GF_LOG_WARNING, msg_str);
+                goto out;
+        }
+
+        // Do not allow peer reset if trusted storage pool is already formed
+        if (!list_empty (&priv->peers)) {
+                snprintf (msg_str, sizeof (msg_str), "trusted storage pool "
+                          "has been already formed. Please detach this peer "
+                          "from the pool and reset its uuid.");
+                gf_log (this->name, GF_LOG_WARNING, msg_str);
+                goto out;
+        }
+
+        uuid_copy (uuid, priv->uuid);
+        ret = glusterd_uuid_generate_save ();
+
+        if (!uuid_compare (uuid, MY_UUID)) {
+                snprintf (msg_str, sizeof (msg_str), "old uuid and the new uuid"
+                          " are same. Try gluster peer reset again");
+                gf_log (this->name, GF_LOG_ERROR, msg_str);
+                ret = -1;
+                goto out;
+        }
+
+out:
+        if (ret) {
+                rsp.op_ret = -1;
+                if (msg_str[0] == '\0')
+                        snprintf (msg_str, sizeof (msg_str), "Operation "
+                                  "failed");
+                rsp.op_errstr = msg_str;
+                ret = 0;
+        } else {
+                rsp.op_errstr = "";
+        }
+
+        glusterd_to_cli (req, &rsp, NULL, 0, NULL,
+                         (xdrproc_t)xdr_gf_cli_rsp, dict);
+
+        if (dict)
+                dict_unref (dict);
+
+        return ret;
+}
+
+int
 glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
 {
         int                     ret = -1;
@@ -3035,6 +3134,7 @@ rpcsvc_actor_t gd_svc_cli_actors[] = {
         [GLUSTER_CLI_DEFRAG_VOLUME]  = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL, 0},
         [GLUSTER_CLI_DEPROBE]        = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, 0},
         [GLUSTER_CLI_LIST_FRIENDS]   = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0},
+        [GLUSTER_CLI_UUID_RESET]     = { "UUID_RESET", GLUSTER_CLI_UUID_RESET, glusterd_handle_cli_uuid_reset, NULL, 0},
         [GLUSTER_CLI_START_VOLUME]   = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, 0},
         [GLUSTER_CLI_STOP_VOLUME]    = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, 0},
         [GLUSTER_CLI_DELETE_VOLUME]  = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, 0},
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index bd9e6e031fa..c4629c795da 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -43,7 +43,6 @@
 
 #include "glusterd-mountbroker.h"
 
-static uuid_t glusterd_uuid;
 extern struct rpcsvc_program gluster_handshake_prog;
 extern struct rpcsvc_program gluster_pmap_prog;
 extern glusterd_op_info_t opinfo;
@@ -93,28 +92,47 @@ glusterd_uuid_init ()
 
         ret = glusterd_retrieve_uuid ();
         if (ret == 0) {
-                uuid_copy (glusterd_uuid, priv->uuid);
                 gf_log (this->name, GF_LOG_INFO,
                         "retrieved UUID: %s", uuid_utoa (priv->uuid));
                 return 0;
         }
 
-        uuid_generate (glusterd_uuid);
+        ret = glusterd_uuid_generate_save ();
 
-        gf_log (this->name, GF_LOG_INFO, "generated UUID: %s",
-                uuid_utoa (glusterd_uuid));
+        if (ret) {
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Unable to generate and save new UUID");
+                return ret;
+        }
+
+        return 0;
+}
+
+int
+glusterd_uuid_generate_save ()
+{
+        int              ret = -1;
+        glusterd_conf_t *priv = NULL;
+        xlator_t        *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        priv = this->private;
+        GF_ASSERT (priv);
 
-        uuid_copy (priv->uuid, glusterd_uuid);
+        uuid_generate (priv->uuid);
+
+        gf_log (this->name, GF_LOG_INFO, "generated UUID: %s",
+                uuid_utoa (priv->uuid));
 
         ret = glusterd_store_global_info (this);
 
-        if (ret) {
+        if (ret)
                 gf_log (this->name, GF_LOG_ERROR,
-                        "Unable to store generated UUID");
-                return ret;
-        }
+                        "Unable to store the generated uuid %s",
+                        uuid_utoa (priv->uuid));
 
-        return 0;
+        return ret;
 }
 
 int
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index fddfabd344f..95353f06514 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -389,6 +389,8 @@ typedef ssize_t (*gd_serialize_t) (struct iovec outmsg, void *args);
 
 int
 glusterd_uuid_init();
 
+int glusterd_uuid_generate_save ();
+
 #define MY_UUID (__glusterd_uuid())
 
 static inline unsigned char *
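
One detail worth noting in the handler above: libuuid's uuid_compare() returns 0 when two UUIDs are equal (it orders them like memcmp), so the check `!uuid_compare (uuid, MY_UUID)` fires exactly when regeneration produced the same UUID as the saved copy of the old one. A standalone illustration of that idiom:

```c
/* Build with: cc compare_demo.c -luuid */
#include <assert.h>
#include <uuid/uuid.h>

int main (void)
{
        uuid_t old_id, new_id;

        uuid_generate (old_id);
        uuid_copy (new_id, old_id);              /* identical copy */
        assert (uuid_compare (old_id, new_id) == 0);  /* equal => 0 */

        uuid_generate (new_id);                  /* fresh random UUID */
        /* a collision between random UUIDs is astronomically unlikely */
        assert (uuid_compare (old_id, new_id) != 0);  /* different => nonzero */
        return 0;
}
```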