Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-handler.c   2083
1 file changed, 1524 insertions(+), 559 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index b87c2cd8..afc22366 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -1,22 +1,12 @@
 /*
-  Copyright (c) 2006-2011 Gluster, Inc. <http://www.gluster.com>
-  This file is part of GlusterFS.
-
-  GlusterFS is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published
-  by the Free Software Foundation; either version 3 of the License,
-  or (at your option) any later version.
-
-  GlusterFS is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see
-  <http://www.gnu.org/licenses/>.
-*/
+   Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+   This file is part of GlusterFS.
+
+   This file is licensed to you under your choice of the GNU Lesser
+   General Public License, version 3 or any later version (LGPLv3 or
+   later), or the GNU General Public License, version 2 (GPLv2), in all
+   cases as published by the Free Software Foundation.
+*/
 #ifndef _CONFIG_H
 #define _CONFIG_H
 #include "config.h"
@@ -57,6 +47,37 @@
 #include "defaults.c"
 #include "common-utils.h"
+#include "globals.h"
+#include "glusterd-syncop.h"
+
+#ifdef HAVE_BD_XLATOR
+#include <lvm2app.h>
+#endif
+
+int
+glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
+                            rpc_clnt_event_t event,
+                            void *data, rpc_clnt_notify_t notify_fn)
+{
+        glusterd_conf_t *priv = THIS->private;
+        int              ret  = -1;
+
+        synclock_lock (&priv->big_lock);
+        ret = notify_fn (rpc, mydata, event, data);
+        synclock_unlock (&priv->big_lock);
+
+        return ret;
+}
+
+int
+glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn)
+{
+        glusterd_conf_t *priv = THIS->private;
+        int              ret  = -1;
+
+        synclock_lock (&priv->big_lock);
+        ret = actor_fn (req);
+        synclock_unlock (&priv->big_lock);
+
+        return ret;
+}
+
 static int
 glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
                             char *hostname, int port,
@@ -78,10 +99,12 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
         ret = glusterd_friend_find (uuid, rhost, &peerinfo);
 
         if (ret) {
-                ret = glusterd_xfer_friend_add_resp (req, rhost, port, -1,
-                                                     GF_PROBE_UNKNOWN_PEER);
-                if (friend_req->vols.vols_val)
+                ret = glusterd_xfer_friend_add_resp (req, hostname, rhost, port,
+                                                     -1, GF_PROBE_UNKNOWN_PEER);
+                if (friend_req->vols.vols_val) {
                         free (friend_req->vols.vols_val);
+                        friend_req->vols.vols_val = NULL;
+                }
                 goto out;
         }
 
@@ -139,19 +162,16 @@ out:
         if (0 != ret) {
                 if (ctx && ctx->hostname)
                         GF_FREE (ctx->hostname);
-                if (ctx)
-                        GF_FREE (ctx);
+                GF_FREE (ctx);
                 if (dict) {
                         if ((!dict->extra_stdfree) && friend_req->vols.vols_val)
                                 free (friend_req->vols.vols_val);
                         dict_unref (dict);
                 } else {
-                        if (friend_req->vols.vols_val)
-                                free (friend_req->vols.vols_val);
+                        free (friend_req->vols.vols_val);
                 }
-                if (event)
-                        GF_FREE (event);
+                GF_FREE (event);
         } else {
                 if (peerinfo && (0 == peerinfo->connected))
                         ret = GLUSTERD_CONNECTION_AWAITED;
@@ -221,8 +241,7 @@ out:
         if (0 != ret) {
                 if (ctx && ctx->hostname)
                         GF_FREE (ctx->hostname);
-                if (ctx)
-                        GF_FREE (ctx);
+                GF_FREE (ctx);
         }
 
         return ret;
 }
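The glusterd_big_locked_notify/glusterd_big_locked_handler pair added at the top of this diff is the heart of the change: every RPC actor below is renamed with a leading __ and re-exported through a thin wrapper, so all state-machine work is serialized behind priv->big_lock. A minimal sketch of the convention, with a hypothetical actor (only glusterd_big_locked_handler itself comes from this patch):

        /* hypothetical actor: runs with priv->big_lock held */
        static int
        __hypothetical_handle_foo (rpcsvc_request_t *req)
        {
                return 0;
        }

        int
        hypothetical_handle_foo (rpcsvc_request_t *req)
        {
                /* the wrapper, not the bare actor, gets registered */
                return glusterd_big_locked_handler (req,
                                                    __hypothetical_handle_foo);
        }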
@@ -235,13 +254,14 @@ glusterd_add_peer_detail_to_dict (glusterd_peerinfo_t *peerinfo,
 
         int             ret = -1;
         char            key[256] = {0, };
+        char           *peer_uuid_str = NULL;
 
         GF_ASSERT (peerinfo);
         GF_ASSERT (friends);
 
         snprintf (key, 256, "friend%d.uuid", count);
-        uuid_utoa_r (peerinfo->uuid, peerinfo->uuid_str);
-        ret = dict_set_str (friends, key, peerinfo->uuid_str);
+        peer_uuid_str = gd_peer_uuid_str (peerinfo);
+        ret = dict_set_str (friends, key, peer_uuid_str);
         if (ret)
                 goto out;
 
@@ -255,6 +275,11 @@ glusterd_add_peer_detail_to_dict (glusterd_peerinfo_t *peerinfo,
         if (ret)
                 goto out;
 
+        snprintf (key, 256, "friend%d.stateId", count);
+        ret = dict_set_int32 (friends, key, peerinfo->state.state);
+        if (ret)
+                goto out;
+
         snprintf (key, 256, "friend%d.state", count);
         ret = dict_set_str (friends, key,
                     glusterd_friend_sm_state_name_get(peerinfo->state.state));
@@ -270,10 +295,34 @@ out:
         return ret;
 }
 
+struct args_pack {
+        dict_t *dict;
+        int     vol_count;
+        int     opt_count;
+};
+
+static int
+_build_option_key (dict_t *d, char *k, data_t *v, void *tmp)
+{
+        char                    reconfig_key[256] = {0, };
+        struct args_pack        *pack             = NULL;
+        int                     ret               = -1;
+
+        pack = tmp;
+        if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
+                return 0;
+        snprintf (reconfig_key, 256, "volume%d.option.%s",
+                  pack->vol_count, k);
+        ret = dict_set_str (pack->dict, reconfig_key, v->data);
+        if (0 == ret)
+                pack->opt_count++;
+
+        return 0;
+}
 
 int
 glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
-                                   dict_t  *volumes, int   count)
+                                    dict_t *volumes, int count)
 {
 
         int                     ret = -1;
@@ -281,19 +330,18 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
         glusterd_brickinfo_t    *brickinfo = NULL;
         char                    *buf = NULL;
         int                     i = 1;
-        data_pair_t             *pairs = NULL;
-        char                    reconfig_key[256] = {0, };
         dict_t                  *dict = NULL;
-        data_t                  *value = NULL;
-        int                     opt_count = 0;
         glusterd_conf_t         *priv = NULL;
         char                    *volume_id_str  = NULL;
+        struct args_pack        pack = {0,};
+        xlator_t                *this = NULL;
 
         GF_ASSERT (volinfo);
         GF_ASSERT (volumes);
 
-        priv = THIS->private;
+        this = THIS;
+        priv = this->private;
 
         GF_ASSERT (priv);
 
@@ -347,12 +395,20 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
                 goto out;
 
         snprintf (key, 256, "volume%d.rebalance", count);
-        ret = dict_set_int32 (volumes, key, volinfo->defrag_cmd);
+        ret = dict_set_int32 (volumes, key, volinfo->rebal.defrag_cmd);
         if (ret)
                 goto out;
 
+#ifdef HAVE_BD_XLATOR
+        snprintf (key, 256, "volume%d.backend", count);
+        ret = dict_set_int32 (volumes, key, volinfo->backend);
+        if (ret)
+                goto out;
+#endif
+
         list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                 char    brick[1024] = {0,};
+                char    brick_uuid[64] = {0,};
                 snprintf (key, 256, "volume%d.brick%d", count, i);
                 snprintf (brick, 1024, "%s:%s", brickinfo->hostname,
                           brickinfo->path);
@@ -360,6 +416,15 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
                 ret = dict_set_dynstr (volumes, key, buf);
                 if (ret)
                         goto out;
+                snprintf (key, 256, "volume%d.brick%d.uuid", count, i);
+                snprintf (brick_uuid, 64, "%s", uuid_utoa (brickinfo->uuid));
+                buf = gf_strdup (brick_uuid);
+                if (!buf)
+                        goto out;
+                ret = dict_set_dynstr (volumes, key, buf);
+                if (ret)
+                        goto out;
+
                 i++;
         }
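In the next hunk the hand-rolled walk over dict->members_list is replaced by dict_foreach driving the _build_option_key callback above, with the small args_pack struct as the carrier. A toy illustration of the dict_foreach contract — not part of the patch, just the callback convention it relies on:

        /* count the entries of a dict; returning 0 keeps the walk going */
        static int
        count_entries (dict_t *d, char *k, data_t *v, void *tmp)
        {
                int *n = tmp;   /* v->data holds the value's bytes */

                (*n)++;
                return 0;
        }

        ...
        int n = 0;
        dict_foreach (volinfo->dict, count_entries, &n);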
@@ -369,25 +434,14 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
                 goto out;
         }
 
-        pairs = dict->members_list;
-
-        while (pairs) {
-                if (1 == glusterd_check_option_exists (pairs->key, NULL)) {
-                        value = pairs->value;
-                        if (!value)
-                                continue;
-
-                        snprintf (reconfig_key, 256, "volume%d.option.%s", count,
-                                  pairs->key);
-                        ret = dict_set_str (volumes, reconfig_key, value->data);
-                        if (!ret)
-                                opt_count++;
-                }
-                pairs = pairs->next;
-        }
+        pack.dict = volumes;
+        pack.vol_count = count;
+        pack.opt_count = 0;
+        dict_foreach (dict, _build_option_key, (void *) &pack);
+        dict_foreach (priv->opts, _build_option_key, &pack);
 
-        snprintf (key, 256, "volume%d.opt_count", count);
-        ret = dict_set_int32 (volumes, key, opt_count);
+        snprintf (key, 256, "volume%d.opt_count", pack.vol_count);
+        ret = dict_set_int32 (volumes, key, pack.opt_count);
 out:
         return ret;
 }
@@ -397,13 +451,18 @@ glusterd_friend_find (uuid_t uuid, char *hostname,
                       glusterd_peerinfo_t **peerinfo)
 {
         int     ret = -1;
+        xlator_t *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
 
         if (uuid) {
                 ret = glusterd_friend_find_by_uuid (uuid, peerinfo);
 
                 if (ret) {
-                        gf_log ("glusterd", GF_LOG_INFO,
-                                "Unable to find peer by uuid");
+                        gf_log (this->name, GF_LOG_DEBUG,
+                                "Unable to find peer by uuid: %s",
+                                uuid_utoa (uuid));
                 } else {
                         goto out;
                 }
@@ -414,7 +473,7 @@ glusterd_friend_find (uuid_t uuid, char *hostname,
                 ret = glusterd_friend_find_by_hostname (hostname, peerinfo);
 
                 if (ret) {
-                        gf_log ("glusterd", GF_LOG_INFO,
+                        gf_log (this->name, GF_LOG_DEBUG,
                                 "Unable to find hostname: %s", hostname);
                 } else {
                         goto out;
@@ -426,7 +485,8 @@ out:
 }
 
 int32_t
-glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx)
+glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
+                       char *err_str, size_t err_len)
 {
         int32_t      ret    = -1;
         xlator_t    *this   = NULL;
@@ -438,19 +498,21 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx)
         GF_ASSERT (NULL != ctx);
 
         this = THIS;
+        GF_ASSERT (this);
         priv = this->private;
         GF_ASSERT (priv);
 
-        ret = glusterd_lock (priv->uuid);
-
+        ret = glusterd_lock (MY_UUID);
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR,
-                        "Unable to acquire local lock, ret: %d", ret);
+                        "Unable to acquire lock on localhost, ret: %d", ret);
+                snprintf (err_str, err_len, "Another transaction is in progress. "
+                          "Please try again after sometime.");
                 goto out;
         }
 
         locked = 1;
-        gf_log (this->name, GF_LOG_INFO, "Acquired local lock");
+        gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost");
 
         ret = glusterd_op_sm_inject_event (GD_OP_EVENT_START_LOCK, NULL);
         if (ret) {
@@ -466,33 +528,39 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx)
 
 out:
         if (locked && ret)
-                glusterd_unlock (priv->uuid);
+                glusterd_unlock (MY_UUID);
 
         gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
         return ret;
 }
 
 int
-glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+__glusterd_handle_cluster_lock (rpcsvc_request_t *req)
 {
         gd1_mgmt_cluster_lock_req       lock_req = {{0},};
         int32_t                         ret = -1;
         glusterd_op_lock_ctx_t          *ctx = NULL;
         glusterd_peerinfo_t             *peerinfo = NULL;
+        xlator_t                        *this = NULL;
 
+        this = THIS;
+        GF_ASSERT (this);
         GF_ASSERT (req);
 
-        if (!xdr_to_generic (req->msg[0], &lock_req,
-                             (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req)) {
-                //failed to decode msg;
+        ret = xdr_to_generic (req->msg[0], &lock_req,
+                              (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
+        if (ret < 0) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
+                        "request received from peer");
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
         }
 
-        gf_log ("glusterd", GF_LOG_INFO,
-                "Received LOCK from uuid: %s", uuid_utoa (lock_req.uuid));
+        gf_log (this->name, GF_LOG_DEBUG, "Received LOCK from uuid: %s",
+                uuid_utoa (lock_req.uuid));
 
         if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
-                gf_log (THIS->name, GF_LOG_WARNING, "%s doesn't "
+                gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
                         "belong to the cluster. Ignoring request.",
                         uuid_utoa (lock_req.uuid));
                 ret = -1;
@@ -512,7 +580,7 @@ glusterd_handle_cluster_lock (rpcsvc_request_t *req)
         ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, ctx);
 
 out:
-        gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
 
         glusterd_friend_sm ();
         glusterd_op_sm ();
@@ -521,6 +589,13 @@ out:
 }
 
 int
+glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            __glusterd_handle_cluster_lock);
+}
+
+int
 glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
                          glusterd_op_t op, uuid_t uuid,
                          char *buf_val, size_t buf_len,
@@ -531,10 +606,13 @@ glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
         char                str[50] = {0,};
         glusterd_req_ctx_t *req_ctx = NULL;
         dict_t             *dict = NULL;
+        xlator_t           *this = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
 
         uuid_unparse (uuid, str);
-        gf_log ("glusterd", GF_LOG_INFO,
-                "Received op from uuid: %s", str);
+        gf_log (this->name, GF_LOG_DEBUG, "Received op from uuid %s", str);
 
         dict = dict_new ();
         if (!dict)
@@ -549,7 +627,7 @@ glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
         req_ctx->op = op;
         ret = dict_unserialize (buf_val, buf_len, &dict);
         if (ret) {
-                gf_log ("", GF_LOG_WARNING,
+                gf_log (this->name, GF_LOG_WARNING,
                         "failed to unserialize the dictionary");
                 goto out;
         }
@@ -562,29 +640,35 @@ out:
         if (ret) {
                 if (dict)
                         dict_unref (dict);
-                if (req_ctx)
-                        GF_FREE (req_ctx);
+                GF_FREE (req_ctx);
         }
         return ret;
 }
 
 int
-glusterd_handle_stage_op (rpcsvc_request_t *req)
+__glusterd_handle_stage_op (rpcsvc_request_t *req)
 {
         int32_t                         ret = -1;
         glusterd_req_ctx_t              *req_ctx = NULL;
         gd1_mgmt_stage_op_req           op_req = {{0},};
         glusterd_peerinfo_t             *peerinfo = NULL;
+        xlator_t                        *this = NULL;
 
+        this = THIS;
+        GF_ASSERT (this);
         GF_ASSERT (req);
 
-        if (!xdr_to_generic (req->msg[0], &op_req,
-                             (xdrproc_t)xdr_gd1_mgmt_stage_op_req)) {
-                //failed to decode msg;
+
+        ret = xdr_to_generic (req->msg[0], &op_req,
+                              (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
+        if (ret < 0) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to decode stage "
+                        "request received from peer");
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
         }
 
         if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
-                gf_log (THIS->name, GF_LOG_WARNING, "%s doesn't "
+                gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
                         "belong to the cluster. Ignoring request.",
                         uuid_utoa (op_req.uuid));
                 ret = -1;
@@ -600,31 +684,43 @@ glusterd_handle_stage_op (rpcsvc_request_t *req)
         ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP, req_ctx);
 
 out:
-        if (op_req.buf.buf_val)
-                free (op_req.buf.buf_val);//malloced by xdr
+        free (op_req.buf.buf_val);//malloced by xdr
         glusterd_friend_sm ();
         glusterd_op_sm ();
         return ret;
 }
 
 int
-glusterd_handle_commit_op (rpcsvc_request_t *req)
+glusterd_handle_stage_op (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req, __glusterd_handle_stage_op);
+}
+
+
+int
+__glusterd_handle_commit_op (rpcsvc_request_t *req)
 {
         int32_t                         ret = -1;
         glusterd_req_ctx_t              *req_ctx = NULL;
         gd1_mgmt_commit_op_req          op_req = {{0},};
         glusterd_peerinfo_t             *peerinfo = NULL;
+        xlator_t                        *this = NULL;
 
+        this = THIS;
+        GF_ASSERT (this);
         GF_ASSERT (req);
 
-        if (!xdr_to_generic (req->msg[0], &op_req,
-                             (xdrproc_t)xdr_gd1_mgmt_commit_op_req)) {
-                //failed to decode msg;
+        ret = xdr_to_generic (req->msg[0], &op_req,
+                              (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
+        if (ret < 0) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to decode commit "
+                        "request received from peer");
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
         }
 
         if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
-                gf_log (THIS->name, GF_LOG_WARNING, "%s doesn't "
+                gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
                         "belong to the cluster. Ignoring request.",
                         uuid_utoa (op_req.uuid));
                 ret = -1;
@@ -639,70 +735,123 @@ glusterd_handle_commit_op (rpcsvc_request_t *req)
         if (ret)
                 goto out;
 
-        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, req_ctx);
+        ret = glusterd_op_init_ctx (op_req.op);
         if (ret)
                 goto out;
 
-        ret = glusterd_op_init_ctx (op_req.op);
+
+        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, req_ctx);
 
 out:
-        if (op_req.buf.buf_val)
-                free (op_req.buf.buf_val);//malloced by xdr
+        free (op_req.buf.buf_val);//malloced by xdr
         glusterd_friend_sm ();
         glusterd_op_sm ();
         return ret;
 }
 
 int
-glusterd_handle_cli_probe (rpcsvc_request_t *req)
+glusterd_handle_commit_op (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req, __glusterd_handle_commit_op);
+}
+
+int
+__glusterd_handle_cli_probe (rpcsvc_request_t *req)
 {
         int32_t             ret = -1;
-        gf1_cli_probe_req   cli_req = {0,};
-        glusterd_peerinfo_t *peerinfo = NULL;
-        gf_boolean_t        run_fsm = _gf_true;
+        gf_cli_req          cli_req = {{0,},};
+        glusterd_peerinfo_t *peerinfo = NULL;
+        gf_boolean_t        run_fsm = _gf_true;
+        xlator_t            *this = NULL;
+        char                *bind_name = NULL;
+        dict_t              *dict = NULL;
+        char                *hostname = NULL;
+        int                 port = 0;
 
         GF_ASSERT (req);
+        this = THIS;
 
-        if (!xdr_to_generic (req->msg[0], &cli_req,
-                             (xdrproc_t)xdr_gf1_cli_probe_req)) {
+        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+        if (ret < 0) {
                 //failed to decode msg;
                 gf_log ("", GF_LOG_ERROR, "xdr decoding error");
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
         }
 
-        gf_cmd_log ("peer probe", " on host %s:%d", cli_req.hostname,
-                    cli_req.port);
+        if (cli_req.dict.dict_len) {
+                dict = dict_new ();
+
+                ret = dict_unserialize (cli_req.dict.dict_val,
+                                        cli_req.dict.dict_len, &dict);
+                if (ret < 0) {
+                        gf_log (this->name, GF_LOG_ERROR, "Failed to "
+                                "unserialize req-buffer to dictionary");
+                        goto out;
+                }
+        }
+
+        ret = dict_get_str (dict, "hostname", &hostname);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to get hostname");
+                goto out;
+        }
+
+        ret = dict_get_int32 (dict, "port", &port);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to get port");
+                goto out;
+        }
+
+        if (glusterd_is_any_volume_in_server_quorum (this) &&
+            !does_gd_meet_server_quorum (this)) {
+                glusterd_xfer_cli_probe_resp (req, -1, GF_PROBE_QUORUM_NOT_MET,
+                                              NULL, hostname, port, dict);
+                gf_log (this->name, GF_LOG_ERROR, "Quorum does not meet, "
+                        "rejecting operation");
+                ret = 0;
+                goto out;
+        }
+
         gf_log ("glusterd", GF_LOG_INFO, "Received CLI probe req %s %d",
-                cli_req.hostname, cli_req.port);
+                hostname, port);
 
-        if (!(ret = glusterd_is_local_addr(cli_req.hostname))) {
+        if (dict_get_str(this->options,"transport.socket.bind-address",
+                         &bind_name) == 0) {
+                gf_log ("glusterd", GF_LOG_DEBUG,
+                        "only checking probe address vs. bind address");
+                ret = gf_is_same_address (bind_name, hostname);
+        }
+        else {
+                ret = gf_is_local_addr (hostname);
+        }
+        if (ret) {
                 glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST,
-                                              cli_req.hostname, cli_req.port);
+                                              NULL, hostname, port, dict);
+                ret = 0;
                 goto out;
         }
 
-        if (!(ret = glusterd_friend_find_by_hostname(cli_req.hostname,
-                                         &peerinfo))) {
-                if (strcmp (peerinfo->hostname, cli_req.hostname) == 0) {
+        if (!(ret = glusterd_friend_find_by_hostname (hostname, &peerinfo))) {
+                if (strcmp (peerinfo->hostname, hostname) == 0) {
 
-                        gf_log ("glusterd", GF_LOG_DEBUG, "Probe host %s port %d"
-                                " already a peer", cli_req.hostname, cli_req.port);
+                        gf_log ("glusterd", GF_LOG_DEBUG, "Probe host %s port "
+                                "%d already a peer", hostname, port);
                         glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND,
-                                                      cli_req.hostname, cli_req.port);
+                                                      NULL, hostname, port,
+                                                      dict);
                         goto out;
                 }
         }
-        ret = glusterd_probe_begin (req, cli_req.hostname, cli_req.port);
-
-        gf_cmd_log ("peer probe","on host %s:%d %s",cli_req.hostname, cli_req.port,
-                    (ret) ? "FAILED" : "SUCCESS");
+        ret = glusterd_probe_begin (req, hostname, port, dict);
 
         if (ret == GLUSTERD_CONNECTION_AWAITED) {
                 //fsm should be run after connection establishes
                 run_fsm = _gf_false;
                 ret = 0;
         }
+
 out:
-        if (cli_req.hostname)
-                free (cli_req.hostname);//its malloced by xdr
+        free (cli_req.dict.dict_val);
 
         if (run_fsm) {
                 glusterd_friend_sm ();
@@ -713,14 +862,24 @@ out:
 }
 
 int
-glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
+glusterd_handle_cli_probe (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req, __glusterd_handle_cli_probe);
+}
+
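With the switch to gf_cli_req, probe arguments now travel inside a serialized dict instead of dedicated XDR fields. A sketch of how a sender would build that payload, using the same dict helpers this file uses (the hostname and port values here are placeholders):

        dict_t     *dict = dict_new ();
        gf_cli_req  req  = {{0,},};
        int         ret  = -1;

        ret = dict_set_str (dict, "hostname", "server2.example.com");
        if (!ret)
                ret = dict_set_int32 (dict, "port", 24007);
        if (!ret)
                ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
                                                   &req.dict.dict_len);
        /* req.dict now carries the arguments; dict_unref (dict) when done */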
out; + } + + ret = dict_get_int32 (dict, "port", &port); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to get port"); + goto out; + } + + ret = dict_get_int32 (dict, "flags", &flags); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to get flags"); + goto out; + } + + ret = glusterd_hostname_to_uuid (hostname, uuid); if (ret) { op_errno = GF_DEPROBE_NOT_FRIEND; goto out; } - if (!uuid_compare (uuid, priv->uuid)) { + if (!uuid_compare (uuid, MY_UUID)) { op_errno = GF_DEPROBE_LOCALHOST; ret = -1; goto out; } - if (!uuid_is_null (uuid) && !(cli_req.flags & GF_CLI_FLAG_OP_FORCE)) { - /* Check if peers are connected, except peer being detached*/ - if (!glusterd_chk_peers_connected_befriended (uuid)) { - ret = -1; - op_errno = GF_DEPROBE_FRIEND_DOWN; - goto out; + if (!(flags & GF_CLI_FLAG_OP_FORCE)) { + if (!uuid_is_null (uuid)) { + /* Check if peers are connected, except peer being detached*/ + if (!glusterd_chk_peers_connected_befriended (uuid)) { + ret = -1; + op_errno = GF_DEPROBE_FRIEND_DOWN; + goto out; + } + ret = glusterd_all_volume_cond_check ( + glusterd_friend_brick_belongs, + -1, &uuid); + if (ret) { + op_errno = GF_DEPROBE_BRICK_EXIST; + goto out; + } } - ret = glusterd_all_volume_cond_check ( - glusterd_friend_brick_belongs, - -1, &uuid); - if (ret) { - op_errno = GF_DEPROBE_BRICK_EXIST; + + if (glusterd_is_any_volume_in_server_quorum (this) && + !does_gd_meet_server_quorum (this)) { + gf_log (this->name, GF_LOG_ERROR, "Quorum does not " + "meet, rejecting operation"); + ret = -1; + op_errno = GF_DEPROBE_QUORUM_NOT_MET; goto out; } } if (!uuid_is_null (uuid)) { - ret = glusterd_deprobe_begin (req, cli_req.hostname, - cli_req.port, uuid); + ret = glusterd_deprobe_begin (req, hostname, port, uuid, dict); } else { - ret = glusterd_deprobe_begin (req, cli_req.hostname, - cli_req.port, NULL); + ret = glusterd_deprobe_begin (req, hostname, port, NULL, dict); } - gf_cmd_log ("peer deprobe", "on host %s:%d %s", cli_req.hostname, - cli_req.port, (ret) ? 
"FAILED" : "SUCCESS"); out: + free (cli_req.dict.dict_val); + if (ret) { - ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno, - cli_req.hostname); + ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno, NULL, + hostname, dict); } - if (cli_req.hostname) - free (cli_req.hostname);//malloced by xdr - glusterd_friend_sm (); glusterd_op_sm (); @@ -791,7 +987,13 @@ out: } int -glusterd_handle_cli_list_friends (rpcsvc_request_t *req) +glusterd_handle_cli_deprobe (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_cli_deprobe); +} + +int +__glusterd_handle_cli_list_friends (rpcsvc_request_t *req) { int32_t ret = -1; gf1_cli_peer_list_req cli_req = {0,}; @@ -799,7 +1001,9 @@ glusterd_handle_cli_list_friends (rpcsvc_request_t *req) GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf1_cli_peer_list_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, + (xdrproc_t)xdr_gf1_cli_peer_list_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; @@ -837,7 +1041,14 @@ out: } int -glusterd_handle_cli_get_volume (rpcsvc_request_t *req) +glusterd_handle_cli_list_friends (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_list_friends); +} + +int +__glusterd_handle_cli_get_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -846,7 +1057,8 @@ glusterd_handle_cli_get_volume (rpcsvc_request_t *req) GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; @@ -890,7 +1102,289 @@ out: } int -glusterd_handle_cli_list_volume (rpcsvc_request_t *req) +glusterd_handle_cli_get_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_get_volume); +} + +#ifdef HAVE_BD_XLATOR +int +__glusterd_handle_cli_bd_op (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gf_cli_req cli_req = { {0,} }; + dict_t *dict = NULL; + char *volname = NULL; + char op_errstr[2048] = {0,}; + glusterd_op_t cli_op = GD_OP_BD_OP; + + GF_ASSERT (req); + + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + /* failed to decode msg */ + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + gf_log ("glusterd", GF_LOG_DEBUG, "Received bd op req"); + + if (cli_req.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new (); + + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, + &dict); + if (ret < 0) { + gf_log ("glusterd", GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + goto out; + } else { + dict->extra_stdfree = cli_req.dict.dict_val; + } + } + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_log (THIS->name, GF_LOG_ERROR, + "failed to get volname"); + goto out; + } + + ret = glusterd_op_begin (req, GD_OP_BD_OP, dict, op_errstr, + sizeof (op_errstr)); +out: + if (ret && dict) + dict_unref (dict); + + glusterd_friend_sm (); + glusterd_op_sm (); + + if (ret) { + if (op_errstr[0] == '\0') + snprintf (op_errstr, sizeof (op_errstr), + "Operation failed"); + ret = glusterd_op_send_cli_response (cli_op, ret, 0, + req, NULL, op_errstr); + } + + return ret; +} + +int +glusterd_handle_cli_bd_op (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_cli_bd_op); +} +#endif + +int +__glusterd_handle_cli_uuid_reset 
+{
+        int                     ret = -1;
+        dict_t                  *dict = NULL;
+        xlator_t                *this = NULL;
+        glusterd_conf_t         *priv = NULL;
+        uuid_t                  uuid = {0};
+        gf_cli_rsp              rsp = {0,};
+        gf_cli_req              cli_req = {{0,}};
+        char                    msg_str[2048] = {0,};
+
+        GF_ASSERT (req);
+
+        this = THIS;
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+        if (ret < 0) {
+                //failed to decode msg;
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid reset req");
+
+        if (cli_req.dict.dict_len) {
+                /* Unserialize the dictionary */
+                dict = dict_new ();
+
+                ret = dict_unserialize (cli_req.dict.dict_val,
+                                        cli_req.dict.dict_len,
+                                        &dict);
+                if (ret < 0) {
+                        gf_log ("glusterd", GF_LOG_ERROR,
+                                "failed to "
+                                "unserialize req-buffer to dictionary");
+                        snprintf (msg_str, sizeof (msg_str), "Unable to decode "
+                                  "the buffer");
+                        goto out;
+                } else {
+                        dict->extra_stdfree = cli_req.dict.dict_val;
+                }
+        }
+
+        /* In the above section if dict_unserialize is successful, ret is set
+         * to zero.
+         */
+        ret = -1;
+
+        // Do not allow peer reset if there are any volumes in the cluster
+        if (!list_empty (&priv->volumes)) {
+                snprintf (msg_str, sizeof (msg_str), "volumes are already "
+                          "present in the cluster. Resetting uuid is not "
+                          "allowed");
+                gf_log (this->name, GF_LOG_WARNING, "%s", msg_str);
+                goto out;
+        }
+
+        // Do not allow peer reset if trusted storage pool is already formed
+        if (!list_empty (&priv->peers)) {
+                snprintf (msg_str, sizeof (msg_str),"trusted storage pool "
+                          "has been already formed. Please detach this peer "
+                          "from the pool and reset its uuid.");
+                gf_log (this->name, GF_LOG_WARNING, "%s", msg_str);
+                goto out;
+        }
+
+        uuid_copy (uuid, priv->uuid);
+        ret = glusterd_uuid_generate_save ();
+
+        if (!uuid_compare (uuid, MY_UUID)) {
+                snprintf (msg_str, sizeof (msg_str), "old uuid and the new uuid"
+                          " are same. Try gluster peer reset again");
+                gf_log (this->name, GF_LOG_ERROR, "%s", msg_str);
+                ret = -1;
+                goto out;
+        }
+
+out:
+        if (ret) {
+                rsp.op_ret = -1;
+                if (msg_str[0] == '\0')
+                        snprintf (msg_str, sizeof (msg_str), "Operation "
+                                  "failed");
+                rsp.op_errstr = msg_str;
+                ret = 0;
+        } else {
+                rsp.op_errstr = "";
+        }
+
+        glusterd_to_cli (req, &rsp, NULL, 0, NULL,
+                         (xdrproc_t)xdr_gf_cli_rsp, dict);
+
+        return ret;
+}
+
+int
+glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            __glusterd_handle_cli_uuid_reset);
+}
+
+int
+__glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
+{
+        int                     ret = -1;
+        dict_t                  *dict = NULL;
+        dict_t                  *rsp_dict = NULL;
+        xlator_t                *this = NULL;
+        glusterd_conf_t         *priv = NULL;
+        gf_cli_rsp              rsp = {0,};
+        gf_cli_req              cli_req = {{0,}};
+        char                    msg_str[2048] = {0,};
+        char                    uuid_str[64] = {0,};
+
+        GF_ASSERT (req);
+
+        this = THIS;
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+        if (ret < 0) {
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid get req");
+
+        if (cli_req.dict.dict_len) {
+                dict = dict_new ();
+                if (!dict) {
+                        ret = -1;
+                        goto out;
+                }
+
+                ret = dict_unserialize (cli_req.dict.dict_val,
+                                        cli_req.dict.dict_len,
+                                        &dict);
+                if (ret < 0) {
+                        gf_log ("glusterd", GF_LOG_ERROR,
+                                "failed to "
+                                "unserialize req-buffer to dictionary");
+                        snprintf (msg_str, sizeof (msg_str), "Unable to decode "
+                                  "the buffer");
+                        goto out;
+
+                } else {
+                        dict->extra_stdfree = cli_req.dict.dict_val;
+
+                }
+        }
+
+        rsp_dict = dict_new ();
+        if (!rsp_dict) {
+                ret = -1;
+                goto out;
+        }
+
+        uuid_utoa_r (MY_UUID, uuid_str);
+        ret = dict_set_str (rsp_dict, "uuid", uuid_str);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to set uuid in "
+                        "dictionary.");
+                goto out;
+        }
+
+        ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+                                           &rsp.dict.dict_len);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to serialize "
+                        "dictionary.");
+                goto out;
+        }
+        ret = 0;
+out:
+        if (ret) {
+                rsp.op_ret = -1;
+                if (msg_str[0] == '\0')
+                        snprintf (msg_str, sizeof (msg_str), "Operation "
+                                  "failed");
+                rsp.op_errstr = msg_str;
+
+        } else {
+                rsp.op_errstr = "";
+
+        }
+
+        glusterd_to_cli (req, &rsp, NULL, 0, NULL,
+                         (xdrproc_t)xdr_gf_cli_rsp, dict);
+
+        return 0;
+}
+int
+glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            __glusterd_handle_cli_uuid_get);
+}
+
+int
+__glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
 {
         int                     ret = -1;
         dict_t                  *dict = NULL;
@@ -923,7 +1417,7 @@ __glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
                 goto out;
 
         ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
-                                           (size_t *)&rsp.dict.dict_len);
+                                           &rsp.dict.dict_len);
         if (ret)
                 goto out;
 
@@ -936,42 +1430,57 @@ out:
         else
                 rsp.op_errstr = "";
 
-        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gf_cli_rsp);
+        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+                               (xdrproc_t)xdr_gf_cli_rsp);
+        ret = 0;
 
         if (dict)
                 dict_unref (dict);
 
         glusterd_friend_sm ();
         glusterd_op_sm ();
+
         return ret;
 }
 
+int
+glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            __glusterd_handle_cli_list_volume);
+}
+
 int32_t
-glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx)
+glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
+                   char *err_str, size_t err_len)
 {
         int             ret = -1;
 
-        ret = glusterd_op_txn_begin (req, op, ctx);
+        ret = glusterd_op_txn_begin (req, op, ctx, err_str, err_len);
 
         return ret;
 }
 
-
 int
-glusterd_handle_reset_volume (rpcsvc_request_t *req)
+__glusterd_handle_reset_volume (rpcsvc_request_t *req)
 {
         int32_t                         ret = -1;
         gf_cli_req                      cli_req = {{0,}};
         dict_t                          *dict = NULL;
         glusterd_op_t                   cli_op = GD_OP_RESET_VOLUME;
         char                            *volname = NULL;
+        char                            err_str[2048] = {0,};
+        xlator_t                        *this = NULL;
 
         GF_ASSERT (req);
+        this = THIS;
+        GF_ASSERT (this);
 
-        if (!xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req)) {
-                //failed to decode msg;
+        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+        if (ret < 0) {
+                snprintf (err_str, sizeof (err_str), "Failed to decode request "
+                          "received from cli");
+                gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
         }
@@ -984,8 +1493,10 @@ __glusterd_handle_reset_volume (rpcsvc_request_t *req)
                                         cli_req.dict.dict_len,
                                         &dict);
                 if (ret < 0) {
-                        gf_log ("glusterd", GF_LOG_ERROR, "failed to "
+                        gf_log (this->name, GF_LOG_ERROR, "failed to "
                                 "unserialize req-buffer to dictionary");
+                        snprintf (err_str, sizeof (err_str), "Unable to decode "
+                                  "the command");
                         goto out;
                 } else {
                         dict->extra_stdfree = cli_req.dict.dict_val;
@@ -994,31 +1505,37 @@ __glusterd_handle_reset_volume (rpcsvc_request_t *req)
 
         ret = dict_get_str (dict, "volname", &volname);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "failed to get volname");
+                snprintf (err_str, sizeof (err_str), "Failed to get volume "
+                          "name");
+                gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
                 goto out;
         }
 
+        gf_log (this->name, GF_LOG_DEBUG, "Received volume reset request for "
+                "volume %s", volname);
 
-        gf_cmd_log ("Volume reset", "volume : %s", volname);
-        ret = glusterd_op_begin (req, GD_OP_RESET_VOLUME, dict);
-        gf_cmd_log ("Volume reset", " on volume %s %s ", volname,
-                    ((ret == 0)? " SUCCEDED":" FAILED"));
+        ret = glusterd_op_begin_synctask (req, GD_OP_RESET_VOLUME, dict);
" SUCCEDED":" FAILED")); + ret = glusterd_op_begin_synctask (req, GD_OP_RESET_VOLUME, dict); out: - glusterd_friend_sm (); - glusterd_op_sm (); if (ret) { - if (dict) - dict_unref (dict); + if (err_str[0] == '\0') + snprintf (err_str, sizeof (err_str), + "Operation failed"); ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, - NULL, "operation failed"); + dict, err_str); } return ret; } +int +glusterd_handle_reset_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_reset_volume); +} int -glusterd_handle_set_volume (rpcsvc_request_t *req) +__glusterd_handle_set_volume (rpcsvc_request_t *req) { int32_t ret = -1; gf_cli_req cli_req = {{0,}}; @@ -1027,11 +1544,21 @@ glusterd_handle_set_volume (rpcsvc_request_t *req) char *key = NULL; char *value = NULL; char *volname = NULL; + char *op_errstr = NULL; + gf_boolean_t help = _gf_false; + char err_str[2048] = {0,}; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req)) { - //failed to decode msg; + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + snprintf (err_str, sizeof (err_str), "Failed to decode " + "request received from cli"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); req->rpc_err = GARBAGE_ARGS; goto out; } @@ -1044,9 +1571,11 @@ glusterd_handle_set_volume (rpcsvc_request_t *req) cli_req.dict.dict_len, &dict); if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, + gf_log (this->name, GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); + snprintf (err_str, sizeof (err_str), "Unable to decode " + "the command"); goto out; } else { dict->extra_stdfree = cli_req.dict.dict_val; @@ -1055,65 +1584,81 @@ glusterd_handle_set_volume (rpcsvc_request_t *req) ret = dict_get_str (dict, "volname", &volname); if (ret) { - gf_log ("", GF_LOG_WARNING, "Unable to get volume name, while" - "handling volume set command"); + snprintf (err_str, sizeof (err_str), "Failed to get volume " + "name while handling volume set command"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; + } + + if (strcmp (volname, "help") == 0 || + strcmp (volname, "help-xml") == 0) { + ret = glusterd_volset_help (dict, &op_errstr); + help = _gf_true; goto out; } ret = dict_get_str (dict, "key1", &key); if (ret) { - if (strcmp (volname, "help-xml") && strcmp (volname, "help")) { - gf_log ("", GF_LOG_WARNING, "Unable to get key, while " - "handling volume set for %s",volname); - goto out; - } + snprintf (err_str, sizeof (err_str), "Failed to get key while" + " handling volume set for %s", volname); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; } ret = dict_get_str (dict, "value1", &value); if (ret) { - if (strcmp (volname, "help-xml") && strcmp (volname, "help")) { - gf_log ("", GF_LOG_WARNING, "Unable to get value, while" - "handling volume set for %s",volname); - goto out; - } + snprintf (err_str, sizeof (err_str), "Failed to get value while" + " handling volume set for %s", volname); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; } + gf_log (this->name, GF_LOG_DEBUG, "Received volume set request for " + "volume %s", volname); + ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict); - gf_cmd_log ("volume set", "volume-name:%s: key:%s, value:%s",volname, - key, value); - ret = glusterd_op_begin (req, GD_OP_SET_VOLUME, dict); - gf_cmd_log ("volume set", "volume-name:%s: key:%s, value:%s %s", - volname, key, value, 
 out:
-
-        glusterd_friend_sm ();
-        glusterd_op_sm ();
-
-        if (ret) {
-                if (dict)
-                        dict_unref (dict);
+        if (help)
+                ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, dict,
+                                                     (op_errstr)? op_errstr:"");
+        else if (ret) {
+                if (err_str[0] == '\0')
+                        snprintf (err_str, sizeof (err_str),
+                                  "Operation failed");
                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
-                                                     NULL, "operation failed");
+                                                     dict, err_str);
         }
+        if (op_errstr)
+                GF_FREE (op_errstr);
+
         return ret;
 }
 
 int
-glusterd_handle_sync_volume (rpcsvc_request_t *req)
+glusterd_handle_set_volume (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req, __glusterd_handle_set_volume);
+}
+
+int
+__glusterd_handle_sync_volume (rpcsvc_request_t *req)
 {
         int32_t                          ret = -1;
         gf_cli_req                       cli_req = {{0,}};
         dict_t                           *dict = NULL;
         gf_cli_rsp                       cli_rsp = {0.};
         char                             msg[2048] = {0,};
-        glusterd_volinfo_t               *volinfo = NULL;
         char                             *volname = NULL;
         gf1_cli_sync_volume              flags = 0;
         char                             *hostname = NULL;
+        xlator_t                         *this = NULL;
 
         GF_ASSERT (req);
+        this = THIS;
+        GF_ASSERT (this);
 
-        if (!xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req)) {
+        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+        if (ret < 0) {
                 //failed to decode msg;
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
@@ -1127,9 +1672,11 @@ __glusterd_handle_sync_volume (rpcsvc_request_t *req)
                                         cli_req.dict.dict_len,
                                         &dict);
                 if (ret < 0) {
-                        gf_log ("glusterd", GF_LOG_ERROR,
+                        gf_log (this->name, GF_LOG_ERROR,
                                 "failed to "
                                 "unserialize req-buffer to dictionary");
+                        snprintf (msg, sizeof (msg), "Unable to decode the "
+                                  "command");
                         goto out;
                 } else {
                         dict->extra_stdfree = cli_req.dict.dict_val;
@@ -1138,7 +1685,8 @@ __glusterd_handle_sync_volume (rpcsvc_request_t *req)
 
         ret = dict_get_str (dict, "hostname", &hostname);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "failed to get hostname");
+                snprintf (msg, sizeof (msg), "Failed to get hostname");
+                gf_log (this->name, GF_LOG_ERROR, "%s", msg);
                 goto out;
         }
 
@@ -1146,48 +1694,25 @@ __glusterd_handle_sync_volume (rpcsvc_request_t *req)
         if (ret) {
                 ret = dict_get_int32 (dict, "flags", (int32_t*)&flags);
                 if (ret) {
-                        gf_log (THIS->name, GF_LOG_ERROR, "Unable to get volume"
-                                "name, or flags");
+                        snprintf (msg, sizeof (msg), "Failed to get volume name"
+                                  " or flags");
+                        gf_log (this->name, GF_LOG_ERROR, "%s", msg);
                         goto out;
                 }
         }
"all" : volname); - if (!glusterd_is_local_addr (hostname)) { + if (gf_is_local_addr (hostname)) { ret = -1; snprintf (msg, sizeof (msg), "sync from localhost" " not allowed"); + gf_log (this->name, GF_LOG_ERROR, "%s", msg); goto out; } - if (!flags) { - ret = glusterd_volinfo_find (volname, &volinfo); - if (!ret) { - snprintf (msg, sizeof (msg), "please delete the " - "volume: %s before sync", volname); - ret = -1; - goto out; - } - - ret = dict_set_dynmstr (dict, "volname", volname); - if (ret) { - gf_log ("", GF_LOG_ERROR, "volume name set failed"); - snprintf (msg, sizeof (msg), "volume name set failed"); - goto out; - } - } else { - if (glusterd_volume_count_get ()) { - snprintf (msg, sizeof (msg), "please delete all the " - "volumes before full sync"); - ret = -1; - goto out; - } - } - - ret = glusterd_op_begin (req, GD_OP_SYNC_VOLUME, dict); + ret = glusterd_op_begin_synctask (req, GD_OP_SYNC_VOLUME, dict); out: if (ret) { @@ -1195,22 +1720,22 @@ out: cli_rsp.op_errstr = msg; if (msg[0] == '\0') snprintf (msg, sizeof (msg), "Operation failed"); - glusterd_submit_reply(req, &cli_rsp, NULL, 0, NULL, - (xdrproc_t)xdr_gf_cli_rsp); - if (dict) - dict_unref (dict); - + glusterd_to_cli (req, &cli_rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_cli_rsp, dict); ret = 0; //sent error to cli, prevent second reply } - glusterd_friend_sm (); - glusterd_op_sm (); - return ret; } int +glusterd_handle_sync_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_sync_volume); +} + +int glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret, char *op_errstr, dict_t *dict) { @@ -1225,12 +1750,11 @@ glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret, rsp.op_errstr = op_errstr; if (rsp.op_ret == 0) ret = dict_allocate_and_serialize (dict, &rsp.fsm_log.fsm_log_val, - (size_t *)&rsp.fsm_log.fsm_log_len); + &rsp.fsm_log.fsm_log_len); ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf1_cli_fsm_log_rsp); - if (rsp.fsm_log.fsm_log_val) - GF_FREE (rsp.fsm_log.fsm_log_val); + GF_FREE (rsp.fsm_log.fsm_log_val); gf_log ("glusterd", GF_LOG_DEBUG, "Responded, ret: %d", ret); @@ -1238,7 +1762,7 @@ glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret, } int -glusterd_handle_fsm_log (rpcsvc_request_t *req) +__glusterd_handle_fsm_log (rpcsvc_request_t *req) { int32_t ret = -1; gf1_cli_fsm_log_req cli_req = {0,}; @@ -1251,7 +1775,9 @@ glusterd_handle_fsm_log (rpcsvc_request_t *req) GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf1_cli_fsm_log_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, + (xdrproc_t)xdr_gf1_cli_fsm_log_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; snprintf (msg, sizeof (msg), "Garbage request"); @@ -1282,8 +1808,7 @@ glusterd_handle_fsm_log (rpcsvc_request_t *req) ret = glusterd_sm_tr_log_add_to_dict (dict, log); out: (void)glusterd_fsm_log_send_resp (req, ret, msg, dict); - if (cli_req.name) - free (cli_req.name);//malloced by xdr + free (cli_req.name);//malloced by xdr if (dict) dict_unref (dict); @@ -1294,6 +1819,12 @@ out: } int +glusterd_handle_fsm_log (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_fsm_log); +} + +int glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status) { @@ -1307,8 +1838,7 @@ glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status) ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp); - gf_log ("glusterd", GF_LOG_INFO, - 
"Responded, ret: %d", ret); + gf_log (THIS->name, GF_LOG_DEBUG, "Responded to lock, ret: %d", ret); return 0; } @@ -1327,35 +1857,39 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status) ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp); - gf_log ("glusterd", GF_LOG_INFO, - "Responded to unlock, ret: %d", ret); + gf_log (THIS->name, GF_LOG_DEBUG, "Responded to unlock, ret: %d", ret); return ret; } int -glusterd_handle_cluster_unlock (rpcsvc_request_t *req) +__glusterd_handle_cluster_unlock (rpcsvc_request_t *req) { gd1_mgmt_cluster_unlock_req unlock_req = {{0}, }; int32_t ret = -1; glusterd_op_lock_ctx_t *ctx = NULL; glusterd_peerinfo_t *peerinfo = NULL; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &unlock_req, - (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req)) { - //failed to decode msg; + ret = xdr_to_generic (req->msg[0], &unlock_req, + (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock " + "request received from peer"); req->rpc_err = GARBAGE_ARGS; goto out; } - gf_log ("glusterd", GF_LOG_INFO, + gf_log (this->name, GF_LOG_DEBUG, "Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid)); if (glusterd_friend_find_by_uuid (unlock_req.uuid, &peerinfo)) { - gf_log (THIS->name, GF_LOG_WARNING, "%s doesn't " + gf_log (this->name, GF_LOG_WARNING, "%s doesn't " "belong to the cluster. Ignoring request.", uuid_utoa (unlock_req.uuid)); ret = -1; @@ -1381,14 +1915,25 @@ out: } int +glusterd_handle_cluster_unlock (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cluster_unlock); +} + +int glusterd_op_stage_send_resp (rpcsvc_request_t *req, int32_t op, int32_t status, char *op_errstr, dict_t *rsp_dict) { gd1_mgmt_stage_op_rsp rsp = {{0},}; int ret = -1; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); + rsp.op_ret = status; glusterd_get_uuid (&rsp.uuid); rsp.op = op; @@ -1397,11 +1942,10 @@ glusterd_op_stage_send_resp (rpcsvc_request_t *req, else rsp.op_errstr = ""; - ret = dict_allocate_and_serialize (rsp_dict, - &rsp.dict.dict_val, - (size_t *)&rsp.dict.dict_len); + ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val, + &rsp.dict.dict_len); if (ret < 0) { - gf_log ("", GF_LOG_DEBUG, + gf_log (this->name, GF_LOG_ERROR, "failed to get serialized length of dict"); return ret; } @@ -1409,10 +1953,8 @@ glusterd_op_stage_send_resp (rpcsvc_request_t *req, ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp); - gf_log ("glusterd", GF_LOG_INFO, - "Responded to stage, ret: %d", ret); - if (rsp.dict.dict_val) - GF_FREE (rsp.dict.dict_val); + gf_log (this->name, GF_LOG_DEBUG, "Responded to stage, ret: %d", ret); + GF_FREE (rsp.dict.dict_val); return ret; } @@ -1424,7 +1966,10 @@ glusterd_op_commit_send_resp (rpcsvc_request_t *req, { gd1_mgmt_commit_op_rsp rsp = {{0}, }; int ret = -1; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); rsp.op_ret = status; glusterd_get_uuid (&rsp.uuid); @@ -1436,11 +1981,10 @@ glusterd_op_commit_send_resp (rpcsvc_request_t *req, rsp.op_errstr = ""; if (rsp_dict) { - ret = dict_allocate_and_serialize (rsp_dict, - &rsp.dict.dict_val, - (size_t *)&rsp.dict.dict_len); + ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val, + &rsp.dict.dict_len); if (ret < 0) { - gf_log ("", GF_LOG_DEBUG, + gf_log (this->name, GF_LOG_ERROR, 
"failed to get serialized length of dict"); goto out; } @@ -1450,24 +1994,24 @@ glusterd_op_commit_send_resp (rpcsvc_request_t *req, ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp); - gf_log ("glusterd", GF_LOG_INFO, - "Responded to commit, ret: %d", ret); + gf_log (this->name, GF_LOG_DEBUG, "Responded to commit, ret: %d", ret); out: - if (rsp.dict.dict_val) - GF_FREE (rsp.dict.dict_val); + GF_FREE (rsp.dict.dict_val); return ret; } int -glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) +__glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_req friend_req = {{0},}; gf_boolean_t run_fsm = _gf_true; GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &friend_req, (xdrproc_t)xdr_gd1_mgmt_friend_req)) { + ret = xdr_to_generic (req->msg[0], &friend_req, + (xdrproc_t)xdr_gd1_mgmt_friend_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; @@ -1486,8 +2030,7 @@ glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) } out: - if (friend_req.hostname) - free (friend_req.hostname);//malloced by xdr + free (friend_req.hostname);//malloced by xdr if (run_fsm) { glusterd_friend_sm (); @@ -1498,14 +2041,23 @@ out: } int -glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) +glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_incoming_friend_req); +} + +int +__glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_req friend_req = {{0},}; char remote_hostname[UNIX_PATH_MAX + 1] = {0,}; GF_ASSERT (req); - if (!xdr_to_generic (req->msg[0], &friend_req, (xdrproc_t)xdr_gd1_mgmt_friend_req)) { + ret = xdr_to_generic (req->msg[0], &friend_req, + (xdrproc_t)xdr_gd1_mgmt_friend_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; @@ -1524,10 +2076,8 @@ glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) remote_hostname, friend_req.port); out: - if (friend_req.hostname) - free (friend_req.hostname);//malloced by xdr - if (friend_req.vols.vols_val) - free (friend_req.vols.vols_val);//malloced by xdr + free (friend_req.hostname);//malloced by xdr + free (friend_req.vols.vols_val);//malloced by xdr glusterd_friend_sm (); glusterd_op_sm (); @@ -1536,6 +2086,14 @@ out: } int +glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_incoming_unfriend_req); + +} + +int glusterd_handle_friend_update_delete (dict_t *dict) { char *hostname = NULL; @@ -1581,7 +2139,7 @@ out: } int -glusterd_handle_friend_update (rpcsvc_request_t *req) +__glusterd_handle_friend_update (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_update friend_req = {{0},}; @@ -1607,7 +2165,9 @@ glusterd_handle_friend_update (rpcsvc_request_t *req) priv = this->private; GF_ASSERT (priv); - if (!xdr_to_generic (req->msg[0], &friend_req, (xdrproc_t)xdr_gd1_mgmt_friend_update)) { + ret = xdr_to_generic (req->msg[0], &friend_req, + (xdrproc_t)xdr_gd1_mgmt_friend_update); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; @@ -1667,7 +2227,13 @@ glusterd_handle_friend_update (rpcsvc_request_t *req) gf_log ("", GF_LOG_INFO, "Received uuid: %s, hostname:%s", uuid_buf, hostname); - if (!uuid_compare (uuid, priv->uuid)) { + if (uuid_is_null (uuid)) { + gf_log (this->name, GF_LOG_WARNING, "Updates mustn't " + "contain peer with 'null' uuid"); + continue; + 
-                if (!uuid_compare (uuid, priv->uuid)) {
+                if (uuid_is_null (uuid)) {
+                        gf_log (this->name, GF_LOG_WARNING, "Updates mustn't "
+                                "contain peer with 'null' uuid");
+                        continue;
+                }
+
+                if (!uuid_compare (uuid, MY_UUID)) {
                         gf_log ("", GF_LOG_INFO, "Received my uuid as Friend");
                         i++;
                         continue;
@@ -1692,7 +2258,7 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
         }
 
 out:
-        uuid_copy (rsp.uuid, priv->uuid);
+        uuid_copy (rsp.uuid, MY_UUID);
         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                      (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
         if (dict) {
@@ -1700,8 +2266,7 @@ out:
                         free (friend_req.friends.friends_val);//malloced by xdr
                 dict_unref (dict);
         } else {
-                if (friend_req.friends.friends_val)
-                        free (friend_req.friends.friends_val);//malloced by xdr
+                free (friend_req.friends.friends_val);//malloced by xdr
         }
 
         glusterd_friend_sm ();
@@ -1711,7 +2276,14 @@ out:
 }
 
 int
-glusterd_handle_probe_query (rpcsvc_request_t *req)
+glusterd_handle_friend_update (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            __glusterd_handle_friend_update);
+}
+
+int
+__glusterd_handle_probe_query (rpcsvc_request_t *req)
 {
         int32_t             ret = -1;
         xlator_t            *this = NULL;
@@ -1725,7 +2297,9 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
 
         GF_ASSERT (req);
 
-        if (!xdr_to_generic (req->msg[0], &probe_req,
-                             (xdrproc_t)xdr_gd1_mgmt_probe_req)) {
+        ret = xdr_to_generic (req->msg[0], &probe_req,
+                              (xdrproc_t)xdr_gd1_mgmt_probe_req);
+        if (ret < 0) {
                 //failed to decode msg;
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
@@ -1742,6 +2316,20 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
         gf_log ("glusterd", GF_LOG_INFO,
                 "Received probe from uuid: %s", uuid_utoa (probe_req.uuid));
 
+        /* Check for uuid collision and handle it in a user friendly way by
+         * sending the error.
+         */
+        if (!uuid_compare (probe_req.uuid, MY_UUID)) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Peer uuid %s is same as "
+                        "local uuid. Please check the uuid of both the peers "
+                        "from %s/%s", uuid_utoa (probe_req.uuid),
+                        GLUSTERD_DEFAULT_WORKDIR, GLUSTERD_INFO_FILE);
+                rsp.op_ret = -1;
+                rsp.op_errno = GF_PROBE_SAME_UUID;
+                rsp.port = port;
+                goto respond;
+        }
+
         ret = glusterd_remote_hostname_get (req, remote_hostname,
                                             sizeof (remote_hostname));
         if (ret) {
@@ -1766,20 +2354,22 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
                 }
         }
 
-        uuid_copy (rsp.uuid, conf->uuid);
+respond:
+        uuid_copy (rsp.uuid, MY_UUID);
         rsp.hostname = probe_req.hostname;
+        rsp.op_errstr = "";
 
-        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
+        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+                               (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
+        ret = 0;
 
         gf_log ("glusterd", GF_LOG_INFO, "Responded to %s, op_ret: %d, "
-                "op_errno: %d, ret: %d", probe_req.hostname,
+                "op_errno: %d, ret: %d", remote_hostname,
                 rsp.op_ret, rsp.op_errno, ret);
 
 out:
-        if (probe_req.hostname)
-                free (probe_req.hostname);//malloced by xdr
+        free (probe_req.hostname);//malloced by xdr
 
         glusterd_friend_sm ();
         glusterd_op_sm ();
@@ -1787,8 +2377,13 @@ out:
         return ret;
 }
 
+int
+glusterd_handle_probe_query (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req, __glusterd_handle_probe_query);
+}
+
 int
-glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
 {
         int32_t                         ret = -1;
         gf_cli_req                      cli_req = {{0,}};
@@ -1796,10 +2391,15 @@ __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
         glusterd_op_t                   cli_op = GD_OP_PROFILE_VOLUME;
         char                            *volname = NULL;
         int32_t                         op = 0;
+        char                            err_str[2048] = {0,};
+        xlator_t                        *this = NULL;
 
         GF_ASSERT (req);
+        this = THIS;
+        GF_ASSERT (this);
 
-        if (!xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req)) {
+        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+        if (ret < 0) {
                 //failed to decode msg;
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
@@ -1815,42 +2415,50 @@ __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
 
         ret = dict_get_str (dict, "volname", &volname);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "failed to get volname");
+                snprintf (err_str, sizeof (err_str), "Unable to get volume "
+                          "name");
+                gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
                 goto out;
         }
 
-        gf_log (THIS->name, GF_LOG_INFO, "Received volume profile req "
+        gf_log (this->name, GF_LOG_INFO, "Received volume profile req "
                 "for volume %s", volname);
         ret = dict_get_int32 (dict, "op", &op);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "failed to get op");
+                snprintf (err_str, sizeof (err_str), "Unable to get operation");
+                gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
                 goto out;
         }
 
-        gf_cmd_log ("Volume stats", "volume : %s, op: %d", volname, op);
-        ret = glusterd_op_begin (req, cli_op, dict);
-        gf_cmd_log ("Volume stats", " on volume %s, op: %d %s ",
-                    volname, op,
-                    ((ret == 0)? " SUCCEDED":" FAILED"));
+        ret = glusterd_op_begin (req, cli_op, dict, err_str, sizeof (err_str));
 
 out:
         glusterd_friend_sm ();
         glusterd_op_sm ();
 
-        if (ret && dict)
-                dict_unref (dict);
-        if (cli_req.dict.dict_val)
-                free (cli_req.dict.dict_val);
-        if (ret)
+        free (cli_req.dict.dict_val);
+
+        if (ret) {
+                if (err_str[0] == '\0')
+                        snprintf (err_str, sizeof (err_str),
+                                  "Operation failed");
                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
-                                                     NULL, "operation failed");
+                                                     dict, err_str);
+        }
 
-        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
         return ret;
 }
 
 int
-glusterd_handle_getwd (rpcsvc_request_t *req)
+glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            __glusterd_handle_cli_profile_volume);
+}
+
+int
+__glusterd_handle_getwd (rpcsvc_request_t *req)
 {
         int32_t                 ret = -1;
         gf1_cli_getwd_rsp       rsp = {0,};
@@ -1865,8 +2473,9 @@ __glusterd_handle_getwd (rpcsvc_request_t *req)
 
         rsp.wd = priv->workdir;
 
-        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gf1_cli_getwd_rsp);
+        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+                               (xdrproc_t)xdr_gf1_cli_getwd_rsp);
+        ret = 0;
 
         glusterd_friend_sm ();
         glusterd_op_sm ();
@@ -1874,18 +2483,27 @@ __glusterd_handle_getwd (rpcsvc_request_t *req)
         return ret;
 }
 
+int
+glusterd_handle_getwd (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req, __glusterd_handle_getwd);
+}
 
 int
-glusterd_handle_mount (rpcsvc_request_t *req)
+__glusterd_handle_mount (rpcsvc_request_t *req)
 {
         gf1_cli_mount_req       mnt_req = {0,};
         gf1_cli_mount_rsp       rsp     = {0,};
         dict_t                  *dict   = NULL;
         int                     ret     = 0;
+        glusterd_conf_t         *priv   = NULL;
 
         GF_ASSERT (req);
+        priv = THIS->private;
 
-        if (!xdr_to_generic (req->msg[0], &mnt_req,
-                             (xdrproc_t)xdr_gf1_cli_mount_req)) {
+        ret = xdr_to_generic (req->msg[0], &mnt_req,
                              (xdrproc_t)xdr_gf1_cli_mount_req);
+        if (ret < 0) {
                 //failed to decode msg;
                 req->rpc_err = GARBAGE_ARGS;
                 rsp.op_ret = -1;
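The next hunk brackets the blocking glusterd_do_mount call with an unlock/lock pair on the big lock (the umount path further down does the same around runner_run): anything that can block must give up the lock so other actors keep making progress. The idiom, reduced to a skeleton (the blocking helper is hypothetical):

        synclock_unlock (&priv->big_lock);   /* let other handlers run      */
        ret = some_blocking_call ();         /* hypothetical; must not touch
                                                glusterd state while unlocked */
        synclock_lock (&priv->big_lock);     /* re-serialize before touching
                                                shared state again           */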
         if (dict)
                 dict_unref (dict);
@@ -1936,7 +2557,13 @@ glusterd_handle_mount (rpcsvc_request_t *req)
 }

 int
-glusterd_handle_umount (rpcsvc_request_t *req)
+glusterd_handle_mount (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req, __glusterd_handle_mount);
+}
+
+int
+__glusterd_handle_umount (rpcsvc_request_t *req)
 {
         gf1_cli_umount_req  umnt_req = {0,};
         gf1_cli_umount_rsp  rsp = {0,};
@@ -1949,11 +2576,15 @@ glusterd_handle_umount (rpcsvc_request_t *req)
         gf_boolean_t        dir_ok = _gf_false;
         char               *pdir = NULL;
         char               *t = NULL;
+        glusterd_conf_t    *priv = NULL;

         GF_ASSERT (req);
         GF_ASSERT (this);
+        priv = this->private;

-        if (!xdr_to_generic (req->msg[0], &umnt_req, (xdrproc_t)xdr_gf1_cli_umount_req)) {
+        ret = xdr_to_generic (req->msg[0], &umnt_req,
+                              (xdrproc_t)xdr_gf1_cli_umount_req);
+        if (ret < 0) {
                 //failed to decode msg;
                 req->rpc_err = GARBAGE_ARGS;
                 rsp.op_ret = -1;
@@ -1992,7 +2623,9 @@ glusterd_handle_umount (rpcsvc_request_t *req)
         runner_add_args (&runner, "umount", umnt_req.path, NULL);
         if (umnt_req.lazy)
                 runner_add_arg (&runner, "-l");
+        synclock_unlock (&priv->big_lock);
         rsp.op_ret = runner_run (&runner);
+        synclock_lock (&priv->big_lock);
         if (rsp.op_ret == 0) {
                 if (realpath (umnt_req.path, mntp))
                         rmdir (mntp);
@@ -2010,8 +2643,9 @@ glusterd_handle_umount (rpcsvc_request_t *req)
         if (rsp.op_errno)
                 rsp.op_ret = -1;

-        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gf1_cli_umount_rsp);
+        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+                               (xdrproc_t)xdr_gf1_cli_umount_rsp);
+        ret = 0;

         glusterd_friend_sm ();
         glusterd_op_sm ();
@@ -2020,6 +2654,12 @@ glusterd_handle_umount (rpcsvc_request_t *req)
 }

 int
+glusterd_handle_umount (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req, __glusterd_handle_umount);
+}
+
+int
 glusterd_friend_remove (uuid_t uuid, char *hostname)
 {
         int ret = 0;
@@ -2070,7 +2710,7 @@ out:
                 }
         }

-        gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
+        gf_log (this->name, GF_LOG_DEBUG, "returning %d", ret);
         return ret;
 }

@@ -2093,8 +2733,8 @@ glusterd_transport_keepalive_options_get (int *interval, int *time)
 }

 int
-glusterd_transport_inet_keepalive_options_build (dict_t **options,
-                                                 const char *hostname, int port)
+glusterd_transport_inet_options_build (dict_t **options, const char *hostname,
+                                       int port)
 {
         dict_t  *dict = NULL;
         int32_t  interval = -1;
@@ -2106,10 +2746,25 @@ glusterd_transport_inet_keepalive_options_build (dict_t **options,

         if (!port)
                 port = GLUSTERD_DEFAULT_PORT;
+
+        /* Build default transport options */
         ret = rpc_transport_inet_options_build (&dict, hostname, port);
         if (ret)
                 goto out;

+        /* Set frame-timeout to 10mins. Default timeout of 30 mins is too long
+         * when compared to 2 mins for cli timeout. This ensures users don't
+         * wait too long after cli timesout before being able to resume normal
+         * operations
+         */
+        ret = dict_set_int32 (dict, "frame-timeout", 600);
+        if (ret) {
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Failed to set frame-timeout");
+                goto out;
+        }
+
+        /* Set keepalive options */
         glusterd_transport_keepalive_options_get (&interval, &time);

         if ((interval > 0) || (time > 0))
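
Note: every peer connection is configured through this options dict; the new
frame-timeout of 600 seconds caps how long a pending request may wait, since
the CLI itself gives up after two minutes. A distilled sketch of the same
construction; the failure-path dict_unref is added for the sketch's
self-containment, while the function above leaves cleanup to its caller:

    /* Sketch: assemble peer transport options as above. 600s matches
     * the frame-timeout this patch installs. */
    static int
    build_peer_options (dict_t **options, const char *hostname, int port)
    {
            dict_t *dict = NULL;
            int     ret  = -1;

            ret = rpc_transport_inet_options_build (&dict, hostname, port);
            if (ret)
                    goto out;

            /* Fail pending frames after 10 minutes instead of the
             * 30 minute default. */
            ret = dict_set_int32 (dict, "frame-timeout", 600);
            if (ret)
                    goto out;

            *options = dict;
            dict = NULL;
    out:
            if (dict)
                    dict_unref (dict);
            return ret;
    }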
@@ -2121,65 +2776,117 @@ out:
 }

 int
-glusterd_friend_add (const char *hoststr, int port,
-                     glusterd_friend_sm_state_t state,
-                     uuid_t *uuid,
-                     glusterd_peerinfo_t **friend,
-                     gf_boolean_t restore,
-                     glusterd_peerctx_args_t *args)
+glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
+                            glusterd_peerctx_args_t *args)
 {
-        int                  ret = 0;
-        glusterd_conf_t     *conf = NULL;
-        glusterd_peerctx_t  *peerctx = NULL;
         dict_t              *options = NULL;
-        gf_boolean_t         handover = _gf_false;
-
-        conf = THIS->private;
-        GF_ASSERT (conf)
-        GF_ASSERT (hoststr);
+        int                  ret = -1;
+        glusterd_peerctx_t  *peerctx = NULL;
+        data_t              *data = NULL;

         peerctx = GF_CALLOC (1, sizeof (*peerctx), gf_gld_mt_peerctx_t);
-        if (!peerctx) {
-                ret = -1;
+        if (!peerctx)
                 goto out;
-        }

         if (args)
                 peerctx->args = *args;

-        ret = glusterd_peerinfo_new (friend, state, uuid, hoststr);
+        peerctx->peerinfo = peerinfo;
+
+        ret = glusterd_transport_inet_options_build (&options,
+                                                     peerinfo->hostname,
+                                                     peerinfo->port);
         if (ret)
                 goto out;

-        peerctx->peerinfo = *friend;
+        /*
+         * For simulated multi-node testing, we need to make sure that we
+         * create our RPC endpoint with the same address that the peer would
+         * use to reach us.
+         */
+        if (this->options) {
+                data = dict_get(this->options,"transport.socket.bind-address");
+                if (data) {
+                        ret = dict_set(options,
+                                       "transport.socket.source-addr",data);
+                }
+        }

-        ret = glusterd_transport_inet_keepalive_options_build (&options,
-                                                               hoststr, port);
-        if (ret)
+        ret = glusterd_rpc_create (&peerinfo->rpc, options,
+                                   glusterd_peer_rpc_notify, peerctx);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "failed to create rpc for"
+                        " peer %s", peerinfo->hostname);
                 goto out;
+        }
+        peerctx = NULL;
+        ret = 0;
+out:
+        GF_FREE (peerctx);
+        return ret;
+}

-        ret = glusterd_rpc_create (&(*friend)->rpc, options,
-                                   glusterd_peer_rpc_notify,
-                                   peerctx);
+int
+glusterd_friend_add (const char *hoststr, int port,
+                     glusterd_friend_sm_state_t state,
+                     uuid_t *uuid,
+                     glusterd_peerinfo_t **friend,
+                     gf_boolean_t restore,
+                     glusterd_peerctx_args_t *args)
+{
+        int               ret = 0;
+        xlator_t         *this = NULL;
+        glusterd_conf_t  *conf = NULL;
+
+        this = THIS;
+        conf = this->private;
+        GF_ASSERT (conf);
+        GF_ASSERT (hoststr);
+
+        ret = glusterd_peerinfo_new (friend, state, uuid, hoststr, port);
         if (ret) {
-                gf_log ("glusterd", GF_LOG_ERROR, "failed to create rpc for"
-                        " peer %s", (char*)hoststr);
                 goto out;
         }

-        handover = _gf_true;
-out:
-        if (ret && !handover) {
-                (void) glusterd_friend_cleanup (*friend);
-                *friend = NULL;
+        /*
+         * We can't add to the list after calling glusterd_friend_rpc_create,
+         * even if it succeeds, because by then the callback to take it back
+         * off and free might have happened already (notably in the case of an
+         * invalid peer name). That would mean we're adding something that had
+         * just been free, and we're likely to crash later.
+         */
+        list_add_tail (&(*friend)->uuid_list, &conf->peers);
+
+        //restore needs to first create the list of peers, then create rpcs
+        //to keep track of quorum in race-free manner. In restore for each peer
+        //rpc-create calls rpc_notify when the friend-list is partially
+        //constructed, leading to wrong quorum calculations.
+        if (!restore) {
+                ret = glusterd_store_peerinfo (*friend);
+                if (ret == 0) {
+                        synclock_unlock (&conf->big_lock);
+                        ret = glusterd_friend_rpc_create (this, *friend, args);
+                        synclock_lock (&conf->big_lock);
+                }
+                else {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Failed to store peerinfo");
+                }
+        }
+
+        if (ret) {
+                (void) glusterd_friend_cleanup (*friend);
+                *friend = NULL;
         }

-        gf_log ("glusterd", GF_LOG_INFO, "connect returned %d", ret);
+out:
+        gf_log (this->name, GF_LOG_INFO, "connect returned %d", ret);
         return ret;
 }

 int
-glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
+glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
+                      dict_t *dict)
 {
         int                  ret = -1;
         glusterd_peerinfo_t *peerinfo = NULL;
@@ -2195,6 +2902,7 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
                         " for host: %s (%d)", hoststr, port);
                 args.mode = GD_MODE_ON;
                 args.req  = req;
+                args.dict = dict;

                 ret = glusterd_friend_add ((char *)hoststr, port,
                                            GD_FRIEND_STATE_DEFAULT,
                                            NULL, &peerinfo, 0, &args);
@@ -2215,11 +2923,12 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
                         event->peerinfo = peerinfo;
                         ret = glusterd_friend_sm_inject_event (event);
                         glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_SUCCESS,
-                                                      (char*)hoststr, port);
+                                                      NULL, (char*)hoststr,
+                                                      port, dict);
                 }

         } else {
-                glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND,
-                                              (char*)hoststr, port);
+                glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
+                                              (char*)hoststr, port, dict);
         }

 out:
@@ -2229,7 +2938,7 @@ out:

 int
 glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
-                        uuid_t uuid)
+                        uuid_t uuid, dict_t *dict)
 {
         int                  ret = -1;
         glusterd_peerinfo_t *peerinfo = NULL;
@@ -2270,6 +2979,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
         ctx->hostname = gf_strdup (hoststr);
         ctx->port = port;
         ctx->req = req;
+        ctx->dict = dict;

         event->ctx = ctx;

@@ -2304,7 +3014,7 @@ glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int por

         conf = this->private;

-        uuid_copy (rsp.uuid, conf->uuid);
+        uuid_copy (rsp.uuid, MY_UUID);
         rsp.hostname = hostname;
         rsp.port = port;
         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
@@ -2317,76 +3027,230 @@ glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int por

 int
-glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *hostname, int port,
-                               int32_t op_ret, int32_t op_errno)
+glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
+                               char *remote_hostname, int port, int32_t op_ret,
+                               int32_t op_errno)
 {
         gd1_mgmt_friend_rsp  rsp = {{0}, };
         int32_t              ret = -1;
         xlator_t            *this = NULL;
         glusterd_conf_t     *conf = NULL;

-        GF_ASSERT (hostname);
+        GF_ASSERT (myhostname);

         this = THIS;
         GF_ASSERT (this);

         conf = this->private;

-        uuid_copy (rsp.uuid, conf->uuid);
+        uuid_copy (rsp.uuid, MY_UUID);
         rsp.op_ret = op_ret;
         rsp.op_errno = op_errno;
-        rsp.hostname = gf_strdup (hostname);
+        rsp.hostname = gf_strdup (myhostname);
         rsp.port = port;

         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                      (xdrproc_t)xdr_gd1_mgmt_friend_rsp);

         gf_log ("glusterd", GF_LOG_INFO,
-                "Responded to %s (%d), ret: %d", hostname, port, ret);
-        if (rsp.hostname)
-                GF_FREE (rsp.hostname);
+                "Responded to %s (%d), ret: %d", remote_hostname, port, ret);
+        GF_FREE (rsp.hostname);
         return ret;
 }

+static void
+get_probe_error_str (int op_ret, int op_errno, char *errstr, size_t len,
+                     char *hostname, int port)
+{
+
+        if (!op_ret) {
+                switch (op_errno) {
+                case GF_PROBE_LOCALHOST:
+                        snprintf (errstr, len, "Probe on localhost not "
+                                  "needed");
+                        break;
+
+                case GF_PROBE_FRIEND:
+                        snprintf (errstr, len, "Host %s port %d already"
+                                  " in peer list", hostname, port);
+                        break;
+
+                default:
+                        if (op_errno != 0)
+                                snprintf (errstr, len, "Probe returned "
+                                          "with unknown errno %d",
+                                          op_errno);
+                        break;
+                }
+        } else {
+                switch (op_errno) {
+                case GF_PROBE_ANOTHER_CLUSTER:
+                        snprintf (errstr, len, "%s is already part of "
+                                  "another cluster", hostname);
+                        break;
+
+                case GF_PROBE_VOLUME_CONFLICT:
+                        snprintf (errstr, len, "Atleast one volume on "
+                                  "%s conflicts with existing volumes "
+                                  "in the cluster", hostname);
+                        break;
+
+                case GF_PROBE_UNKNOWN_PEER:
+                        snprintf (errstr, len, "%s responded with "
+                                  "'unknown peer' error, this could "
+                                  "happen if %s doesn't have localhost "
+                                  "in its peer database", hostname,
+                                  hostname);
+                        break;
+
+                case GF_PROBE_ADD_FAILED:
+                        snprintf (errstr, len, "Failed to add peer "
+                                  "information on %s", hostname);
+                        break;
+
+                case GF_PROBE_SAME_UUID:
+                        snprintf (errstr, len, "Peer uuid (host %s) is "
+                                  "same as local uuid", hostname);
+                        break;
+
+                case GF_PROBE_QUORUM_NOT_MET:
+                        snprintf (errstr, len, "Cluster quorum is not "
+                                  "met. Changing peers is not allowed "
+                                  "in this state");
+                        break;
+
+                default:
+                        snprintf (errstr, len, "Probe returned with "
+                                  "unknown errno %d", op_errno);
+                        break;
+                }
+        }
+}
+
 int
 glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
-                              int32_t op_errno, char *hostname, int port)
+                              int32_t op_errno, char *op_errstr, char *hostname,
+                              int port, dict_t *dict)
 {
-        gf1_cli_probe_rsp  rsp = {0, };
+        gf_cli_rsp         rsp = {0,};
         int32_t            ret = -1;
+        char               errstr[2048] = {0,};
+        char              *cmd_str = NULL;
+        xlator_t          *this = THIS;

         GF_ASSERT (req);
+        GF_ASSERT (this);
+
+        if (op_errstr == NULL)
+                (void) get_probe_error_str (op_ret, op_errno, errstr,
+                                            sizeof (errstr), hostname, port);
+        else
+                snprintf (errstr, sizeof (errstr), "%s", op_errstr);
+
+        if (dict) {
+                ret = dict_get_str (dict, "cmd-str", &cmd_str);
+                if (ret)
+                        gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+                                "command string");
+        }

         rsp.op_ret = op_ret;
         rsp.op_errno = op_errno;
-        rsp.hostname = hostname;
-        rsp.port = port;
+        rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
+
+        gf_cmd_log ("", "%s : %s %s %s", cmd_str,
+                    (op_ret) ? "FAILED" : "SUCCESS",
+                    (errstr[0] != '\0') ? ":" : " ",
+                    (errstr[0] != '\0') ? errstr : " ");

         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gf1_cli_probe_rsp);
+                                     (xdrproc_t)xdr_gf_cli_rsp);

-        gf_log ("glusterd", GF_LOG_INFO, "Responded to CLI, ret: %d",ret);
+        if (dict)
+                dict_unref (dict);

+        gf_log (this->name, GF_LOG_DEBUG, "Responded to CLI, ret: %d",ret);

         return ret;
 }

+static void
+get_deprobe_error_str (int op_ret, int op_errno, char *errstr, size_t len,
+                       char *hostname)
+{
+        if (op_ret) {
+                switch (op_errno) {
+                case GF_DEPROBE_LOCALHOST:
+                        snprintf (errstr, len, "%s is localhost",
+                                  hostname);
+                        break;
+
+                case GF_DEPROBE_NOT_FRIEND:
+                        snprintf (errstr, len, "%s is not part of "
+                                  "cluster", hostname);
+                        break;
+
+                case GF_DEPROBE_BRICK_EXIST:
+                        snprintf (errstr, len, "Brick(s) with the peer "
+                                  "%s exist in cluster", hostname);
+                        break;
+
+                case GF_DEPROBE_FRIEND_DOWN:
+                        snprintf (errstr, len, "One of the peers is "
+                                  "probably down. Check with "
+                                  "'peer status'");
+                        break;
+
+                case GF_DEPROBE_QUORUM_NOT_MET:
+                        snprintf (errstr, len, "Cluster quorum is not "
+                                  "met. Changing peers is not allowed "
+                                  "in this state");
+                        break;
+
+                default:
+                        snprintf (errstr, len, "Detach returned with "
+                                  "unknown errno %d", op_errno);
+                        break;
+
+                }
+        }
+}
+
+
 int
 glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret,
-                                int32_t op_errno, char *hostname)
+                                int32_t op_errno, char *op_errstr,
+                                char *hostname, dict_t *dict)
 {
-        gf1_cli_deprobe_rsp  rsp = {0, };
+        gf_cli_rsp           rsp = {0,};
         int32_t              ret = -1;
+        char                *cmd_str = NULL;
+        char                 errstr[2048] = {0,};

         GF_ASSERT (req);

+        (void) get_deprobe_error_str (op_ret, op_errno, errstr, sizeof (errstr),
+                                      hostname);
+
+        if (dict) {
+                ret = dict_get_str (dict, "cmd-str", &cmd_str);
+                if (ret)
+                        gf_log (THIS->name, GF_LOG_ERROR, "Failed to get "
+                                "command string");
+        }
+
         rsp.op_ret = op_ret;
         rsp.op_errno = op_errno;
-        rsp.hostname = hostname;
+        rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
+
+        gf_cmd_log ("", "%s : %s %s %s", cmd_str,
+                    (op_ret) ? "FAILED" : "SUCCESS",
+                    (errstr[0] != '\0') ? ":" : " ",
+                    (errstr[0] != '\0') ? errstr : " ");

         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gf1_cli_deprobe_rsp);
+                                     (xdrproc_t)xdr_gf_cli_rsp);

-        gf_log ("glusterd", GF_LOG_INFO, "Responded to CLI, ret: %d",ret);
+        gf_log (THIS->name, GF_LOG_DEBUG, "Responded to CLI, ret: %d",ret);

         return ret;
 }

@@ -2400,39 +3264,52 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
         int32_t                count = 0;
         dict_t                *friends = NULL;
         gf1_cli_peer_list_rsp  rsp = {0,};
+        char                   my_uuid_str[64] = {0,};
+        char                   key[256] = {0,};

         priv = THIS->private;
         GF_ASSERT (priv);

-        if (!list_empty (&priv->peers)) {
-                friends = dict_new ();
-                if (!friends) {
-                        gf_log ("", GF_LOG_WARNING, "Out of Memory");
-                        goto out;
-                }
-        } else {
-                ret = 0;
+        friends = dict_new ();
+        if (!friends) {
+                gf_log ("", GF_LOG_WARNING, "Out of Memory");
                 goto out;
         }
-
-        if (flags == GF_CLI_LIST_ALL) {
-                list_for_each_entry (entry, &priv->peers, uuid_list) {
-                        count++;
-                        ret = glusterd_add_peer_detail_to_dict (entry,
+        if (!list_empty (&priv->peers)) {
+                list_for_each_entry (entry, &priv->peers, uuid_list) {
+                        count++;
+                        ret = glusterd_add_peer_detail_to_dict (entry,
                                                                 friends, count);
-                        if (ret)
-                                goto out;
+                        if (ret)
+                                goto out;
+                }
+        }

-                }
+        if (flags == GF_CLI_LIST_POOL_NODES) {
+                count++;
+                snprintf (key, 256, "friend%d.uuid", count);
+                uuid_utoa_r (MY_UUID, my_uuid_str);
+                ret = dict_set_str (friends, key, my_uuid_str);
+                if (ret)
+                        goto out;

-                ret = dict_set_int32 (friends, "count", count);
+                snprintf (key, 256, "friend%d.hostname", count);
+                ret = dict_set_str (friends, key, "localhost");
+                if (ret)
+                        goto out;

-                if (ret)
-                        goto out;
+                snprintf (key, 256, "friend%d.connected", count);
+                ret = dict_set_int32 (friends, key, 1);
+                if (ret)
+                        goto out;
         }

+        ret = dict_set_int32 (friends, "count", count);
+        if (ret)
+                goto out;
+
         ret = dict_allocate_and_serialize (friends, &rsp.friends.friends_val,
-                                           (size_t *)&rsp.friends.friends_len);
+                                           &rsp.friends.friends_len);
         if (ret)
                 goto out;

@@ -2445,10 +3322,10 @@ out:

         rsp.op_ret = ret;

-        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
-        if (rsp.friends.friends_val)
-                GF_FREE (rsp.friends.friends_val);
+        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+                               (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
+        ret = 0;
+        GF_FREE (rsp.friends.friends_val);

         return ret;
 }
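
Note: the peer list crosses to the CLI as a serialized dict with a
"friend<N>.<field>" key layout plus a "count" key, flattened by
dict_allocate_and_serialize into the opaque buffer of the XDR response.
A minimal sketch of packing one entry; pack_peer_list is illustrative, and
the unsigned length type mirrors the friends_len response field:

    /* Sketch: pack one peer entry and serialize the dict for the wire. */
    static int
    pack_peer_list (char **buf, u_int *len)
    {
            dict_t *friends = dict_new ();
            int     ret     = -1;

            if (!friends)
                    goto out;
            ret = dict_set_str (friends, "friend1.hostname", "localhost");
            if (ret)
                    goto out;
            ret = dict_set_int32 (friends, "friend1.connected", 1);
            if (ret)
                    goto out;
            ret = dict_set_int32 (friends, "count", 1);
            if (ret)
                    goto out;

            /* Allocates *buf; the caller must GF_FREE it once the reply
             * has been submitted, as glusterd_list_friends does above. */
            ret = dict_allocate_and_serialize (friends, buf, len);
    out:
            if (friends)
                    dict_unref (friends);
            return ret;
    }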
@@ -2539,7 +3416,7 @@ respond:
         if (ret)
                 goto out;

         ret = dict_allocate_and_serialize (volumes, &rsp.dict.dict_val,
-                                           (size_t *)&rsp.dict.dict_len);
+                                           &rsp.dict.dict_len);
         if (ret)
                 goto out;

@@ -2549,19 +3426,19 @@ out:
         rsp.op_ret = ret;

         rsp.op_errstr = "";
-        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
-                                     (xdrproc_t)xdr_gf_cli_rsp);
+        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+                               (xdrproc_t)xdr_gf_cli_rsp);
+        ret = 0;

         if (volumes)
                 dict_unref (volumes);

-        if (rsp.dict.dict_val)
-                GF_FREE (rsp.dict.dict_val);
+        GF_FREE (rsp.dict.dict_val);
         return ret;
 }

 int
-glusterd_handle_status_volume (rpcsvc_request_t *req)
+__glusterd_handle_status_volume (rpcsvc_request_t *req)
 {
         int32_t        ret = -1;
         uint32_t       cmd = 0;
@@ -2569,11 +3446,15 @@ glusterd_handle_status_volume (rpcsvc_request_t *req)
         char          *volname = 0;
         gf_cli_req     cli_req = {{0,}};
         glusterd_op_t  cli_op = GD_OP_STATUS_VOLUME;
+        char           err_str[2048] = {0,};
+        xlator_t      *this = NULL;

         GF_ASSERT (req);
+        this = THIS;
+        GF_ASSERT (this);

-        if (!xdr_to_generic (req->msg[0], &cli_req,
-                             (xdrproc_t)xdr_gf_cli_req)) {
+        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+        if (ret < 0) {
                 //failed to decode msg;
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
@@ -2586,8 +3467,10 @@ glusterd_handle_status_volume (rpcsvc_request_t *req)
                 ret = dict_unserialize (cli_req.dict.dict_val,
                                         cli_req.dict.dict_len, &dict);
                 if (ret < 0) {
-                        gf_log (THIS->name, GF_LOG_ERROR, "failed to "
+                        gf_log (this->name, GF_LOG_ERROR, "failed to "
                                 "unserialize buffer");
+                        snprintf (err_str, sizeof (err_str), "Unable to decode "
+                                  "the command");
                         goto out;
                 }

@@ -2600,48 +3483,57 @@ glusterd_handle_status_volume (rpcsvc_request_t *req)
         if (!(cmd & GF_CLI_STATUS_ALL)) {
                 ret = dict_get_str (dict, "volname", &volname);
                 if (ret) {
-                        gf_log (THIS->name, GF_LOG_ERROR,
-                                "failed to get volname");
+                        snprintf (err_str, sizeof (err_str), "Unable to get "
+                                  "volume name");
+                        gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
                         goto out;
                 }
-                gf_log (THIS->name, GF_LOG_INFO,
-                        "Received status volume req "
-                        "for volume %s", volname);
+                gf_log (this->name, GF_LOG_INFO,
+                        "Received status volume req for volume %s", volname);
         }

-        ret = glusterd_op_begin (req, GD_OP_STATUS_VOLUME, dict);
+        ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);

 out:
-        if (ret && dict)
-                dict_unref (dict);
-
-        glusterd_friend_sm ();
-        glusterd_op_sm ();
-
-        if (ret)
+        if (ret) {
+                if (err_str[0] == '\0')
+                        snprintf (err_str, sizeof (err_str),
+                                  "Operation failed");
                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
-                                                     NULL, "operation failed");
-        if (cli_req.dict.dict_val)
-                free (cli_req.dict.dict_val);
+                                                     dict, err_str);
+        }
+        free (cli_req.dict.dict_val);

         return ret;
 }

 int
-glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
+glusterd_handle_status_volume (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            __glusterd_handle_status_volume);
+}
+
+int
+__glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
 {
         int32_t        ret = -1;
         gf_cli_req     cli_req = {{0,}};
         glusterd_op_t  cli_op = GD_OP_CLEARLOCKS_VOLUME;
         char          *volname = NULL;
         dict_t        *dict = NULL;
+        char           err_str[2048] = {0,};
+        xlator_t      *this = NULL;

         GF_ASSERT (req);
+        this = THIS;
+        GF_ASSERT (this);

         ret = -1;
-        if (!xdr_to_generic (req->msg[0], &cli_req,
-                             (xdrproc_t)xdr_gf_cli_req)) {
+        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+        if (ret < 0) {
                 req->rpc_err = GARBAGE_ARGS;
                 goto out;
         }
@@ -2653,60 +3545,106 @@ glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
                                         cli_req.dict.dict_len,
                                         &dict);
                 if (ret < 0) {
-                        gf_log (THIS->name,
+                        gf_log (this->name,
                                 "failed to unserialize req-buffer to"
                                 " dictionary");
+                        snprintf (err_str, sizeof (err_str), "unable to decode "
+                                  "the command");
                         goto out;
                 }

         } else {
                 ret = -1;
-                gf_log (THIS->name, GF_LOG_ERROR, "Empty cli request.");
+                gf_log (this->name, GF_LOG_ERROR, "Empty cli request.");
                 goto out;
         }

         ret = dict_get_str (dict, "volname", &volname);
         if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR, "failed to get volname");
+                snprintf (err_str, sizeof (err_str), "Unable to get volume "
+                          "name");
+                gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
                 goto out;
         }

-        gf_log (THIS->name, GF_LOG_INFO, "Received clear-locks volume req "
+        gf_log (this->name, GF_LOG_INFO, "Received clear-locks volume req "
                 "for volume %s", volname);

-        ret = glusterd_op_begin (req, cli_op, dict);
-
-        gf_cmd_log ("clear-locks", "on volume %s %s", volname,
-                    ((0 == ret) ? "SUCCEEDED" : "FAILED"));
+        ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict);

 out:
-        if (ret && dict)
-                dict_unref (dict);
+        if (ret) {
+                if (err_str[0] == '\0')
+                        snprintf (err_str, sizeof (err_str),
+                                  "Operation failed");
+                ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+                                                     dict, err_str);
+        }
+        free (cli_req.dict.dict_val);

-        glusterd_friend_sm ();
-        glusterd_op_sm ();
+        return ret;
+}

+int
+glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            __glusterd_handle_cli_clearlocks_volume);
+}
+
+static int
+get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
+{
+        glusterd_volinfo_t  *volinfo = NULL;
+        char                *volid_str = NULL;
+        char                *brick = NULL;
+        char                *brickid_dup = NULL;
+        uuid_t               volid = {0};
+        int                  ret = -1;
+
+        brickid_dup = gf_strdup (brickid);
+        if (!brickid_dup)
+                goto out;
+
+        volid_str = brickid_dup;
+        brick = strchr (brickid_dup, ':');
+        *brick = '\0';
+        brick++;
+        if (!volid_str || !brick)
+                goto out;
+
+        uuid_parse (volid_str, volid);
+        ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo);
         if (ret)
-                ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
-                                                     NULL, "operation failed");
-        if (cli_req.dict.dict_val)
-                free (cli_req.dict.dict_val);
+                goto out;
+
+        ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
+                                                      brickinfo);
+        if (ret)
+                goto out;
+        ret = 0;
 out:
+        GF_FREE (brickid_dup);
         return ret;
 }
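
Note: brick RPC callbacks now receive a string brick id instead of a raw
brickinfo pointer; get_brickinfo_from_brickid splits it at the first ':'
into a volume uuid and a "host:path" brick spec. A self-contained sketch of
that split using only libc and libuuid; the id layout is inferred from the
split logic, and unlike the handler the sketch also guards the strchr result
against ids with no ':' at all:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <uuid/uuid.h>

    /* Split "volume-uuid:host:path" into its uuid and brick parts. */
    static int
    parse_brickid (const char *brickid, uuid_t volid, char **brick)
    {
            char *dup = strdup (brickid);
            char *sep = NULL;

            if (!dup)
                    return -1;
            sep = strchr (dup, ':');
            if (!sep) {                    /* malformed id */
                    free (dup);
                    return -1;
            }
            *sep = '\0';
            if (uuid_parse (dup, volid) != 0) {
                    free (dup);
                    return -1;
            }
            *brick = strdup (sep + 1);     /* "host:path" remainder */
            free (dup);
            return *brick ? 0 : -1;
    }

    int
    main (void)
    {
            uuid_t  volid;
            char   *brick = NULL;

            if (!parse_brickid ("6a9a3a2a-0c7c-4b7e-8f2b-6a3f6a1b2c3d:"
                                "node1:/bricks/b1", volid, &brick))
                    printf ("brick = %s\n", brick);
            free (brick);
            return 0;
    }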
 int
-glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
-                           rpc_clnt_event_t event,
-                           void *data)
+__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                             rpc_clnt_event_t event, void *data)
 {
         xlator_t              *this = NULL;
         glusterd_conf_t       *conf = NULL;
         int                    ret = 0;
+        char                  *brickid = NULL;
         glusterd_brickinfo_t  *brickinfo = NULL;

-        brickinfo = mydata;
-        if (!brickinfo)
+        brickid = mydata;
+        if (!brickid)
+                return 0;
+
+        ret = get_brickinfo_from_brickid (brickid, &brickinfo);
+        if (ret)
                 return 0;

         this = THIS;
@@ -2716,15 +3654,21 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,

         switch (event) {
         case RPC_CLNT_CONNECT:
-                gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
+                gf_log (this->name, GF_LOG_DEBUG, "Connected to %s:%s",
+                        brickinfo->hostname, brickinfo->path);
                 glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
                 ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);

                 break;

         case RPC_CLNT_DISCONNECT:
-                gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT");
+                if (GF_BRICK_STARTED == brickinfo->status)
+                        gf_log (this->name, GF_LOG_INFO, "Disconnected from "
+                                "%s:%s", brickinfo->hostname, brickinfo->path);
+
                 glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
+                if (rpc_clnt_is_disabled (rpc))
+                        GF_FREE (brickid);
                 break;

         default:
@@ -2737,12 +3681,20 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
 }

 int
-glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
-                         rpc_clnt_event_t event,
-                         void *data)
+glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                           rpc_clnt_event_t event, void *data)
+{
+        return glusterd_big_locked_notify (rpc, mydata, event, data,
+                                           __glusterd_brick_rpc_notify);
+}
+
+int
+__glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                               rpc_clnt_event_t event, void *data)
 {
         xlator_t         *this = NULL;
         glusterd_conf_t  *conf = NULL;
+        char             *server = NULL;
         int               ret = 0;

         this = THIS;
@@ -2750,17 +3702,21 @@ glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
         conf = this->private;
         GF_ASSERT (conf);

+        server = mydata;
+        if (!server)
+                return 0;
+
         switch (event) {
         case RPC_CLNT_CONNECT:
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
-                (void) glusterd_shd_set_running (_gf_true);
+                (void) glusterd_nodesvc_set_online_status (server, _gf_true);
                 ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
                 break;

         case RPC_CLNT_DISCONNECT:
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT");
-                (void) glusterd_shd_set_running (_gf_false);
+                (void) glusterd_nodesvc_set_online_status (server, _gf_false);
                 break;

         default:
@@ -2773,10 +3729,29 @@ glusterd_shd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
 }

 int
-glusterd_friend_remove_notify (glusterd_peerinfo_t *peerinfo, rpcsvc_request_t *req)
+glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                             rpc_clnt_event_t event, void *data)
+{
+        return glusterd_big_locked_notify (rpc, mydata, event, data,
+                                           __glusterd_nodesvc_rpc_notify);
+}
+
+int
+glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
 {
-        int                          ret = -1;
-        glusterd_friend_sm_event_t  *new_event = NULL;
+        int                          ret = -1;
+        glusterd_friend_sm_event_t  *new_event = NULL;
+        glusterd_peerinfo_t         *peerinfo = peerctx->peerinfo;
+        rpcsvc_request_t            *req = peerctx->args.req;
+        char                        *errstr = peerctx->errstr;
+        dict_t                      *dict = NULL;
+
+        GF_ASSERT (peerctx);
+
+        peerinfo = peerctx->peerinfo;
+        req = peerctx->args.req;
+        dict = peerctx->args.dict;
+        errstr = peerctx->errstr;

         ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND,
                                             &new_event);
@@ -2788,12 +3763,12 @@ glusterd_friend_remove_notify (glusterd_peerinfo_t *peerinfo, rpcsvc_request_t *
                         goto out;
                 }

-                glusterd_xfer_cli_probe_resp (req, -1, ENOTCONN,
-                                              peerinfo->hostname, peerinfo->port);
+                glusterd_xfer_cli_probe_resp (req, -1, ENOTCONN, errstr,
+                                              peerinfo->hostname,
+                                              peerinfo->port, dict);
                 new_event->peerinfo = peerinfo;
                 ret = glusterd_friend_sm_inject_event (new_event);
-                glusterd_friend_sm ();

         } else {
                 gf_log ("glusterd", GF_LOG_ERROR,
@@ -2806,17 +3781,15 @@ out:
 }

 int
-glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
-                          rpc_clnt_event_t event,
-                          void *data)
+__glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                            rpc_clnt_event_t event, void *data)
 {
         xlator_t             *this = NULL;
         glusterd_conf_t      *conf = NULL;
         int                   ret = 0;
         glusterd_peerinfo_t  *peerinfo = NULL;
         glusterd_peerctx_t   *peerctx = NULL;
-        uuid_t                owner = {0,};
-        uuid_t               *peer_uuid = NULL;
+        gf_boolean_t          quorum_action = _gf_false;

         peerctx = mydata;
         if (!peerctx)
@@ -2831,17 +3804,9 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
         {
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
                 peerinfo->connected = 1;
-                ret = glusterd_store_peerinfo (peerinfo);
-                if (ret) {
-                        ret = -1;
-                        gf_log (this->name, GF_LOG_ERROR, "Failed to store "
-                                "peerinfo");
-                        break;
-                }
-
-                list_add_tail (&peerinfo->uuid_list, &conf->peers);
+                peerinfo->quorum_action = _gf_true;

-                ret = glusterd_peer_handshake (this, rpc, peerctx);
+                ret = glusterd_peer_dump_version (this, rpc, peerctx);
                 if (ret)
                         gf_log ("", GF_LOG_ERROR, "glusterd handshake failed");
                 break;
@@ -2852,43 +3817,22 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT %d",
                         peerinfo->state.state);

-                peerinfo->connected = 0;
-
-                /*
-                 local glusterd (thinks that it) is the owner of the cluster
-                 lock and 'fails' the operation on the first disconnect from
-                 a peer.
-                */
-                glusterd_get_lock_owner (&owner);
-                if (!uuid_compare (conf->uuid, owner)) {
-                        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_START_UNLOCK,
-                                                           NULL);
-                        if (ret)
-                                gf_log (this->name, GF_LOG_ERROR, "Unable"
-                                        " to enqueue cluster unlock event");
-                        break;
-                }
-
-                peer_uuid = GF_CALLOC (1, sizeof (*peer_uuid), gf_common_mt_char);
-                if (!peer_uuid) {
-                        ret = -1;
-                        break;
+                if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
+                    (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
+                        peerinfo->quorum_contrib = QUORUM_DOWN;
+                        quorum_action = _gf_true;
+                        peerinfo->quorum_action = _gf_false;
                 }

-                uuid_copy (*peer_uuid, peerinfo->uuid);
-                ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP,
-                                                   peer_uuid);
-                if (ret)
-                        gf_log (this->name, GF_LOG_ERROR, "Unable"
-                                " to enque local lock flush event.");
-
-                //Inject friend disconnected here
-                if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
-                        glusterd_friend_remove_notify (peerinfo,
-                                                       peerctx->args.req);
+                /* Remove peer if it is not a friend and connection/handshake
+                 * fails, and notify cli. Happens only during probe.
+                 */
+                if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
+                        glusterd_friend_remove_notify (peerctx);
+                        goto out;
                 }

-                //default_notify (this, GF_EVENT_CHILD_DOWN, NULL);
+                peerinfo->connected = 0;
                 break;
         }
         default:
@@ -2898,12 +3842,23 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                 break;
         }

+out:
         glusterd_friend_sm ();
         glusterd_op_sm ();

+        if (quorum_action)
+                glusterd_do_quorum_action ();
         return ret;
 }

 int
+glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+                          rpc_clnt_event_t event, void *data)
+{
+        return glusterd_big_locked_notify (rpc, mydata, event, data,
+                                           __glusterd_peer_rpc_notify);
+}
+
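
Note: quorum bookkeeping above follows a deferred-action shape: the event
handler only flips the peer's quorum contribution and raises a flag, and
glusterd_do_quorum_action runs exactly once at the end, after the friend and
op state machines. A condensed restatement of that control flow, not a
drop-in replacement for the handler:

    /* Sketch: defer the quorum recomputation out of the event switch. */
    static int
    on_peer_event (glusterd_peerinfo_t *peerinfo, rpc_clnt_event_t event)
    {
            gf_boolean_t quorum_action = _gf_false;

            if ((event == RPC_CLNT_DISCONNECT) &&
                (peerinfo->quorum_contrib != QUORUM_DOWN) &&
                (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
                    /* Only a befriended peer going down changes quorum. */
                    peerinfo->quorum_contrib = QUORUM_DOWN;
                    quorum_action = _gf_true;
            }

            glusterd_friend_sm ();
            glusterd_op_sm ();

            if (quorum_action)
                    glusterd_do_quorum_action ();
            return 0;
    }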
+int
 glusterd_null (rpcsvc_request_t *req)
 {
@@ -2911,11 +3866,11 @@ glusterd_null (rpcsvc_request_t *req)
 }

 rpcsvc_actor_t gd_svc_mgmt_actors[] = {
-        [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, NULL, 0},
-        [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, NULL, 0},
-        [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, NULL, 0},
-        [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, NULL, 0},
-        [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, NULL, 0},
+        [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, 0, DRC_NA},
 };

 struct rpcsvc_program gd_svc_mgmt_prog = {
@@ -2924,14 +3879,15 @@ struct rpcsvc_program gd_svc_mgmt_prog = {
         .progver   = GD_MGMT_VERSION,
         .numactors = GLUSTERD_MGMT_MAXVALUE,
         .actors    = gd_svc_mgmt_actors,
+        .synctask  = _gf_true,
 };

 rpcsvc_actor_t gd_svc_peer_actors[] = {
-        [GLUSTERD_FRIEND_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, NULL, 0},
-        [GLUSTERD_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL, 0},
-        [GLUSTERD_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, NULL, 0},
-        [GLUSTERD_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, NULL, 0},
-        [GLUSTERD_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, NULL, 0},
+        [GLUSTERD_FRIEND_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
+        [GLUSTERD_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_PROBE_QUERY, glusterd_handle_probe_query, NULL, 0, DRC_NA},
+        [GLUSTERD_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, 0, DRC_NA},
+        [GLUSTERD_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, 0, DRC_NA},
+        [GLUSTERD_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, 0, DRC_NA},
 };

 struct rpcsvc_program gd_svc_peer_prog = {
@@ -2940,39 +3896,47 @@ struct rpcsvc_program gd_svc_peer_prog = {
         .progver   = GD_FRIEND_VERSION,
         .numactors = GLUSTERD_FRIEND_MAXVALUE,
         .actors    = gd_svc_peer_actors,
+        .synctask  = _gf_false,
 };

 rpcsvc_actor_t gd_svc_cli_actors[] = {
-        [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, NULL, 0},
-        [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL,NULL, 0},
-        [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL,NULL, 0},
-        [GLUSTER_CLI_DEPROBE] = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, NULL, 0},
-        [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, NULL, 0},
-        [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, NULL, 0},
-        [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, NULL, 0},
-        [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, NULL, 0},
-        [GLUSTER_CLI_LOG_ROTATE] = { "LOG FILENAME", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, NULL, 0},
-        [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, NULL, 0},
-        [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, NULL, 0},
-        [GLUSTER_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_QUOTA] = { "QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota, NULL, NULL, 0},
-        [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, NULL, 1},
-        [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, NULL, 1},
-        [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, NULL, 1},
-        [GLUSTER_CLI_HEAL_VOLUME] = { "HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME, glusterd_handle_cli_heal_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, NULL, 0},
-        [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, NULL, 0},
+        [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_DEPROBE] = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_UUID_RESET] = { "UUID_RESET", GLUSTER_CLI_UUID_RESET, glusterd_handle_cli_uuid_reset, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_LOG_ROTATE] = { "LOG FILENAME", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_QUOTA] = { "QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
+        [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, 1, DRC_NA},
+        [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, 1, DRC_NA},
+        [GLUSTER_CLI_HEAL_VOLUME] = { "HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME, glusterd_handle_cli_heal_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA},
+#ifdef HAVE_BD_XLATOR
+        [GLUSTER_CLI_BD_OP] = {"BD_OP", GLUSTER_CLI_BD_OP, glusterd_handle_cli_bd_op, NULL, 0, DRC_NA},
+#endif
+        [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
+        [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
 };

 struct rpcsvc_program gd_svc_cli_prog = {
@@ -2981,4 +3945,5 @@ struct rpcsvc_program gd_svc_cli_prog = {
         .progver   = GLUSTER_CLI_VERSION,
         .numactors = GLUSTER_CLI_MAXVALUE,
         .actors    = gd_svc_cli_actors,
+        .synctask  = _gf_true,
 };
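
Note: two things change in the actor tables. Each entry's tail becomes
"NULL, 0, DRC_NA": the final field selects a duplicate-request-cache type
(DRC_NA opts out), while the 0/1 before it, set only for GETWD, MOUNT and
UMOUNT, appears to mark handlers reachable from unprivileged ports. Each
program also gains a .synctask flag, which decides whether its actors run
inside a synctask and may therefore sleep on the big lock. A sketch of one
such table and program, with hypothetical names and program number:

    /* Sketch: shape of an actor table and program after this patch. */
    rpcsvc_actor_t demo_actors[] = {
            [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL,
                                     glusterd_null, NULL, 0, DRC_NA},
    };

    struct rpcsvc_program demo_prog = {
            .progname  = "demo mgmt",       /* hypothetical */
            .prognum   = 1298433,           /* hypothetical */
            .progver   = 1,
            .numactors = GLUSTERD_MGMT_MAXVALUE,
            .actors    = demo_actors,
            .synctask  = _gf_true,          /* run actors in a synctask */
    };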