Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c  4187
1 file changed, 2235 insertions(+), 1952 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 42909136a..71d076624 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -1,22 +1,12 @@
/*
- Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
@@ -36,18 +26,21 @@
#include "compat.h"
#include "compat-errno.h"
#include "statedump.h"
+#include "run.h"
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
+#include "glusterd-locks.h"
-#include "glusterd1.h"
-#include "cli1.h"
-#include "rpc-clnt.h"
#include "glusterd1-xdr.h"
+#include "cli1-xdr.h"
+#include "xdr-generic.h"
+#include "rpc-clnt.h"
#include "glusterd-volgen.h"
+#include "glusterd-mountbroker.h"
#include <sys/resource.h>
#include <inttypes.h>
@@ -55,6 +48,39 @@
#include "defaults.c"
#include "common-utils.h"
+#include "globals.h"
+#include "glusterd-syncop.h"
+
+#ifdef HAVE_BD_XLATOR
+#include <lvm2app.h>
+#endif
+
+extern uuid_t global_txn_id;
+
+int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event,
+ void *data, rpc_clnt_notify_t notify_fn)
+{
+ glusterd_conf_t *priv = THIS->private;
+ int ret = -1;
+ synclock_lock (&priv->big_lock);
+ ret = notify_fn (rpc, mydata, event, data);
+ synclock_unlock (&priv->big_lock);
+ return ret;
+}
+
+int glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn)
+{
+ glusterd_conf_t *priv = THIS->private;
+ int ret = -1;
+
+ synclock_lock (&priv->big_lock);
+ ret = actor_fn (req);
+ synclock_unlock (&priv->big_lock);
+
+ return ret;
+}
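For orientation: the two helpers above funnel every RPC notification and actor through priv->big_lock, so glusterd callbacks never run concurrently. The rest of this patch converts each public handler into a thin wrapper that delegates to glusterd_big_locked_handler. A minimal sketch of that pattern (the *_example names are hypothetical; the real conversions appear in the hunks below):

    static int
    __glusterd_handle_example (rpcsvc_request_t *req)
    {
            /* actual work; runs with priv->big_lock held */
            return 0;
    }

    int
    glusterd_handle_example (rpcsvc_request_t *req)
    {
            /* wrapper registered in the rpcsvc actor table */
            return glusterd_big_locked_handler (req, __glusterd_handle_example);
    }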
+
static int
glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
char *hostname, int port,
@@ -76,10 +102,12 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
ret = glusterd_friend_find (uuid, rhost, &peerinfo);
if (ret) {
- ret = glusterd_xfer_friend_add_resp (req, rhost, port, -1,
- GF_PROBE_UNKNOWN_PEER);
- if (friend_req->vols.vols_val)
+ ret = glusterd_xfer_friend_add_resp (req, hostname, rhost, port,
+ -1, GF_PROBE_UNKNOWN_PEER);
+ if (friend_req->vols.vols_val) {
free (friend_req->vols.vols_val);
+ friend_req->vols.vols_val = NULL;
+ }
goto out;
}
@@ -137,19 +165,16 @@ out:
if (0 != ret) {
if (ctx && ctx->hostname)
GF_FREE (ctx->hostname);
- if (ctx)
- GF_FREE (ctx);
+ GF_FREE (ctx);
if (dict) {
if ((!dict->extra_stdfree) &&
friend_req->vols.vols_val)
free (friend_req->vols.vols_val);
dict_unref (dict);
} else {
- if (friend_req->vols.vols_val)
- free (friend_req->vols.vols_val);
+ free (friend_req->vols.vols_val);
}
- if (event)
- GF_FREE (event);
+ GF_FREE (event);
} else {
if (peerinfo && (0 == peerinfo->connected))
ret = GLUSTERD_CONNECTION_AWAITED;
@@ -219,8 +244,7 @@ out:
if (0 != ret) {
if (ctx && ctx->hostname)
GF_FREE (ctx->hostname);
- if (ctx)
- GF_FREE (ctx);
+ GF_FREE (ctx);
}
return ret;
@@ -233,13 +257,14 @@ glusterd_add_peer_detail_to_dict (glusterd_peerinfo_t *peerinfo,
int ret = -1;
char key[256] = {0, };
+ char *peer_uuid_str = NULL;
GF_ASSERT (peerinfo);
GF_ASSERT (friends);
snprintf (key, 256, "friend%d.uuid", count);
- uuid_utoa_r (peerinfo->uuid, peerinfo->uuid_str);
- ret = dict_set_str (friends, key, peerinfo->uuid_str);
+ peer_uuid_str = gd_peer_uuid_str (peerinfo);
+ ret = dict_set_str (friends, key, peer_uuid_str);
if (ret)
goto out;
@@ -253,6 +278,11 @@ glusterd_add_peer_detail_to_dict (glusterd_peerinfo_t *peerinfo,
if (ret)
goto out;
+ snprintf (key, 256, "friend%d.stateId", count);
+ ret = dict_set_int32 (friends, key, peerinfo->state.state);
+ if (ret)
+ goto out;
+
snprintf (key, 256, "friend%d.state", count);
ret = dict_set_str (friends, key,
glusterd_friend_sm_state_name_get(peerinfo->state.state));
@@ -268,10 +298,34 @@ out:
return ret;
}
+struct args_pack {
+ dict_t *dict;
+ int vol_count;
+ int opt_count;
+};
+
+static int
+_build_option_key (dict_t *d, char *k, data_t *v, void *tmp)
+{
+ char reconfig_key[256] = {0, };
+ struct args_pack *pack = NULL;
+ int ret = -1;
+
+ pack = tmp;
+ if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
+ return 0;
+ snprintf (reconfig_key, 256, "volume%d.option.%s",
+ pack->vol_count, k);
+ ret = dict_set_str (pack->dict, reconfig_key, v->data);
+ if (0 == ret)
+ pack->opt_count++;
+
+ return 0;
+}
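_build_option_key above is a dict_foreach callback: it copies each reconfigured volume option into the response dict under "volume<N>.option.<key>", skips the internal GLUSTERD_GLOBAL_OPT_VERSION key, and counts what it copied in opt_count. A condensed sketch of the intended use, mirroring the dict_foreach calls added further down in this function (variable names follow the surrounding code):

    struct args_pack pack = {0,};

    pack.dict      = volumes;   /* dict returned to the CLI */
    pack.vol_count = count;     /* volume index used in the key prefix */
    pack.opt_count = 0;
    dict_foreach (dict, _build_option_key, &pack);       /* per-volume options */
    dict_foreach (priv->opts, _build_option_key, &pack); /* cluster-wide options */
    /* pack.opt_count now holds the number of options packed */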
int
glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
- dict_t *volumes, int count)
+ dict_t *volumes, int count)
{
int ret = -1;
@@ -279,18 +333,20 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo = NULL;
char *buf = NULL;
int i = 1;
- data_pair_t *pairs = NULL;
- char reconfig_key[256] = {0, };
dict_t *dict = NULL;
- data_t *value = NULL;
- int opt_count = 0;
glusterd_conf_t *priv = NULL;
-
+ char *volume_id_str = NULL;
+ struct args_pack pack = {0,};
+ xlator_t *this = NULL;
+#ifdef HAVE_BD_XLATOR
+ int caps = 0;
+#endif
GF_ASSERT (volinfo);
GF_ASSERT (volumes);
- priv = THIS->private;
+ this = THIS;
+ priv = this->private;
GF_ASSERT (priv);
@@ -309,13 +365,38 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
+ /* As of now, the snap volumes are also displayed as part of
+ volume info command. So this change is to display whether
+ the volume is original volume or the snap_volume. If
+ displaying of snap volumes in volume info o/p is not needed
+ this should be removed.
+ */
+ snprintf (key, 256, "volume%d.snap_volume", count);
+ ret = dict_set_int32 (volumes, key, volinfo->is_snap_volume);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "failed to set whether "
+ "the volume is a snap volume or actual volume (%s)",
+ volinfo->volname);
+ goto out;
+ }
+
snprintf (key, 256, "volume%d.brick_count", count);
ret = dict_set_int32 (volumes, key, volinfo->brick_count);
if (ret)
goto out;
- snprintf (key, 256, "volume%d.sub_count", count);
- ret = dict_set_int32 (volumes, key, volinfo->sub_count);
+ snprintf (key, 256, "volume%d.dist_count", count);
+ ret = dict_set_int32 (volumes, key, volinfo->dist_leaf_count);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "volume%d.stripe_count", count);
+ ret = dict_set_int32 (volumes, key, volinfo->stripe_count);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "volume%d.replica_count", count);
+ ret = dict_set_int32 (volumes, key, volinfo->replica_count);
if (ret)
goto out;
@@ -324,8 +405,91 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
+ volume_id_str = gf_strdup (uuid_utoa (volinfo->volume_id));
+ if (!volume_id_str)
+ goto out;
+
+ snprintf (key, sizeof (key), "volume%d.volume_id", count);
+ ret = dict_set_dynstr (volumes, key, volume_id_str);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "volume%d.rebalance", count);
+ ret = dict_set_int32 (volumes, key, volinfo->rebal.defrag_cmd);
+ if (ret)
+ goto out;
+
+#ifdef HAVE_BD_XLATOR
+ if (volinfo->caps) {
+ caps = 0;
+ snprintf (key, 256, "volume%d.xlator0", count);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ if (volinfo->caps & CAPS_BD)
+ snprintf (buf, 256, "BD");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+
+ if (volinfo->caps & CAPS_THIN) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "thin");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ if (volinfo->caps & CAPS_OFFLOAD_COPY) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "offload_copy");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "offload_snapshot");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ }
+#endif
+
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
char brick[1024] = {0,};
+ char brick_uuid[64] = {0,};
snprintf (key, 256, "volume%d.brick%d", count, i);
snprintf (brick, 1024, "%s:%s", brickinfo->hostname,
brickinfo->path);
@@ -333,6 +497,25 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
ret = dict_set_dynstr (volumes, key, buf);
if (ret)
goto out;
+ snprintf (key, 256, "volume%d.brick%d.uuid", count, i);
+ snprintf (brick_uuid, 64, "%s", uuid_utoa (brickinfo->uuid));
+ buf = gf_strdup (brick_uuid);
+ if (!buf)
+ goto out;
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret)
+ goto out;
+
+#ifdef HAVE_BD_XLATOR
+ if (volinfo->caps & CAPS_BD) {
+ snprintf (key, 256, "volume%d.vg%d", count, i);
+ snprintf (brick, 1024, "%s", brickinfo->vg);
+ buf = gf_strdup (brick);
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret)
+ goto out;
+ }
+#endif
i++;
}
@@ -342,25 +525,14 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
goto out;
}
- pairs = dict->members_list;
+ pack.dict = volumes;
+ pack.vol_count = count;
+ pack.opt_count = 0;
+ dict_foreach (dict, _build_option_key, (void *) &pack);
+ dict_foreach (priv->opts, _build_option_key, &pack);
- while (pairs) {
- if (1 == glusterd_check_option_exists (pairs->key, NULL)) {
- value = pairs->value;
- if (!value)
- continue;
-
- snprintf (reconfig_key, 256, "volume%d.option.%s", count,
- pairs->key);
- ret = dict_set_str (volumes, reconfig_key, value->data);
- if (!ret)
- opt_count++;
- }
- pairs = pairs->next;
- }
-
- snprintf (key, 256, "volume%d.opt_count", count);
- ret = dict_set_int32 (volumes, key, opt_count);
+ snprintf (key, 256, "volume%d.opt_count", pack.vol_count);
+ ret = dict_set_int32 (volumes, key, pack.opt_count);
out:
return ret;
}
@@ -370,13 +542,18 @@ glusterd_friend_find (uuid_t uuid, char *hostname,
glusterd_peerinfo_t **peerinfo)
{
int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
if (uuid) {
ret = glusterd_friend_find_by_uuid (uuid, peerinfo);
if (ret) {
- gf_log ("glusterd", GF_LOG_INFO,
- "Unable to find peer by uuid");
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unable to find peer by uuid: %s",
+ uuid_utoa (uuid));
} else {
goto out;
}
@@ -387,7 +564,7 @@ glusterd_friend_find (uuid_t uuid, char *hostname,
ret = glusterd_friend_find_by_hostname (hostname, peerinfo);
if (ret) {
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_log (this->name, GF_LOG_DEBUG,
"Unable to find hostname: %s", hostname);
} else {
goto out;
@@ -399,54 +576,205 @@ out:
}
int32_t
-glusterd_op_txn_begin ()
+glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
+ char *err_str, size_t err_len)
{
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- int32_t locked = 0;
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t locked = 0;
+ char *tmp = NULL;
+ char *volname = NULL;
+ uuid_t *txn_id = NULL;
+ uuid_t *originator_uuid = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
- priv = THIS->private;
+ GF_ASSERT (req);
+ GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
+ GF_ASSERT (NULL != ctx);
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
GF_ASSERT (priv);
- ret = glusterd_lock (priv->uuid);
+ dict = ctx;
+
+ /* Generate a transaction-id for this operation and
+ * save it in the dict. This transaction id distinguishes
+ * each transaction, and helps separate opinfos in the
+ * op state machine. */
+ txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!txn_id)
+ goto out;
+
+ uuid_generate (*txn_id);
+ ret = dict_set_bin (dict, "transaction_id",
+ txn_id, sizeof(*txn_id));
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "Unable to acquire local lock, ret: %d", ret);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set transaction id.");
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (*txn_id));
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_CALLOC (1, sizeof(uuid_t),
+ gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
goto out;
}
+ uuid_copy (*originator_uuid, MY_UUID);
+ ret = dict_set_bin (dict, "originator_uuid",
+ originator_uuid, sizeof (uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set originator uuid.");
+ goto out;
+ }
+
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
+ if (priv->op_version < 3) {
+ ret = glusterd_lock (MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock on localhost, ret: %d",
+ ret);
+ snprintf (err_str, err_len,
+ "Another transaction is in progress. "
+ "Please try again after sometime.");
+ goto out;
+ }
+ } else {
+ /* If no volname is given as a part of the command, locks will
+ * not be held */
+ ret = dict_get_str (dict, "volname", &tmp);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Failed to get volume "
+ "name");
+ goto local_locking_done;
+ } else {
+ /* Use a copy of volname, as cli response will be
+ * sent before the unlock, and the volname in the
+ * dict, might be removed */
+ volname = gf_strdup (tmp);
+ if (!volname)
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock for %s", volname);
+ snprintf (err_str, err_len,
+ "Another transaction is in progress for %s. "
+ "Please try again after sometime.", volname);
+ goto out;
+ }
+ }
+
locked = 1;
- gf_log ("glusterd", GF_LOG_INFO, "Acquired local lock");
+ gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost");
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_START_LOCK, NULL);
+local_locking_done:
- gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ /* If no volname is given as a part of the command, locks will
+ * not be held, hence sending stage event. */
+ if (volname)
+ event_type = GD_OP_EVENT_START_LOCK;
+ else {
+ txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
+ event_type = GD_OP_EVENT_ALL_ACC;
+ }
+
+ /* Save opinfo for this transaction with the transaction id */
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req);
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ if (ctx)
+ dict_unref (ctx);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to acquire cluster"
+ " lock.");
+ goto out;
+ }
out:
- if (locked && ret)
- glusterd_unlock (priv->uuid);
+ if (locked && ret) {
+ /* Based on the op-version, we release the
+ * cluster or mgmt_v3 lock */
+ if (priv->op_version < 3)
+ glusterd_unlock (MY_UUID);
+ else {
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+ ret = -1;
+ }
+ }
+
+ if (volname)
+ GF_FREE (volname);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
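glusterd_op_txn_begin now takes the decoded CLI dict plus an error buffer; on failure the buffer carries the user-facing reason ("Another transaction is in progress ...") that the handler returns to the CLI. A minimal caller sketch, assuming req and dict were already set up by the handler and using GD_OP_START_VOLUME purely as an example op:

    char err_str[2048] = {0,};
    int  ret           = -1;

    ret = glusterd_op_txn_begin (req, GD_OP_START_VOLUME, dict,
                                 err_str, sizeof (err_str));
    if (ret) {
            /* err_str is filled by glusterd_op_txn_begin on lock failure */
            gf_log (THIS->name, GF_LOG_ERROR, "%s", err_str);
    }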
int
-glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+__glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
- gd1_mgmt_cluster_lock_req lock_req = {{0},};
- int32_t ret = -1;
- glusterd_op_lock_ctx_t *ctx = NULL;
+ dict_t *op_ctx = NULL;
+ int32_t ret = -1;
+ gd1_mgmt_cluster_lock_req lock_req = {{0},};
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_op_t op = GD_OP_EVENT_LOCK;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+ uuid_t *txn_id = &global_txn_id;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_cluster_lock_req (req->msg[0], &lock_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
+ "request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO,
- "Received LOCK from uuid: %s", uuid_utoa (lock_req.uuid));
+ gf_log (this->name, GF_LOG_DEBUG, "Received LOCK from uuid: %s",
+ uuid_utoa (lock_req.uuid));
+ if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
@@ -457,11 +785,32 @@ glusterd_handle_cluster_lock (rpcsvc_request_t *req)
uuid_copy (ctx->uuid, lock_req.uuid);
ctx->req = req;
+ ctx->dict = NULL;
+
+ op_ctx = dict_new ();
+ if (!op_ctx) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set new dict");
+ goto out;
+ }
+
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req);
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, ctx);
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ dict_unref (txn_op_info.op_ctx);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_LOCK");
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
glusterd_friend_sm ();
glusterd_op_sm ();
@@ -470,117 +819,177 @@ out:
}
int
+glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cluster_lock);
+}
+
+int
glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
glusterd_op_t op, uuid_t uuid,
char *buf_val, size_t buf_len,
gf_gld_mem_types_t mem_type,
glusterd_req_ctx_t **req_ctx_out)
{
- int ret = -1;
- glusterd_req_ctx_t *req_ctx = NULL;
- char str[50] = {0,};
- dict_t *dict = NULL;
- char volname[GLUSTERD_MAX_VOLUME_NAME] = {0};
- char *dup_volname = NULL;
+ int ret = -1;
+ char str[50] = {0,};
+ glusterd_req_ctx_t *req_ctx = NULL;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
uuid_unparse (uuid, str);
- gf_log ("glusterd", GF_LOG_INFO,
- "Received op from uuid: %s", str);
+ gf_log (this->name, GF_LOG_DEBUG, "Received op from uuid %s", str);
dict = dict_new ();
if (!dict)
goto out;
- req_ctx = GF_CALLOC (1, sizeof (*req_ctx), mem_type);
+ req_ctx = GF_CALLOC (1, sizeof (*req_ctx), mem_type);
if (!req_ctx) {
goto out;
}
uuid_copy (req_ctx->uuid, uuid);
req_ctx->op = op;
- if (GD_OP_DELETE_VOLUME == op) {
- strncpy (volname, buf_val, buf_len);
- dup_volname = gf_strdup (volname);
- if (dup_volname) {
- ret = dict_set_dynstr (dict, "volname", dup_volname);
- if (ret) {
- gf_log ("", GF_LOG_WARNING,
- "failed to set volume name from payload");
- goto out;
- }
- } else {
- ret = -1;
- goto out;
- }
- } else {
- ret = dict_unserialize (buf_val, buf_len, &dict);
-
- if (ret) {
- gf_log ("", GF_LOG_WARNING,
- "failed to unserialize the dictionary");
- goto out;
- }
+ ret = dict_unserialize (buf_val, buf_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
}
req_ctx->dict = dict;
- req_ctx->req = rpc_req;
+ req_ctx->req = rpc_req;
*req_ctx_out = req_ctx;
ret = 0;
out:
if (ret) {
if (dict)
dict_unref (dict);
- if (req_ctx)
- GF_FREE (req_ctx);
+ GF_FREE (req_ctx);
}
return ret;
}
int
-glusterd_handle_stage_op (rpcsvc_request_t *req)
+__glusterd_handle_stage_op (rpcsvc_request_t *req)
{
int32_t ret = -1;
glusterd_req_ctx_t *req_ctx = NULL;
gd1_mgmt_stage_op_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_op_sm_state_info_t state;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_stage_op_req (req->msg[0], &op_req)) {
- //failed to decode msg;
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode stage "
+ "request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
op_req.buf.buf_val, op_req.buf.buf_len,
gf_gld_mt_op_stage_ctx_t, &req_ctx);
if (ret)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP, req_ctx);
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
+ /* In cases where there is no volname, the receivers won't have a
+ * transaction opinfo created, as for those operations, the locking
+ * phase where the transaction opinfos are created, won't be called. */
+ ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "No transaction's opinfo set");
+
+ state.state = GD_OP_STATE_LOCKED;
+ glusterd_txn_opinfo_init (&txn_op_info, &state,
+ &op_req.op, req_ctx->dict, req);
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ dict_unref (req_ctx->dict);
+ goto out;
+ }
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP,
+ txn_id, req_ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_STAGE_OP");
out:
- if (op_req.buf.buf_val)
- free (op_req.buf.buf_val);//malloced by xdr
+ free (op_req.buf.buf_val);//malloced by xdr
glusterd_friend_sm ();
glusterd_op_sm ();
return ret;
}
int
-glusterd_handle_commit_op (rpcsvc_request_t *req)
+glusterd_handle_stage_op (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_stage_op);
+}
+
+
+int
+__glusterd_handle_commit_op (rpcsvc_request_t *req)
{
int32_t ret = -1;
glusterd_req_ctx_t *req_ctx = NULL;
gd1_mgmt_commit_op_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_commit_op_req (req->msg[0], &op_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode commit "
+ "request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
//the structures should always be equal
GF_ASSERT (sizeof (gd1_mgmt_commit_op_req) == sizeof (gd1_mgmt_stage_op_req));
ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
@@ -589,69 +998,124 @@ glusterd_handle_commit_op (rpcsvc_request_t *req)
if (ret)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, req_ctx);
- if (ret)
- goto out;
- ret = glusterd_op_init_ctx (op_req.op);
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP,
+ txn_id, req_ctx);
out:
- if (op_req.buf.buf_val)
- free (op_req.buf.buf_val);//malloced by xdr
+ free (op_req.buf.buf_val);//malloced by xdr
glusterd_friend_sm ();
glusterd_op_sm ();
return ret;
}
+
int
-glusterd_handle_cli_probe (rpcsvc_request_t *req)
+glusterd_handle_commit_op (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_commit_op);
+}
+
+int
+__glusterd_handle_cli_probe (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_probe_req cli_req = {0,};
- glusterd_peerinfo_t *peerinfo = NULL;
- gf_boolean_t run_fsm = _gf_true;
+ gf_cli_req cli_req = {{0,},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gf_boolean_t run_fsm = _gf_true;
+ xlator_t *this = NULL;
+ char *bind_name = NULL;
+ dict_t *dict = NULL;
+ char *hostname = NULL;
+ int port = 0;
+
GF_ASSERT (req);
+ this = THIS;
- if (!gf_xdr_to_cli_probe_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
gf_log ("", GF_LOG_ERROR, "xdr decoding error");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_cmd_log ("peer probe", " on host %s:%d", cli_req.hostname,
- cli_req.port);
+ if (cli_req.dict.dict_len) {
+ dict = dict_new ();
+
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ }
+
+ ret = dict_get_str (dict, "hostname", &hostname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get hostname");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "port", &port);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get port");
+ goto out;
+ }
+
+ if (glusterd_is_any_volume_in_server_quorum (this) &&
+ !does_gd_meet_server_quorum (this)) {
+ glusterd_xfer_cli_probe_resp (req, -1, GF_PROBE_QUORUM_NOT_MET,
+ NULL, hostname, port, dict);
+ gf_log (this->name, GF_LOG_ERROR, "Quorum does not meet, "
+ "rejecting operation");
+ ret = 0;
+ goto out;
+ }
+
gf_log ("glusterd", GF_LOG_INFO, "Received CLI probe req %s %d",
- cli_req.hostname, cli_req.port);
+ hostname, port);
- if (!(ret = glusterd_is_local_addr(cli_req.hostname))) {
+ if (dict_get_str(this->options,"transport.socket.bind-address",
+ &bind_name) == 0) {
+ gf_log ("glusterd", GF_LOG_DEBUG,
+ "only checking probe address vs. bind address");
+ ret = gf_is_same_address (bind_name, hostname);
+ }
+ else {
+ ret = gf_is_local_addr (hostname);
+ }
+ if (ret) {
glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST,
- cli_req.hostname, cli_req.port);
+ NULL, hostname, port, dict);
+ ret = 0;
goto out;
}
- if (!(ret = glusterd_friend_find_by_hostname(cli_req.hostname,
- &peerinfo))) {
- if (strcmp (peerinfo->hostname, cli_req.hostname) == 0) {
+ if (!(ret = glusterd_friend_find_by_hostname (hostname, &peerinfo))) {
+ if (strcmp (peerinfo->hostname, hostname) == 0) {
- gf_log ("glusterd", GF_LOG_DEBUG, "Probe host %s port %d"
- " already a peer", cli_req.hostname, cli_req.port);
+ gf_log ("glusterd", GF_LOG_DEBUG, "Probe host %s port "
+ "%d already a peer", hostname, port);
glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND,
- cli_req.hostname, cli_req.port);
+ NULL, hostname, port,
+ dict);
goto out;
}
}
- ret = glusterd_probe_begin (req, cli_req.hostname, cli_req.port);
-
- gf_cmd_log ("peer probe","on host %s:%d %s",cli_req.hostname, cli_req.port,
- (ret) ? "FAILED" : "SUCCESS");
+ ret = glusterd_probe_begin (req, hostname, port, dict);
if (ret == GLUSTERD_CONNECTION_AWAITED) {
//fsm should be run after connection establishes
run_fsm = _gf_false;
ret = 0;
}
+
out:
- if (cli_req.hostname)
- free (cli_req.hostname);//its malloced by xdr
+ free (cli_req.dict.dict_val);
if (run_fsm) {
glusterd_friend_sm ();
@@ -662,14 +1126,24 @@ out:
}
int
-glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
+glusterd_handle_cli_probe (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_cli_probe);
+}
+
+int
+__glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_probe_req cli_req = {0,};
- uuid_t uuid = {0};
- int op_errno = 0;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ gf_cli_req cli_req = {{0,},};
+ uuid_t uuid = {0};
+ int op_errno = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ dict_t *dict = NULL;
+ char *hostname = NULL;
+ int port = 0;
+ int flags = 0;
this = THIS;
GF_ASSERT (this);
@@ -677,55 +1151,99 @@ glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
GF_ASSERT (priv);
GF_ASSERT (req);
- if (!gf_xdr_to_cli_probe_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req,
+ (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
+ if (cli_req.dict.dict_len) {
+ dict = dict_new ();
+
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ }
+
gf_log ("glusterd", GF_LOG_INFO, "Received CLI deprobe req");
- ret = glusterd_hostname_to_uuid (cli_req.hostname, uuid);
+ ret = dict_get_str (dict, "hostname", &hostname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get hostname");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "port", &port);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get port");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "flags", &flags);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get flags");
+ goto out;
+ }
+
+ ret = glusterd_hostname_to_uuid (hostname, uuid);
if (ret) {
op_errno = GF_DEPROBE_NOT_FRIEND;
goto out;
}
- if (!uuid_compare (uuid, priv->uuid)) {
+ if (!uuid_compare (uuid, MY_UUID)) {
op_errno = GF_DEPROBE_LOCALHOST;
ret = -1;
goto out;
}
- if (!uuid_is_null (uuid)) {
- ret = glusterd_all_volume_cond_check (
- glusterd_friend_brick_belongs,
- -1, &uuid);
- if (ret) {
- op_errno = GF_DEPROBE_BRICK_EXIST;
+ if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
+ if (!uuid_is_null (uuid)) {
+ /* Check if peers are connected, except peer being detached*/
+ if (!glusterd_chk_peers_connected_befriended (uuid)) {
+ ret = -1;
+ op_errno = GF_DEPROBE_FRIEND_DOWN;
+ goto out;
+ }
+ ret = glusterd_all_volume_cond_check (
+ glusterd_friend_brick_belongs,
+ -1, &uuid);
+ if (ret) {
+ op_errno = GF_DEPROBE_BRICK_EXIST;
+ goto out;
+ }
+ }
+
+ if (glusterd_is_any_volume_in_server_quorum (this) &&
+ !does_gd_meet_server_quorum (this)) {
+ gf_log (this->name, GF_LOG_ERROR, "Quorum does not "
+ "meet, rejecting operation");
+ ret = -1;
+ op_errno = GF_DEPROBE_QUORUM_NOT_MET;
goto out;
}
}
if (!uuid_is_null (uuid)) {
- ret = glusterd_deprobe_begin (req, cli_req.hostname,
- cli_req.port, uuid);
+ ret = glusterd_deprobe_begin (req, hostname, port, uuid, dict);
} else {
- ret = glusterd_deprobe_begin (req, cli_req.hostname,
- cli_req.port, NULL);
+ ret = glusterd_deprobe_begin (req, hostname, port, NULL, dict);
}
- gf_cmd_log ("peer deprobe", "on host %s:%d %s", cli_req.hostname,
- cli_req.port, (ret) ? "FAILED" : "SUCCESS");
out:
+ free (cli_req.dict.dict_val);
+
if (ret) {
- ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno,
- cli_req.hostname);
+ ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno, NULL,
+ hostname, dict);
}
- if (cli_req.hostname)
- free (cli_req.hostname);//malloced by xdr
-
glusterd_friend_sm ();
glusterd_op_sm ();
@@ -733,7 +1251,13 @@ out:
}
int
-glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
+glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_cli_deprobe);
+}
+
+int
+__glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf1_cli_peer_list_req cli_req = {0,};
@@ -741,7 +1265,9 @@ glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
GF_ASSERT (req);
- if (!gf_xdr_to_cli_peer_list_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req,
+ (xdrproc_t)xdr_gf1_cli_peer_list_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
@@ -779,15 +1305,24 @@ out:
}
int
-glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_list_friends);
+}
+
+int
+__glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_get_vol_req cli_req = {0,};
+ gf_cli_req cli_req = {{0,}};
dict_t *dict = NULL;
+ int32_t flags = 0;
GF_ASSERT (req);
- if (!gf_xdr_to_cli_get_vol_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
@@ -812,7 +1347,13 @@ glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
}
}
- ret = glusterd_get_volumes (req, dict, cli_req.flags);
+ ret = dict_get_int32 (dict, "flags", &flags);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "failed to get flags");
+ goto out;
+ }
+
+ ret = glusterd_get_volumes (req, dict, flags);
out:
if (dict)
@@ -824,988 +1365,313 @@ out:
return ret;
}
-int32_t
-glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
- gf_boolean_t is_ctx_free)
+int
+glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
{
- int ret = -1;
- GF_ASSERT (req);
- GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
- GF_ASSERT ((NULL != ctx) || (_gf_false == is_ctx_free));
-
- glusterd_op_set_op (op);
- glusterd_op_set_ctx (op, ctx);
- glusterd_op_set_ctx_free (op, is_ctx_free);
- glusterd_op_set_req (req);
-
- ret = glusterd_op_txn_begin ();
-
- return ret;
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_get_volume);
}
int
-glusterd_handle_create_volume (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf1_cli_create_vol_req cli_req = {0,};
- dict_t *dict = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- char *brick = NULL;
- char *bricks = NULL;
- char *volname = NULL;
- int brick_count = 0;
- char *tmpptr = NULL;
- int i = 0;
- char *brick_list = NULL;
- void *cli_rsp = NULL;
- char err_str[2048] = {0,};
- gf1_cli_create_vol_rsp rsp = {0,};
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- char *free_ptr = NULL;
- char *trans_type = NULL;
- uuid_t volume_id = {0,};
- glusterd_brickinfo_t *tmpbrkinfo = NULL;
- glusterd_volinfo_t tmpvolinfo = {{0},};
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_CREATE_VOLUME;
+__glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ uuid_t uuid = {0};
+ gf_cli_rsp rsp = {0,};
+ gf_cli_req cli_req = {{0,}};
+ char msg_str[2048] = {0,};
GF_ASSERT (req);
- INIT_LIST_HEAD (&tmpvolinfo.bricks);
-
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- snprintf (err_str, sizeof (err_str), "Another operation is in "
- "progress, please retry after some time");
- goto out;
- }
-
this = THIS;
- GF_ASSERT(this);
-
priv = this->private;
+ GF_ASSERT (priv);
- ret = -1;
- if (!gf_xdr_to_cli_create_vol_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
- snprintf (err_str, sizeof (err_str), "Garbage args received");
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received create volume req");
+ gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid reset req");
- if (cli_req.bricks.bricks_len) {
+ if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new ();
- ret = dict_unserialize (cli_req.bricks.bricks_val,
- cli_req.bricks.bricks_len,
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
&dict);
if (ret < 0) {
gf_log ("glusterd", GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
+ snprintf (msg_str, sizeof (msg_str), "Unable to decode "
"the buffer");
goto out;
} else {
- dict->extra_stdfree = cli_req.bricks.bricks_val;
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
}
- ret = dict_get_str (dict, "volname", &volname);
-
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "name");
- goto out;
- }
- gf_cmd_log ("Volume create", "on volname: %s attempted", volname);
-
- if ((ret = glusterd_check_volume_exists (volname))) {
- snprintf(err_str, 2048, "Volume %s already exists", volname);
- gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str);
+ /* In the above section if dict_unserialize is successful, ret is set
+ * to zero.
+ */
+ ret = -1;
+ // Do not allow peer reset if there are any volumes in the cluster
+ if (!list_empty (&priv->volumes)) {
+ snprintf (msg_str, sizeof (msg_str), "volumes are already "
+ "present in the cluster. Resetting uuid is not "
+ "allowed");
+ gf_log (this->name, GF_LOG_WARNING, "%s", msg_str);
goto out;
}
- ret = dict_get_int32 (dict, "count", &brick_count);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get count");
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "brick count");
+ // Do not allow peer reset if trusted storage pool is already formed
+ if (!list_empty (&priv->peers)) {
+ snprintf (msg_str, sizeof (msg_str),"trusted storage pool "
+ "has been already formed. Please detach this peer "
+ "from the pool and reset its uuid.");
+ gf_log (this->name, GF_LOG_WARNING, "%s", msg_str);
goto out;
}
- ret = dict_get_str (dict, "transport", &trans_type);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get transport-type");
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "transport-type");
- goto out;
- }
- ret = dict_get_str (dict, "bricks", &bricks);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get bricks");
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "bricks");
- goto out;
- }
+ uuid_copy (uuid, priv->uuid);
+ ret = glusterd_uuid_generate_save ();
- uuid_generate (volume_id);
- free_ptr = gf_strdup (uuid_utoa (volume_id));
- ret = dict_set_dynstr (dict, "volume-id", free_ptr);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "unable to set volume-id");
- snprintf (err_str, sizeof (err_str), "Unable to set volume "
- "id");
+ if (!uuid_compare (uuid, MY_UUID)) {
+ snprintf (msg_str, sizeof (msg_str), "old uuid and the new uuid"
+ " are same. Try gluster peer reset again");
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg_str);
+ ret = -1;
goto out;
}
- free_ptr = NULL;
-
- if (bricks) {
- brick_list = gf_strdup (bricks);
- free_ptr = brick_list;
- }
-
- gf_cmd_log ("Volume create", "on volname: %s type:%s count:%d bricks:%s",
- cli_req.volname, ((cli_req.type == 0)? "DEFAULT":
- ((cli_req.type == 1)? "STRIPE":"REPLICATE")), cli_req.count,
- bricks);
-
-
- while ( i < brick_count) {
- i++;
- brick= strtok_r (brick_list, " \n", &tmpptr);
- brick_list = tmpptr;
- ret = glusterd_brickinfo_from_brick (brick, &brickinfo);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get "
- "brick info from brick %s", brick);
- goto out;
- }
-
- ret = glusterd_new_brick_validate (brick, brickinfo, err_str,
- sizeof (err_str));
- if (ret)
- goto out;
- ret = glusterd_volume_brickinfo_get (brickinfo->uuid,
- brickinfo->hostname,
- brickinfo->path,
- &tmpvolinfo, &tmpbrkinfo);
- if (!ret) {
- ret = -1;
- snprintf (err_str, sizeof (err_str), "Brick: %s:%s, %s"
- " in the arguments mean the same",
- tmpbrkinfo->hostname, tmpbrkinfo->path,
- brick);
- goto out;
- }
- list_add_tail (&brickinfo->brick_list, &tmpvolinfo.bricks);
- brickinfo = NULL;
- }
-
- ret = glusterd_op_begin (req, GD_OP_CREATE_VOLUME, dict, _gf_true);
- gf_cmd_log ("Volume create", "on volname: %s %s", volname,
- (ret != 0) ? "FAILED": "SUCCESS");
out:
if (ret) {
- if (dict)
- dict_unref (dict);
rsp.op_ret = -1;
- rsp.op_errno = 0;
- rsp.volname = "";
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str), "Operation failed");
- rsp.op_errstr = err_str;
- cli_rsp = &rsp;
- glusterd_submit_reply(req, cli_rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_create_vol_rsp);
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
-
- ret = 0; //Client response sent, prevent second response
- }
-
- if (free_ptr)
- GF_FREE(free_ptr);
-
- glusterd_volume_brickinfos_delete (&tmpvolinfo);
- if (brickinfo)
- glusterd_brickinfo_delete (brickinfo);
- if (cli_req.volname)
- free (cli_req.volname); // its a malloced by xdr
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- return ret;
-}
-
-int
-glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf1_cli_start_vol_req cli_req = {0,};
- int lock_fail = 0;
- char *dup_volname = NULL;
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_START_VOLUME;
-
- GF_ASSERT (req);
-
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d", ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_start_vol_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
+ if (msg_str[0] == '\0')
+ snprintf (msg_str, sizeof (msg_str), "Operation "
+ "failed");
+ rsp.op_errstr = msg_str;
+ ret = 0;
+ } else {
+ rsp.op_errstr = "";
}
- gf_log ("glusterd", GF_LOG_INFO, "Received start vol req"
- "for volume %s", cli_req.volname);
-
- dict = dict_new ();
-
- if (!dict)
- goto out;
-
- dup_volname = gf_strdup (cli_req.volname);
- if (!dup_volname)
- goto out;
-
- ret = dict_set_dynstr (dict, "volname", dup_volname);
- if (ret)
- goto out;
-
- ret = dict_set_int32 (dict, "flags", cli_req.flags);
- if (ret)
- goto out;
- ret = glusterd_op_begin (req, GD_OP_START_VOLUME, dict, _gf_true);
-
- gf_cmd_log ("volume start","on volname: %s %s", cli_req.volname,
- ((ret == 0) ? "SUCCESS": "FAILED"));
-
-out:
- if (ret && dict)
- dict_unref (dict);
- if (cli_req.volname)
- free (cli_req.volname); //its malloced by xdr
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if (ret) {
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
-
- }
+ glusterd_to_cli (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp, dict);
return ret;
}
-
int
-glusterd_handle_cli_stop_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_stop_vol_req cli_req = {0,};
- int lock_fail = 0;
- char *dup_volname = NULL;
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_STOP_VOLUME;
-
- GF_ASSERT (req);
-
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_stop_vol_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_log ("glusterd", GF_LOG_INFO, "Received stop vol req"
- "for volume %s", cli_req.volname);
-
- dict = dict_new ();
-
- if (!dict)
- goto out;
-
- dup_volname = gf_strdup (cli_req.volname);
- if (!dup_volname)
- goto out;
-
- ret = dict_set_dynstr (dict, "volname", dup_volname);
- if (ret)
- goto out;
-
- ret = dict_set_int32 (dict, "flags", cli_req.flags);
- if (ret)
- goto out;
-
- ret = glusterd_op_begin (req, GD_OP_STOP_VOLUME, dict, _gf_true);
- gf_cmd_log ("Volume stop","on volname: %s %s", cli_req.volname,
- ((ret)?"FAILED":"SUCCESS"));
-
-out:
- if (cli_req.volname)
- free (cli_req.volname); //its malloced by xdr
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if (ret) {
- if (dict)
- dict_unref (dict);
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
- }
-
- return ret;
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_uuid_reset);
}
int
-glusterd_handle_cli_delete_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
{
- int lock_fail = 0;
- int32_t ret = -1;
- gf1_cli_delete_vol_req cli_req = {0,};
- glusterd_op_delete_volume_ctx_t *ctx = NULL;
- glusterd_op_t cli_op = GD_OP_DELETE_VOLUME;
+ int ret = -1;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_cli_rsp rsp = {0,};
+ gf_cli_req cli_req = {{0,}};
+ char msg_str[2048] = {0,};
+ char uuid_str[64] = {0,};
GF_ASSERT (req);
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_delete_vol_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
- gf_cmd_log ("Volume delete","on volname: %s attempted", cli_req.volname);
-
- gf_log ("glusterd", GF_LOG_INFO, "Received delete vol req"
- "for volume %s", cli_req.volname);
-
-
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_delete_volume_ctx_t);
- if (!ctx)
- goto out;
-
- strncpy (ctx->volume_name, cli_req.volname, GD_VOLUME_NAME_MAX);
-
- ret = glusterd_op_begin (req, GD_OP_DELETE_VOLUME, ctx, _gf_true);
- gf_cmd_log ("Volume delete", "on volname: %s %s", cli_req.volname,
- ((ret) ? "FAILED" : "SUCCESS"));
-
-out:
- if (cli_req.volname)
- free (cli_req.volname); //its malloced by xdr
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if (ret) {
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
- }
-
- return ret;
-}
-
-int
-glusterd_handle_add_brick (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf1_cli_add_brick_req cli_req = {0,};
- dict_t *dict = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- char *brick = NULL;
- char *bricks = NULL;
- char *volname = NULL;
- int brick_count = 0;
- char *tmpptr = NULL;
- int i = 0;
- char *brick_list = NULL;
- void *cli_rsp = NULL;
- char err_str[2048] = {0,};
- gf1_cli_add_brick_rsp rsp = {0,};
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- char *free_ptr = NULL;
- glusterd_brickinfo_t *tmpbrkinfo = NULL;
- glusterd_volinfo_t tmpvolinfo = {{0},};
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_ADD_BRICK;
-
this = THIS;
- GF_ASSERT(this);
-
priv = this->private;
+ GF_ASSERT (priv);
- GF_ASSERT (req);
-
- INIT_LIST_HEAD (&tmpvolinfo.bricks);
-
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- snprintf (err_str, sizeof (err_str), "Another operation is in "
- "progress, please retry after some time");
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_add_brick_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
- snprintf (err_str, sizeof (err_str), "Garbage args received");
goto out;
}
- gf_cmd_log ("Volume add-brick", "on volname: %s attempted",
- cli_req.volname);
- gf_log ("glusterd", GF_LOG_INFO, "Received add brick req");
+ gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid get req");
- if (cli_req.bricks.bricks_len) {
- /* Unserialize the dictionary */
+ if (cli_req.dict.dict_len) {
dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
- ret = dict_unserialize (cli_req.bricks.bricks_val,
- cli_req.bricks.bricks_len,
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
&dict);
if (ret < 0) {
gf_log ("glusterd", GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
+ snprintf (msg_str, sizeof (msg_str), "Unable to decode "
"the buffer");
goto out;
- } else {
- dict->extra_stdfree = cli_req.bricks.bricks_val;
- }
- }
- ret = dict_get_str (dict, "volname", &volname);
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "name");
- goto out;
+ }
}
- if (!(ret = glusterd_check_volume_exists (volname))) {
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
ret = -1;
- snprintf(err_str, 2048, "Volume %s does not exist", volname);
- gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str);
goto out;
}
- ret = dict_get_int32 (dict, "count", &brick_count);
+ uuid_utoa_r (MY_UUID, uuid_str);
+ ret = dict_set_str (rsp_dict, "uuid", uuid_str);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get count");
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "brick count");
- goto out;
- }
-
- if (!(ret = glusterd_volinfo_find (volname, &volinfo))) {
- if (volinfo->type == GF_CLUSTER_TYPE_NONE)
- goto brick_val;
- if (!brick_count || !volinfo->sub_count)
- goto brick_val;
-
- /* If the brick count is less than sub_count then, allow add-brick only for
- plain replicate volume since in plain stripe brick_count becoming less than
- the sub_count is not allowed */
- if (volinfo->brick_count < volinfo->sub_count && (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) ) {
- if ((volinfo->sub_count - volinfo->brick_count) == brick_count)
- goto brick_val;
- }
-
- if ((brick_count % volinfo->sub_count) != 0) {
- snprintf(err_str, 2048, "Incorrect number of bricks"
- " supplied %d for type %s with count %d",
- brick_count, (volinfo->type == 1)? "STRIPE":
- "REPLICATE", volinfo->sub_count);
- gf_log("glusterd", GF_LOG_ERROR, "%s", err_str);
- ret = -1;
- goto out;
- }
- } else {
- snprintf (err_str, sizeof (err_str), "Unable to get volinfo "
- "for volume name %s", volname);
- gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set uuid in "
+ "dictionary.");
goto out;
}
-brick_val:
- ret = dict_get_str (dict, "bricks", &bricks);
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "bricks");
- gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to serialize "
+ "dictionary.");
goto out;
}
-
- if (bricks)
- brick_list = gf_strdup (bricks);
- if (!brick_list) {
- ret = -1;
- snprintf (err_str, sizeof (err_str), "Out of memory");
- goto out;
- } else {
- free_ptr = brick_list;
- }
-
- gf_cmd_log ("Volume add-brick", "volname: %s type %s count:%d bricks:%s"
- ,volname, ((volinfo->type == 0)? "DEFAULT" : ((volinfo->type
- == 1)? "STRIPE": "REPLICATE")), brick_count, brick_list);
-
-
- while ( i < brick_count) {
- i++;
- brick= strtok_r (brick_list, " \n", &tmpptr);
- brick_list = tmpptr;
- brickinfo = NULL;
- ret = glusterd_brickinfo_from_brick (brick, &brickinfo);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get "
- "brick info from brick %s", brick);
- goto out;
- }
- ret = glusterd_new_brick_validate (brick, brickinfo, err_str,
- sizeof (err_str));
- if (ret)
- goto out;
- ret = glusterd_volume_brickinfo_get (brickinfo->uuid,
- brickinfo->hostname,
- brickinfo->path,
- &tmpvolinfo, &tmpbrkinfo);
- if (!ret) {
- ret = -1;
- snprintf (err_str, sizeof (err_str), "Brick: %s:%s, %s"
- " in the arguments mean the same",
- tmpbrkinfo->hostname, tmpbrkinfo->path,
- brick);
- goto out;
- }
- list_add_tail (&brickinfo->brick_list, &tmpvolinfo.bricks);
- brickinfo = NULL;
- }
-
- ret = glusterd_op_begin (req, GD_OP_ADD_BRICK, dict, _gf_true);
- gf_cmd_log ("Volume add-brick","on volname: %s %s", volname,
- (ret != 0)? "FAILED" : "SUCCESS");
-
+ ret = 0;
out:
if (ret) {
- if (dict)
- dict_unref (dict);
rsp.op_ret = -1;
- rsp.op_errno = 0;
- rsp.volname = "";
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str), "Operation failed");
- rsp.op_errstr = err_str;
- cli_rsp = &rsp;
- glusterd_submit_reply(req, cli_rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_add_brick_rsp);
- if (!lock_fail)
- glusterd_opinfo_unlock();
- ret = 0; //sent error to cli, prevent second reply
- }
+ if (msg_str[0] == '\0')
+ snprintf (msg_str, sizeof (msg_str), "Operation "
+ "failed");
+ rsp.op_errstr = msg_str;
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ } else {
+ rsp.op_errstr = "";
+
+ }
- if (free_ptr)
- GF_FREE (free_ptr);
- glusterd_volume_brickinfos_delete (&tmpvolinfo);
- if (brickinfo)
- glusterd_brickinfo_delete (brickinfo);
- if (cli_req.volname)
- free (cli_req.volname); //its malloced by xdr
+ glusterd_to_cli (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp, dict);
- return ret;
+ return 0;
+}
+int
+glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_uuid_get);
}
int
-glusterd_handle_replace_brick (rpcsvc_request_t *req)
+__glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_replace_brick_req cli_req = {0,};
- dict_t *dict = NULL;
- char *src_brick = NULL;
- char *dst_brick = NULL;
- int32_t op = 0;
- char operation[256];
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_REPLACE_BRICK;
+ int ret = -1;
+ dict_t *dict = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int count = 0;
+ char key[1024] = {0,};
+ gf_cli_rsp rsp = {0,};
GF_ASSERT (req);
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
+ priv = THIS->private;
+ GF_ASSERT (priv);
- ret = -1;
- if (!gf_xdr_to_cli_replace_brick_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
+ dict = dict_new ();
+ if (!dict)
goto out;
- }
-
- gf_log ("glusterd", GF_LOG_INFO, "Received replace brick req");
-
- if (cli_req.bricks.bricks_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
- ret = dict_unserialize (cli_req.bricks.bricks_val,
- cli_req.bricks.bricks_len,
- &dict);
- if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ list_for_each_entry (volinfo, &priv->volumes, vol_list) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "volume%d", count);
+ ret = dict_set_str (dict, key, volinfo->volname);
+ if (ret)
goto out;
- } else {
- dict->extra_stdfree = cli_req.bricks.bricks_val;
- }
- }
-
- ret = dict_get_int32 (dict, "operation", &op);
- if (ret) {
- gf_log ("", GF_LOG_DEBUG,
- "dict_get on operation failed");
- goto out;
+ count++;
}
- ret = dict_get_str (dict, "src-brick", &src_brick);
-
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get src brick");
+ ret = dict_set_int32 (dict, "count", count);
+ if (ret)
goto out;
- }
- gf_log ("", GF_LOG_DEBUG,
- "src brick=%s", src_brick);
- ret = dict_get_str (dict, "dst-brick", &dst_brick);
-
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get dest brick");
+ ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret)
goto out;
- }
-
- gf_log ("", GF_LOG_DEBUG,
- "dst brick=%s", dst_brick);
- switch (op) {
- case GF_REPLACE_OP_START: strcpy (operation, "start");
- break;
- case GF_REPLACE_OP_COMMIT: strcpy (operation, "commit");
- break;
- case GF_REPLACE_OP_PAUSE: strcpy (operation, "pause");
- break;
- case GF_REPLACE_OP_ABORT: strcpy (operation, "abort");
- break;
- case GF_REPLACE_OP_STATUS: strcpy (operation, "status");
- break;
- case GF_REPLACE_OP_COMMIT_FORCE: strcpy (operation, "commit-force");
- break;
- default:strcpy (operation, "unknown");
- break;
- }
+ ret = 0;
- gf_log ("glusterd", GF_LOG_INFO, "Received replace brick %s request", operation);
- gf_cmd_log ("Volume replace-brick","volname: %s src_brick:%s"
- " dst_brick:%s op:%s",cli_req.volname, src_brick, dst_brick
- ,operation);
+out:
+ rsp.op_ret = ret;
+ if (ret)
+ rsp.op_errstr = "Error listing volumes";
+ else
+ rsp.op_errstr = "";
- ret = glusterd_op_begin (req, GD_OP_REPLACE_BRICK, dict, _gf_true);
- gf_cmd_log ("Volume replace-brick","on volname: %s %s", cli_req.volname,
- (ret) ? "FAILED" : "SUCCESS");
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp);
+ ret = 0;
-out:
- if (ret && dict)
+ if (dict)
dict_unref (dict);
- if (cli_req.volname)
- free (cli_req.volname);//malloced by xdr
glusterd_friend_sm ();
glusterd_op_sm ();
- if (ret) {
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
-
- }
-
return ret;
}
-
-
-
int
-glusterd_handle_reset_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_reset_vol_req cli_req = {0,};
- dict_t *dict = NULL;
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_RESET_VOLUME;
-
- GF_ASSERT (req);
-
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_set_vol_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
- }
-
- gf_cmd_log ("Volume reset", "volume : %s", cli_req.volname);
- ret = glusterd_op_begin (req, GD_OP_RESET_VOLUME, dict, _gf_true);
- gf_cmd_log ("Volume reset", " on volume %s %s ", cli_req.volname,
- ((ret == 0)? " SUCCEDED":" FAILED"));
-
-out:
- if (cli_req.volname)
- free (cli_req.volname);//malloced by xdr
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
- if (ret) {
- if (dict)
- dict_unref (dict);
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
- }
-
- return ret;
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_list_volume);
}
-int
-glusterd_handle_gsync_set (rpcsvc_request_t *req)
+int32_t
+glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
+ char *err_str, size_t err_len)
{
- int32_t ret = 0;
- dict_t *dict = NULL;
- gf1_cli_gsync_set_req cli_req = {{0},};
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_GSYNC_SET;
- char *master = NULL;
- char *slave = NULL;
- char operation[256] = {0,};
- int type = 0;
- glusterd_conf_t *priv = NULL;
- char *host_uuid = NULL;
-
- GF_ASSERT (req);
- GF_ASSERT (THIS);
- GF_ASSERT (THIS->private);
-
- priv = THIS->private;
-
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_gsync_set_req (req->msg[0], &cli_req)) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
-
- host_uuid = gf_strdup (uuid_utoa(priv->uuid));
- if (host_uuid == NULL) {
- gf_log ("glusterd", GF_LOG_ERROR, "failed to get"
- "the uuid of the host machine");
- ret = -1;
- goto out;
- }
- ret = dict_set_dynstr (dict, "host-uuid", host_uuid);
- if (ret)
- goto out;
-
- }
-
- ret = dict_get_str (dict, "master", &master);
- if (ret < 0) {
- gf_log ("", GF_LOG_INFO, "master not found, while handling"
- GEOREP" options");
- master = "(No Master)";
- }
-
- ret = dict_get_str (dict, "slave", &slave);
- if (ret < 0) {
- gf_log ("", GF_LOG_INFO, "slave not not found, while"
- "handling "GEOREP" options");
- slave = "(No Slave)";
- }
-
- ret = dict_get_int32 (dict, "type", &type);
- if (ret < 0) {
- gf_log ("", GF_LOG_WARNING, "command type not found, while"
- "handling "GEOREP" options");
- goto out;
- }
-
- switch (type) {
-
- case GF_GSYNC_OPTION_TYPE_START:
- strncpy (operation, "start", sizeof (operation));
- break;
-
- case GF_GSYNC_OPTION_TYPE_STOP:
- strncpy (operation, "stop", sizeof (operation));
- break;
-
- case GF_GSYNC_OPTION_TYPE_CONFIGURE:
- strncpy (operation, "configure", sizeof (operation));
- break;
-
- case GF_GSYNC_OPTION_TYPE_STATUS:
- strncpy (operation, "status", sizeof (operation));
- break;
- }
-
- gf_cmd_log ("volume "GEOREP, " %s command on %s,%s", operation, master,
- slave);
- ret = glusterd_op_begin (req, GD_OP_GSYNC_SET, dict, _gf_true);
- gf_cmd_log ("volume "GEOREP, " %s command on %s,%s %s ", operation,
- master, slave, (ret != 0)? "FAILED" : "SUCCEEDED");
+ int ret = -1;
-out:
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ ret = glusterd_op_txn_begin (req, op, ctx, err_str, err_len);
- if (ret) {
- if (dict)
- dict_unref (dict);
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
- }
return ret;
}
int
-glusterd_handle_quota (rpcsvc_request_t *req)
+__glusterd_handle_reset_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_quota_req cli_req = {0,};
- dict_t *dict = NULL;
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_QUOTA;
- char operation[256] = {0, };
- char *volname = NULL;
- int32_t type = 0;
+ gf_cli_req cli_req = {{0,}};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_RESET_VOLUME;
+ char *volname = NULL;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- if (!gf_xdr_to_cli_quota_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf (err_str, sizeof (err_str), "Failed to decode request "
+ "received from cli");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
req->rpc_err = GARBAGE_ARGS;
goto out;
}
@@ -1818,8 +1684,10 @@ glusterd_handle_quota (rpcsvc_request_t *req)
cli_req.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "failed to "
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
"unserialize req-buffer to dictionary");
+ snprintf (err_str, sizeof (err_str), "Unable to decode "
+ "the command");
goto out;
} else {
dict->extra_stdfree = cli_req.dict.dict_val;
@@ -1828,83 +1696,60 @@ glusterd_handle_quota (rpcsvc_request_t *req)
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log ("", GF_LOG_WARNING, "Unable to get volume name, while"
- "handling quota command");
+ snprintf (err_str, sizeof (err_str), "Failed to get volume "
+ "name");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
goto out;
}
+ gf_log (this->name, GF_LOG_DEBUG, "Received volume reset request for "
+ "volume %s", volname);
- ret = dict_get_int32 (dict, "type", &type);
- if (ret) {
- gf_log ("", GF_LOG_WARNING, "Unable to get type of cmd. , while"
- "handling quota command");
- goto out;
- }
-
- switch (type) {
- case GF_QUOTA_OPTION_TYPE_ENABLE:
- strncpy (operation, "enable", sizeof (operation));
- break;
-
- case GF_QUOTA_OPTION_TYPE_DISABLE:
- strncpy (operation, "disable", sizeof (operation));
- break;
-
- case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
- strncpy (operation, "limit-usage", sizeof (operation));
- break;
-
- case GF_QUOTA_OPTION_TYPE_REMOVE:
- strncpy (operation, "remove", sizeof (operation));
- break;
- }
- gf_cmd_log ("volume quota", " %s command on %s", operation, volname);
- ret = glusterd_op_begin (req, GD_OP_QUOTA, dict, _gf_true);
- gf_cmd_log ("volume quota", " %s command on %s %s", operation,volname,
- (ret != 0)? "FAILED" : "SUCCEEDED");
+ ret = glusterd_op_begin_synctask (req, GD_OP_RESET_VOLUME, dict);
out:
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
if (ret) {
- if (dict)
- dict_unref (dict);
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
+ dict, err_str);
}
- if (cli_req.volname)
- free (cli_req.volname); //malloced by xdr
return ret;
}
int
-glusterd_handle_set_volume (rpcsvc_request_t *req)
+glusterd_handle_reset_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_reset_volume);
+}
+
+int
+__glusterd_handle_set_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_set_vol_req cli_req = {0,};
+ gf_cli_req cli_req = {{0,}};
dict_t *dict = NULL;
- int lock_fail = 0;
glusterd_op_t cli_op = GD_OP_SET_VOLUME;
char *key = NULL;
char *value = NULL;
char *volname = NULL;
+ char *op_errstr = NULL;
+ gf_boolean_t help = _gf_false;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
- GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
+ GF_ASSERT (req);
- ret = -1;
- if (!gf_xdr_to_cli_set_vol_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf (err_str, sizeof (err_str), "Failed to decode "
+ "request received from cli");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
req->rpc_err = GARBAGE_ARGS;
goto out;
}
@@ -1917,9 +1762,11 @@ glusterd_handle_set_volume (rpcsvc_request_t *req)
cli_req.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
+ snprintf (err_str, sizeof (err_str), "Unable to decode "
+ "the command");
goto out;
} else {
dict->extra_stdfree = cli_req.dict.dict_val;
@@ -1928,590 +1775,135 @@ glusterd_handle_set_volume (rpcsvc_request_t *req)
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log ("", GF_LOG_WARNING, "Unable to get volume name, while"
- "handling volume set command");
+ snprintf (err_str, sizeof (err_str), "Failed to get volume "
+ "name while handling volume set command");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+
+ if (strcmp (volname, "help") == 0 ||
+ strcmp (volname, "help-xml") == 0) {
+ ret = glusterd_volset_help (dict, &op_errstr);
+ help = _gf_true;
goto out;
}
ret = dict_get_str (dict, "key1", &key);
if (ret) {
- gf_log ("", GF_LOG_WARNING, "Unable to get key, while"
- "handling volume set for %s",volname);
+ snprintf (err_str, sizeof (err_str), "Failed to get key while"
+ " handling volume set for %s", volname);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
goto out;
}
ret = dict_get_str (dict, "value1", &value);
if (ret) {
- gf_log ("", GF_LOG_WARNING, "Unable to get value, while"
- "handling volume set for %s",volname);
+ snprintf (err_str, sizeof (err_str), "Failed to get value while"
+ " handling volume set for %s", volname);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
goto out;
}
+ gf_log (this->name, GF_LOG_DEBUG, "Received volume set request for "
+ "volume %s", volname);
- gf_cmd_log ("volume set", "volume-name:%s: key:%s, value:%s",volname,
- key, value);
- ret = glusterd_op_begin (req, GD_OP_SET_VOLUME, dict, _gf_true);
- gf_cmd_log ("volume set", "volume-name:%s: key:%s, value:%s %s",
- volname, key, value, (ret == 0)? "SUCCEDED" : "FAILED" );
-out:
- if (cli_req.volname)
- free (cli_req.volname);//malloced by xdr
+ ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict);
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if (ret) {
- if (dict)
- dict_unref (dict);
+out:
+ if (help)
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, dict,
+ (op_errstr)? op_errstr:"");
+ else if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
+ dict, err_str);
}
+ if (op_errstr)
+ GF_FREE (op_errstr);
+
return ret;
}
int
-glusterd_handle_remove_brick (rpcsvc_request_t *req)
+glusterd_handle_set_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_remove_brick_req cli_req = {0,};
- dict_t *dict = NULL;
- int32_t count = 0;
- char *brick = NULL;
- char key[256] = {0,};
- char *brick_list = NULL;
- int i = 1;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- int32_t pos = 0;
- int32_t sub_volume = 0;
- int32_t sub_volume_start = 0;
- int32_t sub_volume_end = 0;
- glusterd_brickinfo_t *tmp = NULL;
- char err_str[2048] = {0};
- gf1_cli_remove_brick_rsp rsp = {0,};
- void *cli_rsp = NULL;
- char vol_type[256] = {0,};
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_REMOVE_BRICK;
+ return glusterd_big_locked_handler (req, __glusterd_handle_set_volume);
+}
- GF_ASSERT (req);
+int
+__glusterd_handle_sync_volume (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{0,}};
+ dict_t *dict = NULL;
+ gf_cli_rsp cli_rsp = {0.};
+ char msg[2048] = {0,};
+ char *volname = NULL;
+ gf1_cli_sync_volume flags = 0;
+ char *hostname = NULL;
+ xlator_t *this = NULL;
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- ret = -1;
- if (!gf_xdr_to_cli_remove_brick_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_cmd_log ("Volume remove-brick","on volname: %s attempted",cli_req.volname);
- gf_log ("glusterd", GF_LOG_INFO, "Received rem brick req");
-
- if (cli_req.bricks.bricks_len) {
+ if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new ();
- ret = dict_unserialize (cli_req.bricks.bricks_val,
- cli_req.bricks.bricks_len,
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
+ snprintf (msg, sizeof (msg), "Unable to decode the "
+ "command");
goto out;
} else {
- dict->extra_stdfree = cli_req.bricks.bricks_val;
- }
- }
-
- ret = dict_get_int32 (dict, "count", &count);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get count");
- goto out;
- }
-
- ret = glusterd_volinfo_find (cli_req.volname, &volinfo);
- if (ret) {
- snprintf (err_str, 2048, "Volume %s does not exist",
- cli_req.volname);
- gf_log ("", GF_LOG_ERROR, "%s", err_str);
- goto out;
- }
-
- if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE)
- strcpy (vol_type, "replica");
- else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE)
- strcpy (vol_type, "stripe");
- else
- strcpy (vol_type, "distribute");
-
- /* Do not allow remove-brick if the volume is plain stripe */
- if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE) && (volinfo->brick_count == volinfo->sub_count)) {
- snprintf (err_str, 2048, "Removing brick from a plain stripe is not allowed");
- gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str);
- ret = -1;
- goto out;
- }
-
- /* Do not allow remove-brick if the bricks given is less than the replica count
- or stripe count */
- if (((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) || (volinfo->type == GF_CLUSTER_TYPE_STRIPE))
- && !(volinfo->brick_count <= volinfo->sub_count)) {
- if (volinfo->sub_count && (count % volinfo->sub_count != 0)) {
- snprintf (err_str, 2048, "Remove brick incorrect"
- " brick count of %d for %s %d",
- count, vol_type, volinfo->sub_count);
- gf_log ("", GF_LOG_ERROR, "%s", err_str);
- ret = -1;
- goto out;
- }
- }
-
- brick_list = GF_MALLOC (120000 * sizeof(*brick_list),gf_common_mt_char);
-
- if (!brick_list) {
- ret = -1;
- goto out;
- }
-
- strcpy (brick_list, " ");
- while ( i <= count) {
- snprintf (key, 256, "brick%d", i);
- ret = dict_get_str (dict, key, &brick);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get %s", key);
- goto out;
- }
- gf_log ("", GF_LOG_DEBUG, "Remove brick count %d brick: %s",
- i, brick);
-
- ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo);
- if (ret) {
- snprintf(err_str, 2048,"Incorrect brick %s for volume"
- " %s", brick, cli_req.volname);
- gf_log ("", GF_LOG_ERROR, "%s", err_str);
- goto out;
- }
- strcat(brick_list, brick);
- strcat(brick_list, " ");
-
- i++;
- if ((volinfo->type == GF_CLUSTER_TYPE_NONE) ||
- (volinfo->brick_count <= volinfo->sub_count))
- continue;
-
- pos = 0;
- list_for_each_entry (tmp, &volinfo->bricks, brick_list) {
-
- if ((!strcmp (tmp->hostname,brickinfo->hostname)) &&
- !strcmp (tmp->path, brickinfo->path)) {
- gf_log ("", GF_LOG_INFO, "Found brick");
- if (!sub_volume && volinfo->sub_count) {
- sub_volume = (pos / volinfo->
- sub_count) + 1;
- sub_volume_start = volinfo->sub_count *
- (sub_volume - 1);
- sub_volume_end = (volinfo->sub_count *
- sub_volume) -1 ;
- } else {
- if (pos < sub_volume_start ||
- pos >sub_volume_end) {
- ret = -1;
- snprintf(err_str, 2048,"Bricks"
- " not from same subvol"
- " for %s", vol_type);
- gf_log ("",GF_LOG_ERROR,
- "%s", err_str);
- goto out;
- }
- }
- break;
- }
- pos++;
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
}
- gf_cmd_log ("Volume remove-brick","volname: %s count:%d bricks:%s",
- cli_req.volname, count, brick_list);
- ret = glusterd_op_begin (req, GD_OP_REMOVE_BRICK, dict, _gf_true);
- gf_cmd_log ("Volume remove-brick","on volname: %s %s",cli_req.volname,
- (ret) ? "FAILED" : "SUCCESS");
-
-out:
- if (ret) {
- if (dict)
- dict_unref (dict);
- rsp.op_ret = -1;
- rsp.op_errno = 0;
- rsp.volname = "";
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str), "Operation failed");
- gf_log ("", GF_LOG_ERROR, "%s", err_str);
- rsp.op_errstr = err_str;
- cli_rsp = &rsp;
- glusterd_submit_reply(req, cli_rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_remove_brick_rsp);
- if (!lock_fail)
- glusterd_opinfo_unlock();
-
- ret = 0; //sent error to cli, prevent second reply
-
- }
- if (brick_list)
- GF_FREE (brick_list);
- if (cli_req.volname)
- free (cli_req.volname); //its malloced by xdr
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- return ret;
-}
-
-int
-glusterd_handle_log_filename (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf1_cli_log_filename_req cli_req = {0,};
- dict_t *dict = NULL;
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_LOG_FILENAME;
-
- GF_ASSERT (req);
-
- ret = glusterd_op_set_cli_op (cli_op);
+ ret = dict_get_str (dict, "hostname", &hostname);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_log_filename_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_log ("glusterd", GF_LOG_INFO, "Received log filename req "
- "for volume %s", cli_req.volname);
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_set_dynmstr (dict, "volname", cli_req.volname);
- if (ret)
- goto out;
- ret = dict_set_dynmstr (dict, "brick", cli_req.brick);
- if (ret)
- goto out;
- ret = dict_set_dynmstr (dict, "path", cli_req.path);
- if (ret)
+ snprintf (msg, sizeof (msg), "Failed to get hostname");
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
goto out;
-
- ret = glusterd_op_begin (req, GD_OP_LOG_FILENAME, dict, _gf_true);
-
-out:
- if (ret && dict)
- dict_unref (dict);
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if (ret) {
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
-
}
- return ret;
-}
-
-int
-glusterd_handle_log_locate (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf1_cli_log_locate_req cli_req = {0,};
- gf1_cli_log_locate_rsp rsp = {0,};
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- char tmp_str[PATH_MAX] = {0,};
- char *tmp_brick = NULL;
- uint32_t found = 0;
- glusterd_brickinfo_t *tmpbrkinfo = NULL;
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_LOG_LOCATE;
-
- GF_ASSERT (req);
-
- priv = THIS->private;
-
- ret = glusterd_op_set_cli_op (cli_op);
+ ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_log_locate_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_log ("glusterd", GF_LOG_INFO, "Received log locate req "
- "for volume %s", cli_req.volname);
-
- if (strchr (cli_req.brick, ':')) {
- /* TODO: need to get info of only that brick and then
- tell what is the exact location */
- tmp_brick = gf_strdup (cli_req.brick);
- if (!tmp_brick)
- goto out;
-
- gf_log ("", GF_LOG_DEBUG, "brick : %s", cli_req.brick);
- ret = glusterd_brickinfo_from_brick (tmp_brick, &tmpbrkinfo);
+ ret = dict_get_int32 (dict, "flags", (int32_t*)&flags);
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "Cannot get brickinfo from the brick");
+ snprintf (msg, sizeof (msg), "Failed to get volume name"
+ " or flags");
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
}
- ret = glusterd_volinfo_find (cli_req.volname, &volinfo);
- if (ret) {
- rsp.path = "request sent on non-existent volume";
- goto out;
- }
-
- list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (tmpbrkinfo) {
- ret = glusterd_resolve_brick (tmpbrkinfo);
- if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "cannot resolve the brick");
- goto out;
- }
- if (uuid_compare (tmpbrkinfo->uuid, brickinfo->uuid) || strcmp (brickinfo->path, tmpbrkinfo->path))
- continue;
- }
-
- if (brickinfo->logfile) {
- strcpy (tmp_str, brickinfo->logfile);
- rsp.path = dirname (tmp_str);
- found = 1;
- } else {
- snprintf (tmp_str, PATH_MAX, "%s/bricks/",
- DEFAULT_LOG_FILE_DIRECTORY);
- rsp.path = tmp_str;
- found = 1;
- }
- break;
- }
-
- if (!found) {
- snprintf (tmp_str, PATH_MAX, "brick %s:%s does not exitst in the volume %s",
- tmpbrkinfo->hostname, tmpbrkinfo->path, cli_req.volname);
- rsp.path = tmp_str;
- }
-
- ret = 0;
-out:
- if (tmp_brick)
- GF_FREE (tmp_brick);
- if (tmpbrkinfo)
- glusterd_brickinfo_delete (tmpbrkinfo);
- rsp.op_ret = ret;
- if (!rsp.path)
- rsp.path = "Operation failed";
-
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_log_locate_rsp);
-
- if (cli_req.brick)
- free (cli_req.brick); //its malloced by xdr
- if (cli_req.volname)
- free (cli_req.volname); //its malloced by xdr
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
-
- return ret;
-}
-
-int
-glusterd_handle_log_rotate (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf1_cli_log_rotate_req cli_req = {0,};
- dict_t *dict = NULL;
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_LOG_ROTATE;
-
- GF_ASSERT (req);
-
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_log_rotate_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_log ("glusterd", GF_LOG_INFO, "Received log rotate req "
- "for volume %s", cli_req.volname);
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_set_dynmstr (dict, "volname", cli_req.volname);
- if (ret)
- goto out;
-
- ret = dict_set_dynmstr (dict, "brick", cli_req.brick);
- if (ret)
- goto out;
-
- ret = dict_set_uint64 (dict, "rotate-key", (uint64_t)time (NULL));
- if (ret)
- goto out;
-
- ret = glusterd_op_begin (req, GD_OP_LOG_ROTATE, dict, _gf_true);
-
-out:
- if (ret && dict)
- dict_unref (dict);
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if (ret) {
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
- }
-
- return ret;
-}
-
-int
-glusterd_handle_sync_volume (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf1_cli_sync_volume_req cli_req = {0,};
- dict_t *dict = NULL;
- gf1_cli_sync_volume_rsp cli_rsp = {0.};
- char msg[2048] = {0,};
- gf_boolean_t free_hostname = _gf_true;
- gf_boolean_t free_volname = _gf_true;
- glusterd_volinfo_t *volinfo = NULL;
- int lock_fail = 0;
- glusterd_op_t cli_op = GD_OP_SYNC_VOLUME;
-
- GF_ASSERT (req);
-
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_sync_volume_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
- gf_log ("glusterd", GF_LOG_INFO, "Received volume sync req "
- "for volume %s",
- (cli_req.flags & GF_CLI_SYNC_ALL) ? "all" : cli_req.volname);
-
- dict = dict_new ();
- if (!dict) {
- gf_log ("", GF_LOG_ERROR, "Can't allocate sync vol dict");
- goto out;
- }
+ gf_log (this->name, GF_LOG_INFO, "Received volume sync req "
+ "for volume %s", (flags & GF_CLI_SYNC_ALL) ? "all" : volname);
- if (!glusterd_is_local_addr (cli_req.hostname)) {
+ if (gf_is_local_addr (hostname)) {
ret = -1;
snprintf (msg, sizeof (msg), "sync from localhost"
" not allowed");
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
- ret = dict_set_dynmstr (dict, "hostname", cli_req.hostname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "hostname set failed");
- snprintf (msg, sizeof (msg), "hostname set failed");
- goto out;
- } else {
- free_hostname = _gf_false;
- }
-
- ret = dict_set_int32 (dict, "flags", cli_req.flags);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "volume flags set failed");
- snprintf (msg, sizeof (msg), "volume flags set failed");
- goto out;
- }
-
- if (!cli_req.flags) {
- ret = glusterd_volinfo_find (cli_req.volname, &volinfo);
- if (!ret) {
- snprintf (msg, sizeof (msg), "please delete the "
- "volume: %s before sync", cli_req.volname);
- ret = -1;
- goto out;
- }
-
- ret = dict_set_dynmstr (dict, "volname", cli_req.volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "volume name set failed");
- snprintf (msg, sizeof (msg), "volume name set failed");
- goto out;
- } else {
- free_volname = _gf_false;
- }
- } else {
- free_volname = _gf_false;
- if (glusterd_volume_count_get ()) {
- snprintf (msg, sizeof (msg), "please delete all the "
- "volumes before full sync");
- ret = -1;
- goto out;
- }
- }
-
- ret = glusterd_op_begin (req, GD_OP_SYNC_VOLUME, dict, _gf_true);
+ ret = glusterd_op_begin_synctask (req, GD_OP_SYNC_VOLUME, dict);
out:
if (ret) {
@@ -2519,28 +1911,22 @@ out:
cli_rsp.op_errstr = msg;
if (msg[0] == '\0')
snprintf (msg, sizeof (msg), "Operation failed");
- glusterd_submit_reply(req, &cli_rsp, NULL, 0, NULL,
- gf_xdr_from_cli_sync_volume_rsp);
- if (free_hostname && cli_req.hostname)
- free (cli_req.hostname);
- if (free_volname && cli_req.volname)
- free (cli_req.volname);
- if (dict)
- dict_unref (dict);
-
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
+ glusterd_to_cli (req, &cli_rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp, dict);
ret = 0; //sent error to cli, prevent second reply
}
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
return ret;
}
int
+glusterd_handle_sync_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_sync_volume);
+}
+
+int
glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret,
char *op_errstr, dict_t *dict)
{
@@ -2555,12 +1941,11 @@ glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret,
rsp.op_errstr = op_errstr;
if (rsp.op_ret == 0)
ret = dict_allocate_and_serialize (dict, &rsp.fsm_log.fsm_log_val,
- (size_t *)&rsp.fsm_log.fsm_log_len);
+ &rsp.fsm_log.fsm_log_len);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_from_cli_fsm_log_rsp);
- if (rsp.fsm_log.fsm_log_val)
- GF_FREE (rsp.fsm_log.fsm_log_val);
+ (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
+ GF_FREE (rsp.fsm_log.fsm_log_val);
gf_log ("glusterd", GF_LOG_DEBUG, "Responded, ret: %d", ret);
@@ -2568,7 +1953,7 @@ glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret,
}
int
-glusterd_handle_fsm_log (rpcsvc_request_t *req)
+__glusterd_handle_fsm_log (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf1_cli_fsm_log_req cli_req = {0,};
@@ -2581,7 +1966,9 @@ glusterd_handle_fsm_log (rpcsvc_request_t *req)
GF_ASSERT (req);
- if (!gf_xdr_to_cli_fsm_log_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req,
+ (xdrproc_t)xdr_gf1_cli_fsm_log_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
snprintf (msg, sizeof (msg), "Garbage request");
@@ -2612,8 +1999,7 @@ glusterd_handle_fsm_log (rpcsvc_request_t *req)
ret = glusterd_sm_tr_log_add_to_dict (dict, log);
out:
(void)glusterd_fsm_log_send_resp (req, ret, msg, dict);
- if (cli_req.name)
- free (cli_req.name);//malloced by xdr
+ free (cli_req.name);//malloced by xdr
if (dict)
dict_unref (dict);
@@ -2624,6 +2010,12 @@ out:
}
int
+glusterd_handle_fsm_log (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_fsm_log);
+}
+
+int
glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
{
@@ -2635,10 +2027,9 @@ glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
rsp.op_ret = status;
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_cluster_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
- gf_log ("glusterd", GF_LOG_INFO,
- "Responded, ret: %d", ret);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to lock, ret: %d", ret);
return 0;
}
@@ -2655,33 +2046,99 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
glusterd_get_uuid (&rsp.uuid);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_cluster_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
- gf_log ("glusterd", GF_LOG_INFO,
- "Responded to unlock, ret: %d", ret);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to unlock, ret: %d", ret);
return ret;
}
int
-glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
+glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
+{
+
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ GF_ASSERT (txn_id);
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ uuid_copy (rsp.txn_id, *txn_id);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to mgmt_v3 lock, ret: %d",
+ ret);
+
+ return ret;
+}
+
+int
+glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
+{
+
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ GF_ASSERT (txn_id);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ glusterd_get_uuid (&rsp.uuid);
+ uuid_copy (rsp.txn_id, *txn_id);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Responded to mgmt_v3 unlock, ret: %d",
+ ret);
+
+ return ret;
+}
+
+int
+__glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
gd1_mgmt_cluster_unlock_req unlock_req = {{0}, };
int32_t ret = -1;
glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_cluster_unlock_req (req->msg[0], &unlock_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &unlock_req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
+ "request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_log (this->name, GF_LOG_DEBUG,
"Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid));
+ if (glusterd_friend_find_by_uuid (unlock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (unlock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
if (!ctx) {
@@ -2690,8 +2147,9 @@ glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
}
uuid_copy (ctx->uuid, unlock_req.uuid);
ctx->req = req;
+ ctx->dict = NULL;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx);
out:
glusterd_friend_sm ();
@@ -2701,14 +2159,25 @@ out:
}
int
+glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cluster_unlock);
+}
+
+int
glusterd_op_stage_send_resp (rpcsvc_request_t *req,
int32_t op, int32_t status,
char *op_errstr, dict_t *rsp_dict)
{
gd1_mgmt_stage_op_rsp rsp = {{0},};
int ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
+
rsp.op_ret = status;
glusterd_get_uuid (&rsp.uuid);
rsp.op = op;
@@ -2717,22 +2186,19 @@ glusterd_op_stage_send_resp (rpcsvc_request_t *req,
else
rsp.op_errstr = "";
- ret = dict_allocate_and_serialize (rsp_dict,
- &rsp.dict.dict_val,
- (size_t *)&rsp.dict.dict_len);
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
if (ret < 0) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_ERROR,
"failed to get serialized length of dict");
return ret;
}
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_stage_op_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
- gf_log ("glusterd", GF_LOG_INFO,
- "Responded to stage, ret: %d", ret);
- if (rsp.dict.dict_val)
- GF_FREE (rsp.dict.dict_val);
+ gf_log (this->name, GF_LOG_DEBUG, "Responded to stage, ret: %d", ret);
+ GF_FREE (rsp.dict.dict_val);
return ret;
}
@@ -2744,7 +2210,10 @@ glusterd_op_commit_send_resp (rpcsvc_request_t *req,
{
gd1_mgmt_commit_op_rsp rsp = {{0}, };
int ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
rsp.op_ret = status;
glusterd_get_uuid (&rsp.uuid);
@@ -2756,11 +2225,10 @@ glusterd_op_commit_send_resp (rpcsvc_request_t *req,
rsp.op_errstr = "";
if (rsp_dict) {
- ret = dict_allocate_and_serialize (rsp_dict,
- &rsp.dict.dict_val,
- (size_t *)&rsp.dict.dict_len);
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
if (ret < 0) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_ERROR,
"failed to get serialized length of dict");
goto out;
}
@@ -2768,26 +2236,26 @@ glusterd_op_commit_send_resp (rpcsvc_request_t *req,
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_commit_op_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
- gf_log ("glusterd", GF_LOG_INFO,
- "Responded to commit, ret: %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Responded to commit, ret: %d", ret);
out:
- if (rsp.dict.dict_val)
- GF_FREE (rsp.dict.dict_val);
+ GF_FREE (rsp.dict.dict_val);
return ret;
}
int
-glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
+__glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
{
int32_t ret = -1;
gd1_mgmt_friend_req friend_req = {{0},};
gf_boolean_t run_fsm = _gf_true;
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &friend_req)) {
+ ret = xdr_to_generic (req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
@@ -2806,8 +2274,7 @@ glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
}
out:
- if (friend_req.hostname)
- free (friend_req.hostname);//malloced by xdr
+ free (friend_req.hostname);//malloced by xdr
if (run_fsm) {
glusterd_friend_sm ();
@@ -2818,14 +2285,23 @@ out:
}
int
-glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
+glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_incoming_friend_req);
+}
+
+int
+__glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
{
int32_t ret = -1;
gd1_mgmt_friend_req friend_req = {{0},};
char remote_hostname[UNIX_PATH_MAX + 1] = {0,};
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &friend_req)) {
+ ret = xdr_to_generic (req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
@@ -2844,10 +2320,8 @@ glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
remote_hostname, friend_req.port);
out:
- if (friend_req.hostname)
- free (friend_req.hostname);//malloced by xdr
- if (friend_req.vols.vols_val)
- free (friend_req.vols.vols_val);//malloced by xdr
+ free (friend_req.hostname);//malloced by xdr
+ free (friend_req.vols.vols_val);//malloced by xdr
glusterd_friend_sm ();
glusterd_op_sm ();
@@ -2855,6 +2329,13 @@ out:
return ret;
}
+int
+glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_incoming_unfriend_req);
+
+}
int
glusterd_handle_friend_update_delete (dict_t *dict)
@@ -2902,7 +2383,7 @@ out:
}
int
-glusterd_handle_friend_update (rpcsvc_request_t *req)
+__glusterd_handle_friend_update (rpcsvc_request_t *req)
{
int32_t ret = -1;
gd1_mgmt_friend_update friend_req = {{0},};
@@ -2928,7 +2409,9 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
priv = this->private;
GF_ASSERT (priv);
- if (!gd_xdr_to_mgmt_friend_update (req->msg[0], &friend_req)) {
+ ret = xdr_to_generic (req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_update);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
@@ -2988,7 +2471,13 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
gf_log ("", GF_LOG_INFO, "Received uuid: %s, hostname:%s",
uuid_buf, hostname);
- if (!uuid_compare (uuid, priv->uuid)) {
+ if (uuid_is_null (uuid)) {
+ gf_log (this->name, GF_LOG_WARNING, "Updates mustn't "
+ "contain peer with 'null' uuid");
+ continue;
+ }
+
+ if (!uuid_compare (uuid, MY_UUID)) {
gf_log ("", GF_LOG_INFO, "Received my uuid as Friend");
i++;
continue;
@@ -3007,22 +2496,21 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
ret = glusterd_friend_add (hostname, friend_req.port,
GD_FRIEND_STATE_BEFRIENDED,
- &uuid, NULL, &peerinfo, 0, &args);
+ &uuid, &peerinfo, 0, &args);
i++;
}
out:
- uuid_copy (rsp.uuid, priv->uuid);
+ uuid_copy (rsp.uuid, MY_UUID);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_friend_update_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
if (dict) {
if (!dict->extra_stdfree && friend_req.friends.friends_val)
free (friend_req.friends.friends_val);//malloced by xdr
dict_unref (dict);
} else {
- if (friend_req.friends.friends_val)
- free (friend_req.friends.friends_val);//malloced by xdr
+ free (friend_req.friends.friends_val);//malloced by xdr
}
glusterd_friend_sm ();
@@ -3032,7 +2520,14 @@ out:
}
int
-glusterd_handle_probe_query (rpcsvc_request_t *req)
+glusterd_handle_friend_update (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_friend_update);
+}
+
+int
+__glusterd_handle_probe_query (rpcsvc_request_t *req)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -3046,13 +2541,14 @@ glusterd_handle_probe_query (rpcsvc_request_t *req)
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_probe_req (req->msg[0], &probe_req)) {
+ ret = xdr_to_generic (req->msg[0], &probe_req,
+ (xdrproc_t)xdr_gd1_mgmt_probe_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
-
this = THIS;
conf = this->private;
@@ -3064,6 +2560,20 @@ glusterd_handle_probe_query (rpcsvc_request_t *req)
gf_log ("glusterd", GF_LOG_INFO,
"Received probe from uuid: %s", uuid_utoa (probe_req.uuid));
+ /* Check for uuid collision and handle it in a user friendly way by
+ * sending the error.
+ */
+ if (!uuid_compare (probe_req.uuid, MY_UUID)) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Peer uuid %s is same as "
+ "local uuid. Please check the uuid of both the peers "
+ "from %s/%s", uuid_utoa (probe_req.uuid),
+ GLUSTERD_DEFAULT_WORKDIR, GLUSTERD_INFO_FILE);
+ rsp.op_ret = -1;
+ rsp.op_errno = GF_PROBE_SAME_UUID;
+ rsp.port = port;
+ goto respond;
+ }
+
ret = glusterd_remote_hostname_get (req, remote_hostname,
sizeof (remote_hostname));
if (ret) {
@@ -3080,7 +2590,7 @@ glusterd_handle_probe_query (rpcsvc_request_t *req)
args.mode = GD_MODE_ON;
ret = glusterd_friend_add (remote_hostname, port,
GD_FRIEND_STATE_PROBE_RCVD,
- NULL, NULL, &peerinfo, 0, &args);
+ NULL, &peerinfo, 0, &args);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Failed to add peer %s",
remote_hostname);
@@ -3088,20 +2598,22 @@ glusterd_handle_probe_query (rpcsvc_request_t *req)
}
}
- uuid_copy (rsp.uuid, conf->uuid);
+respond:
+ uuid_copy (rsp.uuid, MY_UUID);
rsp.hostname = probe_req.hostname;
+ rsp.op_errstr = "";
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_probe_rsp);
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
+ ret = 0;
gf_log ("glusterd", GF_LOG_INFO, "Responded to %s, op_ret: %d, "
- "op_errno: %d, ret: %d", probe_req.hostname,
+ "op_errno: %d, ret: %d", remote_hostname,
rsp.op_ret, rsp.op_errno, ret);
out:
- if (probe_req.hostname)
- free (probe_req.hostname);//malloced by xdr
+ free (probe_req.hostname);//malloced by xdr
glusterd_friend_sm ();
glusterd_op_sm ();
@@ -3109,99 +2621,289 @@ out:
return ret;
}
+int glusterd_handle_probe_query (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_probe_query);
+}
+
int
-glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_stats_volume_req cli_req = {0,};
+ gf_cli_req cli_req = {{0,}};
dict_t *dict = NULL;
- char msg[2048] = {0,};
- gf_boolean_t free_volname = _gf_true;
- int lock_fail = 0;
glusterd_op_t cli_op = GD_OP_PROFILE_VOLUME;
- dict_t *tmp_dict = NULL;
+ char *volname = NULL;
+ int32_t op = 0;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- ret = glusterd_op_set_cli_op (cli_op);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set cli op: %d",
- ret);
- lock_fail = 1;
- goto out;
- }
-
- ret = -1;
- if (!gf_xdr_to_cli_stats_volume_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received volume profile req "
- "for volume %s", cli_req.volname);
+ if (cli_req.dict.dict_len > 0) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len, &dict);
+ }
- dict = dict_new ();
- if (!dict)
- goto out;
- ret = dict_set_dynmstr (dict, "volname", cli_req.volname);
+ ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "volume name set failed");
- snprintf (msg, sizeof (msg), "volume name set failed");
+ snprintf (err_str, sizeof (err_str), "Unable to get volume "
+ "name");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
goto out;
- } else {
- free_volname = _gf_false;
}
- ret = dict_set_int32 (dict, "op", cli_req.op);
+ gf_log (this->name, GF_LOG_INFO, "Received volume profile req "
+ "for volume %s", volname);
+ ret = dict_get_int32 (dict, "op", &op);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "op set failed");
+ snprintf (err_str, sizeof (err_str), "Unable to get operation");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
goto out;
}
- if (cli_req.dict_req.dict_req_len > 0) {
- tmp_dict = dict_new();
- if (!tmp_dict)
- goto out;
- dict_unserialize (cli_req.dict_req.dict_req_val,
- cli_req.dict_req.dict_req_len, &tmp_dict);
+ ret = glusterd_op_begin (req, cli_op, dict, err_str, sizeof (err_str));
- dict_copy (tmp_dict, dict);
+out:
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ free (cli_req.dict.dict_val);
+
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
}
- gf_cmd_log ("Volume stats", "volume : %s, op: %d", cli_req.volname,
- cli_req.op);
- ret = glusterd_op_begin (req, cli_op, dict, _gf_true);
- gf_cmd_log ("Volume stats", " on volume %s, op: %d %s ",
- cli_req.volname, cli_req.op,
- ((ret == 0)? " SUCCEDED":" FAILED"));
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_profile_volume);
+}
+
+int
+__glusterd_handle_getwd (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf1_cli_getwd_rsp rsp = {0,};
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT (req);
+
+ priv = THIS->private;
+ GF_ASSERT (priv);
+
+ gf_log ("glusterd", GF_LOG_INFO, "Received getwd req");
+
+ rsp.wd = priv->workdir;
+
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_getwd_rsp);
+ ret = 0;
-out:
glusterd_friend_sm ();
glusterd_op_sm ();
- if (tmp_dict)
- dict_unref (tmp_dict);
+ return ret;
+}
+
+int
+glusterd_handle_getwd (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_getwd);
+}
+
+int
+__glusterd_handle_mount (rpcsvc_request_t *req)
+{
+ gf1_cli_mount_req mnt_req = {0,};
+ gf1_cli_mount_rsp rsp = {0,};
+ dict_t *dict = NULL;
+ int ret = 0;
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT (req);
+ priv = THIS->private;
+
+ ret = xdr_to_generic (req->msg[0], &mnt_req,
+ (xdrproc_t)xdr_gf1_cli_mount_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ gf_log ("glusterd", GF_LOG_INFO, "Received mount req");
+
+ if (mnt_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
+
+ ret = dict_unserialize (mnt_req.dict.dict_val,
+ mnt_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ rsp.op_ret = -1;
+ rsp.op_errno = -EINVAL;
+ goto out;
+ } else {
+ dict->extra_stdfree = mnt_req.dict.dict_val;
+ }
+ }
+
+ synclock_unlock (&priv->big_lock);
+ rsp.op_ret = glusterd_do_mount (mnt_req.label, dict,
+ &rsp.path, &rsp.op_errno);
+ synclock_lock (&priv->big_lock);
+
+ out:
+ if (!rsp.path)
+ rsp.path = "";
- if (ret && dict)
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_mount_rsp);
+ ret = 0;
+
+ if (dict)
dict_unref (dict);
- if (cli_req.dict_req.dict_req_val)
- free (cli_req.dict_req.dict_req_val);
- if (free_volname)
- free (cli_req.volname); // malloced by xdr
- if (ret) {
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- NULL, "operation failed");
- if (!lock_fail)
- (void) glusterd_opinfo_unlock ();
+ if (*rsp.path)
+ GF_FREE (rsp.path);
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ return ret;
+}
+
+int
+glusterd_handle_mount (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_mount);
+}
+
+int
+__glusterd_handle_umount (rpcsvc_request_t *req)
+{
+ gf1_cli_umount_req umnt_req = {0,};
+ gf1_cli_umount_rsp rsp = {0,};
+ char *mountbroker_root = NULL;
+ char mntp[PATH_MAX] = {0,};
+ char *path = NULL;
+ runner_t runner = {0,};
+ int ret = 0;
+ xlator_t *this = THIS;
+ gf_boolean_t dir_ok = _gf_false;
+ char *pdir = NULL;
+ char *t = NULL;
+ glusterd_conf_t *priv = NULL;
+ GF_ASSERT (req);
+ GF_ASSERT (this);
+ priv = this->private;
+
+ ret = xdr_to_generic (req->msg[0], &umnt_req,
+ (xdrproc_t)xdr_gf1_cli_umount_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ rsp.op_ret = -1;
+ goto out;
}
- gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log ("glusterd", GF_LOG_INFO, "Received umount req");
+
+ if (dict_get_str (this->options, "mountbroker-root",
+ &mountbroker_root) != 0) {
+ rsp.op_errno = ENOENT;
+ goto out;
+ }
+
+ /* check if it is allowed to umount path */
+ path = gf_strdup (umnt_req.path);
+ if (!path) {
+ rsp.op_errno = ENOMEM;
+ goto out;
+ }
+ dir_ok = _gf_false;
+ pdir = dirname (path);
+ t = strtail (pdir, mountbroker_root);
+ if (t && *t == '/') {
+ t = strtail(++t, MB_HIVE);
+ if (t && !*t)
+ dir_ok = _gf_true;
+ }
+ GF_FREE (path);
+ if (!dir_ok) {
+ rsp.op_errno = EACCES;
+ goto out;
+ }
+
+ runinit (&runner);
+ runner_add_args (&runner, "umount", umnt_req.path, NULL);
+ if (umnt_req.lazy)
+ runner_add_arg (&runner, "-l");
+ synclock_unlock (&priv->big_lock);
+ rsp.op_ret = runner_run (&runner);
+ synclock_lock (&priv->big_lock);
+ if (rsp.op_ret == 0) {
+ if (realpath (umnt_req.path, mntp))
+ rmdir (mntp);
+ else {
+ rsp.op_ret = -1;
+ rsp.op_errno = errno;
+ }
+ if (unlink (umnt_req.path) != 0) {
+ rsp.op_ret = -1;
+ rsp.op_errno = errno;
+ }
+ }
+
+ out:
+ if (rsp.op_errno)
+ rsp.op_ret = -1;
+
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_umount_rsp);
+ ret = 0;
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
return ret;
}
int
+glusterd_handle_umount (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_umount);
+}
+
+int
glusterd_friend_remove (uuid_t uuid, char *hostname)
{
int ret = 0;
@@ -3211,6 +2913,9 @@ glusterd_friend_remove (uuid_t uuid, char *hostname)
if (ret)
goto out;
+ ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_WARNING, "Volumes cleanup failed");
ret = glusterd_friend_cleanup (peerinfo);
out:
gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
@@ -3231,8 +2936,9 @@ glusterd_rpc_create (struct rpc_clnt **rpc,
GF_ASSERT (this);
GF_ASSERT (options);
- new_rpc = rpc_clnt_new (options, this->ctx, this->name);
+ /* TODO: is 32 enough? or more ? */
+ new_rpc = rpc_clnt_new (options, this->ctx, this->name, 16);
if (!new_rpc)
goto out;
@@ -3248,7 +2954,7 @@ out:
}
}
- gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "returning %d", ret);
return ret;
}
@@ -3271,8 +2977,8 @@ glusterd_transport_keepalive_options_get (int *interval, int *time)
}
int
-glusterd_transport_inet_keepalive_options_build (dict_t **options,
- const char *hostname, int port)
+glusterd_transport_inet_options_build (dict_t **options, const char *hostname,
+ int port)
{
dict_t *dict = NULL;
int32_t interval = -1;
@@ -3284,10 +2990,25 @@ glusterd_transport_inet_keepalive_options_build (dict_t **options,
if (!port)
port = GLUSTERD_DEFAULT_PORT;
+
+ /* Build default transport options */
ret = rpc_transport_inet_options_build (&dict, hostname, port);
if (ret)
goto out;
+ /* Set frame-timeout to 10 mins. The default timeout of 30 mins is too long
+ * when compared to the 2 min cli timeout. This ensures users don't
+ * wait too long after the cli times out before being able to resume normal
+ * operations
+ */
+ ret = dict_set_int32 (dict, "frame-timeout", 600);
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Failed to set frame-timeout");
+ goto out;
+ }
+
+ /* Set keepalive options */
glusterd_transport_keepalive_options_get (&interval, &time);
if ((interval > 0) || (time > 0))
@@ -3299,83 +3020,117 @@ out:
}
int
-glusterd_friend_add (const char *hoststr, int port,
- glusterd_friend_sm_state_t state,
- uuid_t *uuid,
- struct rpc_clnt *rpc,
- glusterd_peerinfo_t **friend,
- gf_boolean_t restore,
- glusterd_peerctx_args_t *args)
+glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
+ glusterd_peerctx_args_t *args)
{
- int ret = 0;
- glusterd_conf_t *conf = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peerctx_t *peerctx = NULL;
- gf_boolean_t is_allocated = _gf_false;
dict_t *options = NULL;
-
- conf = THIS->private;
- GF_ASSERT (conf)
- GF_ASSERT (hoststr);
+ int ret = -1;
+ glusterd_peerctx_t *peerctx = NULL;
+ data_t *data = NULL;
peerctx = GF_CALLOC (1, sizeof (*peerctx), gf_gld_mt_peerctx_t);
- if (!peerctx) {
- ret = -1;
+ if (!peerctx)
goto out;
- }
if (args)
peerctx->args = *args;
- ret = glusterd_peerinfo_new (&peerinfo, state, uuid, hoststr);
+ peerctx->peerinfo = peerinfo;
+
+ ret = glusterd_transport_inet_options_build (&options,
+ peerinfo->hostname,
+ peerinfo->port);
if (ret)
goto out;
- peerctx->peerinfo = peerinfo;
- if (friend)
- *friend = peerinfo;
- if (!rpc) {
- ret = glusterd_transport_inet_keepalive_options_build (&options,
- hoststr, port);
- if (ret)
- goto out;
- ret = glusterd_rpc_create (&rpc, options,
- glusterd_peer_rpc_notify,
- peerctx);
- if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "failed to create rpc for"
- " peer %s", (char*)hoststr);
- goto out;
+ /*
+ * For simulated multi-node testing, we need to make sure that we
+ * create our RPC endpoint with the same address that the peer would
+ * use to reach us.
+ */
+ if (this->options) {
+ data = dict_get(this->options,"transport.socket.bind-address");
+ if (data) {
+ ret = dict_set(options,
+ "transport.socket.source-addr",data);
}
- is_allocated = _gf_true;
}
- peerinfo->rpc = rpc;
+ ret = glusterd_rpc_create (&peerinfo->rpc, options,
+ glusterd_peer_rpc_notify, peerctx);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to create rpc for"
+ " peer %s", peerinfo->hostname);
+ goto out;
+ }
+ peerctx = NULL;
+ ret = 0;
+out:
+ GF_FREE (peerctx);
+ return ret;
+}
- if (!restore)
- ret = glusterd_store_peerinfo (peerinfo);
+int
+glusterd_friend_add (const char *hoststr, int port,
+ glusterd_friend_sm_state_t state,
+ uuid_t *uuid,
+ glusterd_peerinfo_t **friend,
+ gf_boolean_t restore,
+ glusterd_peerctx_args_t *args)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
- list_add_tail (&peerinfo->uuid_list, &conf->peers);
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT (conf);
+ GF_ASSERT (hoststr);
-out:
+ ret = glusterd_peerinfo_new (friend, state, uuid, hoststr, port);
if (ret) {
- if (peerctx)
- GF_FREE (peerctx);
- if (is_allocated && rpc) {
- (void) rpc_clnt_unref (rpc);
+ goto out;
+ }
+
+ /*
+ * We can't add to the list after calling glusterd_friend_rpc_create,
+ * even if it succeeds, because by then the callback to take it back
+ * off and free might have happened already (notably in the case of an
+ * invalid peer name). That would mean we're adding something that had
+ * just been freed, and we're likely to crash later.
+ */
+ list_add_tail (&(*friend)->uuid_list, &conf->peers);
+
+ //restore needs to first create the list of peers, then create rpcs
+ //to keep track of quorum in a race-free manner. During restore, rpc-create
+ //for each peer calls rpc_notify while the friend-list is only partially
+ //constructed, leading to wrong quorum calculations.
+ if (!restore) {
+ ret = glusterd_store_peerinfo (*friend);
+ if (ret == 0) {
+ synclock_unlock (&conf->big_lock);
+ ret = glusterd_friend_rpc_create (this, *friend, args);
+ synclock_lock (&conf->big_lock);
}
- if (peerinfo) {
- peerinfo->rpc = NULL;
- (void) glusterd_friend_cleanup (peerinfo);
+ else {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to store peerinfo");
}
}
- gf_log ("glusterd", GF_LOG_INFO, "connect returned %d", ret);
+ if (ret) {
+ (void) glusterd_friend_cleanup (*friend);
+ *friend = NULL;
+ }
+
+out:
+ gf_log (this->name, GF_LOG_INFO, "connect returned %d", ret);
return ret;
}
int
-glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
+glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
+ dict_t *dict)
{
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -3391,9 +3146,10 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
" for host: %s (%d)", hoststr, port);
args.mode = GD_MODE_ON;
args.req = req;
+ args.dict = dict;
ret = glusterd_friend_add ((char *)hoststr, port,
GD_FRIEND_STATE_DEFAULT,
- NULL, NULL, &peerinfo, 0, &args);
+ NULL, &peerinfo, 0, &args);
if ((!ret) && (!peerinfo->connected)) {
ret = GLUSTERD_CONNECTION_AWAITED;
}
@@ -3411,11 +3167,12 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
event->peerinfo = peerinfo;
ret = glusterd_friend_sm_inject_event (event);
glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_SUCCESS,
- (char*)hoststr, port);
+ NULL, (char*)hoststr,
+ port, dict);
}
} else {
- glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND,
- (char*)hoststr, port);
+ glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
+ (char*)hoststr, port, dict);
}
out:
@@ -3425,7 +3182,7 @@ out:
int
glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
- uuid_t uuid)
+ uuid_t uuid, dict_t *dict)
{
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -3466,6 +3223,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
ctx->hostname = gf_strdup (hoststr);
ctx->port = port;
ctx->req = req;
+ ctx->dict = dict;
event->ctx = ctx;
@@ -3500,11 +3258,11 @@ glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int por
conf = this->private;
- uuid_copy (rsp.uuid, conf->uuid);
+ uuid_copy (rsp.uuid, MY_UUID);
rsp.hostname = hostname;
rsp.port = port;
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_friend_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
gf_log ("glusterd", GF_LOG_INFO,
"Responded to %s (%d), ret: %d", hostname, port, ret);
@@ -3513,76 +3271,236 @@ glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int por
int
-glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *hostname, int port,
- int32_t op_ret, int32_t op_errno)
+glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
+ char *remote_hostname, int port, int32_t op_ret,
+ int32_t op_errno)
{
gd1_mgmt_friend_rsp rsp = {{0}, };
int32_t ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
- GF_ASSERT (hostname);
+ GF_ASSERT (myhostname);
this = THIS;
GF_ASSERT (this);
conf = this->private;
- uuid_copy (rsp.uuid, conf->uuid);
+ uuid_copy (rsp.uuid, MY_UUID);
rsp.op_ret = op_ret;
rsp.op_errno = op_errno;
- rsp.hostname = gf_strdup (hostname);
+ rsp.hostname = gf_strdup (myhostname);
rsp.port = port;
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_friend_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
gf_log ("glusterd", GF_LOG_INFO,
- "Responded to %s (%d), ret: %d", hostname, port, ret);
- if (rsp.hostname)
- GF_FREE (rsp.hostname)
+ "Responded to %s (%d), ret: %d", remote_hostname, port, ret);
+ GF_FREE (rsp.hostname);
return ret;
}
+static void
+set_probe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
+ size_t len, char *hostname, int port)
+{
+ if ((op_errstr) && (strcmp (op_errstr, ""))) {
+ snprintf (errstr, len, "%s", op_errstr);
+ return;
+ }
+
+ if (!op_ret) {
+ switch (op_errno) {
+ case GF_PROBE_LOCALHOST:
+ snprintf (errstr, len, "Probe on localhost not "
+ "needed");
+ break;
+
+ case GF_PROBE_FRIEND:
+ snprintf (errstr, len, "Host %s port %d already"
+ " in peer list", hostname, port);
+ break;
+
+ default:
+ if (op_errno != 0)
+ snprintf (errstr, len, "Probe returned "
+ "with unknown errno %d",
+ op_errno);
+ break;
+ }
+ } else {
+ switch (op_errno) {
+ case GF_PROBE_ANOTHER_CLUSTER:
+ snprintf (errstr, len, "%s is already part of "
+ "another cluster", hostname);
+ break;
+
+ case GF_PROBE_VOLUME_CONFLICT:
+ snprintf (errstr, len, "At least one volume on "
+ "%s conflicts with existing volumes "
+ "in the cluster", hostname);
+ break;
+
+ case GF_PROBE_UNKNOWN_PEER:
+ snprintf (errstr, len, "%s responded with "
+ "'unknown peer' error, this could "
+ "happen if %s doesn't have localhost "
+ "in its peer database", hostname,
+ hostname);
+ break;
+
+ case GF_PROBE_ADD_FAILED:
+ snprintf (errstr, len, "Failed to add peer "
+ "information on %s", hostname);
+ break;
+
+ case GF_PROBE_SAME_UUID:
+ snprintf (errstr, len, "Peer uuid (host %s) is "
+ "same as local uuid", hostname);
+ break;
+
+ case GF_PROBE_QUORUM_NOT_MET:
+ snprintf (errstr, len, "Cluster quorum is not "
+ "met. Changing peers is not allowed "
+ "in this state");
+ break;
+
+ default:
+ snprintf (errstr, len, "Probe returned with "
+ "unknown errno %d", op_errno);
+ break;
+ }
+ }
+}
+
int
glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
- int32_t op_errno, char *hostname, int port)
+ int32_t op_errno, char *op_errstr, char *hostname,
+ int port, dict_t *dict)
{
- gf1_cli_probe_rsp rsp = {0, };
+ gf_cli_rsp rsp = {0,};
int32_t ret = -1;
+ char errstr[2048] = {0,};
+ char *cmd_str = NULL;
+ xlator_t *this = THIS;
GF_ASSERT (req);
+ GF_ASSERT (this);
+
+ (void) set_probe_error_str (op_ret, op_errno, op_errstr, errstr,
+ sizeof (errstr), hostname, port);
+
+ if (dict) {
+ ret = dict_get_str (dict, "cmd-str", &cmd_str);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "command string");
+ }
rsp.op_ret = op_ret;
rsp.op_errno = op_errno;
- rsp.hostname = hostname;
- rsp.port = port;
+ rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
+
+ gf_cmd_log ("", "%s : %s %s %s", cmd_str,
+ (op_ret) ? "FAILED" : "SUCCESS",
+ (errstr[0] != '\0') ? ":" : " ",
+ (errstr[0] != '\0') ? errstr : " ");
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_probe_rsp);
+ (xdrproc_t)xdr_gf_cli_rsp);
- gf_log ("glusterd", GF_LOG_INFO, "Responded to CLI, ret: %d",ret);
+ if (dict)
+ dict_unref (dict);
+ gf_log (this->name, GF_LOG_DEBUG, "Responded to CLI, ret: %d",ret);
return ret;
}
+static void
+set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
+ size_t len, char *hostname)
+{
+ if ((op_errstr) && (strcmp (op_errstr, ""))) {
+ snprintf (errstr, len, "%s", op_errstr);
+ return;
+ }
+
+ if (op_ret) {
+ switch (op_errno) {
+ case GF_DEPROBE_LOCALHOST:
+ snprintf (errstr, len, "%s is localhost",
+ hostname);
+ break;
+
+ case GF_DEPROBE_NOT_FRIEND:
+ snprintf (errstr, len, "%s is not part of "
+ "cluster", hostname);
+ break;
+
+ case GF_DEPROBE_BRICK_EXIST:
+ snprintf (errstr, len, "Brick(s) with the peer "
+ "%s exist in cluster", hostname);
+ break;
+
+ case GF_DEPROBE_FRIEND_DOWN:
+ snprintf (errstr, len, "One of the peers is "
+ "probably down. Check with "
+ "'peer status'");
+ break;
+
+ case GF_DEPROBE_QUORUM_NOT_MET:
+ snprintf (errstr, len, "Cluster quorum is not "
+ "met. Changing peers is not allowed "
+ "in this state");
+ break;
+
+ default:
+ snprintf (errstr, len, "Detach returned with "
+ "unknown errno %d", op_errno);
+ break;
+
+ }
+ }
+}
+
+
int
glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret,
- int32_t op_errno, char *hostname)
+ int32_t op_errno, char *op_errstr,
+ char *hostname, dict_t *dict)
{
- gf1_cli_deprobe_rsp rsp = {0, };
+ gf_cli_rsp rsp = {0,};
int32_t ret = -1;
+ char *cmd_str = NULL;
+ char errstr[2048] = {0,};
GF_ASSERT (req);
+ (void) set_deprobe_error_str (op_ret, op_errno, op_errstr, errstr,
+ sizeof (errstr), hostname);
+
+ if (dict) {
+ ret = dict_get_str (dict, "cmd-str", &cmd_str);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to get "
+ "command string");
+ }
+
rsp.op_ret = op_ret;
rsp.op_errno = op_errno;
- rsp.hostname = hostname;
+ rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
+
+ gf_cmd_log ("", "%s : %s %s %s", cmd_str,
+ (op_ret) ? "FAILED" : "SUCCESS",
+ (errstr[0] != '\0') ? ":" : " ",
+ (errstr[0] != '\0') ? errstr : " ");
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_deprobe_rsp);
+ (xdrproc_t)xdr_gf_cli_rsp);
- gf_log ("glusterd", GF_LOG_INFO, "Responded to CLI, ret: %d",ret);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to CLI, ret: %d",ret);
return ret;
}
@@ -3596,39 +3514,52 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
int32_t count = 0;
dict_t *friends = NULL;
gf1_cli_peer_list_rsp rsp = {0,};
+ char my_uuid_str[64] = {0,};
+ char key[256] = {0,};
priv = THIS->private;
GF_ASSERT (priv);
- if (!list_empty (&priv->peers)) {
- friends = dict_new ();
- if (!friends) {
- gf_log ("", GF_LOG_WARNING, "Out of Memory");
- goto out;
- }
- } else {
- ret = 0;
+ friends = dict_new ();
+ if (!friends) {
+ gf_log ("", GF_LOG_WARNING, "Out of Memory");
goto out;
}
-
- if (flags == GF_CLI_LIST_ALL) {
- list_for_each_entry (entry, &priv->peers, uuid_list) {
- count++;
- ret = glusterd_add_peer_detail_to_dict (entry,
+ if (!list_empty (&priv->peers)) {
+ list_for_each_entry (entry, &priv->peers, uuid_list) {
+ count++;
+ ret = glusterd_add_peer_detail_to_dict (entry,
friends, count);
- if (ret)
- goto out;
+ if (ret)
+ goto out;
+ }
+ }
- }
+ if (flags == GF_CLI_LIST_POOL_NODES) {
+ count++;
+ snprintf (key, 256, "friend%d.uuid", count);
+ uuid_utoa_r (MY_UUID, my_uuid_str);
+ ret = dict_set_str (friends, key, my_uuid_str);
+ if (ret)
+ goto out;
- ret = dict_set_int32 (friends, "count", count);
+ snprintf (key, 256, "friend%d.hostname", count);
+ ret = dict_set_str (friends, key, "localhost");
+ if (ret)
+ goto out;
- if (ret)
- goto out;
+ snprintf (key, 256, "friend%d.connected", count);
+ ret = dict_set_int32 (friends, key, 1);
+ if (ret)
+ goto out;
}
+ ret = dict_set_int32 (friends, "count", count);
+ if (ret)
+ goto out;
+
ret = dict_allocate_and_serialize (friends, &rsp.friends.friends_val,
- (size_t *)&rsp.friends.friends_len);
+ &rsp.friends.friends_len);
if (ret)
goto out;
@@ -3641,10 +3572,10 @@ out:
rsp.op_ret = ret;
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_peer_list_rsp);
- if (rsp.friends.friends_val)
- GF_FREE (rsp.friends.friends_val);
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
+ ret = 0;
+ GF_FREE (rsp.friends.friends_val);
return ret;
}
@@ -3657,7 +3588,7 @@ glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
glusterd_volinfo_t *entry = NULL;
int32_t count = 0;
dict_t *volumes = NULL;
- gf1_cli_get_vol_rsp rsp = {0,};
+ gf_cli_rsp rsp = {0,};
char *volname = NULL;
priv = THIS->private;
@@ -3734,8 +3665,8 @@ respond:
ret = dict_set_int32 (volumes, "count", count);
if (ret)
goto out;
- ret = dict_allocate_and_serialize (volumes, &rsp.volumes.volumes_val,
- (size_t *)&rsp.volumes.volumes_len);
+ ret = dict_allocate_and_serialize (volumes, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
if (ret)
goto out;
@@ -3744,29 +3675,230 @@ respond:
out:
rsp.op_ret = ret;
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_peer_list_rsp);
+ rsp.op_errstr = "";
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp);
+ ret = 0;
if (volumes)
dict_unref (volumes);
- if (rsp.volumes.volumes_val)
- GF_FREE (rsp.volumes.volumes_val);
+ GF_FREE (rsp.dict.dict_val);
return ret;
}
int
-glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event,
- void *data)
+__glusterd_handle_status_volume (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ uint32_t cmd = 0;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ gf_cli_req cli_req = {{0,}};
+ glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len > 0) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "unserialize buffer");
+ snprintf (err_str, sizeof (err_str), "Unable to decode "
+ "the command");
+ goto out;
+ }
+
+ }
+
+ ret = dict_get_uint32 (dict, "cmd", &cmd);
+ if (ret)
+ goto out;
+
+ if (!(cmd & GF_CLI_STATUS_ALL)) {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Unable to get "
+ "volume name");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_INFO,
+ "Received status volume req for volume %s", volname);
+
+ }
+
+ ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
+
+out:
+
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
+ }
+ free (cli_req.dict.dict_val);
+
+ return ret;
+}
+
+int
+glusterd_handle_status_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_status_volume);
+}
+
+int
+__glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{0,}};
+ glusterd_op_t cli_op = GD_OP_CLEARLOCKS_VOLUME;
+ char *volname = NULL;
+ dict_t *dict = NULL;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = -1;
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ dict = dict_new ();
+
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to unserialize req-buffer to"
+ " dictionary");
+ snprintf (err_str, sizeof (err_str), "unable to decode "
+ "the command");
+ goto out;
+ }
+
+ } else {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Empty cli request.");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Unable to get volume "
+ "name");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_INFO, "Received clear-locks volume req "
+ "for volume %s", volname);
+
+ ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict);
+
+out:
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
+ }
+ free (cli_req.dict.dict_val);
+
+ return ret;
+}
+
+int
+glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_clearlocks_volume);
+}
+
+static int
+get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
+{
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volid_str = NULL;
+ char *brick = NULL;
+ char *brickid_dup = NULL;
+ uuid_t volid = {0};
+ int ret = -1;
+
+ brickid_dup = gf_strdup (brickid);
+ if (!brickid_dup)
+ goto out;
+
+ volid_str = brickid_dup;
+ brick = strchr (brickid_dup, ':');
+ if (!brick)
+ goto out;
+ *brick = '\0';
+ brick++;
+
+ uuid_parse (volid_str, volid);
+ ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo);
+ if (ret) {
+ /* Check if it is a snapshot volume */
+ ret = glusterd_snap_volinfo_find_by_volume_id (volid, &volinfo);
+ if (ret)
+ goto out;
+ }
+
+ ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
+ brickinfo);
+ if (ret)
+ goto out;
+
+ ret = 0;
+out:
+ GF_FREE (brickid_dup);
+ return ret;
+}
+
+int
+__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
int ret = 0;
+ char *brickid = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
- brickinfo = mydata;
- if (!brickinfo)
+ brickid = mydata;
+ if (!brickid)
+ return 0;
+
+ ret = get_brickinfo_from_brickid (brickid, &brickinfo);
+ if (ret)
return 0;
this = THIS;
@@ -3776,17 +3908,21 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
switch (event) {
case RPC_CLNT_CONNECT:
- gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
+ gf_log (this->name, GF_LOG_DEBUG, "Connected to %s:%s",
+ brickinfo->hostname, brickinfo->path);
glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
break;
case RPC_CLNT_DISCONNECT:
- gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT");
+ if (GF_BRICK_STARTED == brickinfo->status)
+ gf_log (this->name, GF_LOG_INFO, "Disconnected from "
+ "%s:%s", brickinfo->hostname, brickinfo->path);
+
glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
- if (brickinfo->timer && brickinfo->timer->callbk)
- brickinfo->timer->callbk (brickinfo->timer->data);
+ if (rpc_clnt_is_disabled (rpc))
+ GF_FREE (brickid);
break;
default:
@@ -3799,15 +3935,116 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
}
int
-glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event,
- void *data)
+glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ return glusterd_big_locked_notify (rpc, mydata, event, data,
+ __glusterd_brick_rpc_notify);
+}
+
+int
+__glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
+ char *server = NULL;
int ret = 0;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peerctx_t *peerctx = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ server = mydata;
+ if (!server)
+ return 0;
+
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
+ (void) glusterd_nodesvc_set_online_status (server, _gf_true);
+ ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
+
+ break;
+
+ case RPC_CLNT_DISCONNECT:
+ gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT");
+ (void) glusterd_nodesvc_set_online_status (server, _gf_false);
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+ break;
+ }
+
+ return ret;
+}
+
+int
+glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ return glusterd_big_locked_notify (rpc, mydata, event, data,
+ __glusterd_nodesvc_rpc_notify);
+}
+
+int
+glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
+{
+ int ret = -1;
+ glusterd_friend_sm_event_t *new_event = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ rpcsvc_request_t *req = NULL;
+ char *errstr = NULL;
+ dict_t *dict = NULL;
+
+ GF_ASSERT (peerctx);
+
+ peerinfo = peerctx->peerinfo;
+ req = peerctx->args.req;
+ dict = peerctx->args.dict;
+ errstr = peerctx->errstr;
+
+ ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND,
+ &new_event);
+ if (!ret) {
+ if (!req) {
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "Unable to find the request for responding "
+ "to User (%s)", peerinfo->hostname);
+ goto out;
+ }
+
+ glusterd_xfer_cli_probe_resp (req, -1, ENOTCONN, errstr,
+ peerinfo->hostname,
+ peerinfo->port, dict);
+
+ new_event->peerinfo = peerinfo;
+ ret = glusterd_friend_sm_inject_event (new_event);
+
+ } else {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to create event for removing peer %s",
+ peerinfo->hostname);
+ }
+
+out:
+ return ret;
+}
+
+int
+__glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_t *peerctx = NULL;
+ gf_boolean_t quorum_action = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
peerctx = mydata;
if (!peerctx)
@@ -3822,23 +4059,51 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
{
gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
peerinfo->connected = 1;
+ peerinfo->quorum_action = _gf_true;
- ret = glusterd_peer_handshake (this, rpc, peerctx);
+ ret = glusterd_peer_dump_version (this, rpc, peerctx);
if (ret)
gf_log ("", GF_LOG_ERROR, "glusterd handshake failed");
break;
}
case RPC_CLNT_DISCONNECT:
+ {
+ gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT %d",
+ peerinfo->state.state);
+
+ if (peerinfo->connected) {
+ list_for_each_entry (volinfo, &conf->volumes, vol_list) {
+ ret = glusterd_mgmt_v3_unlock (volinfo->volname,
+ peerinfo->uuid,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_TRACE,
+ "Lock not released for %s",
+ volinfo->volname);
+ }
- //Inject friend disconnected here
+ ret = 0;
+ }
- gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT");
- peerinfo->connected = 0;
+ if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
+ (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
+ peerinfo->quorum_contrib = QUORUM_DOWN;
+ quorum_action = _gf_true;
+ peerinfo->quorum_action = _gf_false;
+ }
- //default_notify (this, GF_EVENT_CHILD_DOWN, NULL);
- break;
+ /* Remove peer if it is not a friend and connection/handshake
+ * fails, and notify cli. Happens only during probe.
+ */
+ if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
+ glusterd_friend_remove_notify (peerctx);
+ goto out;
+ }
+ peerinfo->connected = 0;
+ break;
+ }
default:
gf_log (this->name, GF_LOG_TRACE,
"got some other RPC event %d", event);
@@ -3846,10 +4111,23 @@ glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
break;
}
+out:
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ if (quorum_action)
+ glusterd_do_quorum_action ();
return ret;
}
int
+glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ return glusterd_big_locked_notify (rpc, mydata, event, data,
+ __glusterd_peer_rpc_notify);
+}
+
+int
glusterd_null (rpcsvc_request_t *req)
{
@@ -3857,98 +4135,103 @@ glusterd_null (rpcsvc_request_t *req)
}
rpcsvc_actor_t gd_svc_mgmt_actors[] = {
- [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, NULL},
- [GLUSTERD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_MGMT_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL},
- [GLUSTERD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_MGMT_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, NULL},
- [GLUSTERD_MGMT_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_MGMT_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, NULL},
- [GLUSTERD_MGMT_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_MGMT_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, NULL},
- [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, NULL},
- [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, NULL},
- [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, NULL},
- [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, NULL},
+ [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_mgmt_prog = {
.progname = "GlusterD svc mgmt",
.prognum = GD_MGMT_PROGRAM,
.progver = GD_MGMT_VERSION,
- .numactors = GD_MGMT_PROCCNT,
+ .numactors = GLUSTERD_MGMT_MAXVALUE,
.actors = gd_svc_mgmt_actors,
+ .synctask = _gf_true,
};
-rpcsvc_actor_t gd_svc_cli_actors[] = {
- [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, NULL},
- [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL,NULL},
- [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume_v2, NULL,NULL},
- [GLUSTER_CLI_DEPROBE] = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, NULL},
- [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, NULL},
- [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, NULL},
- [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, NULL},
- [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, NULL},
- [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, NULL},
- [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, NULL},
- [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, NULL},
- [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, NULL},
- [GLUSTER_CLI_LOG_FILENAME] = { "LOG FILENAME", GLUSTER_CLI_LOG_FILENAME, glusterd_handle_log_filename, NULL, NULL},
- [GLUSTER_CLI_LOG_LOCATE] = { "LOG LOCATE", GLUSTER_CLI_LOG_LOCATE, glusterd_handle_log_locate, NULL, NULL},
- [GLUSTER_CLI_LOG_ROTATE] = { "LOG FILENAME", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, NULL},
- [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, NULL},
- [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, NULL},
- [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, NULL},
- [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, NULL},
- [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, NULL},
- [GLUSTER_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, NULL},
- [GLUSTER_CLI_QUOTA] = { "QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota, NULL, NULL},
+rpcsvc_actor_t gd_svc_peer_actors[] = {
+ [GLUSTERD_FRIEND_NULL] = { "NULL", GLUSTERD_FRIEND_NULL, glusterd_null, NULL, 0, DRC_NA},
+ [GLUSTERD_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_PROBE_QUERY, glusterd_handle_probe_query, NULL, 0, DRC_NA},
+ [GLUSTERD_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, 0, DRC_NA},
+ [GLUSTERD_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, 0, DRC_NA},
+ [GLUSTERD_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, 0, DRC_NA},
+};
+struct rpcsvc_program gd_svc_peer_prog = {
+ .progname = "GlusterD svc peer",
+ .prognum = GD_FRIEND_PROGRAM,
+ .progver = GD_FRIEND_VERSION,
+ .numactors = GLUSTERD_FRIEND_MAXVALUE,
+ .actors = gd_svc_peer_actors,
+ .synctask = _gf_false,
+};
+
+rpcsvc_actor_t gd_svc_cli_actors[] = {
+ [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_DEPROBE] = { "CLI_DEPROBE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_UUID_RESET] = { "UUID_RESET", GLUSTER_CLI_UUID_RESET, glusterd_handle_cli_uuid_reset, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LOG_ROTATE] = { "LOG_ROTATE", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_QUOTA] = { "QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_HEAL_VOLUME] = { "HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME, glusterd_handle_cli_heal_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_cli_prog = {
.progname = "GlusterD svc cli",
.prognum = GLUSTER_CLI_PROGRAM,
.progver = GLUSTER_CLI_VERSION,
- .numactors = GLUSTER_CLI_PROCCNT,
+ .numactors = GLUSTER_CLI_MAXVALUE,
.actors = gd_svc_cli_actors,
+ .synctask = _gf_true,
};
-/* Keeping below programs for backword compatibility */
-
-rpcsvc_actor_t glusterd1_mgmt_actors[] = {
- [GD_MGMT_NULL] = { "NULL", GD_MGMT_NULL, glusterd_null, NULL, NULL},
- [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GD_MGMT_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL},
- [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GD_MGMT_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, NULL},
- [GD_MGMT_FRIEND_REMOVE] = { "FRIEND_REMOVE", GD_MGMT_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, NULL},
- [GD_MGMT_FRIEND_UPDATE] = { "FRIEND_UPDATE", GD_MGMT_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, NULL},
- [GD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, NULL},
- [GD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, NULL},
- [GD_MGMT_STAGE_OP] = { "STAGE_OP", GD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, NULL},
- [GD_MGMT_COMMIT_OP] = { "COMMIT_OP", GD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, NULL},
- [GD_MGMT_CLI_PROBE] = { "CLI_PROBE", GD_MGMT_CLI_PROBE, glusterd_handle_cli_probe, NULL, NULL},
- [GD_MGMT_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GD_MGMT_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL,NULL},
- [GD_MGMT_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GD_MGMT_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL,NULL},
- [GD_MGMT_CLI_DEPROBE] = { "FRIEND_REMOVE", GD_MGMT_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, NULL},
- [GD_MGMT_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GD_MGMT_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, NULL},
- [GD_MGMT_CLI_START_VOLUME] = { "START_VOLUME", GD_MGMT_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, NULL},
- [GD_MGMT_CLI_STOP_VOLUME] = { "STOP_VOLUME", GD_MGMT_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, NULL},
- [GD_MGMT_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GD_MGMT_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, NULL},
- [GD_MGMT_CLI_GET_VOLUME] = { "GET_VOLUME", GD_MGMT_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, NULL},
- [GD_MGMT_CLI_ADD_BRICK] = { "ADD_BRICK", GD_MGMT_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, NULL},
- [GD_MGMT_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GD_MGMT_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, NULL},
- [GD_MGMT_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GD_MGMT_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, NULL},
- [GD_MGMT_CLI_LOG_FILENAME] = { "LOG FILENAME", GD_MGMT_CLI_LOG_FILENAME, glusterd_handle_log_filename, NULL, NULL},
- [GD_MGMT_CLI_LOG_LOCATE] = { "LOG LOCATE", GD_MGMT_CLI_LOG_LOCATE, glusterd_handle_log_locate, NULL, NULL},
- [GD_MGMT_CLI_LOG_ROTATE] = { "LOG FILENAME", GD_MGMT_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, NULL},
- [GD_MGMT_CLI_SET_VOLUME] = { "SET_VOLUME", GD_MGMT_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, NULL},
- [GD_MGMT_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GD_MGMT_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, NULL},
- [GD_MGMT_CLI_RESET_VOLUME] = { "RESET_VOLUME", GD_MGMT_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, NULL},
- [GD_MGMT_CLI_FSM_LOG] = { "FSM_LOG", GD_MGMT_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, NULL},
- [GD_MGMT_CLI_GSYNC_SET] = {"GSYNC_SET", GD_MGMT_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, NULL},
- [GD_MGMT_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GD_MGMT_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, NULL}
+/* This is a minimal RPC prog, which contains only the readonly RPC procs from
+ * the cli rpcsvc
+ */
+rpcsvc_actor_t gd_svc_cli_actors_ro[] = {
+ [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
};
-struct rpcsvc_program glusterd1_mop_prog = {
- .progname = "GlusterD0.0.1",
- .prognum = GLUSTERD1_MGMT_PROGRAM,
- .progver = GLUSTERD1_MGMT_VERSION,
- .numactors = GLUSTERD1_MGMT_PROCCNT,
- .actors = glusterd1_mgmt_actors,
+struct rpcsvc_program gd_svc_cli_prog_ro = {
+ .progname = "GlusterD svc cli read-only",
+ .prognum = GLUSTER_CLI_PROGRAM,
+ .progver = GLUSTER_CLI_VERSION,
+ .numactors = GLUSTER_CLI_MAXVALUE,
+ .actors = gd_svc_cli_actors_ro,
+ .synctask = _gf_true,
};