Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c | 3992
1 file changed, 3031 insertions(+), 961 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 3161849e4..71d076624 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -1,22 +1,12 @@
/*
- Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
@@ -36,17 +26,21 @@
#include "compat.h"
#include "compat-errno.h"
#include "statedump.h"
+#include "run.h"
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
+#include "glusterd-locks.h"
-#include "glusterd1.h"
-#include "cli1.h"
-#include "rpc-clnt.h"
#include "glusterd1-xdr.h"
+#include "cli1-xdr.h"
+#include "xdr-generic.h"
+#include "rpc-clnt.h"
+#include "glusterd-volgen.h"
+#include "glusterd-mountbroker.h"
#include <sys/resource.h>
#include <inttypes.h>
@@ -54,87 +48,67 @@
#include "defaults.c"
#include "common-utils.h"
+#include "globals.h"
+#include "glusterd-syncop.h"
-static int
-glusterd_friend_find_by_hostname (const char *hoststr,
- glusterd_peerinfo_t **peerinfo)
-{
- int ret = -1;
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *entry = NULL;
- glusterd_peer_hostname_t *name = NULL;
-
- GF_ASSERT (hoststr);
- GF_ASSERT (peerinfo);
-
- *peerinfo = NULL;
- priv = THIS->private;
-
- GF_ASSERT (priv);
-
- list_for_each_entry (entry, &priv->peers, uuid_list) {
- list_for_each_entry (name, &entry->hostnames, hostname_list) {
- if (!strncmp (name->hostname, hoststr,
- 1024)) {
+#ifdef HAVE_BD_XLATOR
+#include <lvm2app.h>
+#endif
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Friend %s found.. state: %d", hoststr,
- entry->state.state);
- *peerinfo = entry;
- return 0;
- }
- }
- }
+extern uuid_t global_txn_id;
+int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event,
+ void *data, rpc_clnt_notify_t notify_fn)
+{
+ glusterd_conf_t *priv = THIS->private;
+ int ret = -1;
+ synclock_lock (&priv->big_lock);
+ ret = notify_fn (rpc, mydata, event, data);
+ synclock_unlock (&priv->big_lock);
return ret;
}
-static int
-glusterd_friend_find_by_uuid (uuid_t uuid,
- glusterd_peerinfo_t **peerinfo)
+int glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn)
{
- int ret = -1;
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *entry = NULL;
-
- GF_ASSERT (peerinfo);
-
- *peerinfo = NULL;
- priv = THIS->private;
-
- GF_ASSERT (priv);
-
- list_for_each_entry (entry, &priv->peers, uuid_list) {
- if (!uuid_compare (entry->uuid, uuid)) {
+ glusterd_conf_t *priv = THIS->private;
+ int ret = -1;
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Friend found.. state: %d",
- entry->state.state);
- *peerinfo = entry;
- return 0;
- }
- }
+ synclock_lock (&priv->big_lock);
+ ret = actor_fn (req);
+ synclock_unlock (&priv->big_lock);
return ret;
}
static int
-glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid, char *hostname, int port)
+glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
+ char *hostname, int port,
+ gd1_mgmt_friend_req *friend_req)
{
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_friend_sm_event_t *event = NULL;
glusterd_friend_req_ctx_t *ctx = NULL;
+ char rhost[UNIX_PATH_MAX + 1] = {0};
+ uuid_t friend_uuid = {0};
+ dict_t *dict = NULL;
+ uuid_parse (uuid_utoa (uuid), friend_uuid);
if (!port)
- port = 6969; // TODO: use define values.
+ port = GF_DEFAULT_BASE_PORT;
- ret = glusterd_friend_find (uuid, hostname, &peerinfo);
+ ret = glusterd_remote_hostname_get (req, rhost, sizeof (rhost));
+ ret = glusterd_friend_find (uuid, rhost, &peerinfo);
if (ret) {
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Unable to find peer");
-
+ ret = glusterd_xfer_friend_add_resp (req, hostname, rhost, port,
+ -1, GF_PROBE_UNKNOWN_PEER);
+ if (friend_req->vols.vols_val) {
+ free (friend_req->vols.vols_val);
+ friend_req->vols.vols_val = NULL;
+ }
+ goto out;
}
ret = glusterd_friend_sm_new_event
@@ -160,10 +134,25 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ctx->hostname = gf_strdup (hostname);
ctx->req = req;
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (friend_req->vols.vols_val,
+ friend_req->vols.vols_len,
+ &dict);
+
+ if (ret)
+ goto out;
+ else
+ dict->extra_stdfree = friend_req->vols.vols_val;
+
+ ctx->vols = dict;
event->ctx = ctx;
ret = glusterd_friend_sm_inject_event (event);
-
if (ret) {
gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, "
"ret = %d", event->event, ret);
@@ -176,14 +165,23 @@ out:
if (0 != ret) {
if (ctx && ctx->hostname)
GF_FREE (ctx->hostname);
- if (ctx)
- GF_FREE (ctx);
+ GF_FREE (ctx);
+ if (dict) {
+ if ((!dict->extra_stdfree) &&
+ friend_req->vols.vols_val)
+ free (friend_req->vols.vols_val);
+ dict_unref (dict);
+ } else {
+ free (friend_req->vols.vols_val);
+ }
+ GF_FREE (event);
+ } else {
+ if (peerinfo && (0 == peerinfo->connected))
+ ret = GLUSTERD_CONNECTION_AWAITED;
}
-
return ret;
}
-
static int
glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
char *hostname, int port)
@@ -194,18 +192,21 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
glusterd_friend_req_ctx_t *ctx = NULL;
if (!port)
- port = 6969; //TODO: use define'd macro
+ port = GF_DEFAULT_BASE_PORT;
ret = glusterd_friend_find (uuid, hostname, &peerinfo);
if (ret) {
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Unable to find peer");
-
+ gf_log ("glusterd", GF_LOG_CRITICAL,
+ "Received remove-friend from unknown peer %s",
+ hostname);
+ ret = glusterd_xfer_friend_remove_resp (req, hostname,
+ port);
+ goto out;
}
ret = glusterd_friend_sm_new_event
- (GD_FRIEND_EVENT_REMOVE_FRIEND, &event);
+ (GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND, &event);
if (ret) {
gf_log ("", GF_LOG_ERROR, "event generation failed: %d", ret);
@@ -243,8 +244,7 @@ out:
if (0 != ret) {
if (ctx && ctx->hostname)
GF_FREE (ctx->hostname);
- if (ctx)
- GF_FREE (ctx);
+ GF_FREE (ctx);
}
return ret;
@@ -257,13 +257,14 @@ glusterd_add_peer_detail_to_dict (glusterd_peerinfo_t *peerinfo,
int ret = -1;
char key[256] = {0, };
+ char *peer_uuid_str = NULL;
GF_ASSERT (peerinfo);
GF_ASSERT (friends);
snprintf (key, 256, "friend%d.uuid", count);
- uuid_unparse (peerinfo->uuid, peerinfo->uuid_str);
- ret = dict_set_str (friends, key, peerinfo->uuid_str);
+ peer_uuid_str = gd_peer_uuid_str (peerinfo);
+ ret = dict_set_str (friends, key, peer_uuid_str);
if (ret)
goto out;
@@ -277,8 +278,19 @@ glusterd_add_peer_detail_to_dict (glusterd_peerinfo_t *peerinfo,
if (ret)
goto out;
+ snprintf (key, 256, "friend%d.stateId", count);
+ ret = dict_set_int32 (friends, key, peerinfo->state.state);
+ if (ret)
+ goto out;
+
snprintf (key, 256, "friend%d.state", count);
- ret = dict_set_int32 (friends, key, (int32_t)peerinfo->state.state);
+ ret = dict_set_str (friends, key,
+ glusterd_friend_sm_state_name_get(peerinfo->state.state));
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "friend%d.connected", count);
+ ret = dict_set_int32 (friends, key, (int32_t)peerinfo->connected);
if (ret)
goto out;
@@ -286,9 +298,34 @@ out:
return ret;
}
+struct args_pack {
+ dict_t *dict;
+ int vol_count;
+ int opt_count;
+};
+
static int
+_build_option_key (dict_t *d, char *k, data_t *v, void *tmp)
+{
+ char reconfig_key[256] = {0, };
+ struct args_pack *pack = NULL;
+ int ret = -1;
+
+ pack = tmp;
+ if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
+ return 0;
+ snprintf (reconfig_key, 256, "volume%d.option.%s",
+ pack->vol_count, k);
+ ret = dict_set_str (pack->dict, reconfig_key, v->data);
+ if (0 == ret)
+ pack->opt_count++;
+
+ return 0;
+}
+
+int
glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
- dict_t *volumes, int count)
+ dict_t *volumes, int count)
{
int ret = -1;
@@ -296,10 +333,23 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo = NULL;
char *buf = NULL;
int i = 1;
+ dict_t *dict = NULL;
+ glusterd_conf_t *priv = NULL;
+ char *volume_id_str = NULL;
+ struct args_pack pack = {0,};
+ xlator_t *this = NULL;
+#ifdef HAVE_BD_XLATOR
+ int caps = 0;
+#endif
GF_ASSERT (volinfo);
GF_ASSERT (volumes);
+ this = THIS;
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
snprintf (key, 256, "volume%d.name", count);
ret = dict_set_str (volumes, key, volinfo->volname);
if (ret)
@@ -315,13 +365,131 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
+ /* As of now, the snap volumes are also displayed as part of
+ volume info command. So this change is to display whether
+ the volume is original volume or the snap_volume. If
+ displaying of snap volumes in volume info o/p is not needed
+ this should be removed.
+ */
+ snprintf (key, 256, "volume%d.snap_volume", count);
+ ret = dict_set_int32 (volumes, key, volinfo->is_snap_volume);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "failed to set whether "
+ "the volume is a snap volume or actual volume (%s)",
+ volinfo->volname);
+ goto out;
+ }
+
snprintf (key, 256, "volume%d.brick_count", count);
ret = dict_set_int32 (volumes, key, volinfo->brick_count);
if (ret)
goto out;
+ snprintf (key, 256, "volume%d.dist_count", count);
+ ret = dict_set_int32 (volumes, key, volinfo->dist_leaf_count);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "volume%d.stripe_count", count);
+ ret = dict_set_int32 (volumes, key, volinfo->stripe_count);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "volume%d.replica_count", count);
+ ret = dict_set_int32 (volumes, key, volinfo->replica_count);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "volume%d.transport", count);
+ ret = dict_set_int32 (volumes, key, volinfo->transport_type);
+ if (ret)
+ goto out;
+
+ volume_id_str = gf_strdup (uuid_utoa (volinfo->volume_id));
+ if (!volume_id_str)
+ goto out;
+
+ snprintf (key, sizeof (key), "volume%d.volume_id", count);
+ ret = dict_set_dynstr (volumes, key, volume_id_str);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "volume%d.rebalance", count);
+ ret = dict_set_int32 (volumes, key, volinfo->rebal.defrag_cmd);
+ if (ret)
+ goto out;
+
+#ifdef HAVE_BD_XLATOR
+ if (volinfo->caps) {
+ caps = 0;
+ snprintf (key, 256, "volume%d.xlator0", count);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ if (volinfo->caps & CAPS_BD)
+ snprintf (buf, 256, "BD");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+
+ if (volinfo->caps & CAPS_THIN) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "thin");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ if (volinfo->caps & CAPS_OFFLOAD_COPY) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "offload_copy");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "offload_snapshot");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ }
+#endif
+
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
char brick[1024] = {0,};
+ char brick_uuid[64] = {0,};
snprintf (key, 256, "volume%d.brick%d", count, i);
snprintf (brick, 1024, "%s:%s", brickinfo->hostname,
brickinfo->path);
@@ -329,8 +497,42 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
ret = dict_set_dynstr (volumes, key, buf);
if (ret)
goto out;
+ snprintf (key, 256, "volume%d.brick%d.uuid", count, i);
+ snprintf (brick_uuid, 64, "%s", uuid_utoa (brickinfo->uuid));
+ buf = gf_strdup (brick_uuid);
+ if (!buf)
+ goto out;
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret)
+ goto out;
+
+#ifdef HAVE_BD_XLATOR
+ if (volinfo->caps & CAPS_BD) {
+ snprintf (key, 256, "volume%d.vg%d", count, i);
+ snprintf (brick, 1024, "%s", brickinfo->vg);
+ buf = gf_strdup (brick);
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret)
+ goto out;
+ }
+#endif
i++;
}
+
+ dict = volinfo->dict;
+ if (!dict) {
+ ret = 0;
+ goto out;
+ }
+
+ pack.dict = volumes;
+ pack.vol_count = count;
+ pack.opt_count = 0;
+ dict_foreach (dict, _build_option_key, (void *) &pack);
+ dict_foreach (priv->opts, _build_option_key, &pack);
+
+ snprintf (key, 256, "volume%d.opt_count", pack.vol_count);
+ ret = dict_set_int32 (volumes, key, pack.opt_count);
out:
return ret;
}
@@ -340,13 +542,18 @@ glusterd_friend_find (uuid_t uuid, char *hostname,
glusterd_peerinfo_t **peerinfo)
{
int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
if (uuid) {
ret = glusterd_friend_find_by_uuid (uuid, peerinfo);
if (ret) {
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Unable to find peer by uuid");
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unable to find peer by uuid: %s",
+ uuid_utoa (uuid));
} else {
goto out;
}
@@ -357,7 +564,7 @@ glusterd_friend_find (uuid_t uuid, char *hostname,
ret = glusterd_friend_find_by_hostname (hostname, peerinfo);
if (ret) {
- gf_log ("glusterd", GF_LOG_NORMAL,
+ gf_log (this->name, GF_LOG_DEBUG,
"Unable to find hostname: %s", hostname);
} else {
goto out;
@@ -368,546 +575,705 @@ out:
return ret;
}
-int
-glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+int32_t
+glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
+ char *err_str, size_t err_len)
{
- gd1_mgmt_cluster_lock_req lock_req = {{0},};
- int32_t ret = -1;
- char str[50] = {0,};
- glusterd_op_lock_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t locked = 0;
+ char *tmp = NULL;
+ char *volname = NULL;
+ uuid_t *txn_id = NULL;
+ uuid_t *originator_uuid = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
GF_ASSERT (req);
+ GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
+ GF_ASSERT (NULL != ctx);
- if (!gd_xdr_to_mgmt_cluster_lock_req (req->msg[0], &lock_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ dict = ctx;
+
+ /* Generate a transaction-id for this operation and
+ * save it in the dict. This transaction id distinguishes
+ * each transaction, and helps separate opinfos in the
+ * op state machine. */
+ txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!txn_id)
+ goto out;
+
+ uuid_generate (*txn_id);
+
+ ret = dict_set_bin (dict, "transaction_id",
+ txn_id, sizeof(*txn_id));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set transaction id.");
goto out;
}
- uuid_unparse (lock_req.uuid, str);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Received LOCK from uuid: %s", str);
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (*txn_id));
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_CALLOC (1, sizeof(uuid_t),
+ gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_stage_ctx_t);
+ uuid_copy (*originator_uuid, MY_UUID);
+ ret = dict_set_bin (dict, "originator_uuid",
+ originator_uuid, sizeof (uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set originator uuid.");
+ goto out;
+ }
- if (!ctx) {
- //respond here
- return -1;
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
+ if (priv->op_version < 3) {
+ ret = glusterd_lock (MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock on localhost, ret: %d",
+ ret);
+ snprintf (err_str, err_len,
+ "Another transaction is in progress. "
+ "Please try again after sometime.");
+ goto out;
+ }
+ } else {
+ /* If no volname is given as a part of the command, locks will
+ * not be held */
+ ret = dict_get_str (dict, "volname", &tmp);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Failed to get volume "
+ "name");
+ goto local_locking_done;
+ } else {
+ /* Use a copy of volname, as cli response will be
+ * sent before the unlock, and the volname in the
+ * dict, might be removed */
+ volname = gf_strdup (tmp);
+ if (!volname)
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock for %s", volname);
+ snprintf (err_str, err_len,
+ "Another transaction is in progress for %s. "
+ "Please try again after sometime.", volname);
+ goto out;
+ }
}
- uuid_copy (ctx->uuid, lock_req.uuid);
- ctx->req = req;
+ locked = 1;
+ gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost");
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, ctx);
+local_locking_done:
+
+ /* If no volname is given as a part of the command, locks will
+ * not be held, hence sending stage event. */
+ if (volname)
+ event_type = GD_OP_EVENT_START_LOCK;
+ else {
+ txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
+ event_type = GD_OP_EVENT_ALL_ACC;
+ }
+
+ /* Save opinfo for this transaction with the transaction id */
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req);
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ if (ctx)
+ dict_unref (ctx);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to acquire cluster"
+ " lock.");
+ goto out;
+ }
out:
- gf_log ("", GF_LOG_NORMAL, "Returning %d", ret);
+ if (locked && ret) {
+ /* Based on the op-version, we release the
+ * cluster or mgmt_v3 lock */
+ if (priv->op_version < 3)
+ glusterd_unlock (MY_UUID);
+ else {
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+ ret = -1;
+ }
+ }
+
+ if (volname)
+ GF_FREE (volname);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
-glusterd_handle_stage_op (rpcsvc_request_t *req)
+__glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- char str[50] = {0,};
- gd1_mgmt_stage_op_req stage_req = {{0,}};
- glusterd_op_stage_ctx_t *ctx = NULL;
+ dict_t *op_ctx = NULL;
+ int32_t ret = -1;
+ gd1_mgmt_cluster_lock_req lock_req = {{0},};
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_op_t op = GD_OP_EVENT_LOCK;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+ uuid_t *txn_id = &global_txn_id;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_stage_op_req (req->msg[0], &stage_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
+ "request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- uuid_unparse (stage_req.uuid, str);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Received stage op from uuid: %s", str);
+ gf_log (this->name, GF_LOG_DEBUG, "Received LOCK from uuid: %s",
+ uuid_utoa (lock_req.uuid));
+
+ if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_stage_ctx_t);
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
if (!ctx) {
//respond here
return -1;
}
- //CHANGE THIS
- uuid_copy (ctx->stage_req.uuid, stage_req.uuid);
- ctx->stage_req.op = stage_req.op;
- ctx->stage_req.buf.buf_len = stage_req.buf.buf_len;
- ctx->stage_req.buf.buf_val = GF_CALLOC (1, stage_req.buf.buf_len,
- gf_gld_mt_string);
- if (!ctx->stage_req.buf.buf_val)
+ uuid_copy (ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+ ctx->dict = NULL;
+
+ op_ctx = dict_new ();
+ if (!op_ctx) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set new dict");
goto out;
+ }
- memcpy (ctx->stage_req.buf.buf_val, stage_req.buf.buf_val,
- stage_req.buf.buf_len);
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req);
- ctx->req = req;
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ dict_unref (txn_op_info.op_ctx);
+ goto out;
+ }
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP, ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_LOCK");
out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
return ret;
}
int
-glusterd_handle_commit_op (rpcsvc_request_t *req)
+glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- char str[50] = {0,};
- gd1_mgmt_commit_op_req commit_req = {{0},};
- glusterd_op_commit_ctx_t *ctx = NULL;
-
- GF_ASSERT (req);
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cluster_lock);
+}
- if (!gd_xdr_to_mgmt_commit_op_req (req->msg[0], &commit_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
+int
+glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
+ glusterd_op_t op, uuid_t uuid,
+ char *buf_val, size_t buf_len,
+ gf_gld_mem_types_t mem_type,
+ glusterd_req_ctx_t **req_ctx_out)
+{
+ int ret = -1;
+ char str[50] = {0,};
+ glusterd_req_ctx_t *req_ctx = NULL;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
- uuid_unparse (commit_req.uuid, str);
+ this = THIS;
+ GF_ASSERT (this);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Received commit op from uuid: %s", str);
+ uuid_unparse (uuid, str);
+ gf_log (this->name, GF_LOG_DEBUG, "Received op from uuid %s", str);
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_commit_ctx_t);
+ dict = dict_new ();
+ if (!dict)
+ goto out;
- if (!ctx) {
- //respond here
- return -1;
+ req_ctx = GF_CALLOC (1, sizeof (*req_ctx), mem_type);
+ if (!req_ctx) {
+ goto out;
}
- ctx->req = req;
- //CHANGE THIS
- uuid_copy (ctx->stage_req.uuid, commit_req.uuid);
- ctx->stage_req.op = commit_req.op;
- ctx->stage_req.buf.buf_len = commit_req.buf.buf_len;
- ctx->stage_req.buf.buf_val = GF_CALLOC (1, commit_req.buf.buf_len,
- gf_gld_mt_string);
- if (!ctx->stage_req.buf.buf_val)
+ uuid_copy (req_ctx->uuid, uuid);
+ req_ctx->op = op;
+ ret = dict_unserialize (buf_val, buf_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
goto out;
+ }
- memcpy (ctx->stage_req.buf.buf_val, commit_req.buf.buf_val,
- commit_req.buf.buf_len);
-
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, ctx);
-
+ req_ctx->dict = dict;
+ req_ctx->req = rpc_req;
+ *req_ctx_out = req_ctx;
+ ret = 0;
out:
+ if (ret) {
+ if (dict)
+ dict_unref (dict);
+ GF_FREE (req_ctx);
+ }
return ret;
}
int
-glusterd_handle_cli_probe (rpcsvc_request_t *req)
+__glusterd_handle_stage_op (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_probe_req cli_req = {0,};
+ glusterd_req_ctx_t *req_ctx = NULL;
+ gd1_mgmt_stage_op_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_op_sm_state_info_t state;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gf_xdr_to_cli_probe_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- gf_log ("", 1, "error");
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode stage "
+ "request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received CLI probe req %s %d",
- cli_req.hostname, cli_req.port);
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
+ op_req.buf.buf_val, op_req.buf.buf_len,
+ gf_gld_mt_op_stage_ctx_t, &req_ctx);
+ if (ret)
+ goto out;
- ret = glusterd_probe_begin (req, cli_req.hostname, cli_req.port);
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
-out:
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
+ /* In cases where there is no volname, the receivers won't have a
+ * transaction opinfo created, as for those operations, the locking
+ * phase where the transaction opinfos are created, won't be called. */
+ ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "No transaction's opinfo set");
+
+ state.state = GD_OP_STATE_LOCKED;
+ glusterd_txn_opinfo_init (&txn_op_info, &state,
+ &op_req.op, req_ctx->dict, req);
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ dict_unref (req_ctx->dict);
+ goto out;
+ }
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP,
+ txn_id, req_ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_STAGE_OP");
+
+ out:
+ free (op_req.buf.buf_val);//malloced by xdr
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
return ret;
}
int
-glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
+glusterd_handle_stage_op (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_stage_op);
+}
+
+
+int
+__glusterd_handle_commit_op (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_probe_req cli_req = {0,};
+ glusterd_req_ctx_t *req_ctx = NULL;
+ gd1_mgmt_commit_op_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gf_xdr_to_cli_probe_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode commit "
+ "request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received CLI deprobe req");
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ //the structures should always be equal
+ GF_ASSERT (sizeof (gd1_mgmt_commit_op_req) == sizeof (gd1_mgmt_stage_op_req));
+ ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
+ op_req.buf.buf_val, op_req.buf.buf_len,
+ gf_gld_mt_op_commit_ctx_t, &req_ctx);
+ if (ret)
+ goto out;
+
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
- ret = glusterd_deprobe_begin (req, cli_req.hostname, cli_req.port);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP,
+ txn_id, req_ctx);
out:
+ free (op_req.buf.buf_val);//malloced by xdr
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
return ret;
}
int
-glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
+glusterd_handle_commit_op (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_commit_op);
+}
+
+int
+__glusterd_handle_cli_probe (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_peer_list_req cli_req = {0,};
- dict_t *dict = NULL;
+ gf_cli_req cli_req = {{0,},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gf_boolean_t run_fsm = _gf_true;
+ xlator_t *this = NULL;
+ char *bind_name = NULL;
+ dict_t *dict = NULL;
+ char *hostname = NULL;
+ int port = 0;
GF_ASSERT (req);
+ this = THIS;
- if (!gf_xdr_to_cli_peer_list_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
+ gf_log ("", GF_LOG_ERROR, "xdr decoding error");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received cli list req");
-
if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
+ dict = dict_new ();
ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
+ cli_req.dict.dict_len, &dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "failed to "
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
"unserialize req-buffer to dictionary");
goto out;
}
}
- ret = glusterd_list_friends (req, dict, cli_req.flags);
-
-out:
- return ret;
-}
-
-int
-glusterd_check_and_rebalance (glusterd_volinfo_t *volinfo, char *dir)
-{
- int ret = -1;
- int dst_fd = -1;
- int src_fd = -1;
- DIR *fd = NULL;
- glusterd_defrag_info_t *defrag = NULL;
- struct dirent *entry = NULL;
- struct stat stbuf = {0,};
- struct stat new_stbuf = {0,};
- char full_path[1024] = {0,};
- char tmp_filename[1024] = {0,};
- char value[128] = {0,};
-
- defrag = volinfo->defrag;
- if (!defrag)
+ ret = dict_get_str (dict, "hostname", &hostname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get hostname");
goto out;
+ }
- fd = opendir (dir);
- if (!fd)
+ ret = dict_get_int32 (dict, "port", &port);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get port");
goto out;
+ }
- do {
- entry = readdir (fd);
- if (!entry)
- break;
-
- if (!strcmp (entry->d_name, ".") || !strcmp (entry->d_name, ".."))
- continue;
-
- snprintf (full_path, 1024, "%s/%s", dir, entry->d_name);
-
- ret = stat (full_path, &stbuf);
- if (ret == -1)
- continue;
-
- if (S_ISDIR (stbuf.st_mode)) {
- //getfattr -n trusted.distribute.fix.layout "$path" ;
- getxattr (full_path, "trusted.distribute.fix.layout",
- &value, 128);
-
- ret = glusterd_check_and_rebalance (volinfo, full_path);
- }
- if (S_ISREG (stbuf.st_mode) && ((stbuf.st_mode & 01000) == 01000)) {
- /* TODO: run the defrag */
- snprintf (tmp_filename, 1024, "%s/.%s.gfs%zu", dir,
- entry->d_name, stbuf.st_size);
-
- dst_fd = creat (tmp_filename, (stbuf.st_mode & ~01000));
- if (dst_fd == -1)
- continue;
-
- src_fd = open (full_path, O_RDONLY);
- if (src_fd == -1) {
- close (dst_fd);
- continue;
- }
-
- while (1) {
- ret = read (src_fd, defrag->databuf, 131072);
- if (!ret || (ret < 0)) {
- close (dst_fd);
- close (src_fd);
- break;
- }
- ret = write (dst_fd, defrag->databuf, ret);
- if (ret < 0) {
- close (dst_fd);
- close (src_fd);
- break;
- }
- }
-
- ret = stat (full_path, &new_stbuf);
- if (ret < 0)
- continue;
- if (new_stbuf.st_mtime != stbuf.st_mtime)
- continue;
-
- ret = rename (tmp_filename, full_path);
- if (ret != -1) {
- LOCK (&defrag->lock);
- {
- defrag->total_files += 1;
- defrag->total_data += stbuf.st_size;
- }
- UNLOCK (&defrag->lock);
- }
- }
+ if (glusterd_is_any_volume_in_server_quorum (this) &&
+ !does_gd_meet_server_quorum (this)) {
+ glusterd_xfer_cli_probe_resp (req, -1, GF_PROBE_QUORUM_NOT_MET,
+ NULL, hostname, port, dict);
+ gf_log (this->name, GF_LOG_ERROR, "Quorum does not meet, "
+ "rejecting operation");
ret = 0;
+ goto out;
+ }
- LOCK (&defrag->lock);
- {
- if (S_ISREG (stbuf.st_mode))
- defrag->num_files_lookedup += 1;
- if (volinfo->defrag_status == GF_DEFRAG_STATUS_STOPED)
- ret = 1;
- }
- UNLOCK (&defrag->lock);
- if (ret)
- break;
-
- /* Write the full 'glusterfs-defrag' here */
-
- } while (1);
-
- closedir (fd);
+ gf_log ("glusterd", GF_LOG_INFO, "Received CLI probe req %s %d",
+ hostname, port);
- if (!entry)
+ if (dict_get_str(this->options,"transport.socket.bind-address",
+ &bind_name) == 0) {
+ gf_log ("glusterd", GF_LOG_DEBUG,
+ "only checking probe address vs. bind address");
+ ret = gf_is_same_address (bind_name, hostname);
+ }
+ else {
+ ret = gf_is_local_addr (hostname);
+ }
+ if (ret) {
+ glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST,
+ NULL, hostname, port, dict);
ret = 0;
-out:
- return ret;
-}
-
-void *
-glusterd_defrag_start (void *data)
-{
- glusterd_volinfo_t *volinfo = data;
- glusterd_defrag_info_t *defrag = NULL;
- char cmd_str[1024] = {0,};
- int ret = -1;
-
- /* TODO: make it more generic.. */
- defrag = volinfo->defrag;
- if (!defrag)
goto out;
+ }
- ret = glusterd_check_and_rebalance (volinfo, defrag->mount);
-
- /* TODO: This should run in a thread, and finish the thread when
- the task is complete. While defrag is running, keep updating
- files */
+ if (!(ret = glusterd_friend_find_by_hostname (hostname, &peerinfo))) {
+ if (strcmp (peerinfo->hostname, hostname) == 0) {
- volinfo->defrag_status = GF_DEFRAG_STATUS_COMPLETE;
- volinfo->rebalance_files = defrag->total_files;
- volinfo->rebalance_data = defrag->total_data;
- volinfo->lookedup_files = defrag->num_files_lookedup;
-out:
- gf_log ("defrag", GF_LOG_NORMAL, "defrag on %s complete",
- defrag->mount);
-
- snprintf (cmd_str, 1024, "umount %s", defrag->mount);
- system (cmd_str);
- volinfo->defrag = NULL;
- LOCK_DESTROY (&defrag->lock);
- GF_FREE (defrag);
+ gf_log ("glusterd", GF_LOG_DEBUG, "Probe host %s port "
+ "%d already a peer", hostname, port);
+ glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND,
+ NULL, hostname, port,
+ dict);
+ goto out;
+ }
+ }
+ ret = glusterd_probe_begin (req, hostname, port, dict);
- return NULL;
-}
+ if (ret == GLUSTERD_CONNECTION_AWAITED) {
+ //fsm should be run after connection establishes
+ run_fsm = _gf_false;
+ ret = 0;
+ }
-int
-glusterd_defrag_stop (glusterd_volinfo_t *volinfo,
- gf1_cli_defrag_vol_rsp *rsp)
-{
- /* TODO: set a variaeble 'stop_defrag' here, it should be checked
- in defrag loop */
- if (!volinfo || !volinfo->defrag)
- goto out;
+out:
+ free (cli_req.dict.dict_val);
- LOCK (&volinfo->defrag->lock);
- {
- volinfo->defrag_status = GF_DEFRAG_STATUS_STOPED;
- rsp->files = volinfo->defrag->total_files;
- rsp->size = volinfo->defrag->total_data;
+ if (run_fsm) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
}
- UNLOCK (&volinfo->defrag->lock);
- rsp->op_ret = 0;
-out:
- return 0;
+ return ret;
}
int
-glusterd_defrag_status_get (glusterd_volinfo_t *volinfo,
- gf1_cli_defrag_vol_rsp *rsp)
+glusterd_handle_cli_probe (rpcsvc_request_t *req)
{
- if (!volinfo)
- goto out;
-
- if (volinfo->defrag) {
- LOCK (&volinfo->defrag->lock);
- {
- rsp->files = volinfo->defrag->total_files;
- rsp->size = volinfo->defrag->total_data;
- rsp->lookedup_files = volinfo->defrag->num_files_lookedup;
- }
- UNLOCK (&volinfo->defrag->lock);
- } else {
- rsp->files = volinfo->rebalance_files;
- rsp->size = volinfo->rebalance_data;
- rsp->lookedup_files = volinfo->lookedup_files;
- }
- rsp->op_ret = 0;
-out:
- return 0;
+ return glusterd_big_locked_handler (req, __glusterd_handle_cli_probe);
}
int
-glusterd_handle_defrag_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_defrag_vol_req cli_req = {0,};
- glusterd_conf_t *priv = NULL;
- char cmd_str[4096] = {0,};
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_defrag_info_t *defrag = NULL;
- gf1_cli_defrag_vol_rsp rsp = {0,};
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{0,},};
+ uuid_t uuid = {0};
+ int op_errno = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ dict_t *dict = NULL;
+ char *hostname = NULL;
+ int port = 0;
+ int flags = 0;
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
- priv = THIS->private;
- if (!gf_xdr_to_cli_defrag_vol_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req,
+ (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received defrag volume on %s",
- cli_req.volname);
+ if (cli_req.dict.dict_len) {
+ dict = dict_new ();
- rsp.volname = cli_req.volname;
- rsp.op_ret = -1;
- if (glusterd_volinfo_find(cli_req.volname, &volinfo)) {
- gf_log ("glusterd", GF_LOG_NORMAL, "Received defrag on invalid"
- " volname %s", cli_req.volname);
- goto out;
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
}
- if (volinfo->status != GLUSTERD_STATUS_STARTED) {
- gf_log ("glusterd", GF_LOG_NORMAL, "Received defrag on stopped"
- " volname %s", cli_req.volname);
+ gf_log ("glusterd", GF_LOG_INFO, "Received CLI deprobe req");
+
+ ret = dict_get_str (dict, "hostname", &hostname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get hostname");
goto out;
}
- switch (cli_req.cmd) {
- case GF_DEFRAG_CMD_START:
- {
- if (volinfo->defrag) {
- gf_log ("glusterd", GF_LOG_DEBUG,
- "defrag on volume %s already started",
- cli_req.volname);
- goto out;
- }
+ ret = dict_get_int32 (dict, "port", &port);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get port");
+ goto out;
+ }
- volinfo->defrag = GF_CALLOC (1, sizeof (glusterd_defrag_info_t),
- gf_gld_mt_defrag_info);
- if (!volinfo->defrag)
- goto out;
+ ret = dict_get_int32 (dict, "flags", &flags);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get flags");
+ goto out;
+ }
- defrag = volinfo->defrag;
+ ret = glusterd_hostname_to_uuid (hostname, uuid);
+ if (ret) {
+ op_errno = GF_DEPROBE_NOT_FRIEND;
+ goto out;
+ }
- LOCK_INIT (&defrag->lock);
- snprintf (defrag->mount, 1024, "%s/mount/%s",
- priv->workdir, cli_req.volname);
- /* Create a directory, mount glusterfs over it, start glusterfs-defrag */
- snprintf (cmd_str, 4096, "mkdir -p %s", defrag->mount);
- ret = system (cmd_str);
+ if (!uuid_compare (uuid, MY_UUID)) {
+ op_errno = GF_DEPROBE_LOCALHOST;
+ ret = -1;
+ goto out;
+ }
- if (ret) {
- gf_log("glusterd", GF_LOG_DEBUG, "command: %s failed", cmd_str);
- goto out;
+ if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
+ if (!uuid_is_null (uuid)) {
+ /* Check if peers are connected, except peer being detached*/
+ if (!glusterd_chk_peers_connected_befriended (uuid)) {
+ ret = -1;
+ op_errno = GF_DEPROBE_FRIEND_DOWN;
+ goto out;
+ }
+ ret = glusterd_all_volume_cond_check (
+ glusterd_friend_brick_belongs,
+ -1, &uuid);
+ if (ret) {
+ op_errno = GF_DEPROBE_BRICK_EXIST;
+ goto out;
+ }
}
- snprintf (cmd_str, 4096, "glusterfs -f %s/vols/%s/%s-tcp.vol "
- "--xlator-option dht0.unhashed-sticky-bit=yes "
- "--xlator-option dht0.lookup-unhashed=yes "
- "--volume-name quickread %s",
- priv->workdir, cli_req.volname, cli_req.volname,
- defrag->mount);
- ret = system (cmd_str);
-
- if (ret) {
- gf_log("glusterd", GF_LOG_DEBUG, "command: %s failed", cmd_str);
+ if (glusterd_is_any_volume_in_server_quorum (this) &&
+ !does_gd_meet_server_quorum (this)) {
+ gf_log (this->name, GF_LOG_ERROR, "Quorum does not "
+ "meet, rejecting operation");
+ ret = -1;
+ op_errno = GF_DEPROBE_QUORUM_NOT_MET;
goto out;
}
- rsp.op_ret = 0;
- ret = pthread_create (&defrag->th, NULL, glusterd_defrag_start,
- volinfo);
- if (ret) {
- snprintf (cmd_str, 1024, "umount -l %s", defrag->mount);
- ret = system (cmd_str);
- rsp.op_ret = -1;
- }
- break;
}
- case GF_DEFRAG_CMD_STOP:
- ret = glusterd_defrag_stop (volinfo, &rsp);
- break;
- case GF_DEFRAG_CMD_STATUS:
- ret = glusterd_defrag_status_get (volinfo, &rsp);
- break;
- default:
- break;
+
+ if (!uuid_is_null (uuid)) {
+ ret = glusterd_deprobe_begin (req, hostname, port, uuid, dict);
+ } else {
+ ret = glusterd_deprobe_begin (req, hostname, port, NULL, dict);
}
- if (ret)
- gf_log("glusterd", GF_LOG_DEBUG, "command: %s failed",cmd_str);
+
out:
+ free (cli_req.dict.dict_val);
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_defrag_vol_rsp);
+ if (ret) {
+ ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno, NULL,
+ hostname, dict);
+ }
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
return ret;
}
int
-glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_cli_deprobe);
+}
+
+int
+__glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_get_vol_req cli_req = {0,};
+ gf1_cli_peer_list_req cli_req = {0,};
dict_t *dict = NULL;
GF_ASSERT (req);
- if (!gf_xdr_to_cli_get_vol_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req,
+ (xdrproc_t)xdr_gf1_cli_peer_list_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received get vol req");
+ gf_log ("glusterd", GF_LOG_INFO, "Received cli list req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -921,241 +1287,735 @@ glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
"failed to "
"unserialize req-buffer to dictionary");
goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
}
- ret = glusterd_get_volumes (req, dict, cli_req.flags);
+ ret = glusterd_list_friends (req, dict, cli_req.flags);
out:
+ if (dict)
+ dict_unref (dict);
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
return ret;
}
int
-glusterd_handle_create_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_list_friends);
+}
+
+int
+__glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_create_vol_req cli_req = {0,};
+ gf_cli_req cli_req = {{0,}};
dict_t *dict = NULL;
+ int32_t flags = 0;
GF_ASSERT (req);
- if (!gf_xdr_to_cli_create_vol_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received create volume req");
+ gf_log ("glusterd", GF_LOG_INFO, "Received get vol req");
- if (cli_req.bricks.bricks_len) {
+ if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new ();
- ret = dict_unserialize (cli_req.bricks.bricks_val,
- cli_req.bricks.bricks_len,
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
&dict);
if (ret < 0) {
gf_log ("glusterd", GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
}
- ret = glusterd_create_volume (req, dict);
+ ret = dict_get_int32 (dict, "flags", &flags);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "failed to get flags");
+ goto out;
+ }
+
+ ret = glusterd_get_volumes (req, dict, flags);
out:
+ if (dict)
+ dict_unref (dict);
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
return ret;
}
int
-glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_start_vol_req cli_req = {0,};
- //dict_t *dict = NULL;
- int32_t flags = 0;
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_get_volume);
+}
+
+int
+__glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ uuid_t uuid = {0};
+ gf_cli_rsp rsp = {0,};
+ gf_cli_req cli_req = {{0,}};
+ char msg_str[2048] = {0,};
GF_ASSERT (req);
- if (!gf_xdr_to_cli_start_vol_req (req->msg[0], &cli_req)) {
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received start vol req"
- "for volume %s", cli_req.volname);
+ gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid reset req");
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
+
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf (msg_str, sizeof (msg_str), "Unable to decode "
+ "the buffer");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ /* In the above section if dict_unserialize is successful, ret is set
+ * to zero.
+ */
+ ret = -1;
+ // Do not allow peer reset if there are any volumes in the cluster
+ if (!list_empty (&priv->volumes)) {
+ snprintf (msg_str, sizeof (msg_str), "volumes are already "
+ "present in the cluster. Resetting uuid is not "
+ "allowed");
+ gf_log (this->name, GF_LOG_WARNING, "%s", msg_str);
+ goto out;
+ }
+
+ // Do not allow peer reset if trusted storage pool is already formed
+ if (!list_empty (&priv->peers)) {
+ snprintf (msg_str, sizeof (msg_str),"trusted storage pool "
+ "has been already formed. Please detach this peer "
+ "from the pool and reset its uuid.");
+ gf_log (this->name, GF_LOG_WARNING, "%s", msg_str);
+ goto out;
+ }
- ret = glusterd_start_volume (req, cli_req.volname, flags);
+ uuid_copy (uuid, priv->uuid);
+ ret = glusterd_uuid_generate_save ();
+
+ if (!uuid_compare (uuid, MY_UUID)) {
+ snprintf (msg_str, sizeof (msg_str), "old uuid and the new uuid"
+ " are same. Try gluster peer reset again");
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg_str);
+ ret = -1;
+ goto out;
+ }
out:
+ if (ret) {
+ rsp.op_ret = -1;
+ if (msg_str[0] == '\0')
+ snprintf (msg_str, sizeof (msg_str), "Operation "
+ "failed");
+ rsp.op_errstr = msg_str;
+ ret = 0;
+ } else {
+ rsp.op_errstr = "";
+ }
+
+ glusterd_to_cli (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp, dict);
+
return ret;
}
int
-glusterd_handle_cli_stop_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_stop_vol_req cli_req = {0,};
- int32_t flags = 0;
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_uuid_reset);
+}
+
+int
+__glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_cli_rsp rsp = {0,};
+ gf_cli_req cli_req = {{0,}};
+ char msg_str[2048] = {0,};
+ char uuid_str[64] = {0,};
GF_ASSERT (req);
- if (!gf_xdr_to_cli_stop_vol_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received stop vol req"
- "for volume %s", cli_req.volname);
+ gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid get req");
- ret = glusterd_stop_volume (req, cli_req.volname, flags);
+ if (cli_req.dict.dict_len) {
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf (msg_str, sizeof (msg_str), "Unable to decode "
+ "the buffer");
+ goto out;
+
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+
+ }
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ ret = -1;
+ goto out;
+ }
+ uuid_utoa_r (MY_UUID, uuid_str);
+ ret = dict_set_str (rsp_dict, "uuid", uuid_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set uuid in "
+ "dictionary.");
+ goto out;
+ }
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to serialize "
+ "dictionary.");
+ goto out;
+ }
+ ret = 0;
out:
- return ret;
+ if (ret) {
+ rsp.op_ret = -1;
+ if (msg_str[0] == '\0')
+ snprintf (msg_str, sizeof (msg_str), "Operation "
+ "failed");
+ rsp.op_errstr = msg_str;
+
+ } else {
+ rsp.op_errstr = "";
+
+ }
+
+ glusterd_to_cli (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp, dict);
+
+ return 0;
+}
+int
+glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_uuid_get);
}
int
-glusterd_handle_cli_delete_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_delete_vol_req cli_req = {0,};
- int32_t flags = 0;
+ int ret = -1;
+ dict_t *dict = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int count = 0;
+ char key[1024] = {0,};
+ gf_cli_rsp rsp = {0,};
GF_ASSERT (req);
- if (!gf_xdr_to_cli_delete_vol_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
+ priv = THIS->private;
+ GF_ASSERT (priv);
+
+ dict = dict_new ();
+ if (!dict)
goto out;
+
+ list_for_each_entry (volinfo, &priv->volumes, vol_list) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "volume%d", count);
+ ret = dict_set_str (dict, key, volinfo->volname);
+ if (ret)
+ goto out;
+ count++;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received delete vol req"
- "for volume %s", cli_req.volname);
+ ret = dict_set_int32 (dict, "count", count);
+ if (ret)
+ goto out;
+
+ ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret)
+ goto out;
- ret = glusterd_delete_volume (req, cli_req.volname, flags);
+ ret = 0;
out:
+ rsp.op_ret = ret;
+ if (ret)
+ rsp.op_errstr = "Error listing volumes";
+ else
+ rsp.op_errstr = "";
+
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp);
+ ret = 0;
+
+ if (dict)
+ dict_unref (dict);
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
return ret;
}
int
-glusterd_handle_add_brick (rpcsvc_request_t *req)
+glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_list_volume);
+}
+
+int32_t
+glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
+ char *err_str, size_t err_len)
+{
+ int ret = -1;
+
+ ret = glusterd_op_txn_begin (req, op, ctx, err_str, err_len);
+
+ return ret;
+}
+
+int
+__glusterd_handle_reset_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_add_brick_req cli_req = {0,};
+ gf_cli_req cli_req = {{0,}};
dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_RESET_VOLUME;
+ char *volname = NULL;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- if (!gf_xdr_to_cli_add_brick_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf (err_str, sizeof (err_str), "Failed to decode request "
+ "received from cli");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received add brick req");
-
- if (cli_req.bricks.bricks_len) {
+ if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new ();
- ret = dict_unserialize (cli_req.bricks.bricks_val,
- cli_req.bricks.bricks_len,
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf (err_str, sizeof (err_str), "Unable to decode "
+ "the command");
goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
}
- ret = glusterd_add_brick (req, dict);
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Failed to get volume "
+ "name");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_DEBUG, "Received volume reset request for "
+ "volume %s", volname);
+
+ ret = glusterd_op_begin_synctask (req, GD_OP_RESET_VOLUME, dict);
out:
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
+ }
+
return ret;
}
int
-glusterd_handle_replace_brick (rpcsvc_request_t *req)
+glusterd_handle_reset_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_reset_volume);
+}
+
+int
+__glusterd_handle_set_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
- gf1_cli_replace_brick_req cli_req = {0,};
+ gf_cli_req cli_req = {{0,}};
dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_SET_VOLUME;
+ char *key = NULL;
+ char *value = NULL;
+ char *volname = NULL;
+ char *op_errstr = NULL;
+ gf_boolean_t help = _gf_false;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gf_xdr_to_cli_replace_brick_req (req->msg[0], &cli_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf (err_str, sizeof (err_str), "Failed to decode "
+ "request received from cli");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received replace brick req");
-
- if (cli_req.bricks.bricks_len) {
+ if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new ();
- ret = dict_unserialize (cli_req.bricks.bricks_val,
- cli_req.bricks.bricks_len,
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
+ snprintf (err_str, sizeof (err_str), "Unable to decode "
+ "the command");
goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
}
- ret = glusterd_replace_brick (req, dict);
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Failed to get volume "
+ "name while handling volume set command");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+
+ if (strcmp (volname, "help") == 0 ||
+ strcmp (volname, "help-xml") == 0) {
+ ret = glusterd_volset_help (dict, &op_errstr);
+ help = _gf_true;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "key1", &key);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Failed to get key while"
+ " handling volume set for %s", volname);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "value1", &value);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Failed to get value while"
+ " handling volume set for %s", volname);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_DEBUG, "Received volume set request for "
+ "volume %s", volname);
+
+ ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict);
out:
+ if (help)
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, dict,
+ (op_errstr)? op_errstr:"");
+ else if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
+ }
+ if (op_errstr)
+ GF_FREE (op_errstr);
+
return ret;
}
int
-glusterd_handle_remove_brick (rpcsvc_request_t *req)
+glusterd_handle_set_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_remove_brick_req cli_req = {0,};
- dict_t *dict = NULL;
+ return glusterd_big_locked_handler (req, __glusterd_handle_set_volume);
+}
+
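+/* CLI handler for 'volume sync': decodes the request, picks up the source
+ * hostname and the volume name (or the sync-all flag), refuses to sync
+ * from the local host, and hands the operation to the op synctask. */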
+int
+__glusterd_handle_sync_volume (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{0,}};
+ dict_t *dict = NULL;
+ gf_cli_rsp cli_rsp = {0,};
+ char msg[2048] = {0,};
+ char *volname = NULL;
+ gf1_cli_sync_volume flags = 0;
+ char *hostname = NULL;
+ xlator_t *this = NULL;
GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- if (!gf_xdr_to_cli_remove_brick_req (req->msg[0], &cli_req)) {
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- gf_log ("glusterd", GF_LOG_NORMAL, "Received rem brick req");
-
- if (cli_req.bricks.bricks_len) {
+ if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new ();
- ret = dict_unserialize (cli_req.bricks.bricks_val,
- cli_req.bricks.bricks_len,
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
+ snprintf (msg, sizeof (msg), "Unable to decode the "
+ "command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ ret = dict_get_str (dict, "hostname", &hostname);
+ if (ret) {
+ snprintf (msg, sizeof (msg), "Failed to get hostname");
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ ret = dict_get_int32 (dict, "flags", (int32_t*)&flags);
+ if (ret) {
+ snprintf (msg, sizeof (msg), "Failed to get volume name"
+ " or flags");
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
}
- ret = glusterd_remove_brick (req, dict);
+ gf_log (this->name, GF_LOG_INFO, "Received volume sync req "
+ "for volume %s", (flags & GF_CLI_SYNC_ALL) ? "all" : volname);
+
+ if (gf_is_local_addr (hostname)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "sync from localhost"
+ " not allowed");
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ goto out;
+ }
+
+ ret = glusterd_op_begin_synctask (req, GD_OP_SYNC_VOLUME, dict);
out:
+ if (ret) {
+ cli_rsp.op_ret = -1;
+ cli_rsp.op_errstr = msg;
+ if (msg[0] == '\0')
+ snprintf (msg, sizeof (msg), "Operation failed");
+ glusterd_to_cli (req, &cli_rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp, dict);
+
+ ret = 0; //sent error to cli, prevent second reply
+ }
+
return ret;
}
int
+glusterd_handle_sync_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_sync_volume);
+}
+
+int
+glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret,
+ char *op_errstr, dict_t *dict)
+{
+
+ int ret = -1;
+ gf1_cli_fsm_log_rsp rsp = {0};
+
+ GF_ASSERT (req);
+ GF_ASSERT (op_errstr);
+
+ rsp.op_ret = op_ret;
+ rsp.op_errstr = op_errstr;
+ if (rsp.op_ret == 0)
+ ret = dict_allocate_and_serialize (dict, &rsp.fsm_log.fsm_log_val,
+ &rsp.fsm_log.fsm_log_len);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
+ GF_FREE (rsp.fsm_log.fsm_log_val);
+
+ gf_log ("glusterd", GF_LOG_DEBUG, "Responded, ret: %d", ret);
+
+ return 0;
+}
+
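+/* CLI handler for the state-machine log query: an empty peer name returns
+ * the local op-sm transition log, otherwise the named peer's friend-sm
+ * log is serialized into a dict and sent back in the fsm-log response. */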
+int
+__glusterd_handle_fsm_log (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf1_cli_fsm_log_req cli_req = {0,};
+ dict_t *dict = NULL;
+ glusterd_sm_tr_log_t *log = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char msg[2048] = {0};
+ glusterd_peerinfo_t *peerinfo = NULL;
+
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &cli_req,
+ (xdrproc_t)xdr_gf1_cli_fsm_log_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ snprintf (msg, sizeof (msg), "Garbage request");
+ goto out;
+ }
+
+ if (strcmp ("", cli_req.name) == 0) {
+ this = THIS;
+ conf = this->private;
+ log = &conf->op_sm_log;
+ } else {
+ ret = glusterd_friend_find_by_hostname (cli_req.name,
+ &peerinfo);
+ if (ret) {
+ snprintf (msg, sizeof (msg), "%s is not a peer",
+ cli_req.name);
+ goto out;
+ }
+ log = &peerinfo->sm_log;
+ }
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_sm_tr_log_add_to_dict (dict, log);
+out:
+ (void)glusterd_fsm_log_send_resp (req, ret, msg, dict);
+ free (cli_req.name);//malloced by xdr
+ if (dict)
+ dict_unref (dict);
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ return 0;//send 0 to avoid double reply
+}
+
+int
+glusterd_handle_fsm_log (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_fsm_log);
+}
+
+int
glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
{
@@ -1167,10 +2027,9 @@ glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
rsp.op_ret = status;
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_cluster_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Responded, ret: %d", ret);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to lock, ret: %d", ret);
return 0;
}
@@ -1187,34 +2046,98 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
glusterd_get_uuid (&rsp.uuid);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_cluster_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Responded to unlock, ret: %d", ret);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to unlock, ret: %d", ret);
return ret;
}
int
-glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
+glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
+{
+
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ GF_ASSERT (txn_id);
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ uuid_copy (rsp.txn_id, *txn_id);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to mgmt_v3 lock, ret: %d",
+ ret);
+
+ return ret;
+}
+
+int
+glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
+{
+
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ GF_ASSERT (txn_id);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ glusterd_get_uuid (&rsp.uuid);
+ uuid_copy (rsp.txn_id, *txn_id);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Responded to mgmt_v3 unlock, ret: %d",
+ ret);
+
+ return ret;
+}
+
+int
+__glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
gd1_mgmt_cluster_unlock_req unlock_req = {{0}, };
int32_t ret = -1;
- char str[50] = {0, };
glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_cluster_unlock_req (req->msg[0], &unlock_req)) {
- //failed to decode msg;
+ ret = xdr_to_generic (req->msg[0], &unlock_req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
+ "request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- uuid_unparse (unlock_req.uuid, str);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Received UNLOCK from uuid: %s", str);
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid));
+
+ if (glusterd_friend_find_by_uuid (unlock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (unlock_req.uuid));
+ ret = -1;
+ goto out;
+ }
ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
@@ -1224,114 +2147,246 @@ glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
}
uuid_copy (ctx->uuid, unlock_req.uuid);
ctx->req = req;
+ ctx->dict = NULL;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx);
out:
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
return ret;
}
int
-glusterd_op_stage_send_resp (rpcsvc_request_t *req,
- int32_t op, int32_t status)
+glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cluster_unlock);
+}
- gd1_mgmt_stage_op_rsp rsp = {{0},};
- int ret = -1;
+int
+glusterd_op_stage_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_mgmt_stage_op_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
+
rsp.op_ret = status;
glusterd_get_uuid (&rsp.uuid);
rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ return ret;
+ }
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_stage_op_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Responded to stage, ret: %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Responded to stage, ret: %d", ret);
+ GF_FREE (rsp.dict.dict_val);
return ret;
}
int
glusterd_op_commit_send_resp (rpcsvc_request_t *req,
- int32_t op, int32_t status)
+ int32_t op, int32_t status, char *op_errstr,
+ dict_t *rsp_dict)
{
- gd1_mgmt_commit_op_rsp rsp = {{0}, };
- int ret = -1;
+ gd1_mgmt_commit_op_rsp rsp = {{0}, };
+ int ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
rsp.op_ret = status;
glusterd_get_uuid (&rsp.uuid);
rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ if (rsp_dict) {
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+ }
+
+
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_commit_op_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Responded to commit, ret: %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Responded to commit, ret: %d", ret);
+out:
+ GF_FREE (rsp.dict.dict_val);
return ret;
}
int
-glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
+__glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
{
int32_t ret = -1;
gd1_mgmt_friend_req friend_req = {{0},};
- char str[50];
+ gf_boolean_t run_fsm = _gf_true;
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &friend_req)) {
+ ret = xdr_to_generic (req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- uuid_unparse (friend_req.uuid, str);
-
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Received probe from uuid: %s", str);
+ gf_log ("glusterd", GF_LOG_INFO,
+ "Received probe from uuid: %s", uuid_utoa (friend_req.uuid));
ret = glusterd_handle_friend_req (req, friend_req.uuid,
- friend_req.hostname, friend_req.port);
+ friend_req.hostname, friend_req.port,
+ &friend_req);
+
+ if (ret == GLUSTERD_CONNECTION_AWAITED) {
+ //fsm should be run after the connection is established
+ run_fsm = _gf_false;
+ ret = 0;
+ }
out:
+ free (friend_req.hostname);//malloced by xdr
+
+ if (run_fsm) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
return ret;
}
int
-glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
+glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_incoming_friend_req);
+}
+
+int
+__glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
{
int32_t ret = -1;
gd1_mgmt_friend_req friend_req = {{0},};
- char str[50];
+ char remote_hostname[UNIX_PATH_MAX + 1] = {0,};
GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &friend_req)) {
+ ret = xdr_to_generic (req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- uuid_unparse (friend_req.uuid, str);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Received unfriend from uuid: %s", str);
+ gf_log ("glusterd", GF_LOG_INFO,
+ "Received unfriend from uuid: %s", uuid_utoa (friend_req.uuid));
+ ret = glusterd_remote_hostname_get (req, remote_hostname,
+ sizeof (remote_hostname));
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get the remote hostname");
+ goto out;
+ }
ret = glusterd_handle_unfriend_req (req, friend_req.uuid,
- friend_req.hostname, friend_req.port);
+ remote_hostname, friend_req.port);
out:
+ free (friend_req.hostname);//malloced by xdr
+ free (friend_req.vols.vols_val);//malloced by xdr
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
return ret;
}
int
-glusterd_handle_friend_update (rpcsvc_request_t *req)
+glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_incoming_unfriend_req);
+
+}
+
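+/* Handles a GD_FRIEND_UPDATE_DEL update: removes the peer named by the
+ * "hostname" key in the update dictionary from the local peer list. */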
+int
+glusterd_handle_friend_update_delete (dict_t *dict)
+{
+ char *hostname = NULL;
+ int32_t ret = -1;
+
+ GF_ASSERT (dict);
+
+ ret = dict_get_str (dict, "hostname", &hostname);
+ if (ret)
+ goto out;
+
+ ret = glusterd_friend_remove (NULL, hostname);
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
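+/* Replaces a peer's stored hostname with a fresh copy of 'hostname',
+ * optionally persisting the change to the peer store. */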
+int
+glusterd_friend_hostname_update (glusterd_peerinfo_t *peerinfo,
+ char *hostname,
+ gf_boolean_t store_update)
+{
+ char *new_hostname = NULL;
+ int ret = 0;
+
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (hostname);
+
+ new_hostname = gf_strdup (hostname);
+ if (!new_hostname) {
+ ret = -1;
+ goto out;
+ }
+
+ GF_FREE (peerinfo->hostname);
+ peerinfo->hostname = new_hostname;
+ if (store_update)
+ ret = glusterd_store_peerinfo (peerinfo);
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
+__glusterd_handle_friend_update (rpcsvc_request_t *req)
{
int32_t ret = -1;
gd1_mgmt_friend_update friend_req = {{0},};
- char str[50] = {0,};
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
@@ -1344,6 +2399,8 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
int i = 1;
int count = 0;
uuid_t uuid = {0,};
+ glusterd_peerctx_args_t args = {0};
+ int32_t op = 0;
GF_ASSERT (req);
@@ -1352,15 +2409,22 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
priv = this->private;
GF_ASSERT (priv);
- if (!gd_xdr_to_mgmt_friend_update (req->msg[0], &friend_req)) {
+ ret = xdr_to_generic (req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_update);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- uuid_unparse (friend_req.uuid, str);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Received friend update from uuid: %s", str);
+ ret = glusterd_friend_find (friend_req.uuid, NULL, &tmp);
+ if (ret) {
+ gf_log ("", GF_LOG_CRITICAL, "Received friend update request "
+ "from unknown peer %s", uuid_utoa (friend_req.uuid));
+ goto out;
+ }
+ gf_log ("glusterd", GF_LOG_INFO,
+ "Received friend update from uuid: %s", uuid_utoa (friend_req.uuid));
if (friend_req.friends.friends_len) {
/* Unserialize the dictionary */
@@ -1374,6 +2438,8 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
"failed to "
"unserialize req-buffer to dictionary");
goto out;
+ } else {
+ dict->extra_stdfree = friend_req.friends.friends_val;
}
}
@@ -1381,6 +2447,16 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
if (ret)
goto out;
+ ret = dict_get_int32 (dict, "op", &op);
+ if (ret)
+ goto out;
+
+ if (GD_FRIEND_UPDATE_DEL == op) {
+ ret = glusterd_handle_friend_update_delete (dict);
+ goto out;
+ }
+
+ args.mode = GD_MODE_ON;
while ( i <= count) {
snprintf (key, sizeof (key), "friend%d.uuid", i);
ret = dict_get_str (dict, key, &uuid_buf);
@@ -1392,11 +2468,17 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
if (ret)
goto out;
- gf_log ("", GF_LOG_NORMAL, "Received uuid: %s, hostname:%s",
+ gf_log ("", GF_LOG_INFO, "Received uuid: %s, hostname:%s",
uuid_buf, hostname);
- if (!uuid_compare (uuid, priv->uuid)) {
- gf_log ("", GF_LOG_NORMAL, "Received my uuid as Friend");
+ if (uuid_is_null (uuid)) {
+ gf_log (this->name, GF_LOG_WARNING, "Updates mustn't "
+ "contain peer with 'null' uuid");
+ continue;
+ }
+
+ if (!uuid_compare (uuid, MY_UUID)) {
+ gf_log ("", GF_LOG_INFO, "Received my uuid as Friend");
i++;
continue;
}
@@ -1404,232 +2486,703 @@ glusterd_handle_friend_update (rpcsvc_request_t *req)
ret = glusterd_friend_find (uuid, hostname, &tmp);
if (!ret) {
+ if (strcmp (hostname, tmp->hostname) != 0) {
+ glusterd_friend_hostname_update (tmp, hostname,
+ _gf_true);
+ }
i++;
continue;
}
ret = glusterd_friend_add (hostname, friend_req.port,
GD_FRIEND_STATE_BEFRIENDED,
- &uuid, NULL, &peerinfo, 0);
+ &uuid, &peerinfo, 0, &args);
i++;
}
out:
- uuid_copy (rsp.uuid, priv->uuid);
+ uuid_copy (rsp.uuid, MY_UUID);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_friend_update_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
+ if (dict) {
+ if (!dict->extra_stdfree && friend_req.friends.friends_val)
+ free (friend_req.friends.friends_val);//malloced by xdr
+ dict_unref (dict);
+ } else {
+ free (friend_req.friends.friends_val);//malloced by xdr
+ }
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
return ret;
}
int
-glusterd_handle_probe_query (rpcsvc_request_t *req)
+glusterd_handle_friend_update (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- char str[50];
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- gd1_mgmt_probe_req probe_req = {{0},};
- gd1_mgmt_probe_rsp rsp = {{0},};
- char hostname[1024] = {0};
- glusterd_peer_hostname_t *name = NULL;
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_friend_update);
+}
- GF_ASSERT (req);
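+/* Handles an incoming probe from another glusterd: rejects probes whose
+ * uuid matches the local uuid, refuses probes from unknown peers when we
+ * already belong to a cluster, and otherwise adds the probing host as a
+ * friend in PROBE_RCVD state before responding. */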
+int
+__glusterd_handle_probe_query (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gd1_mgmt_probe_req probe_req = {{0},};
+ gd1_mgmt_probe_rsp rsp = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_args_t args = {0};
+ int port = 0;
+ char remote_hostname[UNIX_PATH_MAX + 1] = {0,};
- probe_req.hostname = hostname;
+ GF_ASSERT (req);
- if (!gd_xdr_to_mgmt_probe_req (req->msg[0], &probe_req)) {
+ ret = xdr_to_generic (req->msg[0], &probe_req,
+ (xdrproc_t)xdr_gd1_mgmt_probe_req);
+ if (ret < 0) {
//failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
goto out;
}
-
this = THIS;
conf = this->private;
- uuid_unparse (probe_req.uuid, str);
-
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Received probe from uuid: %s", str);
-
- ret = glusterd_peer_hostname_new (probe_req.hostname, &name);
+ if (probe_req.port)
+ port = probe_req.port;
+ else
+ port = GF_DEFAULT_BASE_PORT;
+
+ gf_log ("glusterd", GF_LOG_INFO,
+ "Received probe from uuid: %s", uuid_utoa (probe_req.uuid));
+
+ /* Check for uuid collision and handle it in a user-friendly way by
+ * sending the error.
+ */
+ if (!uuid_compare (probe_req.uuid, MY_UUID)) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Peer uuid %s is same as "
+ "local uuid. Please check the uuid of both the peers "
+ "from %s/%s", uuid_utoa (probe_req.uuid),
+ GLUSTERD_DEFAULT_WORKDIR, GLUSTERD_INFO_FILE);
+ rsp.op_ret = -1;
+ rsp.op_errno = GF_PROBE_SAME_UUID;
+ rsp.port = port;
+ goto respond;
+ }
+ ret = glusterd_remote_hostname_get (req, remote_hostname,
+ sizeof (remote_hostname));
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get new peer_hostname");
- } else {
- list_add_tail (&name->hostname_list, &conf->hostnames);
+ gf_log ("", GF_LOG_ERROR, "Unable to get the remote hostname");
+ goto out;
+ }
+ ret = glusterd_friend_find (probe_req.uuid, remote_hostname, &peerinfo);
+ if ((ret != 0 ) && (!list_empty (&conf->peers))) {
+ rsp.op_ret = -1;
+ rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
+ } else if (ret) {
+ gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
+ " for host: %s (%d)", remote_hostname, port);
+ args.mode = GD_MODE_ON;
+ ret = glusterd_friend_add (remote_hostname, port,
+ GD_FRIEND_STATE_PROBE_RCVD,
+ NULL, &peerinfo, 0, &args);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Failed to add peer %s",
+ remote_hostname);
+ rsp.op_errno = GF_PROBE_ADD_FAILED;
+ }
}
+respond:
+ uuid_copy (rsp.uuid, MY_UUID);
- uuid_copy (rsp.uuid, conf->uuid);
rsp.hostname = probe_req.hostname;
+ rsp.op_errstr = "";
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_probe_rsp);
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
+ ret = 0;
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Responded to %s, ret: %d", probe_req.hostname, ret);
+ gf_log ("glusterd", GF_LOG_INFO, "Responded to %s, op_ret: %d, "
+ "op_errno: %d, ret: %d", remote_hostname,
+ rsp.op_ret, rsp.op_errno, ret);
out:
+ free (probe_req.hostname);//malloced by xdr
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
return ret;
}
+int glusterd_handle_probe_query (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_probe_query);
+}
+
int
-glusterd_friend_add (const char *hoststr, int port,
- glusterd_friend_sm_state_t state,
- uuid_t *uuid,
- struct rpc_clnt *rpc,
- glusterd_peerinfo_t **friend,
- gf_boolean_t restore)
+__glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
{
- int ret = 0;
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- dict_t *options = NULL;
- struct rpc_clnt_config rpc_cfg = {0,};
- glusterd_peer_hostname_t *name = NULL;
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{0,}};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_PROFILE_VOLUME;
+ char *volname = NULL;
+ int32_t op = 0;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
- priv = THIS->private;
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- peerinfo = GF_CALLOC (1, sizeof(*peerinfo), gf_gld_mt_peerinfo_t);
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
- if (!peerinfo)
- return -1;
+ if (cli_req.dict.dict_len > 0) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len, &dict);
+ }
- if (friend)
- *friend = peerinfo;
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Unable to get volume "
+ "name");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
- INIT_LIST_HEAD (&peerinfo->hostnames);
- peerinfo->state.state = state;
- if (hoststr) {
- ret = glusterd_peer_hostname_new ((char *)hoststr, &name);
- if (ret)
- goto out;
- list_add_tail (&peerinfo->hostnames, &name->hostname_list);
- rpc_cfg.remote_host = gf_strdup (hoststr);
- peerinfo->hostname = gf_strdup (hoststr);
+ gf_log (this->name, GF_LOG_INFO, "Received volume profile req "
+ "for volume %s", volname);
+ ret = dict_get_int32 (dict, "op", &op);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Unable to get operation");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
}
- INIT_LIST_HEAD (&peerinfo->uuid_list);
- list_add_tail (&peerinfo->uuid_list, &priv->peers);
+ ret = glusterd_op_begin (req, cli_op, dict, err_str, sizeof (err_str));
- if (uuid) {
- uuid_copy (peerinfo->uuid, *uuid);
+out:
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ free (cli_req.dict.dict_val);
+
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
}
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
- if (hoststr) {
- options = dict_new ();
- if (!options)
- return -1;
+int
+glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_profile_volume);
+}
- ret = dict_set_str (options, "remote-host", (char *)hoststr);
- if (ret)
- goto out;
+int
+__glusterd_handle_getwd (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf1_cli_getwd_rsp rsp = {0,};
+ glusterd_conf_t *priv = NULL;
+ GF_ASSERT (req);
- if (!port)
- port = GLUSTERD_DEFAULT_PORT;
+ priv = THIS->private;
+ GF_ASSERT (priv);
- rpc_cfg.remote_port = port;
+ gf_log ("glusterd", GF_LOG_INFO, "Received getwd req");
- ret = dict_set_int32 (options, "remote-port", port);
- if (ret)
- goto out;
+ rsp.wd = priv->workdir;
- ret = dict_set_str (options, "transport.address-family", "inet");
- if (ret)
- goto out;
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_getwd_rsp);
+ ret = 0;
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
- rpc = rpc_clnt_init (&rpc_cfg, options, THIS->ctx, THIS->name);
+ return ret;
+}
+
+int
+glusterd_handle_getwd (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_getwd);
+}
+
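+/* Handles a mountbroker mount request from the CLI: unserializes the
+ * request dictionary and calls glusterd_do_mount() for the given label,
+ * dropping the big lock around the (potentially slow) mount itself. */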
+int
+__glusterd_handle_mount (rpcsvc_request_t *req)
+{
+ gf1_cli_mount_req mnt_req = {0,};
+ gf1_cli_mount_rsp rsp = {0,};
+ dict_t *dict = NULL;
+ int ret = 0;
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT (req);
+ priv = THIS->private;
+
+ ret = xdr_to_generic (req->msg[0], &mnt_req,
+ (xdrproc_t)xdr_gf1_cli_mount_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ gf_log ("glusterd", GF_LOG_INFO, "Received mount req");
+
+ if (mnt_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
- if (!rpc) {
+ ret = dict_unserialize (mnt_req.dict.dict_val,
+ mnt_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
gf_log ("glusterd", GF_LOG_ERROR,
- "rpc init failed for peer: %s!", hoststr);
- ret = -1;
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ rsp.op_ret = -1;
+ rsp.op_errno = -EINVAL;
goto out;
+ } else {
+ dict->extra_stdfree = mnt_req.dict.dict_val;
}
+ }
+
+ synclock_unlock (&priv->big_lock);
+ rsp.op_ret = glusterd_do_mount (mnt_req.label, dict,
+ &rsp.path, &rsp.op_errno);
+ synclock_lock (&priv->big_lock);
+
+ out:
+ if (!rsp.path)
+ rsp.path = "";
+
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_mount_rsp);
+ ret = 0;
+
+ if (dict)
+ dict_unref (dict);
+ if (*rsp.path)
+ GF_FREE (rsp.path);
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ return ret;
+}
+
+int
+glusterd_handle_mount (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_mount);
+}
- ret = rpc_clnt_register_notify (rpc, glusterd_rpc_notify,
- peerinfo);
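+/* Handles a mountbroker umount request: verifies the path lies under the
+ * configured mountbroker-root hive, runs 'umount' (optionally lazily),
+ * and cleans up the mount point on success. */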
+int
+__glusterd_handle_umount (rpcsvc_request_t *req)
+{
+ gf1_cli_umount_req umnt_req = {0,};
+ gf1_cli_umount_rsp rsp = {0,};
+ char *mountbroker_root = NULL;
+ char mntp[PATH_MAX] = {0,};
+ char *path = NULL;
+ runner_t runner = {0,};
+ int ret = 0;
+ xlator_t *this = THIS;
+ gf_boolean_t dir_ok = _gf_false;
+ char *pdir = NULL;
+ char *t = NULL;
+ glusterd_conf_t *priv = NULL;
- peerinfo->rpc = rpc;
+ GF_ASSERT (req);
+ GF_ASSERT (this);
+ priv = this->private;
+ ret = xdr_to_generic (req->msg[0], &umnt_req,
+ (xdrproc_t)xdr_gf1_cli_umount_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ rsp.op_ret = -1;
+ goto out;
}
- if (!restore)
- ret = glusterd_store_update_peerinfo (peerinfo);
+ gf_log ("glusterd", GF_LOG_INFO, "Received umount req");
+ if (dict_get_str (this->options, "mountbroker-root",
+ &mountbroker_root) != 0) {
+ rsp.op_errno = ENOENT;
+ goto out;
+ }
+
+ /* check if it is allowed to umount path */
+ path = gf_strdup (umnt_req.path);
+ if (!path) {
+ rsp.op_errno = ENOMEM;
+ goto out;
+ }
+ dir_ok = _gf_false;
+ pdir = dirname (path);
+ t = strtail (pdir, mountbroker_root);
+ if (t && *t == '/') {
+ t = strtail(++t, MB_HIVE);
+ if (t && !*t)
+ dir_ok = _gf_true;
+ }
+ GF_FREE (path);
+ if (!dir_ok) {
+ rsp.op_errno = EACCES;
+ goto out;
+ }
+
+ runinit (&runner);
+ runner_add_args (&runner, "umount", umnt_req.path, NULL);
+ if (umnt_req.lazy)
+ runner_add_arg (&runner, "-l");
+ synclock_unlock (&priv->big_lock);
+ rsp.op_ret = runner_run (&runner);
+ synclock_lock (&priv->big_lock);
+ if (rsp.op_ret == 0) {
+ if (realpath (umnt_req.path, mntp))
+ rmdir (mntp);
+ else {
+ rsp.op_ret = -1;
+ rsp.op_errno = errno;
+ }
+ if (unlink (umnt_req.path) != 0) {
+ rsp.op_ret = -1;
+ rsp.op_errno = errno;
+ }
+ }
+
+ out:
+ if (rsp.op_errno)
+ rsp.op_ret = -1;
+
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_umount_rsp);
+ ret = 0;
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
-out:
- gf_log ("glusterd", GF_LOG_NORMAL, "connect returned %d", ret);
return ret;
+}
+int
+glusterd_handle_umount (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_umount);
}
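+/* Looks up a peer by uuid and/or hostname, cleans up any volumes that
+ * referenced it, and then tears the peer itself down. */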
+int
+glusterd_friend_remove (uuid_t uuid, char *hostname)
+{
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+
+ ret = glusterd_friend_find (uuid, hostname, &peerinfo);
+ if (ret)
+ goto out;
+ ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_WARNING, "Volumes cleanup failed");
+ ret = glusterd_friend_cleanup (peerinfo);
+out:
+ gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
+ return ret;
+}
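+/* Creates an rpc_clnt from the given transport options, registers the
+ * supplied notify callback, and starts the connection; the new client is
+ * unref'd again if anything fails. */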
int
-glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
+glusterd_rpc_create (struct rpc_clnt **rpc,
+ dict_t *options,
+ rpc_clnt_notify_t notify_fn,
+ void *notify_data)
{
- int ret = -1;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_friend_sm_event_t *event = NULL;
- glusterd_probe_ctx_t *ctx = NULL;
+ struct rpc_clnt *new_rpc = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
- GF_ASSERT (hoststr);
- GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo);
+ GF_ASSERT (options);
+ /* TODO: is 32 enough? or more ? */
+ new_rpc = rpc_clnt_new (options, this->ctx, this->name, 16);
+ if (!new_rpc)
+ goto out;
+
+ ret = rpc_clnt_register_notify (new_rpc, notify_fn, notify_data);
+ *rpc = new_rpc;
+ if (ret)
+ goto out;
+ ret = rpc_clnt_start (new_rpc);
+out:
if (ret) {
- gf_log ("glusterd", GF_LOG_NORMAL, "Unable to find peerinfo"
- " for host: %s (%d)", hoststr, port);
- ret = glusterd_friend_add ((char *)hoststr, port,
- GD_FRIEND_STATE_DEFAULT,
- NULL, NULL, &peerinfo, 0);
+ if (new_rpc) {
+ (void) rpc_clnt_unref (new_rpc);
+ }
}
- ret = glusterd_friend_sm_new_event
- (GD_FRIEND_EVENT_PROBE, &event);
+ gf_log (this->name, GF_LOG_DEBUG, "returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_transport_keepalive_options_get (int *interval, int *time)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = dict_get_int32 (this->options,
+ "transport.socket.keepalive-interval",
+ interval);
+ ret = dict_get_int32 (this->options,
+ "transport.socket.keepalive-time",
+ time);
+ return 0;
+}
+
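+/* Builds the inet transport options used to talk to a peer: fills in the
+ * default host/port options, shortens frame-timeout to 10 minutes and
+ * applies any configured keepalive settings. */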
+int
+glusterd_transport_inet_options_build (dict_t **options, const char *hostname,
+ int port)
+{
+ dict_t *dict = NULL;
+ int32_t interval = -1;
+ int32_t time = -1;
+ int ret = 0;
+
+ GF_ASSERT (options);
+ GF_ASSERT (hostname);
+
+ if (!port)
+ port = GLUSTERD_DEFAULT_PORT;
+
+ /* Build default transport options */
+ ret = rpc_transport_inet_options_build (&dict, hostname, port);
+ if (ret)
+ goto out;
+ /* Set frame-timeout to 10 minutes. The default timeout of 30 minutes is
+ * too long compared to the 2 minute CLI timeout. This ensures users
+ * don't wait too long after the CLI times out before being able to
+ * resume normal operations.
+ */
+ ret = dict_set_int32 (dict, "frame-timeout", 600);
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "Unable to get new event");
- return ret;
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Failed to set frame-timeout");
+ goto out;
}
- ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
+ /* Set keepalive options */
+ glusterd_transport_keepalive_options_get (&interval, &time);
- if (!ctx) {
- return ret;
+ if ((interval > 0) || (time > 0))
+ ret = rpc_transport_keepalive_options_set (dict, interval, time);
+ *options = dict;
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
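+/* Allocates a peer context, builds transport options for the peer's
+ * hostname/port (reusing any configured bind-address as the source
+ * address) and creates the peer's rpc with glusterd_peer_rpc_notify. */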
+int
+glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
+ glusterd_peerctx_args_t *args)
+{
+ dict_t *options = NULL;
+ int ret = -1;
+ glusterd_peerctx_t *peerctx = NULL;
+ data_t *data = NULL;
+
+ peerctx = GF_CALLOC (1, sizeof (*peerctx), gf_gld_mt_peerctx_t);
+ if (!peerctx)
+ goto out;
+
+ if (args)
+ peerctx->args = *args;
+
+ peerctx->peerinfo = peerinfo;
+
+ ret = glusterd_transport_inet_options_build (&options,
+ peerinfo->hostname,
+ peerinfo->port);
+ if (ret)
+ goto out;
+
+ /*
+ * For simulated multi-node testing, we need to make sure that we
+ * create our RPC endpoint with the same address that the peer would
+ * use to reach us.
+ */
+ if (this->options) {
+ data = dict_get(this->options,"transport.socket.bind-address");
+ if (data) {
+ ret = dict_set(options,
+ "transport.socket.source-addr",data);
+ }
}
- ctx->hostname = gf_strdup (hoststr);
- ctx->port = port;
- ctx->req = req;
+ ret = glusterd_rpc_create (&peerinfo->rpc, options,
+ glusterd_peer_rpc_notify, peerctx);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to create rpc for"
+ " peer %s", peerinfo->hostname);
+ goto out;
+ }
+ peerctx = NULL;
+ ret = 0;
+out:
+ GF_FREE (peerctx);
+ return ret;
+}
- event->peerinfo = peerinfo;
- event->ctx = ctx;
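+/* Creates a new peerinfo, links it into the peer list before any rpc is
+ * created (see the comment below about use-after-free), and, unless we
+ * are restoring from the store, persists it and brings up its rpc. */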
+int
+glusterd_friend_add (const char *hoststr, int port,
+ glusterd_friend_sm_state_t state,
+ uuid_t *uuid,
+ glusterd_peerinfo_t **friend,
+ gf_boolean_t restore,
+ glusterd_peerctx_args_t *args)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
- ret = glusterd_friend_sm_inject_event (event);
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT (conf);
+ GF_ASSERT (hoststr);
+ ret = glusterd_peerinfo_new (friend, state, uuid, hoststr, port);
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, "
- "ret = %d", event->event, ret);
- return ret;
+ goto out;
}
- if (!peerinfo->connected) {
- return GLUSTERD_CONNECTION_AWAITED;
+ /*
+ * We can't add to the list after calling glusterd_friend_rpc_create,
+ * even if it succeeds, because by then the callback to take it back
+ * off and free might have happened already (notably in the case of an
+ * invalid peer name). That would mean we're adding something that had
+ * just been freed, and we're likely to crash later.
+ */
+ list_add_tail (&(*friend)->uuid_list, &conf->peers);
+
+ //restore needs to first create the list of peers, then create rpcs to
+ //keep track of quorum in a race-free manner. During restore, rpc-create
+ //for each peer calls rpc_notify while the friend list is only partially
+ //constructed, leading to wrong quorum calculations.
+ if (!restore) {
+ ret = glusterd_store_peerinfo (*friend);
+ if (ret == 0) {
+ synclock_unlock (&conf->big_lock);
+ ret = glusterd_friend_rpc_create (this, *friend, args);
+ synclock_lock (&conf->big_lock);
+ }
+ else {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to store peerinfo");
+ }
}
+ if (ret) {
+ (void) glusterd_friend_cleanup (*friend);
+ *friend = NULL;
+ }
+out:
+ gf_log (this->name, GF_LOG_INFO, "connect returned %d", ret);
return ret;
}
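+/* Entry point for 'peer probe': unknown hosts are added as new friends
+ * (the CLI reply is deferred until the connection comes up); an already
+ * befriended, connected peer only gets its hostname updated and the CLI
+ * is answered immediately; any other known peer is reported as already
+ * being in the peer list. */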
int
-glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
+glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
+ dict_t *dict)
+{
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_args_t args = {0};
+ glusterd_friend_sm_event_t *event = NULL;
+
+ GF_ASSERT (hoststr);
+
+ ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
+ " for host: %s (%d)", hoststr, port);
+ args.mode = GD_MODE_ON;
+ args.req = req;
+ args.dict = dict;
+ ret = glusterd_friend_add ((char *)hoststr, port,
+ GD_FRIEND_STATE_DEFAULT,
+ NULL, &peerinfo, 0, &args);
+ if ((!ret) && (!peerinfo->connected)) {
+ ret = GLUSTERD_CONNECTION_AWAITED;
+ }
+
+ } else if (peerinfo->connected &&
+ (GD_FRIEND_STATE_BEFRIENDED == peerinfo->state.state)) {
+ ret = glusterd_friend_hostname_update (peerinfo, (char*)hoststr,
+ _gf_false);
+ if (ret)
+ goto out;
+ //this is just a rename, so inject LOCAL_ACC to trigger a cluster update
+ ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_LOCAL_ACC,
+ &event);
+ if (!ret) {
+ event->peerinfo = peerinfo;
+ ret = glusterd_friend_sm_inject_event (event);
+ glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_SUCCESS,
+ NULL, (char*)hoststr,
+ port, dict);
+ }
+ } else {
+ glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
+ (char*)hoststr, port, dict);
+ }
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
+ uuid_t uuid, dict_t *dict)
{
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -1639,10 +3192,10 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
GF_ASSERT (hoststr);
GF_ASSERT (req);
- ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo);
+ ret = glusterd_friend_find (uuid, (char *)hoststr, &peerinfo);
if (ret) {
- gf_log ("glusterd", GF_LOG_NORMAL, "Unable to find peerinfo"
+ gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
" for host: %s %d", hoststr, port);
goto out;
}
@@ -1670,6 +3223,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port)
ctx->hostname = gf_strdup (hoststr);
ctx->port = port;
ctx->req = req;
+ ctx->dict = dict;
event->ctx = ctx;
@@ -1704,458 +3258,852 @@ glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int por
conf = this->private;
- uuid_copy (rsp.uuid, conf->uuid);
+ uuid_copy (rsp.uuid, MY_UUID);
rsp.hostname = hostname;
rsp.port = port;
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_friend_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
- gf_log ("glusterd", GF_LOG_NORMAL,
+ gf_log ("glusterd", GF_LOG_INFO,
"Responded to %s (%d), ret: %d", hostname, port, ret);
return ret;
}
+
int
-glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *hostname, int port)
+glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
+ char *remote_hostname, int port, int32_t op_ret,
+ int32_t op_errno)
{
gd1_mgmt_friend_rsp rsp = {{0}, };
int32_t ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
- GF_ASSERT (hostname);
+ GF_ASSERT (myhostname);
- rsp.op_ret = 0;
this = THIS;
GF_ASSERT (this);
conf = this->private;
- uuid_copy (rsp.uuid, conf->uuid);
- rsp.hostname = gf_strdup (hostname);
+ uuid_copy (rsp.uuid, MY_UUID);
+ rsp.op_ret = op_ret;
+ rsp.op_errno = op_errno;
+ rsp.hostname = gf_strdup (myhostname);
rsp.port = port;
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gd_xdr_serialize_mgmt_friend_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
- gf_log ("glusterd", GF_LOG_NORMAL,
- "Responded to %s (%d), ret: %d", hostname, port, ret);
+ gf_log ("glusterd", GF_LOG_INFO,
+ "Responded to %s (%d), ret: %d", remote_hostname, port, ret);
+ GF_FREE (rsp.hostname);
return ret;
}
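+/* Maps a probe op_ret/op_errno pair to a human-readable error string for
+ * the CLI, unless an explicit op_errstr was already supplied. */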
+static void
+set_probe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
+ size_t len, char *hostname, int port)
+{
+ if ((op_errstr) && (strcmp (op_errstr, ""))) {
+ snprintf (errstr, len, "%s", op_errstr);
+ return;
+ }
+
+ if (!op_ret) {
+ switch (op_errno) {
+ case GF_PROBE_LOCALHOST:
+ snprintf (errstr, len, "Probe on localhost not "
+ "needed");
+ break;
+
+ case GF_PROBE_FRIEND:
+ snprintf (errstr, len, "Host %s port %d already"
+ " in peer list", hostname, port);
+ break;
+
+ default:
+ if (op_errno != 0)
+ snprintf (errstr, len, "Probe returned "
+ "with unknown errno %d",
+ op_errno);
+ break;
+ }
+ } else {
+ switch (op_errno) {
+ case GF_PROBE_ANOTHER_CLUSTER:
+ snprintf (errstr, len, "%s is already part of "
+ "another cluster", hostname);
+ break;
+
+ case GF_PROBE_VOLUME_CONFLICT:
+ snprintf (errstr, len, "At least one volume on "
+ "%s conflicts with existing volumes "
+ "in the cluster", hostname);
+ break;
+
+ case GF_PROBE_UNKNOWN_PEER:
+ snprintf (errstr, len, "%s responded with "
+ "'unknown peer' error, this could "
+ "happen if %s doesn't have localhost "
+ "in its peer database", hostname,
+ hostname);
+ break;
+
+ case GF_PROBE_ADD_FAILED:
+ snprintf (errstr, len, "Failed to add peer "
+ "information on %s", hostname);
+ break;
+
+ case GF_PROBE_SAME_UUID:
+ snprintf (errstr, len, "Peer uuid (host %s) is "
+ "same as local uuid", hostname);
+ break;
+
+ case GF_PROBE_QUORUM_NOT_MET:
+ snprintf (errstr, len, "Cluster quorum is not "
+ "met. Changing peers is not allowed "
+ "in this state");
+ break;
+
+ default:
+ snprintf (errstr, len, "Probe returned with "
+ "unknown errno %d", op_errno);
+ break;
+ }
+ }
+}
+
int
glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
- int32_t op_errno, char *hostname, int port)
+ int32_t op_errno, char *op_errstr, char *hostname,
+ int port, dict_t *dict)
{
- gf1_cli_probe_rsp rsp = {0, };
+ gf_cli_rsp rsp = {0,};
int32_t ret = -1;
+ char errstr[2048] = {0,};
+ char *cmd_str = NULL;
+ xlator_t *this = THIS;
GF_ASSERT (req);
+ GF_ASSERT (this);
+
+ (void) set_probe_error_str (op_ret, op_errno, op_errstr, errstr,
+ sizeof (errstr), hostname, port);
+
+ if (dict) {
+ ret = dict_get_str (dict, "cmd-str", &cmd_str);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "command string");
+ }
rsp.op_ret = op_ret;
rsp.op_errno = op_errno;
- rsp.hostname = hostname;
- rsp.port = port;
+ rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
+
+ gf_cmd_log ("", "%s : %s %s %s", cmd_str,
+ (op_ret) ? "FAILED" : "SUCCESS",
+ (errstr[0] != '\0') ? ":" : " ",
+ (errstr[0] != '\0') ? errstr : " ");
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_probe_rsp);
+ (xdrproc_t)xdr_gf_cli_rsp);
- gf_log ("glusterd", GF_LOG_NORMAL, "Responded to CLI, ret: %d",ret);
+ if (dict)
+ dict_unref (dict);
+ gf_log (this->name, GF_LOG_DEBUG, "Responded to CLI, ret: %d",ret);
return ret;
}
+static void
+set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
+ size_t len, char *hostname)
+{
+ if ((op_errstr) && (strcmp (op_errstr, ""))) {
+ snprintf (errstr, len, "%s", op_errstr);
+ return;
+ }
+
+ if (op_ret) {
+ switch (op_errno) {
+ case GF_DEPROBE_LOCALHOST:
+ snprintf (errstr, len, "%s is localhost",
+ hostname);
+ break;
+
+ case GF_DEPROBE_NOT_FRIEND:
+ snprintf (errstr, len, "%s is not part of "
+ "cluster", hostname);
+ break;
+
+ case GF_DEPROBE_BRICK_EXIST:
+ snprintf (errstr, len, "Brick(s) with the peer "
+ "%s exist in cluster", hostname);
+ break;
+
+ case GF_DEPROBE_FRIEND_DOWN:
+ snprintf (errstr, len, "One of the peers is "
+ "probably down. Check with "
+ "'peer status'");
+ break;
+
+ case GF_DEPROBE_QUORUM_NOT_MET:
+ snprintf (errstr, len, "Cluster quorum is not "
+ "met. Changing peers is not allowed "
+ "in this state");
+ break;
+
+ default:
+ snprintf (errstr, len, "Detach returned with "
+ "unknown errno %d", op_errno);
+ break;
+
+ }
+ }
+}
+
+
int
glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret,
- int32_t op_errno, char *hostname)
+ int32_t op_errno, char *op_errstr,
+ char *hostname, dict_t *dict)
{
- gf1_cli_deprobe_rsp rsp = {0, };
+ gf_cli_rsp rsp = {0,};
int32_t ret = -1;
+ char *cmd_str = NULL;
+ char errstr[2048] = {0,};
GF_ASSERT (req);
+ (void) set_deprobe_error_str (op_ret, op_errno, op_errstr, errstr,
+ sizeof (errstr), hostname);
+
+ if (dict) {
+ ret = dict_get_str (dict, "cmd-str", &cmd_str);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to get "
+ "command string");
+ }
+
rsp.op_ret = op_ret;
rsp.op_errno = op_errno;
- rsp.hostname = hostname;
+ rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
+
+ gf_cmd_log ("", "%s : %s %s %s", cmd_str,
+ (op_ret) ? "FAILED" : "SUCCESS",
+ (errstr[0] != '\0') ? ":" : " ",
+ (errstr[0] != '\0') ? errstr : " ");
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_deprobe_rsp);
+ (xdrproc_t)xdr_gf_cli_rsp);
- gf_log ("glusterd", GF_LOG_NORMAL, "Responded to CLI, ret: %d",ret);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to CLI, ret: %d",ret);
return ret;
}
+
int32_t
-glusterd_op_txn_begin ()
+glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
{
int32_t ret = -1;
glusterd_conf_t *priv = NULL;
- int32_t locked = 0;
+ glusterd_peerinfo_t *entry = NULL;
+ int32_t count = 0;
+ dict_t *friends = NULL;
+ gf1_cli_peer_list_rsp rsp = {0,};
+ char my_uuid_str[64] = {0,};
+ char key[256] = {0,};
priv = THIS->private;
GF_ASSERT (priv);
- ret = glusterd_lock (priv->uuid);
-
- if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "Unable to acquire local lock, ret: %d", ret);
+ friends = dict_new ();
+ if (!friends) {
+ gf_log ("", GF_LOG_WARNING, "Out of Memory");
goto out;
}
+ if (!list_empty (&priv->peers)) {
+ list_for_each_entry (entry, &priv->peers, uuid_list) {
+ count++;
+ ret = glusterd_add_peer_detail_to_dict (entry,
+ friends, count);
+ if (ret)
+ goto out;
+ }
+ }
- locked = 1;
- gf_log ("glusterd", GF_LOG_NORMAL, "Acquired local lock");
+ if (flags == GF_CLI_LIST_POOL_NODES) {
+ count++;
+ snprintf (key, 256, "friend%d.uuid", count);
+ uuid_utoa_r (MY_UUID, my_uuid_str);
+ ret = dict_set_str (friends, key, my_uuid_str);
+ if (ret)
+ goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_START_LOCK, NULL);
+ snprintf (key, 256, "friend%d.hostname", count);
+ ret = dict_set_str (friends, key, "localhost");
+ if (ret)
+ goto out;
- gf_log ("glusterd", GF_LOG_NORMAL, "Returning %d", ret);
+ snprintf (key, 256, "friend%d.connected", count);
+ ret = dict_set_int32 (friends, key, 1);
+ if (ret)
+ goto out;
+ }
+
+ ret = dict_set_int32 (friends, "count", count);
+ if (ret)
+ goto out;
+ ret = dict_allocate_and_serialize (friends, &rsp.friends.friends_val,
+ &rsp.friends.friends_len);
+
+ if (ret)
+ goto out;
+
+ ret = 0;
out:
- if (locked && ret)
- glusterd_unlock (priv->uuid);
+
+ if (friends)
+ dict_unref (friends);
+
+ rsp.op_ret = ret;
+
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
+ ret = 0;
+ GF_FREE (rsp.friends.friends_val);
+
return ret;
}
int32_t
-glusterd_create_volume (rpcsvc_request_t *req, dict_t *dict)
+glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
{
- int32_t ret = -1;
- data_t *data = NULL;
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *entry = NULL;
+ int32_t count = 0;
+ dict_t *volumes = NULL;
+ gf_cli_rsp rsp = {0,};
+ char *volname = NULL;
- GF_ASSERT (req);
- GF_ASSERT (dict);
+ priv = THIS->private;
+ GF_ASSERT (priv);
- glusterd_op_set_op (GD_OP_CREATE_VOLUME);
+ volumes = dict_new ();
+ if (!volumes) {
+ gf_log ("", GF_LOG_WARNING, "Out of Memory");
+ goto out;
+ }
- glusterd_op_set_ctx (GD_OP_CREATE_VOLUME, dict);
+ if (list_empty (&priv->volumes)) {
+ ret = 0;
+ goto respond;
+ }
- glusterd_op_set_req (req);
+ if (flags == GF_CLI_GET_VOLUME_ALL) {
+ list_for_each_entry (entry, &priv->volumes, vol_list) {
+ ret = glusterd_add_volume_detail_to_dict (entry,
+ volumes, count);
+ if (ret)
+ goto respond;
- data = dict_get (dict, "volname");
- if (!data)
- goto out;
+ count++;
- data = dict_get (dict, "type");
- if (!data)
- goto out;
+ }
- data = dict_get (dict, "count");
- if (!data)
- goto out;
+ } else if (flags == GF_CLI_GET_NEXT_VOLUME) {
+ ret = dict_get_str (dict, "volname", &volname);
- data = dict_get (dict, "bricks");
- if (!data)
- goto out;
+ if (ret) {
+ if (priv->volumes.next) {
+ entry = list_entry (priv->volumes.next,
+ typeof (*entry),
+ vol_list);
+ }
+ } else {
+ ret = glusterd_volinfo_find (volname, &entry);
+ if (ret)
+ goto respond;
+ entry = list_entry (entry->vol_list.next,
+ typeof (*entry),
+ vol_list);
+ }
- ret = glusterd_op_txn_begin ();
+ if (&entry->vol_list == &priv->volumes) {
+ goto respond;
+ } else {
+ ret = glusterd_add_volume_detail_to_dict (entry,
+ volumes, count);
+ if (ret)
+ goto respond;
-out:
- return ret;
-}
+ count++;
+ }
+ } else if (flags == GF_CLI_GET_VOLUME) {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ goto respond;
-int32_t
-glusterd_start_volume (rpcsvc_request_t *req, char *volname, int flags)
-{
- int32_t ret = -1;
- glusterd_op_start_volume_ctx_t *ctx = NULL;
+ ret = glusterd_volinfo_find (volname, &entry);
+ if (ret)
+ goto respond;
- GF_ASSERT (req);
- GF_ASSERT (volname);
+ ret = glusterd_add_volume_detail_to_dict (entry,
+ volumes, count);
+ if (ret)
+ goto respond;
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_start_volume_ctx_t);
+ count++;
+ }
- if (!ctx)
+respond:
+ ret = dict_set_int32 (volumes, "count", count);
+ if (ret)
goto out;
+ ret = dict_allocate_and_serialize (volumes, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
- strncpy (ctx->volume_name, volname, GD_VOLUME_NAME_MAX);
+ if (ret)
+ goto out;
- glusterd_op_set_op (GD_OP_START_VOLUME);
+ ret = 0;
+out:
+ rsp.op_ret = ret;
- glusterd_op_set_ctx (GD_OP_START_VOLUME, ctx);
- glusterd_op_set_req (req);
+ rsp.op_errstr = "";
+ glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp);
+ ret = 0;
- ret = glusterd_op_txn_begin ();
+ if (volumes)
+ dict_unref (volumes);
-out:
+ GF_FREE (rsp.dict.dict_val);
return ret;
}
-int32_t
-glusterd_stop_volume (rpcsvc_request_t *req, char *volname, int flags)
+int
+__glusterd_handle_status_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- glusterd_op_stop_volume_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+ uint32_t cmd = 0;
+ dict_t *dict = NULL;
+ char *volname = 0;
+ gf_cli_req cli_req = {{0,}};
+ glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
GF_ASSERT (req);
- GF_ASSERT (volname);
-
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_stop_volume_ctx_t);
+ this = THIS;
+ GF_ASSERT (this);
- if (!ctx)
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
goto out;
+ }
+
+ if (cli_req.dict.dict_len > 0) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "unserialize buffer");
+ snprintf (err_str, sizeof (err_str), "Unable to decode "
+ "the command");
+ goto out;
+ }
- strncpy (ctx->volume_name, volname, GD_VOLUME_NAME_MAX);
+ }
+
+ ret = dict_get_uint32 (dict, "cmd", &cmd);
+ if (ret)
+ goto out;
- glusterd_op_set_op (GD_OP_STOP_VOLUME);
+ if (!(cmd & GF_CLI_STATUS_ALL)) {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Unable to get "
+ "volume name");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_INFO,
+ "Received status volume req for volume %s", volname);
- glusterd_op_set_ctx (GD_OP_STOP_VOLUME, ctx);
- glusterd_op_set_req (req);
+ }
- ret = glusterd_op_txn_begin ();
+ ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
out:
+
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
+ }
+ free (cli_req.dict.dict_val);
+
return ret;
}
-int32_t
-glusterd_delete_volume (rpcsvc_request_t *req, char *volname, int flags)
+int
+glusterd_handle_status_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- glusterd_op_delete_volume_ctx_t *ctx = NULL;
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_status_volume);
+}
- GF_ASSERT (req);
- GF_ASSERT (volname);
+int
+__glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{0,}};
+ glusterd_op_t cli_op = GD_OP_CLEARLOCKS_VOLUME;
+ char *volname = NULL;
+ dict_t *dict = NULL;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_delete_volume_ctx_t);
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
- if (!ctx)
+ ret = -1;
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
goto out;
+ }
- strncpy (ctx->volume_name, volname, GD_VOLUME_NAME_MAX);
+ if (cli_req.dict.dict_len) {
+ dict = dict_new ();
- glusterd_op_set_op (GD_OP_DELETE_VOLUME);
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to unserialize req-buffer to"
+ " dictionary");
+ snprintf (err_str, sizeof (err_str), "unable to decode "
+ "the command");
+ goto out;
+ }
- glusterd_op_set_ctx (GD_OP_DELETE_VOLUME, ctx);
- glusterd_op_set_req (req);
+ } else {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Empty cli request.");
+ goto out;
+ }
- ret = glusterd_op_txn_begin ();
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Unable to get volume "
+ "name");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_INFO, "Received clear-locks volume req "
+ "for volume %s", volname);
+
+ ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict);
out:
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
+ }
+ free (cli_req.dict.dict_val);
+
return ret;
}
-int32_t
-glusterd_add_brick (rpcsvc_request_t *req, dict_t *dict)
+int
+glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
{
- int32_t ret = -1;
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_clearlocks_volume);
+}
- GF_ASSERT (req);
- GF_ASSERT (dict);
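+/* Splits a "volume-id:brick-path" brickid, resolves the volume (regular
+ * or snapshot) by its uuid and returns the matching brickinfo. */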
+static int
+get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
+{
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volid_str = NULL;
+ char *brick = NULL;
+ char *brickid_dup = NULL;
+ uuid_t volid = {0};
+ int ret = -1;
+
+ brickid_dup = gf_strdup (brickid);
+ if (!brickid_dup)
+ goto out;
- glusterd_op_set_op (GD_OP_ADD_BRICK);
+ volid_str = brickid_dup;
+ brick = strchr (brickid_dup, ':');
+ if (!brick)
+ goto out;
+ *brick = '\0';
+ brick++;
- glusterd_op_set_ctx (GD_OP_ADD_BRICK, dict);
- glusterd_op_set_req (req);
+ uuid_parse (volid_str, volid);
+ ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo);
+ if (ret) {
+ /* Check if it a snapshot volume */
+ ret = glusterd_snap_volinfo_find_by_volume_id (volid, &volinfo);
+ if (ret)
+ goto out;
+ }
- ret = glusterd_op_txn_begin ();
+ ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
+ brickinfo);
+ if (ret)
+ goto out;
+ ret = 0;
+out:
+ GF_FREE (brickid_dup);
return ret;
}
-int32_t
-glusterd_replace_brick (rpcsvc_request_t *req, dict_t *dict)
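+/* RPC notify callback for brick connections: marks the brick started or
+ * stopped as the connection comes and goes, and frees the brickid once
+ * the rpc has been disabled. */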
+int
+__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- int32_t ret = -1;
-
- GF_ASSERT (req);
- GF_ASSERT (dict);
-
- glusterd_op_set_op (GD_OP_REPLACE_BRICK);
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+ char *brickid = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_op_set_ctx (GD_OP_REPLACE_BRICK, dict);
+ brickid = mydata;
+ if (!brickid)
+ return 0;
- ret = glusterd_op_txn_begin ();
+ ret = get_brickinfo_from_brickid (brickid, &brickinfo);
+ if (ret)
+ return 0;
- return ret;
-}
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
-int32_t
-glusterd_remove_brick (rpcsvc_request_t *req, dict_t *dict)
-{
- int32_t ret = -1;
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ gf_log (this->name, GF_LOG_DEBUG, "Connected to %s:%s",
+ brickinfo->hostname, brickinfo->path);
+ glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
+ ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
- GF_ASSERT (req);
- GF_ASSERT (dict);
+ break;
- glusterd_op_set_op (GD_OP_REMOVE_BRICK);
+ case RPC_CLNT_DISCONNECT:
+ if (GF_BRICK_STARTED == brickinfo->status)
+ gf_log (this->name, GF_LOG_INFO, "Disconnected from "
+ "%s:%s", brickinfo->hostname, brickinfo->path);
- glusterd_op_set_ctx (GD_OP_REMOVE_BRICK, dict);
- glusterd_op_set_req (req);
+ glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
+ if (rpc_clnt_is_disabled (rpc))
+ GF_FREE (brickid);
+ break;
- ret = glusterd_op_txn_begin ();
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+ break;
+ }
return ret;
}
-int32_t
-glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
+int
+glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *entry = NULL;
- int32_t count = 0;
- dict_t *friends = NULL;
- gf1_cli_peer_list_rsp rsp = {0,};
-
- priv = THIS->private;
- GF_ASSERT (priv);
-
- if (!list_empty (&priv->peers)) {
- friends = dict_new ();
- if (!friends) {
- gf_log ("", GF_LOG_WARNING, "Out of Memory");
- goto out;
- }
- } else {
- ret = 0;
- goto out;
- }
-
- if (flags == GF_CLI_LIST_ALL) {
- list_for_each_entry (entry, &priv->peers, uuid_list) {
- count++;
- ret = glusterd_add_peer_detail_to_dict (entry,
- friends, count);
- if (ret)
- goto out;
+ return glusterd_big_locked_notify (rpc, mydata, event, data,
+ __glusterd_brick_rpc_notify);
+}
- }
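+/* rpc-clnt notification callback for node-level services; mydata carries the
+ * service name, whose online status is flipped on connect/disconnect. */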
+int
+__glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *server = NULL;
+ int ret = 0;
- ret = dict_set_int32 (friends, "count", count);
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
- if (ret)
- goto out;
- }
+ server = mydata;
+ if (!server)
+ return 0;
- ret = dict_allocate_and_serialize (friends, &rsp.friends.friends_val,
- (size_t *)&rsp.friends.friends_len);
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
+ (void) glusterd_nodesvc_set_online_status (server, _gf_true);
+ ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
- if (ret)
- goto out;
+ break;
- ret = 0;
-out:
+ case RPC_CLNT_DISCONNECT:
+ gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT");
+ (void) glusterd_nodesvc_set_online_status (server, _gf_false);
+ break;
- if (ret) {
- if (friends)
- dict_destroy (friends);
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+ break;
}
- rsp.op_ret = ret;
-
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_peer_list_rsp);
-
return ret;
}
-int32_t
-glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
+int
+glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *entry = NULL;
- int32_t count = 0;
- dict_t *volumes = NULL;
- gf1_cli_get_vol_rsp rsp = {0,};
+ return glusterd_big_locked_notify (rpc, mydata, event, data,
+ __glusterd_nodesvc_rpc_notify);
+}
- priv = THIS->private;
- GF_ASSERT (priv);
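+/* Respond to a pending CLI probe with ENOTCONN and inject a REMOVE_FRIEND
+ * event, so a peer that never completed the handshake is cleaned up. */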
+int
+glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
+{
+ int ret = -1;
+ glusterd_friend_sm_event_t *new_event = NULL;
+        glusterd_peerinfo_t *peerinfo = NULL;
+        rpcsvc_request_t *req = NULL;
+        char *errstr = NULL;
+ dict_t *dict = NULL;
- if (!list_empty (&priv->volumes)) {
- volumes = dict_new ();
- if (!volumes) {
- gf_log ("", GF_LOG_WARNING, "Out of Memory");
+ GF_ASSERT (peerctx);
+
+ peerinfo = peerctx->peerinfo;
+ req = peerctx->args.req;
+ dict = peerctx->args.dict;
+ errstr = peerctx->errstr;
+
+ ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND,
+ &new_event);
+ if (!ret) {
+ if (!req) {
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "Unable to find the request for responding "
+ "to User (%s)", peerinfo->hostname);
goto out;
}
- } else {
- ret = 0;
- goto out;
- }
- if (flags == GF_CLI_GET_VOLUME_ALL) {
- list_for_each_entry (entry, &priv->volumes, vol_list) {
- count++;
- ret = glusterd_add_volume_detail_to_dict (entry,
- volumes, count);
- if (ret)
- goto out;
-
- }
+ glusterd_xfer_cli_probe_resp (req, -1, ENOTCONN, errstr,
+ peerinfo->hostname,
+ peerinfo->port, dict);
- ret = dict_set_int32 (volumes, "count", count);
+ new_event->peerinfo = peerinfo;
+ ret = glusterd_friend_sm_inject_event (new_event);
- if (ret)
- goto out;
+ } else {
+                gf_log (THIS->name, GF_LOG_ERROR,
+ "Unable to create event for removing peer %s",
+ peerinfo->hostname);
}
- ret = dict_allocate_and_serialize (volumes, &rsp.volumes.volumes_val,
- (size_t *)&rsp.volumes.volumes_len);
-
- if (ret)
- goto out;
-
- ret = 0;
out:
-
-
- rsp.op_ret = ret;
-
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- gf_xdr_serialize_cli_peer_list_rsp);
-
- if (volumes)
- dict_destroy (volumes);
-
- if (rsp.volumes.volumes_val)
- GF_FREE (rsp.volumes.volumes_val);
return ret;
}
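+/* rpc-clnt notification callback for peer (glusterd-to-glusterd) connections:
+ * starts the version handshake on connect; on disconnect releases any volume
+ * locks held by the peer, updates quorum bookkeeping, and removes peers whose
+ * probe never completed, notifying the cli. */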
int
-glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
- void *data)
+__glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- xlator_t *this = NULL;
- char *handshake = "on";
- glusterd_conf_t *conf = NULL;
- int ret = 0;
- glusterd_peerinfo_t *peerinfo = NULL;
-
- peerinfo = mydata;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_t *peerctx = NULL;
+ gf_boolean_t quorum_action = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ peerctx = mydata;
+ if (!peerctx)
+ return 0;
+
+ peerinfo = peerctx->peerinfo;
this = THIS;
conf = this->private;
-
switch (event) {
case RPC_CLNT_CONNECT:
{
-
- gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_CONNECT");
+ gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
peerinfo->connected = 1;
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if ((ret < 0) || (strcasecmp (handshake, "on"))) {
- //ret = glusterd_handshake (this, peerinfo->rpc);
+ peerinfo->quorum_action = _gf_true;
- } else {
- //conf->rpc->connected = 1;
- ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
- }
+ ret = glusterd_peer_dump_version (this, rpc, peerctx);
+ if (ret)
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "glusterd handshake failed");
break;
}
case RPC_CLNT_DISCONNECT:
+ {
+ gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT %d",
+ peerinfo->state.state);
+
+ if (peerinfo->connected) {
+ list_for_each_entry (volinfo, &conf->volumes, vol_list) {
+ ret = glusterd_mgmt_v3_unlock (volinfo->volname,
+ peerinfo->uuid,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_TRACE,
+ "Lock not released for %s",
+ volinfo->volname);
+ }
- //Inject friend disconnected here
+ ret = 0;
+ }
- gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_DISCONNECT");
- peerinfo->connected = 0;
+ if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
+ (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
+ peerinfo->quorum_contrib = QUORUM_DOWN;
+ quorum_action = _gf_true;
+ peerinfo->quorum_action = _gf_false;
+ }
- //default_notify (this, GF_EVENT_CHILD_DOWN, NULL);
- break;
+                /* Remove the peer if it is not yet a friend and the
+                 * connection/handshake failed, and notify the cli.
+                 * This happens only during a probe.
+                 */
+ if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
+ glusterd_friend_remove_notify (peerctx);
+ goto out;
+ }
+ peerinfo->connected = 0;
+ break;
+ }
default:
gf_log (this->name, GF_LOG_TRACE,
"got some other RPC event %d", event);
@@ -2163,5 +4111,127 @@ glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
break;
}
+out:
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ if (quorum_action)
+ glusterd_do_quorum_action ();
return ret;
}
+
+int
+glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ return glusterd_big_locked_notify (rpc, mydata, event, data,
+ __glusterd_peer_rpc_notify);
+}
+
+int
+glusterd_null (rpcsvc_request_t *req)
+{
+
+ return 0;
+}
+
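+/* The actor tables below map RPC procedure numbers to handler functions;
+ * each rpcsvc_program bundles one table with its program number and version.
+ * (The non-zero flag on GETWD/MOUNT/UMOUNT appears to mark actors that may
+ * be invoked from unprivileged ports.) */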
+rpcsvc_actor_t gd_svc_mgmt_actors[] = {
+ [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_svc_mgmt_prog = {
+ .progname = "GlusterD svc mgmt",
+ .prognum = GD_MGMT_PROGRAM,
+ .progver = GD_MGMT_VERSION,
+ .numactors = GLUSTERD_MGMT_MAXVALUE,
+ .actors = gd_svc_mgmt_actors,
+ .synctask = _gf_true,
+};
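+/* None of these programs is registered in this file; glusterd's init code
+ * hands them to the rpc service layer. Illustrative sketch only, assuming
+ * the two-argument rpcsvc_program_register() of this rpc layer:
+ *
+ *         ret = rpcsvc_program_register (rpc, &gd_svc_mgmt_prog);
+ *         if (ret)
+ *                 goto out;
+ */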
+
+rpcsvc_actor_t gd_svc_peer_actors[] = {
+ [GLUSTERD_FRIEND_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
+ [GLUSTERD_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_PROBE_QUERY, glusterd_handle_probe_query, NULL, 0, DRC_NA},
+ [GLUSTERD_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, 0, DRC_NA},
+ [GLUSTERD_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, 0, DRC_NA},
+ [GLUSTERD_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_svc_peer_prog = {
+ .progname = "GlusterD svc peer",
+ .prognum = GD_FRIEND_PROGRAM,
+ .progver = GD_FRIEND_VERSION,
+ .numactors = GLUSTERD_FRIEND_MAXVALUE,
+ .actors = gd_svc_peer_actors,
+ .synctask = _gf_false,
+};
+
+rpcsvc_actor_t gd_svc_cli_actors[] = {
+ [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_DEPROBE] = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_UUID_RESET] = { "UUID_RESET", GLUSTER_CLI_UUID_RESET, glusterd_handle_cli_uuid_reset, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LOG_ROTATE] = { "LOG FILENAME", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_QUOTA] = { "QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_HEAL_VOLUME] = { "HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME, glusterd_handle_cli_heal_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_svc_cli_prog = {
+ .progname = "GlusterD svc cli",
+ .prognum = GLUSTER_CLI_PROGRAM,
+ .progver = GLUSTER_CLI_VERSION,
+ .numactors = GLUSTER_CLI_MAXVALUE,
+ .actors = gd_svc_cli_actors,
+ .synctask = _gf_true,
+};
+
+/* This is a minimal RPC prog, which contains only the readonly RPC procs from
+ * the cli rpcsvc
+ */
+rpcsvc_actor_t gd_svc_cli_actors_ro[] = {
+ [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_svc_cli_prog_ro = {
+ .progname = "GlusterD svc cli read-only",
+ .prognum = GLUSTER_CLI_PROGRAM,
+ .progver = GLUSTER_CLI_VERSION,
+ .numactors = GLUSTER_CLI_MAXVALUE,
+ .actors = gd_svc_cli_actors_ro,
+ .synctask = _gf_true,
+};
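+
+/* Restricting a listener to this read-only program limits callers to
+ * informational queries (peer list, uuid get, volume get/list/status, getwd);
+ * none of the actors above can modify cluster state. */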