Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/Makefile.am | 32
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitd-svc.c | 8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitrot.c | 97
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 1347
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c | 76
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h | 8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-errno.h | 2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-ganesha.c | 927
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-geo-rep.c | 324
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-geo-rep.h | 2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c | 7
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c | 20
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.h | 2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c | 1348
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handshake.c | 385
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-hooks.c | 126
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-hooks.h | 4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c | 130
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.h | 3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-log-ops.c | 24
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mem-types.h | 101
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-messages.h | 158
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c | 247
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.c | 898
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.h | 15
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mountbroker.c | 69
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mountbroker.h | 6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-nfs-svc.c | 13
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-nfs-svc.h | 2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c | 1407
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.h | 29
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.c | 346
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.h | 9
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-pmap.c | 40
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-pmap.h | 16
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c | 10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h | 2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-quota.c | 104
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-quotad-svc.c | 10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rcu.h | 2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rebalance.c | 634
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c | 66
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-reset-brick.c | 26
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c | 190
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-scrub-svc.c | 12
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 20
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c | 153
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h | 42
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 678
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc.h | 17
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.c | 275
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.h | 13
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapd-svc.c | 29
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapd-svc.h | 2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c | 410
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h | 10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c | 415
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-statedump.c | 10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-statedump.h | 2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c | 1795
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.h | 58
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 836
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-helper.h | 43
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c | 284
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h | 46
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c | 185
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.h | 4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tier.c | 1382
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c | 9
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.h | 37
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tierd-svc.c | 503
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tierd-svc.h | 41
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c | 3577
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h | 111
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c | 2089
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.h | 34
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 952
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c | 1129
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c | 455
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h | 531
80 files changed, 13589 insertions, 11872 deletions
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index aa75344d1ac..685beb42d27 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -5,34 +5,34 @@ endif
xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/mgmt
glusterd_la_CPPFLAGS = $(AM_CPPFLAGS) \
-DFILTERDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/filter\" \
- -DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\"
+ -DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \
+ -I$(top_srcdir)/libglusterd/src/
+
glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-op-sm.c glusterd-utils.c glusterd-rpc-ops.c \
glusterd-store.c glusterd-handshake.c glusterd-pmap.c \
glusterd-volgen.c glusterd-rebalance.c \
glusterd-quota.c glusterd-bitrot.c glusterd-geo-rep.c \
- glusterd-replace-brick.c glusterd-log-ops.c glusterd-tier.c \
+ glusterd-replace-brick.c glusterd-log-ops.c \
glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c \
glusterd-syncop.c glusterd-hooks.c glusterd-volume-set.c \
glusterd-locks.c glusterd-snapshot.c glusterd-mgmt-handler.c \
glusterd-mgmt.c glusterd-peer-utils.c glusterd-statedump.c \
glusterd-snapshot-utils.c glusterd-conn-mgmt.c \
- glusterd-proc-mgmt.c glusterd-svc-mgmt.c glusterd-shd-svc.c \
+ glusterd-proc-mgmt.c glusterd-svc-mgmt.c \
glusterd-nfs-svc.c glusterd-quotad-svc.c glusterd-svc-helper.c \
glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c \
glusterd-bitd-svc.c glusterd-scrub-svc.c glusterd-server-quorum.c \
- glusterd-reset-brick.c glusterd-tierd-svc.c glusterd-tierd-svc-helper.c \
- glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c
-
+ glusterd-reset-brick.c glusterd-shd-svc.c glusterd-shd-svc-helper.c \
+ glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c glusterd-ganesha.c \
+ $(CONTRIBDIR)/mount/mntent.c
glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
+ $(top_builddir)/libglusterd/src/libglusterd.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
- $(XML_LIBS) -lcrypto $(URCU_LIBS) $(URCU_CDS_LIBS)
-if ENABLE_BD_XLATOR
-glusterd_la_LIBADD += -llvm2app
-endif
+ $(XML_LIBS) -lcrypto $(URCU_LIBS) $(URCU_CDS_LIBS) $(LIB_DL) $(GF_XLATOR_MGNT_LIBADD)
noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-sm.h glusterd-store.h glusterd-mem-types.h \
@@ -41,13 +41,14 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-mgmt.h glusterd-messages.h glusterd-peer-utils.h \
glusterd-statedump.h glusterd-snapshot-utils.h glusterd-geo-rep.h \
glusterd-conn-mgmt.h glusterd-conn-helper.h glusterd-proc-mgmt.h \
- glusterd-svc-mgmt.h glusterd-shd-svc.h glusterd-nfs-svc.h \
+ glusterd-svc-mgmt.h glusterd-nfs-svc.h \
glusterd-quotad-svc.h glusterd-svc-helper.h glusterd-snapd-svc.h \
glusterd-snapd-svc-helper.h glusterd-rcu.h glusterd-bitd-svc.h \
glusterd-scrub-svc.h glusterd-server-quorum.h glusterd-errno.h \
- glusterd-tierd-svc.h glusterd-tierd-svc-helper.h \
+ glusterd-shd-svc.h glusterd-shd-svc-helper.h \
glusterd-gfproxyd-svc.h glusterd-gfproxyd-svc-helper.h \
- $(CONTRIBDIR)/userspace-rcu/rculist-extra.h
+ $(CONTRIBDIR)/userspace-rcu/rculist-extra.h \
+ $(CONTRIBDIR)/mount/mntent_compat.h
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
@@ -55,7 +56,10 @@ AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(CONTRIBDIR)/mount -I$(CONTRIBDIR)/userspace-rcu \
-DSBIN_DIR=\"$(sbindir)\" -DDATADIR=\"$(localstatedir)\" \
-DGSYNCD_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\" \
- -DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE)
+ -DCONFDIR=\"$(localstatedir)/run/gluster/shared_storage/nfs-ganesha\" \
+ -DGANESHA_PREFIX=\"$(libexecdir)/ganesha\" \
+ -DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE) \
+ -I$(top_srcdir)/libglusterd/src/
AM_CFLAGS = -Wall $(GF_CFLAGS) $(URCU_CFLAGS) $(URCU_CDS_CFLAGS) $(XML_CFLAGS)
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
index b01c2599dfb..6adb799b18f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
@@ -8,8 +8,8 @@
cases as published by the Free Software Foundation.
*/
-#include "globals.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
@@ -150,7 +150,7 @@ glusterd_bitdsvc_reconfigure()
gf_boolean_t identical = _gf_false;
this = THIS;
- GF_VALIDATE_OR_GOTO(this->name, this, out);
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
priv = this->private;
GF_VALIDATE_OR_GOTO(this->name, priv, out);
@@ -201,6 +201,6 @@ manager:
ret = priv->bitd_svc.manager(&(priv->bitd_svc), NULL, PROC_START_NO_WAIT);
out:
- gf_msg_debug(this->name, 0, "Returning %d", ret);
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
return ret;
}
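
One note on the hunk above, with a minimal sketch (not part of the patch): the guarded form keeps the debug log usable on the early error path where THIS may not have resolved, by falling back to the fixed "glusterd" log domain instead of dereferencing a NULL xlator handle.

    /* Sketch of the defensive-logging idiom adopted above. */
    static void
    log_return_code(xlator_t *this, int ret)
    {
        gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
    }
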
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
index 0608badb91d..37429fe9214 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
@@ -8,7 +8,7 @@
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
@@ -16,10 +16,10 @@
#include "glusterd-store.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
-#include "run.h"
-#include "syscall.h"
-#include "byte-order.h"
-#include "compat-errno.h"
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/compat-errno.h>
#include "glusterd-scrub-svc.h"
#include "glusterd-messages.h"
@@ -34,6 +34,7 @@ const char *gd_bitrot_op_list[GF_BITROT_OPTION_TYPE_MAX] = {
[GF_BITROT_OPTION_TYPE_SCRUB_FREQ] = "scrub-frequency",
[GF_BITROT_OPTION_TYPE_SCRUB] = "scrub",
[GF_BITROT_OPTION_TYPE_EXPIRY_TIME] = "expiry-time",
+ [GF_BITROT_OPTION_TYPE_SIGNER_THREADS] = "signer-threads",
};
int
@@ -319,7 +320,7 @@ glusterd_bitrot_expiry_time(glusterd_volinfo_t *volinfo, dict_t *dict,
int32_t ret = -1;
uint32_t expiry_time = 0;
xlator_t *this = NULL;
- char dkey[1024] = {
+ char dkey[32] = {
0,
};
@@ -354,6 +355,81 @@ out:
return ret;
}
+static gf_boolean_t
+is_bitd_configure_noop(xlator_t *this, glusterd_volinfo_t *volinfo)
+{
+ gf_boolean_t noop = _gf_true;
+ glusterd_brickinfo_t *brickinfo = NULL;
+
+ if (!glusterd_is_bitrot_enabled(volinfo))
+ goto out;
+ else if (volinfo->status != GLUSTERD_STATUS_STARTED)
+ goto out;
+ else {
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (!glusterd_is_local_brick(this, volinfo, brickinfo))
+ continue;
+ noop = _gf_false;
+ return noop;
+ }
+ }
+out:
+ return noop;
+}
+
+static int
+glusterd_bitrot_signer_threads(glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char **op_errstr)
+{
+ int32_t ret = -1;
+ uint32_t signer_th_count = 0;
+ uint32_t existing_th_count = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ char dkey[32] = {
+ 0,
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ ret = dict_get_uint32(dict, "signer-threads", &signer_th_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get bitrot signer thread count.");
+ goto out;
+ }
+
+ ret = dict_get_uint32(volinfo->dict, key, &existing_th_count);
+ if (ret == 0 && signer_th_count == existing_th_count) {
+ goto out;
+ }
+
+ snprintf(dkey, sizeof(dkey), "%d", signer_th_count);
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, key, dkey);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set option %s", key);
+ goto out;
+ }
+
+ if (!is_bitd_configure_noop(this, volinfo)) {
+ ret = priv->bitd_svc.manager(&(priv->bitd_svc), NULL,
+ PROC_START_NO_WAIT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITDSVC_RECONF_FAIL,
+ "Failed to reconfigure bitrot services");
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
static int
glusterd_bitrot_enable(glusterd_volinfo_t *volinfo, char **op_errstr)
{
@@ -594,6 +670,15 @@ glusterd_op_bitrot(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
volinfo, dict, "features.expiry-time", op_errstr);
if (ret)
goto out;
+ break;
+
+ case GF_BITROT_OPTION_TYPE_SIGNER_THREADS:
+ ret = glusterd_bitrot_signer_threads(
+ volinfo, dict, "features.signer-threads", op_errstr);
+ if (ret)
+ goto out;
+ break;
+
case GF_BITROT_CMD_SCRUB_STATUS:
case GF_BITROT_CMD_SCRUB_ONDEMAND:
break;
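
The new GF_BITROT_OPTION_TYPE_SIGNER_THREADS handling above reads the thread count back out of the op dictionary with dict_get_uint32(). A hedged caller-side sketch of how such a request dictionary could be populated; build_signer_threads_req() and the "type" key are assumptions, only the dict API and the enum value come from the patch.

    /* Hypothetical request builder (illustrative only). */
    static int
    build_signer_threads_req(dict_t *dict, uint32_t count)
    {
        int ret = dict_set_int32(dict, "type",
                                 GF_BITROT_OPTION_TYPE_SIGNER_THREADS);
        if (!ret)
            ret = dict_set_uint32(dict, "signer-threads", count);
        return ret;
    }
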
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 7328f0c38bd..e56cd0e6c74 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -7,7 +7,7 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
@@ -20,42 +20,12 @@
#include "glusterd-svc-helper.h"
#include "glusterd-messages.h"
#include "glusterd-server-quorum.h"
-#include "run.h"
-#include "glusterd-volgen.h"
-#include "syscall.h"
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
#include <sys/signal.h>
/* misc */
-gf_boolean_t
-glusterd_is_tiering_supported(char *op_errstr)
-{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- gf_boolean_t supported = _gf_false;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO(this->name, conf, out);
-
- if (conf->op_version < GD_OP_VERSION_3_7_0)
- goto out;
-
- supported = _gf_true;
-
-out:
- if (!supported && op_errstr != NULL && conf)
- sprintf(op_errstr,
- "Tier operation failed. The cluster is "
- "operating at version %d. Tiering"
- " is unavailable in this version.",
- conf->op_version);
-
- return supported;
-}
-
/* In this function, we decide, based on the 'count' of the brick,
where to add it in the current volume. 'count' tells us already
how many of the given bricks are added. other argument are self-
@@ -118,110 +88,6 @@ insert_brick:
}
static int
-gd_addbr_validate_stripe_count(glusterd_volinfo_t *volinfo, int stripe_count,
- int total_bricks, int *type, char *err_str,
- size_t err_len)
-{
- int ret = -1;
-
- switch (volinfo->type) {
- case GF_CLUSTER_TYPE_NONE:
- if ((volinfo->brick_count * stripe_count) == total_bricks) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE;
- gf_msg(THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'distribute' to 'stripe'",
- volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf(err_str, err_len,
- "Incorrect number of "
- "bricks (%d) supplied for stripe count (%d).",
- (total_bricks - volinfo->brick_count), stripe_count);
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
- goto out;
- }
- break;
- case GF_CLUSTER_TYPE_REPLICATE:
- if (!(total_bricks % (volinfo->replica_count * stripe_count))) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
- gf_msg(THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'replicate' to 'replicate-stripe'",
- volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf(err_str, err_len,
- "Incorrect number of "
- "bricks (%d) supplied for changing volume's "
- "stripe count to %d, need at least %d bricks",
- (total_bricks - volinfo->brick_count), stripe_count,
- (volinfo->replica_count * stripe_count));
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
- goto out;
- }
- break;
- case GF_CLUSTER_TYPE_STRIPE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
- if (stripe_count < volinfo->stripe_count) {
- snprintf(err_str, err_len,
- "Incorrect stripe count (%d) supplied. "
- "Volume already has stripe count (%d)",
- stripe_count, volinfo->stripe_count);
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
- goto out;
- }
- if (stripe_count == volinfo->stripe_count) {
- if (!(total_bricks % volinfo->dist_leaf_count)) {
- /* its same as the one which exists */
- ret = 1;
- goto out;
- }
- }
- if (stripe_count > volinfo->stripe_count) {
- /* We have to make sure before and after 'add-brick',
- the number or subvolumes for distribute will remain
- same, when stripe count is given */
- if ((volinfo->brick_count *
- (stripe_count * volinfo->replica_count)) ==
- (total_bricks * volinfo->dist_leaf_count)) {
- /* Change the dist_leaf_count */
- gf_msg(THIS->name, GF_LOG_INFO, 0,
- GD_MSG_STRIPE_COUNT_CHANGE_INFO,
- "Changing the stripe count of "
- "volume %s from %d to %d",
- volinfo->volname, volinfo->stripe_count,
- stripe_count);
- ret = 0;
- goto out;
- }
- }
- break;
- case GF_CLUSTER_TYPE_DISPERSE:
- snprintf(err_str, err_len,
- "Volume %s cannot be converted "
- "from dispersed to striped-"
- "dispersed",
- volinfo->volname);
- gf_msg(THIS->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED,
- "%s", err_str);
- goto out;
- }
-
-out:
- return ret;
-}
-
-static int
gd_addbr_validate_replica_count(glusterd_volinfo_t *volinfo, int replica_count,
int arbiter_count, int total_bricks, int *type,
char *err_str, int err_len)
@@ -252,32 +118,7 @@ gd_addbr_validate_replica_count(glusterd_volinfo_t *volinfo, int replica_count,
goto out;
}
break;
- case GF_CLUSTER_TYPE_STRIPE:
- if (!(total_bricks % (volinfo->dist_leaf_count * replica_count))) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
- gf_msg(THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'stripe' to 'replicate-stripe'",
- volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf(err_str, err_len,
- "Incorrect number of "
- "bricks (%d) supplied for changing volume's "
- "replica count to %d, need at least %d "
- "bricks",
- (total_bricks - volinfo->brick_count), replica_count,
- (volinfo->dist_leaf_count * replica_count));
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
- goto out;
- }
- break;
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
if (replica_count < volinfo->replica_count) {
snprintf(err_str, err_len,
"Incorrect replica count (%d) supplied. "
@@ -341,25 +182,22 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
{
int ret = -1;
int replica_nodes = 0;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
switch (volinfo->type) {
- case GF_CLUSTER_TYPE_TIER:
- ret = 1;
- goto out;
-
case GF_CLUSTER_TYPE_NONE:
- case GF_CLUSTER_TYPE_STRIPE:
case GF_CLUSTER_TYPE_DISPERSE:
snprintf(err_str, err_len,
"replica count (%d) option given for non replicate "
"volume %s",
replica_count, volinfo->volname);
- gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_REPLICA, "%s",
- err_str);
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ARGUMENT,
+ err_str, NULL);
goto out;
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
/* in remove brick, you can only reduce the replica count */
if (replica_count > volinfo->replica_count) {
snprintf(err_str, err_len,
@@ -367,8 +205,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
"than volume %s's replica count (%d)",
replica_count, volinfo->volname,
volinfo->replica_count);
- gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL,
+ GD_MSG_INVALID_ARGUMENT, err_str, NULL);
goto out;
}
if (replica_count == volinfo->replica_count) {
@@ -382,8 +220,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
"(or %dxN)",
brick_count, volinfo->dist_leaf_count,
volinfo->dist_leaf_count);
- gf_msg(THIS->name, GF_LOG_WARNING, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL,
+ GD_MSG_INVALID_ARGUMENT, err_str, NULL);
goto out;
}
ret = 1;
@@ -398,6 +236,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
"need %d(xN) bricks for reducing replica "
"count of the volume from %d to %d",
replica_nodes, volinfo->replica_count, replica_count);
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL,
+ GD_MSG_INVALID_ARGUMENT, err_str, NULL);
goto out;
}
break;
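
The hunks above also migrate validation failures from gf_msg(), which logs a preformatted string, to gf_smsg(), the structured variant. A minimal sketch of the convention, matching calls made later in this patch: optional key=value fields follow the message id, and the argument list is NULL-terminated.

    /* Structured-logging sketch; the key=value form mirrors calls such as
     * gf_smsg(..., "Brick=%s", brick, NULL) elsewhere in this diff. */
    gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
            "key=%s", key, NULL);
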
@@ -447,6 +287,7 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req)
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
snprintf(err_str, sizeof(err_str), "Garbage args received");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -482,10 +323,13 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req)
goto out;
}
- if (!(ret = glusterd_check_volume_exists(volname))) {
- ret = -1;
- snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volinfo "
+ "for volume name %s",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
err_str);
goto out;
}
@@ -527,57 +371,8 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volinfo "
- "for volume name %s",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
- err_str);
- goto out;
- }
-
total_bricks = volinfo->brick_count + brick_count;
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf(err_str, sizeof(err_str), "Volume %s is already a tier.",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_TIER, "%s",
- err_str);
- ret = -1;
- goto out;
- }
-
- if (glusterd_is_tiering_supported(err_str) == _gf_false) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "failed to get type from dictionary");
- goto out;
- }
-
- goto brick_val;
- }
-
- ret = glusterd_disallow_op_for_tier(volinfo, GD_OP_ADD_BRICK, -1);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Add-brick operation is "
- "not supported on a tiered volume %s",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_UNSUPPORTED, "%s",
- err_str);
- goto out;
- }
-
if (!stripe_count && !replica_count) {
if (volinfo->type == GF_CLUSTER_TYPE_NONE)
goto brick_val;
@@ -601,31 +396,6 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req)
count is given */
}
- /* These bricks needs to be added one per a replica or stripe volume */
- if (stripe_count) {
- ret = gd_addbr_validate_stripe_count(volinfo, stripe_count,
- total_bricks, &type, err_str,
- sizeof(err_str));
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COUNT_VALIDATE_FAILED,
- "%s", err_str);
- goto out;
- }
-
- /* if stripe count is same as earlier, set it back to 0 */
- if (ret == 1)
- stripe_count = 0;
-
- ret = dict_set_int32n(dict, "stripe-count", SLEN("stripe-count"),
- stripe_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "failed to set the stripe-count in dict");
- goto out;
- }
- goto brick_val;
- }
-
ret = gd_addbr_validate_replica_count(volinfo, replica_count, arbiter_count,
total_bricks, &type, err_str,
sizeof(err_str));
@@ -721,19 +491,20 @@ subvol_matcher_update(int *subvols, glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *tmp = NULL;
int32_t sub_volume = 0;
int pos = 0;
-
- cds_list_for_each_entry(tmp, &volinfo->bricks, brick_list)
- {
- if (strcmp(tmp->hostname, brickinfo->hostname) ||
- strcmp(tmp->path, brickinfo->path)) {
- pos++;
- continue;
+ if (subvols) {
+ cds_list_for_each_entry(tmp, &volinfo->bricks, brick_list)
+ {
+ if (strcmp(tmp->hostname, brickinfo->hostname) ||
+ strcmp(tmp->path, brickinfo->path)) {
+ pos++;
+ continue;
+ }
+ gf_msg_debug(THIS->name, 0, LOGSTR_FOUND_BRICK, brickinfo->hostname,
+ brickinfo->path, volinfo->volname);
+ sub_volume = (pos / volinfo->dist_leaf_count);
+ subvols[sub_volume]++;
+ break;
}
- gf_msg_debug(THIS->name, 0, LOGSTR_FOUND_BRICK, brickinfo->hostname,
- brickinfo->path, volinfo->volname);
- sub_volume = (pos / volinfo->dist_leaf_count);
- subvols[sub_volume]++;
- break;
}
}
@@ -744,8 +515,10 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
int i = 0;
int ret = 0;
int count = volinfo->replica_count - replica_count;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (replica_count) {
+ if (replica_count && subvols) {
for (i = 0; i < volinfo->subvol_count; i++) {
if (subvols[i] != count) {
ret = -1;
@@ -753,6 +526,8 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
"Remove exactly %d"
" brick(s) from each subvolume.",
count);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRICK_SUBVOL_VERIFY_FAIL, err_str, NULL);
break;
}
}
@@ -760,12 +535,14 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
}
do {
- if (subvols[i] % volinfo->dist_leaf_count == 0) {
+ if (subvols && (subvols[i] % volinfo->dist_leaf_count == 0)) {
continue;
} else {
ret = -1;
snprintf(err_str, err_len, "Bricks not from same subvol for %s",
vol_type);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRICK_SUBVOL_VERIFY_FAIL, err_str, NULL);
break;
}
} while (++i < volinfo->subvol_count);
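
Both matcher hunks above add the same guard. A tiny illustrative helper (hypothetical name) states the rule they enforce: the per-subvolume accounting array is only allocated for multi-subvolume volumes, so consumers must tolerate a NULL pointer.

    /* Hypothetical helper; `leaf_count` stands for volinfo->dist_leaf_count. */
    static void
    subvol_count_brick(int *subvols, int pos, int leaf_count)
    {
        if (!subvols) /* single-subvolume volume: nothing to account */
            return;
        subvols[pos / leaf_count]++;
    }
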
@@ -779,43 +556,6 @@ subvol_matcher_destroy(int *subvols)
GF_FREE(subvols);
}
-int
-glusterd_set_detach_bricks(dict_t *dict, glusterd_volinfo_t *volinfo)
-{
- char key[64] = "";
- char value[2048] = ""; /* hostname + path */
- int brick_num = 0;
- int hot_brick_num = 0;
- glusterd_brickinfo_t *brickinfo;
- int ret = 0;
- int32_t len = 0;
-
- /* cold tier bricks at tail of list so use reverse iteration */
- cds_list_for_each_entry_reverse(brickinfo, &volinfo->bricks, brick_list)
- {
- brick_num++;
- if (brick_num > volinfo->tier_info.cold_brick_count) {
- hot_brick_num++;
- sprintf(key, "brick%d", hot_brick_num);
- len = snprintf(value, sizeof(value), "%s:%s", brickinfo->hostname,
- brickinfo->path);
- if ((len < 0) || (len >= sizeof(value))) {
- return -1;
- }
-
- ret = dict_set_str(dict, key, strdup(value));
- if (ret)
- break;
- }
- }
-
- ret = dict_set_int32n(dict, "count", SLEN("count"), hot_brick_num);
- if (ret)
- return -1;
-
- return hot_brick_num;
-}
-
static int
glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
int32_t count, int32_t replica_count,
@@ -827,9 +567,11 @@ glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_brickinfo_t *last = NULL;
char *arbiter_array = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
- if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) &&
- (volinfo->type != GF_CLUSTER_TYPE_STRIPE_REPLICATE))
+ if (volinfo->type != GF_CLUSTER_TYPE_REPLICATE)
goto out;
if (!replica_count || !volinfo->arbiter_count)
@@ -846,6 +588,8 @@ glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
"Remove arbiter "
"brick(s) only when converting from "
"arbiter to replica 2 subvolume.");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_REMOVE_ARBITER_BRICK, err_str, NULL);
ret = -1;
goto out;
}
@@ -869,7 +613,9 @@ glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
snprintf(err_str, err_len,
"Removed bricks "
"must contain arbiter when converting"
- " to plain distrubute.");
+ " to plain distribute.");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_REMOVE_ARBITER_BRICK, err_str, NULL);
ret = -1;
break;
}
@@ -893,6 +639,7 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
char key[64] = "";
int keylen;
int i = 1;
+ glusterd_conf_t *conf = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_brickinfo_t **brickinfo_list = NULL;
@@ -911,12 +658,15 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
GF_ASSERT(req);
this = THIS;
GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
snprintf(err_str, sizeof(err_str), "Received garbage args");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -969,14 +719,6 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
goto out;
}
- if ((volinfo->type == GF_CLUSTER_TYPE_TIER) &&
- (glusterd_is_tiering_supported(err_str) == _gf_false)) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
ret = dict_get_int32n(dict, "command", SLEN("command"), &cmd);
if (ret) {
snprintf(err_str, sizeof(err_str),
@@ -987,15 +729,6 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_disallow_op_for_tier(volinfo, GD_OP_REMOVE_BRICK, cmd);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Removing brick from a Tier volume is not allowed");
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_UNSUPPORTED, "%s",
- err_str);
- goto out;
- }
-
ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
&replica_count);
if (!ret) {
@@ -1027,39 +760,12 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
/* 'vol_type' is used for giving the meaning full error msg for user */
if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
strcpy(vol_type, "replica");
- } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE) {
- strcpy(vol_type, "stripe");
- } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) {
- strcpy(vol_type, "stripe-replicate");
} else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
strcpy(vol_type, "disperse");
} else {
strcpy(vol_type, "distribute");
}
- /* Do not allow remove-brick if the volume is a stripe volume*/
- if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE) &&
- (volinfo->brick_count == volinfo->stripe_count)) {
- snprintf(err_str, sizeof(err_str),
- "Removing brick from a stripe volume is not allowed");
- gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED, "%s",
- err_str);
- ret = -1;
- goto out;
- }
-
- if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) &&
- (volinfo->brick_count == volinfo->dist_leaf_count)) {
- snprintf(err_str, sizeof(err_str),
- "Removing bricks from stripe-replicate"
- " configuration is not allowed without reducing "
- "replica or stripe count explicitly.");
- gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED_AC_REQD,
- "%s", err_str);
- ret = -1;
- goto out;
- }
-
if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
(volinfo->brick_count == volinfo->dist_leaf_count)) {
snprintf(err_str, sizeof(err_str),
@@ -1074,8 +780,7 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
/* Do not allow remove-brick if the bricks given is less than
the replica count or stripe count */
- if (!replica_count && (volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER)) {
+ if (!replica_count && (volinfo->type != GF_CLUSTER_TYPE_NONE)) {
if (volinfo->dist_leaf_count && (count % volinfo->dist_leaf_count)) {
snprintf(err_str, sizeof(err_str),
"Remove brick "
@@ -1088,18 +793,13 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
}
}
- /* subvol match is not required for tiered volume*/
if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
(volinfo->subvol_count > 1)) {
ret = subvol_matcher_init(&subvols, volinfo->subvol_count);
if (ret)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- count = glusterd_set_detach_bricks(dict, volinfo);
-
brickinfo_list = GF_CALLOC(count, sizeof(*brickinfo_list),
gf_common_mt_pointer);
if (!brickinfo_list) {
@@ -1140,18 +840,10 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
(volinfo->brick_count <= volinfo->dist_leaf_count))
continue;
- /* Find which subvolume the brick belongs to.
- * subvol match is not required for tiered volume
- *
- */
- if (volinfo->type != GF_CLUSTER_TYPE_TIER)
- subvol_matcher_update(subvols, volinfo, brickinfo);
+ subvol_matcher_update(subvols, volinfo, brickinfo);
}
- /* Check if the bricks belong to the same subvolumes.*/
- /* subvol match is not required for tiered volume*/
if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
(volinfo->subvol_count > 1)) {
ret = subvol_matcher_verify(subvols, volinfo, err_str, sizeof(err_str),
vol_type, replica_count);
@@ -1165,7 +857,17 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
if (ret)
goto out;
- ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict);
+ if (conf->op_version < GD_OP_VERSION_8_0) {
+ gf_msg_debug(this->name, 0,
+ "The cluster is operating at "
+ "version less than %d. remove-brick operation"
+ "falling back to syncop framework.",
+ GD_OP_VERSION_8_0);
+ ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict);
+ } else {
+ ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_REMOVE_BRICK,
+ dict);
+ }
out:
if (ret) {
@@ -1244,7 +946,7 @@ _glusterd_restart_gsync_session(dict_t *this, char *key, data_t *value,
&slave_url, &slave_host,
&slave_vol, &conf_path, errmsg);
if (ret) {
- if (*errmsg)
+ if (errmsg && *errmsg)
gf_msg("glusterd", GF_LOG_ERROR, 0,
GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL, "%s", *errmsg);
else
@@ -1315,13 +1017,13 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
0,
};
gf_boolean_t restart_needed = 0;
- int caps = 0;
int brickid = 0;
char key[64] = "";
char *brick_mount_dir = NULL;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
gf_boolean_t is_valid_add_brick = _gf_false;
+ gf_boolean_t restart_shd = _gf_false;
struct statvfs brickstat = {
0,
};
@@ -1389,7 +1091,7 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
goto out;
}
strncpy(brickinfo->mount_dir, brick_mount_dir,
- sizeof(brickinfo->mount_dir));
+ SLEN(brickinfo->mount_dir));
}
ret = glusterd_resolve_brick(brickinfo);
@@ -1409,10 +1111,7 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
}
brickinfo->statfs_fsid = brickstat.f_fsid;
}
- /* hot tier bricks are added to head of brick list */
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- cds_list_add(&brickinfo->brick_list, &volinfo->bricks);
- } else if (stripe_count || replica_count) {
+ if (stripe_count || replica_count) {
add_brick_at_right_order(brickinfo, volinfo, (i - 1), stripe_count,
replica_count);
} else {
@@ -1477,22 +1176,19 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
if (count)
brick = strtok_r(brick_list + 1, " \n", &saveptr);
-#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0])
- caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
-#endif
- /* This check needs to be added to distinguish between
- * attach-tier commands and add-brick commands.
- * When a tier is attached, adding is done via add-brick
- * and setting of pending xattrs shouldn't be done for
- * attach-tiers as they are virtually new volumes.
- */
if (glusterd_is_volume_replicate(volinfo)) {
- if (replica_count &&
- !dict_getn(dict, "attach-tier", SLEN("attach-tier")) &&
- conf->op_version >= GD_OP_VERSION_3_7_10) {
+ if (replica_count && conf->op_version >= GD_OP_VERSION_3_7_10) {
is_valid_add_brick = _gf_true;
+ if (volinfo->status == GLUSTERD_STATUS_STARTED) {
+ ret = volinfo->shd.svc.stop(&(volinfo->shd.svc), SIGTERM);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_GLUSTER_SERVICES_STOP_FAIL,
+ "Failed to stop shd for %s.", volinfo->volname);
+ }
+ restart_shd = _gf_true;
+ }
ret = generate_dummy_client_volfiles(volinfo);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -1507,22 +1203,6 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
_gf_true);
if (ret)
goto out;
-#ifdef HAVE_BD_XLATOR
- char msg[1024] = "";
- /* Check for VG/thin pool if its BD volume */
- if (brickinfo->vg[0]) {
- ret = glusterd_is_valid_vg(brickinfo, 0, msg);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, 0, GD_MSG_INVALID_VG, "%s",
- msg);
- goto out;
- }
- /* if anyone of the brick does not have thin support,
- disable it for entire volume */
- caps &= brickinfo->caps;
- } else
- caps = 0;
-#endif
if (gf_uuid_is_null(brickinfo->uuid)) {
ret = glusterd_resolve_brick(brickinfo);
@@ -1567,7 +1247,6 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
dict_foreach(volinfo->gsync_slaves, _glusterd_restart_gsync_session,
&param);
}
- volinfo->caps = caps;
generate_volfiles:
if (conf->op_version <= GD_OP_VERSION_3_7_5) {
@@ -1584,6 +1263,14 @@ generate_volfiles:
out:
GF_FREE(free_ptr1);
GF_FREE(free_ptr2);
+ if (restart_shd) {
+ if (volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo,
+ PROC_START_NO_WAIT)) {
+ gf_msg("glusterd", GF_LOG_CRITICAL, 0,
+ GD_MSG_GLUSTER_SERVICE_START_FAIL,
+ "Failed to start shd for %s.", volinfo->volname);
+ }
+ }
gf_msg_debug("glusterd", 0, "Returning %d", ret);
return ret;
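
The two hunks above (the stop near the top of the function and the restart in the out: path) form one bracket around volfile regeneration. A condensed, illustrative sketch of that flow, with error handling elided:

    gf_boolean_t restart_shd = _gf_false;

    if (volinfo->status == GLUSTERD_STATUS_STARTED) {
        volinfo->shd.svc.stop(&(volinfo->shd.svc), SIGTERM);
        restart_shd = _gf_true; /* defer the restart to the exit path */
    }
    /* ... generate_dummy_client_volfiles(volinfo), set pending xattrs ... */
    out:
        if (restart_shd)
            volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo,
                                     PROC_START_NO_WAIT);
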
@@ -1672,14 +1359,14 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
"Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
"Unable to find volume: %s", volname);
goto out;
}
@@ -1691,13 +1378,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
&replica_count);
if (ret) {
- gf_msg_debug(THIS->name, 0, "Unable to get replica count");
- }
-
- ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
- &arbiter_count);
- if (ret) {
- gf_msg_debug(THIS->name, 0, "No arbiter count present in the dict");
+ gf_msg_debug(this->name, 0, "Unable to get replica count");
}
if (replica_count > 0) {
@@ -1711,19 +1392,20 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
}
- if (glusterd_is_volume_replicate(volinfo)) {
+ glusterd_add_peers_to_auth_list(volname);
+
+ if (replica_count && glusterd_is_volume_replicate(volinfo)) {
/* Do not allow add-brick for stopped volumes when replica-count
* is being increased.
*/
- if (conf->op_version >= GD_OP_VERSION_3_7_10 &&
- !dict_getn(dict, "attach-tier", SLEN("attach-tier")) &&
- replica_count && GLUSTERD_STATUS_STOPPED == volinfo->status) {
+ if (GLUSTERD_STATUS_STOPPED == volinfo->status &&
+ conf->op_version >= GD_OP_VERSION_3_7_10) {
ret = -1;
snprintf(msg, sizeof(msg),
" Volume must not be in"
" stopped state when replica-count needs to "
" be increased.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
@@ -1731,25 +1413,31 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
/* op-version check for replica 2 to arbiter conversion. If we
* don't have this check, an older peer added as arbiter brick
* will not have the arbiter xlator in its volfile. */
- if ((conf->op_version < GD_OP_VERSION_3_8_0) && (arbiter_count == 1) &&
- (replica_count == 3)) {
- ret = -1;
- snprintf(msg, sizeof(msg),
- "Cluster op-version must "
- "be >= 30800 to add arbiter brick to a "
- "replica 2 volume.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
- msg);
- *op_errstr = gf_strdup(msg);
- goto out;
+ if ((replica_count == 3) && (conf->op_version < GD_OP_VERSION_3_8_0)) {
+ ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
+ &arbiter_count);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "No arbiter count present in the dict");
+ } else if (arbiter_count == 1) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Cluster op-version must "
+ "be >= 30800 to add arbiter brick to a "
+ "replica 2 volume.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
}
/* Do not allow increasing replica count for arbiter volumes. */
- if (replica_count && volinfo->arbiter_count) {
+ if (volinfo->arbiter_count) {
ret = -1;
snprintf(msg, sizeof(msg),
"Increasing replica count "
"for arbiter volumes is not supported.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
@@ -1758,6 +1446,43 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
is_force = dict_get_str_boolean(dict, "force", _gf_false);
+ /* Check brick order if the volume type is replicate or disperse. If
+ * force at the end of command not given then check brick order.
+ * doing this check at the originator node is sufficient.
+ */
+
+ if (!is_force && is_origin_glusterd(dict)) {
+ ret = 0;
+ if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
+ gf_msg_debug(this->name, 0,
+ "Replicate cluster type "
+ "found. Checking brick order.");
+ if (replica_count)
+ ret = glusterd_check_brick_order(dict, msg, volinfo->type,
+ &volname, &bricks, &count,
+ replica_count);
+ else
+ ret = glusterd_check_brick_order(dict, msg, volinfo->type,
+ &volname, &bricks, &count,
+ volinfo->replica_count);
+ } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
+ gf_msg_debug(this->name, 0,
+ "Disperse cluster type"
+ " found. Checking brick order.");
+ ret = glusterd_check_brick_order(dict, msg, volinfo->type, &volname,
+ &bricks, &count,
+ volinfo->disperse_count);
+ }
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
+ "Not adding brick because of "
+ "bad brick order. %s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+ }
+
if (volinfo->replica_count < replica_count && !is_force) {
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
@@ -1774,7 +1499,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (len < 0) {
strcpy(msg, "<error>");
}
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
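
glusterd_check_brick_order() itself is not shown in this hunk. Conceptually it rejects add-brick layouts that would place two members of one replica (or disperse) set on the same host; a rough, illustrative sketch under that assumption:

    #include <string.h>

    /* Conceptual sketch only -- not the patch's implementation. `hosts`
     * holds the resolved hostname of each new brick; `sub_count` is the
     * replica or disperse count passed in above. */
    static int
    check_order(char **hosts, int brick_count, int sub_count)
    {
        int s, i, j;
        for (s = 0; s + sub_count <= brick_count; s += sub_count)
            for (i = s; i < s + sub_count; i++)
                for (j = i + 1; j < s + sub_count; j++)
                    if (strcmp(hosts[i], hosts[j]) == 0)
                        return -1; /* two members of one set share a host */
        return 0;
    }
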
@@ -1806,46 +1531,40 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
"Volume name %s rebalance is in "
"progress. Please retry after completion",
volname);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg);
*op_errstr = gf_strdup(msg);
ret = -1;
goto out;
}
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- /*
- * This check is needed because of add/remove brick
- * is not supported on a tiered volume. So once a tier
- * is attached we cannot commit or stop the remove-brick
- * task. Please change this comment once we start supporting
- * add/remove brick on a tiered volume.
- */
- if (!gd_is_remove_brick_committed(volinfo)) {
- snprintf(msg, sizeof(msg),
- "An earlier remove-brick "
- "task exists for volume %s. Either commit it"
- " or stop it before attaching a tier.",
- volinfo->volname);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OLD_REMOVE_BRICK_EXISTS,
- "%s", msg);
- *op_errstr = gf_strdup(msg);
- ret = -1;
+ if (volinfo->snap_count > 0 || !cds_list_empty(&volinfo->snap_volumes)) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s has %" PRIu64
+ " snapshots. "
+ "Changing the volume configuration will not effect snapshots."
+ "But the snapshot brick mount should be intact to "
+ "make them function.",
+ volname, volinfo->snap_count);
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s", msg);
+ msg[0] = '\0';
+ }
+
+ if (!count) {
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get count");
goto out;
}
}
- ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get count");
- goto out;
- }
-
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get bricks");
- goto out;
+ if (!bricks) {
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get bricks");
+ goto out;
+ }
}
if (bricks) {
@@ -1864,7 +1583,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
"brick path %s is "
"too long",
brick);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s",
msg);
*op_errstr = gf_strdup(msg);
@@ -1875,7 +1594,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true,
NULL);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
"Add-brick: Unable"
" to get brickinfo");
goto out;
@@ -1891,18 +1610,6 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
-#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0]) {
- ret = glusterd_is_valid_vg(brickinfo, 1, msg);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_VG,
- "%s", msg);
- *op_errstr = gf_strdup(msg);
- goto out;
- }
- }
-#endif
-
ret = glusterd_validate_and_create_brickpath(
brickinfo, volinfo->volume_id, volinfo->volname, op_errstr,
is_force, _gf_false);
@@ -1957,7 +1664,7 @@ out:
GF_FREE(str_ret);
GF_FREE(all_bricks);
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -1981,6 +1688,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
};
glusterd_conf_t *priv = THIS->private;
int pid = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
/* Check whether all the nodes of the bricks to be removed are
* up, if not fail the operation */
@@ -1989,6 +1698,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
ret = dict_get_strn(dict, key, keylen, &brick);
if (ret) {
snprintf(msg, sizeof(msg), "Unable to get %s", key);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "key=%s", key, NULL);
*errstr = gf_strdup(msg);
goto out;
}
@@ -2000,54 +1711,30 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
"Incorrect brick "
"%s for volume %s",
brick, volinfo->volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INCORRECT_BRICK,
+ "Brick=%s, Volume=%s", brick, volinfo->volname, NULL);
*errstr = gf_strdup(msg);
goto out;
}
/* Do not allow commit if the bricks are not decommissioned
- * if its a remove brick commit or detach-tier commit
+ * if its a remove brick commit
*/
- if (!brickinfo->decommissioned) {
- if (cmd == GF_OP_CMD_COMMIT) {
- snprintf(msg, sizeof(msg),
- "Brick %s "
- "is not decommissioned. "
- "Use start or force option",
- brick);
- *errstr = gf_strdup(msg);
- ret = -1;
- goto out;
- }
-
- if (cmd == GF_OP_CMD_DETACH_COMMIT ||
- cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT) {
- snprintf(msg, sizeof(msg),
- "Bricks in Hot "
- "tier are not decommissioned yet. Use "
- "gluster volume tier <VOLNAME> "
- "detach start to start the decommission process");
- *errstr = gf_strdup(msg);
- ret = -1;
- goto out;
- }
- } else {
- if ((cmd == GF_OP_CMD_DETACH_COMMIT ||
- (cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT)) &&
- (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_STARTED)) {
- snprintf(msg, sizeof(msg),
- "Bricks in Hot "
- "tier are not decommissioned yet. Wait for "
- "the detach to complete using gluster volume "
- "tier <VOLNAME> status.");
- *errstr = gf_strdup(msg);
- ret = -1;
- goto out;
- }
+ if (!brickinfo->decommissioned && cmd == GF_OP_CMD_COMMIT) {
+ snprintf(msg, sizeof(msg),
+ "Brick %s "
+ "is not decommissioned. "
+ "Use start or force option",
+ brick);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_NOT_DECOM,
+ "Use 'start' or 'force' option, Brick=%s", brick, NULL);
+ *errstr = gf_strdup(msg);
+ ret = -1;
+ goto out;
}
if (glusterd_is_local_brick(THIS, volinfo, brickinfo)) {
switch (cmd) {
case GF_OP_CMD_START:
- case GF_OP_CMD_DETACH_START:
goto check;
case GF_OP_CMD_NONE:
default:
@@ -2055,8 +1742,6 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
}
switch (cmd_defrag) {
- case GF_DEFRAG_CMD_DETACH_START:
- break;
case GF_DEFRAG_CMD_NONE:
default:
continue;
@@ -2068,6 +1753,10 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
"brick %s. Use force option to "
"remove the offline brick",
brick);
+ gf_smsg(
+ this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_STOPPED,
+ "Use 'force' option to remove the offline brick, Brick=%s",
+ brick, NULL);
*errstr = gf_strdup(msg);
ret = -1;
goto out;
@@ -2078,6 +1767,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
"Found dead "
"brick %s",
brick);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_DEAD,
+ "Brick=%s", brick, NULL);
*errstr = gf_strdup(msg);
ret = -1;
goto out;
@@ -2087,29 +1778,33 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
continue;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_uuid(brickinfo->uuid);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
snprintf(msg, sizeof(msg),
"Host node of the "
"brick %s is not in cluster",
brick);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRICK_HOST_NOT_FOUND, "Brick=%s", brick, NULL);
*errstr = gf_strdup(msg);
ret = -1;
- rcu_read_unlock();
goto out;
}
if (!peerinfo->connected) {
+ RCU_READ_UNLOCK;
snprintf(msg, sizeof(msg),
"Host node of the "
"brick %s is down",
brick);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_HOST_DOWN,
+ "Brick=%s", brick, NULL);
*errstr = gf_strdup(msg);
ret = -1;
- rcu_read_unlock();
goto out;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
out:
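
Besides switching to the RCU_READ_LOCK/RCU_READ_UNLOCK wrappers, the hunk above releases the read lock before every goto. The invariant, sketched:

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find_by_uuid(brickinfo->uuid);
    if (!peerinfo || !peerinfo->connected) {
        RCU_READ_UNLOCK; /* leave the read-side critical section first */
        ret = -1;
        goto out;
    }
    RCU_READ_UNLOCK; /* no path exits this block with the lock held */
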
@@ -2184,6 +1879,7 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
errstr = gf_strdup(
"Deleting all the bricks of the "
"volume is not allowed");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_DELETE, NULL);
ret = -1;
goto out;
}
@@ -2192,24 +1888,13 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
switch (cmd) {
case GF_OP_CMD_NONE:
errstr = gf_strdup("no remove-brick command issued");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NO_REMOVE_CMD,
+ NULL);
goto out;
case GF_OP_CMD_STATUS:
ret = 0;
goto out;
-
- case GF_OP_CMD_DETACH_START:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not a tier "
- "volume",
- volinfo->volname);
- errstr = gf_strdup(msg);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
- errstr);
- goto out;
- }
-
case GF_OP_CMD_START: {
if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
dict_getn(dict, "replica-count", SLEN("replica-count"))) {
@@ -2224,21 +1909,12 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
}
if (GLUSTERD_STATUS_STARTED != volinfo->status) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "Volume %s needs "
- "to be started before detach-tier "
- "(you can use 'force' or 'commit' "
- "to override this behavior)",
- volinfo->volname);
- } else {
- snprintf(msg, sizeof(msg),
- "Volume %s needs "
- "to be started before remove-brick "
- "(you can use 'force' or 'commit' "
- "to override this behavior)",
- volinfo->volname);
- }
+ snprintf(msg, sizeof(msg),
+ "Volume %s needs "
+ "to be started before remove-brick "
+ "(you can use 'force' or 'commit' "
+ "to override this behavior)",
+ volinfo->volname);
errstr = gf_strdup(msg);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED,
"%s", errstr);
@@ -2287,6 +1963,21 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
goto out;
}
+ if (volinfo->snap_count > 0 ||
+ !cds_list_empty(&volinfo->snap_volumes)) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s has %" PRIu64
+ " snapshots. "
+ "Changing the volume configuration will not effect "
+ "snapshots."
+ "But the snapshot brick mount should be intact to "
+ "make them function.",
+ volname, volinfo->snap_count);
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s",
+ msg);
+ msg[0] = '\0';
+ }
+
ret = glusterd_remove_brick_validate_bricks(
cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
if (ret)
@@ -2315,55 +2006,16 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
}
case GF_OP_CMD_STOP:
- case GF_OP_CMD_STOP_DETACH_TIER:
ret = 0;
break;
- case GF_OP_CMD_DETACH_COMMIT:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not a tier "
- "volume",
- volinfo->volname);
- errstr = gf_strdup(msg);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
- errstr);
- goto out;
- }
- if (volinfo->decommission_in_progress) {
- errstr = gf_strdup(
- "use 'force' option as migration "
- "is in progress");
- goto out;
- }
- if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) {
- errstr = gf_strdup(
- "use 'force' option as migration "
- "has failed");
- goto out;
- }
-
- ret = glusterd_remove_brick_validate_bricks(
- cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
- if (ret)
- goto out;
-
- /* If geo-rep is configured, for this volume, it should be
- * stopped.
- */
- param.volinfo = volinfo;
- ret = glusterd_check_geo_rep_running(&param, op_errstr);
- if (ret || param.is_active) {
- ret = -1;
- goto out;
- }
- break;
-
case GF_OP_CMD_COMMIT:
if (volinfo->decommission_in_progress) {
errstr = gf_strdup(
"use 'force' option as migration "
"is in progress");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_MIGRATION_PROG,
+ "Use 'force' option", NULL);
goto out;
}
@@ -2371,9 +2023,27 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
errstr = gf_strdup(
"use 'force' option as migration "
"has failed");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_MIGRATION_FAIL,
+ "Use 'force' option", NULL);
goto out;
}
+ if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_COMPLETE) {
+ if (volinfo->rebal.rebalance_failures > 0 ||
+ volinfo->rebal.skipped_files > 0) {
+ errstr = gf_strdup(
+ "use 'force' option as migration "
+ "of some files might have been skipped or "
+ "has failed");
+ gf_smsg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_MIGRATION_FAIL,
+ "Use 'force' option, some files might have been "
+ "skipped",
+ NULL);
+ goto out;
+ }
+ }
+
ret = glusterd_remove_brick_validate_bricks(
cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
if (ret)
@@ -2391,18 +2061,11 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
break;
- case GF_OP_CMD_DETACH_COMMIT_FORCE:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not a tier "
- "volume",
- volinfo->volname);
- errstr = gf_strdup(msg);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
- errstr);
- goto out;
- }
case GF_OP_CMD_COMMIT_FORCE:
+ case GF_OP_CMD_DETACH_START:
+ case GF_OP_CMD_DETACH_COMMIT:
+ case GF_OP_CMD_DETACH_COMMIT_FORCE:
+ case GF_OP_CMD_STOP_DETACH_TIER:
break;
}
ret = 0;
@@ -2413,7 +2076,8 @@ out:
if (op_errstr)
*op_errstr = errstr;
}
-
+ if (!op_errstr && errstr)
+ GF_FREE(errstr);
return ret;
}
@@ -2495,48 +2159,6 @@ glusterd_remove_brick_migrate_cbk(glusterd_volinfo_t *volinfo,
return ret;
}
-static int
-glusterd_op_perform_attach_tier(dict_t *dict, glusterd_volinfo_t *volinfo,
- int count, char *bricks)
-{
- int ret = 0;
- int replica_count = 0;
- int type = 0;
-
- /*
- * Store the new (cold) tier's structure until the graph is generated.
- * If there is a failure before the graph is generated the
- * structure will revert to its original state.
- */
- volinfo->tier_info.cold_dist_leaf_count = volinfo->dist_leaf_count;
- volinfo->tier_info.cold_type = volinfo->type;
- volinfo->tier_info.cold_brick_count = volinfo->brick_count;
- volinfo->tier_info.cold_replica_count = volinfo->replica_count;
- volinfo->tier_info.cold_disperse_count = volinfo->disperse_count;
- volinfo->tier_info.cold_redundancy_count = volinfo->redundancy_count;
-
- ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
- &replica_count);
- if (!ret)
- volinfo->tier_info.hot_replica_count = replica_count;
- else
- volinfo->tier_info.hot_replica_count = 1;
- volinfo->tier_info.hot_brick_count = count;
- ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
- volinfo->tier_info.hot_type = type;
- ret = dict_set_int32n(dict, "type", SLEN("type"), GF_CLUSTER_TYPE_TIER);
-
- if (!ret)
- ret = dict_set_nstrn(volinfo->dict, "features.ctr-enabled",
- SLEN("features.ctr-enabled"), "on", SLEN("on"));
-
- if (!ret)
- ret = dict_set_nstrn(volinfo->dict, "cluster.tier-mode",
- SLEN("cluster.tier-mode"), "cache", SLEN("cache"));
-
- return ret;
-}
-
int
glusterd_op_add_brick(dict_t *dict, char **op_errstr)
{
@@ -2584,11 +2206,6 @@ glusterd_op_add_brick(dict_t *dict, char **op_errstr)
goto out;
}
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- gf_msg_debug(THIS->name, 0, "Adding tier");
- glusterd_op_perform_attach_tier(dict, volinfo, count, bricks);
- }
-
ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict);
if (ret) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL,
@@ -2617,94 +2234,118 @@ out:
}
int
-glusterd_op_add_tier_brick(dict_t *dict, char **op_errstr)
+glusterd_post_commit_add_brick(dict_t *dict, char **op_errstr)
{
int ret = 0;
char *volname = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char *bricks = NULL;
- int32_t count = 0;
- this = THIS;
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+ ret = glusterd_replace_old_auth_allow_list(volname);
+out:
+ return ret;
+}
+
+int
+glusterd_post_commit_replace_brick(dict_t *dict, char **op_errstr)
+{
+ int ret = 0;
+ char *volname = NULL;
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
"Unable to get volume name");
goto out;
}
+ ret = glusterd_replace_old_auth_allow_list(volname);
+out:
+ return ret;
+}
- ret = glusterd_volinfo_find(volname, &volinfo);
+int
+glusterd_set_rebalance_id_for_remove_brick(dict_t *req_dict, dict_t *rsp_dict)
+{
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char msg[2048] = {0};
+ char *task_id_str = NULL;
+ xlator_t *this = NULL;
+ int32_t cmd = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(rsp_dict);
+ GF_ASSERT(req_dict);
+ ret = dict_get_strn(rsp_dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
- "Volume not found");
+ gf_msg_debug(this->name, 0, "volname not found");
goto out;
}
- ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get count");
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+ "Unable to allocate memory");
goto out;
}
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ ret = dict_get_int32n(rsp_dict, "command", SLEN("command"), &cmd);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get bricks");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get command");
goto out;
}
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- gf_msg_debug(THIS->name, 0, "Adding tier");
- glusterd_op_perform_attach_tier(dict, volinfo, count, bricks);
- }
+ /* The remove-brick task id is generated in
+ * glusterd_op_stage_remove_brick(), but rsp_dict is unavailable
+ * there, so it is copied from req_dict to rsp_dict here. */
- ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL,
- "Unable to add bricks");
- goto out;
+ if (is_origin_glusterd(rsp_dict)) {
+ ret = dict_get_strn(req_dict, GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY), &task_id_str);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Missing rebalance id for remove-brick");
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_REBALANCE_ID_MISSING,
+ "%s", msg);
+ ret = 0;
+ } else {
+ gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
+
+ ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id,
+ rsp_dict, GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_REMOVE_BRICK_ID_SET_FAIL,
+ "Failed to set remove-brick-id");
+ goto out;
+ }
+ }
}
- if (priv->op_version <= GD_OP_VERSION_3_10_0) {
- ret = glusterd_store_volinfo(volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
+ if (!gf_uuid_is_null(volinfo->rebal.rebalance_id) &&
+ GD_OP_REMOVE_BRICK == volinfo->rebal.op) {
+ ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id, rsp_dict,
+ GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set task-id for volume %s", volname);
goto out;
- } else {
- /*
- * The cluster is operating at version greater than
- * gluster-3.10.0. So no need to store volfiles
- * in commit phase, the same will be done
- * in post validate phase with v3 framework.
- */
+ }
}
-
- if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_svcs_manager(volinfo);
-
out:
return ret;
}
-
-void
-glusterd_op_perform_detach_tier(glusterd_volinfo_t *volinfo)
-{
- volinfo->type = volinfo->tier_info.cold_type;
- volinfo->replica_count = volinfo->tier_info.cold_replica_count;
- volinfo->disperse_count = volinfo->tier_info.cold_disperse_count;
- volinfo->redundancy_count = volinfo->tier_info.cold_redundancy_count;
- volinfo->dist_leaf_count = volinfo->tier_info.cold_dist_leaf_count;
-}
-
int
glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
{
@@ -2721,8 +2362,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
int force = 0;
gf1_op_commands cmd = 0;
int32_t replica_count = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *tmp = NULL;
char *task_id_str = NULL;
xlator_t *this = NULL;
dict_t *bricks_dict = NULL;
@@ -2730,11 +2369,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
int start_remove = 0;
uint32_t commit_hash = 0;
int defrag_cmd = 0;
- int detach_commit = 0;
- void *tier_info = NULL;
- char *cold_shd_key = NULL;
- char *hot_shd_key = NULL;
- int delete_key = 1;
glusterd_conf_t *conf = NULL;
this = THIS;
@@ -2765,7 +2399,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
}
cmd = flag;
- if ((GF_OP_CMD_START == cmd) || (GF_OP_CMD_DETACH_START == cmd))
+ if (GF_OP_CMD_START == cmd)
start_remove = 1;
/* Set task-id, if available, in ctx dict for operations other than
@@ -2805,35 +2439,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
goto out;
case GF_OP_CMD_STOP:
- case GF_OP_CMD_STOP_DETACH_TIER: {
- /* Fall back to the old volume file */
- cds_list_for_each_entry_safe(brickinfo, tmp, &volinfo->bricks,
- brick_list)
- {
- if (!brickinfo->decommissioned)
- continue;
- brickinfo->decommissioned = 0;
- }
- ret = glusterd_create_volfiles_and_notify_services(volinfo);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles");
- goto out;
- }
-
- ret = glusterd_store_volinfo(volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL,
- "failed to store volinfo");
- goto out;
- }
-
- ret = 0;
- goto out;
- }
-
- case GF_OP_CMD_DETACH_START:
case GF_OP_CMD_START:
/* Reset defrag status to 'NOT STARTED' whenever a
* remove-brick/rebalance command is issued to remove
@@ -2841,6 +2446,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
* Update defrag_cmd as well or it will only be done
* for nodes on which the brick to be removed exists.
*/
+ /* coverity[MIXED_ENUMS] */
volinfo->rebal.defrag_cmd = cmd;
volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
@@ -2859,43 +2465,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
force = 1;
break;
- case GF_OP_CMD_DETACH_COMMIT:
- case GF_OP_CMD_DETACH_COMMIT_FORCE:
- glusterd_op_perform_detach_tier(volinfo);
- detach_commit = 1;
-
- /* Disabling ctr when detaching a tier, since
- * currently tier is the only consumer of ctr.
- * Revisit this code when this constraint no
- * longer exist.
- */
- dict_deln(volinfo->dict, "features.ctr-enabled",
- SLEN("features.ctr-enabled"));
- dict_deln(volinfo->dict, "cluster.tier-mode",
- SLEN("cluster.tier-mode"));
-
- hot_shd_key = gd_get_shd_key(volinfo->tier_info.hot_type);
- cold_shd_key = gd_get_shd_key(volinfo->tier_info.cold_type);
- if (hot_shd_key) {
- /*
- * Since post detach, shd graph will not contain hot
- * tier. So we need to clear option set for hot tier.
- * For a tiered volume there can be different key
- * for both hot and cold. If hot tier is shd compatible
- * then we need to remove the configured value when
- * detaching a tier, only if the key's are different or
- * cold key is NULL. So we will set delete_key first,
- * and if cold key is not null and they are equal then
- * we will clear the flag. Otherwise we will delete the
- * key.
- */
- if (cold_shd_key)
- delete_key = strcmp(hot_shd_key, cold_shd_key);
- if (delete_key)
- dict_del(volinfo->dict, hot_shd_key);
- }
- /* fall through */
-
case GF_OP_CMD_COMMIT_FORCE:
if (volinfo->decommission_in_progress) {
@@ -2915,6 +2484,11 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
ret = 0;
force = 1;
break;
+ case GF_OP_CMD_DETACH_START:
+ case GF_OP_CMD_DETACH_COMMIT_FORCE:
+ case GF_OP_CMD_DETACH_COMMIT:
+ case GF_OP_CMD_STOP_DETACH_TIER:
+ break;
}
ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
@@ -2923,10 +2497,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
"Unable to get count");
goto out;
}
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- count = glusterd_set_detach_bricks(dict, volinfo);
-
/* Save the list of bricks for later usage only on starting a
* remove-brick. Right now this is required for displaying the task
* parameters with task status in volume status.
@@ -2979,12 +2549,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
i++;
}
- if (detach_commit) {
- /* Clear related information from volinfo */
- tier_info = ((void *)(&volinfo->tier_info));
- memset(tier_info, 0, sizeof(volinfo->tier_info));
- }
-
if (start_remove)
volinfo->rebal.dict = dict_ref(bricks_dict);
@@ -3007,16 +2571,11 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
* volumes undergoing a detach operation, they should not
* be modified here.
*/
- if ((replica_count == 1) && (cmd != GF_OP_CMD_DETACH_COMMIT) &&
- (cmd != GF_OP_CMD_DETACH_COMMIT_FORCE)) {
+ if (replica_count == 1) {
if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
volinfo->type = GF_CLUSTER_TYPE_NONE;
/* backward compatibility */
volinfo->sub_count = 0;
- } else {
- volinfo->type = GF_CLUSTER_TYPE_STRIPE;
- /* backward compatibility */
- volinfo->sub_count = volinfo->dist_leaf_count;
}
}
}
@@ -3050,7 +2609,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
}
if (start_remove && volinfo->status == GLUSTERD_STATUS_STARTED) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
"Unable to reconfigure NFS-Server");
@@ -3073,8 +2632,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
}
/* perform the rebalance operations */
defrag_cmd = GF_DEFRAG_CMD_START_FORCE;
- if (cmd == GF_OP_CMD_DETACH_START)
- defrag_cmd = GF_DEFRAG_CMD_START_DETACH_TIER;
/*
* We need to set this *before* we issue commands to the
* bricks, or else we might end up setting it after the bricks
@@ -3103,7 +2660,7 @@ out:
GF_FREE(brick_tmpstr);
if (bricks_dict)
dict_unref(bricks_dict);
-
+ gf_msg_debug(this->name, 0, "returning %d", ret);
return ret;
}
@@ -3114,6 +2671,7 @@ glusterd_op_stage_barrier(dict_t *dict, char **op_errstr)
xlator_t *this = NULL;
char *volname = NULL;
glusterd_volinfo_t *vol = NULL;
+ char *barrier_op = NULL;
GF_ASSERT(dict);
this = THIS;
@@ -3141,7 +2699,7 @@ glusterd_op_stage_barrier(dict_t *dict, char **op_errstr)
goto out;
}
- ret = dict_get_str_boolean(dict, "barrier", -1);
+ ret = dict_get_strn(dict, "barrier", SLEN("barrier"), &barrier_op);
if (ret == -1) {
gf_asprintf(op_errstr,
"Barrier op for volume %s not present "
@@ -3220,202 +2778,19 @@ out:
}
int
-__glusterd_handle_add_tier_brick(rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf_cli_req cli_req = {{
- 0,
- }};
- dict_t *dict = NULL;
- char *bricks = NULL;
- char *volname = NULL;
- int brick_count = 0;
- void *cli_rsp = NULL;
- char err_str[2048] = "";
- gf_cli_rsp rsp = {
- 0,
- };
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- int32_t replica_count = 0;
- int32_t arbiter_count = 0;
- int type = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
-
- GF_VALIDATE_OR_GOTO(this->name, req, out);
-
- ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- /*failed to decode msg*/
- req->rpc_err = GARBAGE_ARGS;
- snprintf(err_str, sizeof(err_str), "Garbage args received");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, "%s",
- err_str);
- goto out;
- }
-
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_ADD_BRICK_REQ_RECVD,
- "Received add brick req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new();
-
- ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf(err_str, sizeof(err_str),
- "Unable to decode "
- "the command");
- goto out;
- }
- }
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
-
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volume "
- "name");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
- err_str);
- goto out;
- }
-
- if (!glusterd_check_volume_exists(volname)) {
- snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
- err_str);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volume "
- "brick count");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
- err_str);
- goto out;
- }
-
- ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
- &replica_count);
- if (!ret) {
- gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
- "replica-count is %d", replica_count);
- }
-
- ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
- &arbiter_count);
- if (!ret) {
- gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
- "arbiter-count is %d", arbiter_count);
- }
-
- if (!dict_getn(dict, "force", SLEN("force"))) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Failed to get flag");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volinfo "
- "for volume name %s",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
- err_str);
- goto out;
- }
-
- if (glusterd_is_tiering_supported(err_str) == _gf_false) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf(err_str, sizeof(err_str), "Volume %s is already a tier.",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_TIER, "%s",
- err_str);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "failed to get type from dictionary");
- goto out;
- }
- }
-
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volume "
- "bricks");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
- err_str);
- goto out;
- }
-
- if (type != volinfo->type) {
- ret = dict_set_int32n(dict, "type", SLEN("type"), type);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "failed to set the new type in dict");
- goto out;
- }
- }
-
- ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_ADD_TIER_BRICK, dict);
-
-out:
- if (ret) {
- rsp.op_ret = -1;
- rsp.op_errno = 0;
- if (err_str[0] == '\0')
- snprintf(err_str, sizeof(err_str), "Operation failed");
- rsp.op_errstr = err_str;
- cli_rsp = &rsp;
- glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
- dict);
- ret = 0; /*sent error to cli, prevent second reply*/
- }
-
- free(cli_req.dict.dict_val); /*its malloced by xdr*/
-
- return ret;
-}
-
-int
glusterd_handle_add_tier_brick(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler(req, __glusterd_handle_add_tier_brick);
+ return 0;
}
int
glusterd_handle_attach_tier(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler(req, __glusterd_handle_add_brick);
+ return 0;
}
int
glusterd_handle_detach_tier(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler(req, __glusterd_handle_remove_brick);
+ return 0;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
index 87025076d27..5c01f0c70b6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
@@ -8,7 +8,7 @@
cases as published by the Free Software Foundation.
*/
-#include "xlator.h"
+#include <glusterfs/xlator.h>
#include "rpc-clnt.h"
#include "glusterd.h"
#include "glusterd-conn-mgmt.h"
@@ -26,8 +26,17 @@ glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout,
xlator_t *this = THIS;
glusterd_svc_t *svc = NULL;
- if (!this)
+ if (!this) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_XLATOR_NOT_DEFINED,
+ NULL);
goto out;
+ }
+
+ options = dict_new();
+ if (!options) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
svc = glusterd_conn_get_svc_object(conn);
if (!svc) {
@@ -36,15 +45,17 @@ glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout,
goto out;
}
- ret = rpc_transport_unix_options_build(&options, sockpath, frame_timeout);
+ ret = rpc_transport_unix_options_build(options, sockpath, frame_timeout);
if (ret)
goto out;
- ret = dict_set_nstrn(options, "transport.socket.ignore-enoent",
- SLEN("transport.socket.ignore-enoent"), "on",
- SLEN("on"));
- if (ret)
+ ret = dict_set_int32n(options, "transport.socket.ignore-enoent",
+ SLEN("transport.socket.ignore-enoent"), 1);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=transport.socket.ignore-enoent", NULL);
goto out;
+ }
/* @options is free'd by rpc_transport when destroyed */
rpc = rpc_clnt_new(options, this, (char *)svc->name, 16);
@@ -58,15 +69,18 @@ glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout,
goto out;
ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath);
- if (ret < 0)
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
- else
+ } else
ret = 0;
conn->frame_timeout = frame_timeout;
conn->rpc = rpc;
conn->notify = notify;
out:
+ if (options)
+ dict_unref(options);
if (ret) {
if (rpc) {
rpc_clnt_unref(rpc);
@@ -92,7 +106,7 @@ glusterd_conn_connect(glusterd_conn_t *conn)
int
glusterd_conn_disconnect(glusterd_conn_t *conn)
{
- rpc_clnt_disconnect(conn->rpc);
+ rpc_clnt_disable(conn->rpc);
return 0;
}
@@ -133,3 +147,45 @@ glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath,
glusterd_set_socket_filepath(sockfilepath, socketpath, len);
return 0;
}
+
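+/* Shared rpc-clnt notify path for daemons that are multiplexed into a
+ * single service process; events are dispatched to the owning
+ * glusterd_svc_proc_t's notify handler. */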
+int
+__glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ glusterd_conf_t *conf = THIS->private;
+ glusterd_svc_proc_t *mux_proc = mydata;
+ int ret = -1;
+
+ /* Silently ignoring this error, exactly like the current
+ * implementation */
+ if (!mux_proc)
+ return 0;
+
+ if (event == RPC_CLNT_DESTROY) {
+ /* RPC_CLNT_DESTROY is only delivered after mux_proc has been
+ * detached from the list, so it is safe to handle it without the
+ * lock; processing RPC_CLNT_DESTROY under a lock would lead to
+ * deadlock.
+ */
+ if (mux_proc->data) {
+ glusterd_volinfo_unref(mux_proc->data);
+ mux_proc->data = NULL;
+ }
+ GF_FREE(mux_proc);
+ ret = 0;
+ } else {
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ ret = mux_proc->notify(mux_proc, event);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ }
+ return ret;
+}
+
+int
+glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ return glusterd_big_locked_notify(rpc, mydata, event, data,
+ __glusterd_muxsvc_conn_common_notify);
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
index 602c0ba7b84..1b225621ab1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
@@ -20,11 +20,11 @@ typedef int (*glusterd_conn_notify_t)(glusterd_conn_t *conn,
struct glusterd_conn_ {
struct rpc_clnt *rpc;
- char sockpath[PATH_MAX];
- int frame_timeout;
/* Existing daemons tend to specialize their respective
* notify implementations, so ... */
glusterd_conn_notify_t notify;
+ int frame_timeout;
+ char sockpath[PATH_MAX];
};
int
@@ -43,9 +43,11 @@ glusterd_conn_disconnect(glusterd_conn_t *conn);
int
glusterd_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data);
+int
+glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data);
int32_t
glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath,
int len);
-
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-errno.h b/xlators/mgmt/glusterd/src/glusterd-errno.h
index 7e1575b57af..c74070e0e8d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-errno.h
+++ b/xlators/mgmt/glusterd/src/glusterd-errno.h
@@ -27,7 +27,7 @@ enum glusterd_op_errno {
EG_ISSNAP = 30813, /* Volume is a snap volume */
EG_GEOREPRUN = 30814, /* Geo-Replication is running */
EG_NOTTHINP = 30815, /* Bricks are not thinly provisioned */
- EG_NOGANESHA = 30816, /* obsolete ganesha is not enabled */
+ EG_NOGANESHA = 30816, /* Global ganesha is not enabled */
};
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
new file mode 100644
index 00000000000..f08bd6cebee
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -0,0 +1,927 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <glusterfs/common-utils.h>
+#include "glusterd.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-store.h"
+#include "glusterd-utils.h"
+#include "glusterd-volgen.h"
+#include "glusterd-messages.h"
+#include <glusterfs/syscall.h>
+
+#include <ctype.h>
+
+int
+start_ganesha(char **op_errstr);
+
+typedef struct service_command {
+ char *binary;
+ char *service;
+ int (*action)(struct service_command *, char *);
+} service_command;
+
+/* parsing_ganesha_ha_conf allocates the returned string; the caller
+ * must free it with GF_FREE(). Returns NULL on error or if the key is
+ * not found. */
+static char *
+parsing_ganesha_ha_conf(const char *key)
+{
+#define MAX_LINE 1024
+ char scratch[MAX_LINE * 2] = {
+ 0,
+ };
+ char *value = NULL, *pointer = NULL, *end_pointer = NULL;
+ FILE *fp;
+
+ fp = fopen(GANESHA_HA_CONF, "r");
+ if (fp == NULL) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "couldn't open the file %s", GANESHA_HA_CONF);
+ goto end_ret;
+ }
+ while ((pointer = fgets(scratch, MAX_LINE, fp)) != NULL) {
+ /* Read config file until we get matching "^[[:space:]]*key" */
+ if (*pointer == '#') {
+ continue;
+ }
+ while (isblank(*pointer)) {
+ pointer++;
+ }
+ if (strncmp(pointer, key, strlen(key))) {
+ continue;
+ }
+ pointer += strlen(key);
+ /* key found: if we fail to parse, return an error rather than
+ * trying the next line.
+ * Assumption: the conf file is bash-compatible, i.e. there are
+ * no spaces around the '=' */
+ if (*pointer != '=') {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_GET_CONFIG_INFO_FAILED, "Parsing %s failed at key %s",
+ GANESHA_HA_CONF, key);
+ goto end_close;
+ }
+ pointer++; /* jump the '=' */
+
+ if (*pointer == '"' || *pointer == '\'') {
+ /* don't include the quote */
+ pointer++;
+ }
+ end_pointer = pointer;
+ /* stop at the next closing quote or blank/newline */
+ do {
+ end_pointer++;
+ } while (!(*end_pointer == '\'' || *end_pointer == '"' ||
+ isspace(*end_pointer) || *end_pointer == '\0'));
+ *end_pointer = '\0';
+
+ /* got it. copy it and return */
+ value = gf_strdup(pointer);
+ break;
+ }
+
+end_close:
+ fclose(fp);
+end_ret:
+ return value;
+}
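+
+/* Usage sketch (illustrative; the caller owns the returned buffer):
+ *
+ *     char *nodes = parsing_ganesha_ha_conf("HA_CLUSTER_NODES");
+ *     if (nodes != NULL) {
+ *         ... split on ',' and use the node list ...
+ *         GF_FREE(nodes);
+ *     }
+ */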
+
+static int
+sc_systemctl_action(struct service_command *sc, char *command)
+{
+ runner_t runner = {
+ 0,
+ };
+
+ runinit(&runner);
+ runner_add_args(&runner, sc->binary, command, sc->service, NULL);
+ return runner_run(&runner);
+}
+
+static int
+sc_service_action(struct service_command *sc, char *command)
+{
+ runner_t runner = {
+ 0,
+ };
+
+ runinit(&runner);
+ runner_add_args(&runner, sc->binary, sc->service, command, NULL);
+ return runner_run(&runner);
+}
+
+static int
+manage_service(char *action)
+{
+ int i = 0;
+ int ret = 0;
+ struct service_command sc_list[] = {{.binary = "/bin/systemctl",
+ .service = "nfs-ganesha",
+ .action = sc_systemctl_action},
+ {.binary = "/sbin/invoke-rc.d",
+ .service = "nfs-ganesha",
+ .action = sc_service_action},
+ {.binary = "/sbin/service",
+ .service = "nfs-ganesha",
+ .action = sc_service_action},
+ {.binary = NULL}};
+
+ while (sc_list[i].binary != NULL) {
+ ret = sys_access(sc_list[i].binary, X_OK);
+ if (ret == 0) {
+ gf_msg_debug(THIS->name, 0, "%s found.", sc_list[i].binary);
+ return sc_list[i].action(&sc_list[i], action);
+ }
+ i++;
+ }
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_UNRECOGNIZED_SVC_MNGR,
+ "Could not %s NFS-Ganesha.Service manager for distro"
+ " not recognized.",
+ action);
+ return ret;
+}
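+
+/* Depending on which binary is found first, the action expands to one
+ * of the following (shown for action == "start"):
+ *
+ *     /bin/systemctl start nfs-ganesha
+ *     /sbin/invoke-rc.d nfs-ganesha start
+ *     /sbin/service nfs-ganesha start
+ */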
+
+/*
+ * Check if the cluster is a ganesha cluster or not
+ */
+gf_boolean_t
+glusterd_is_ganesha_cluster()
+{
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t ret_bool = _gf_false;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("ganesha", this, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ ret = dict_get_str_boolean(priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL,
+ _gf_false);
+ if (ret == _gf_true) {
+ ret_bool = _gf_true;
+ gf_msg_debug(this->name, 0, "nfs-ganesha is enabled for the cluster");
+ } else
+ gf_msg_debug(this->name, 0, "nfs-ganesha is disabled for the cluster");
+
+out:
+ return ret_bool;
+}
+
+/* Check if ganesha.enable is set to 'on', i.e. whether the
+ * volume is exported via NFS-Ganesha */
+gf_boolean_t
+glusterd_check_ganesha_export(glusterd_volinfo_t *volinfo)
+{
+ char *value = NULL;
+ gf_boolean_t is_exported = _gf_false;
+ int ret = 0;
+
+ ret = glusterd_volinfo_get(volinfo, "ganesha.enable", &value);
+ if ((ret == 0) && value) {
+ if (strcmp(value, "on") == 0) {
+ gf_msg_debug(THIS->name, 0,
+ "ganesha.enable set"
+ " to %s",
+ value);
+ is_exported = _gf_true;
+ }
+ }
+ return is_exported;
+}
+
+/* *
+ * The function below is called during the commit phase of the volume
+ * set option "ganesha.enable". If the value is "on", it creates the
+ * export configuration file and then exports the volume via a dbus
+ * command. In case of "off", the volume has already been unexported
+ * during the stage phase, so it only removes the conf file from
+ * shared storage.
+ */
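+/* Typical trigger (illustrative):
+ *     gluster volume set <volname> ganesha.enable on
+ */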
+int
+glusterd_check_ganesha_cmd(char *key, char *value, char **errstr, dict_t *dict)
+{
+ int ret = 0;
+ char *volname = NULL;
+
+ GF_ASSERT(key);
+ GF_ASSERT(value);
+ GF_ASSERT(dict);
+
+ if ((strcmp(key, "ganesha.enable") == 0)) {
+ if ((strcmp(value, "on")) && (strcmp(value, "off"))) {
+ gf_asprintf(errstr,
+ "Invalid value"
+ " for volume set command. Use on/off only.");
+ ret = -1;
+ goto out;
+ }
+ if (strcmp(value, "on") == 0) {
+ ret = glusterd_handle_ganesha_op(dict, errstr, key, value);
+
+ } else if (is_origin_glusterd(dict)) {
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ gf_msg("glusterd-ganesha", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
+ goto out;
+ }
+ ret = manage_export_config(volname, "off", errstr);
+ }
+ }
+out:
+ if (ret) {
+ gf_msg("glusterd-ganesha", GF_LOG_ERROR, 0,
+ GD_MSG_NFS_GNS_OP_HANDLE_FAIL,
+ "Handling NFS-Ganesha"
+ " op failed.");
+ }
+ return ret;
+}
+
+int
+glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr)
+{
+ int ret = -1;
+ char *value = NULL;
+ char *str = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(dict);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_str(dict, "value", &value);
+ if (value == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "value not present.");
+ goto out;
+ }
+ /* This dict_get will fail if the user had never set the key before */
+ /*Ignoring the ret value and proceeding */
+ ret = dict_get_str(priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str);
+ if (str ? strcmp(value, str) == 0 : strcmp(value, "disable") == 0) {
+ gf_asprintf(op_errstr, "nfs-ganesha is already %sd.", value);
+ ret = -1;
+ goto out;
+ }
+
+ if (strcmp(value, "enable") == 0) {
+ ret = start_ganesha(op_errstr);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_NFS_GNS_START_FAIL,
+ "Could not start NFS-Ganesha");
+ }
+ } else {
+ ret = stop_ganesha(op_errstr);
+ if (ret)
+ gf_msg_debug(THIS->name, 0,
+ "Could not stop "
+ "NFS-Ganesha.");
+ }
+
+out:
+
+ if (ret) {
+ if (!(*op_errstr)) {
+ *op_errstr = gf_strdup("Error, Validation Failed");
+ gf_msg_debug(this->name, 0, "Error, Cannot Validate option :%s",
+ GLUSTERD_STORE_KEY_GANESHA_GLOBAL);
+ } else {
+ gf_msg_debug(this->name, 0, "Error, Cannot Validate option");
+ }
+ }
+ return ret;
+}
+
+int
+glusterd_op_set_ganesha(dict_t *dict, char **errstr)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ char *key = NULL;
+ char *value = NULL;
+ char *next_version = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_str(dict, "key", &key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Couldn't get key in global option set");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "value", &value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Couldn't get value in global option set");
+ goto out;
+ }
+
+ ret = glusterd_handle_ganesha_op(dict, errstr, key, value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NFS_GNS_SETUP_FAIL,
+ "Initial NFS-Ganesha set up failed");
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr_with_alloc(priv->opts,
+ GLUSTERD_STORE_KEY_GANESHA_GLOBAL, value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set"
+ " nfs-ganesha in dict.");
+ goto out;
+ }
+ ret = glusterd_get_next_global_opt_version_str(priv->opts, &next_version);
+ if (ret) {
+ gf_msg_debug(THIS->name, 0,
+ "Could not fetch "
+ " global op version");
+ goto out;
+ }
+ ret = dict_set_str(priv->opts, GLUSTERD_GLOBAL_OPT_VERSION, next_version);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_options(this, priv->opts);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_FAIL,
+ "Failed to store options");
+ goto out;
+ }
+
+out:
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ return ret;
+}
+
+/* The following function parses GANESHA_HA_CONF.
+ * A sample file looks like this:
+ * HA_NAME="ganesha-ha-360"
+ * HA_VOL_NAME="ha-state"
+ * HA_CLUSTER_NODES="server1,server2"
+ * VIP_rhs_1="10.x.x.x"
+ * VIP_rhs_2="10.x.x.x." */
+
+/* Check if the localhost is listed as one of the nfs-ganesha nodes */
+gf_boolean_t
+check_host_list(void)
+{
+ glusterd_conf_t *priv = NULL;
+ char *hostname, *hostlist;
+ gf_boolean_t ret = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ priv = THIS->private;
+ GF_ASSERT(priv);
+
+ hostlist = parsing_ganesha_ha_conf("HA_CLUSTER_NODES");
+ if (hostlist == NULL) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_GET_CONFIG_INFO_FAILED,
+ "couldn't get HA_CLUSTER_NODES from file %s", GANESHA_HA_CONF);
+ return _gf_false;
+ }
+
+ /* Hostlist is a comma separated list now */
+ hostname = strtok(hostlist, ",");
+ while (hostname != NULL) {
+ ret = gf_is_local_addr(hostname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_NFS_GNS_HOST_FOUND,
+ "ganesha host found "
+ "Hostname is %s",
+ hostname);
+ break;
+ }
+ hostname = strtok(NULL, ",");
+ }
+
+ GF_FREE(hostlist);
+ return ret;
+}
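+
+/* Illustrative: with HA_CLUSTER_NODES="server1,server2" in
+ * ganesha-ha.conf, this returns _gf_true only when one of those names
+ * resolves to a local address. */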
+
+int
+gd_ganesha_send_dbus(char *volname, char *value)
+{
+ runner_t runner = {
+ 0,
+ };
+ int ret = -1;
+ runinit(&runner);
+
+ GF_VALIDATE_OR_GOTO("glusterd-ganesha", volname, out);
+ GF_VALIDATE_OR_GOTO("glusterd-ganesha", value, out);
+
+ ret = 0;
+ if (check_host_list()) {
+ /* Check whether ganesha is running on this node */
+ if (manage_service("status")) {
+ gf_msg("glusterd-ganesha", GF_LOG_WARNING, 0,
+ GD_MSG_GANESHA_NOT_RUNNING,
+ "Export failed, NFS-Ganesha is not running");
+ } else {
+ runner_add_args(&runner, GANESHA_PREFIX "/dbus-send.sh", CONFDIR,
+ value, volname, NULL);
+ ret = runner_run(&runner);
+ }
+ }
+out:
+ return ret;
+}
+
+int
+manage_export_config(char *volname, char *value, char **op_errstr)
+{
+ runner_t runner = {
+ 0,
+ };
+ int ret = -1;
+
+ GF_ASSERT(volname);
+ runinit(&runner);
+ runner_add_args(&runner, GANESHA_PREFIX "/create-export-ganesha.sh",
+ CONFDIR, value, volname, NULL);
+ ret = runner_run(&runner);
+
+ if (ret && op_errstr)
+ gf_asprintf(op_errstr,
+ "Failed to create"
+ " NFS-Ganesha export config file.");
+
+ return ret;
+}
+
+/* Exports and unexports a particular volume via NFS-Ganesha */
+int
+ganesha_manage_export(dict_t *dict, char *value,
+ gf_boolean_t update_cache_invalidation, char **op_errstr)
+{
+ int ret = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ dict_t *vol_opts = NULL;
+ char *volname = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t option = _gf_false;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+
+ GF_ASSERT(value);
+ GF_ASSERT(dict);
+ GF_ASSERT(priv);
+
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+ ret = gf_string2boolean(value, &option);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "invalid value.");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+ FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ ret = glusterd_check_ganesha_export(volinfo);
+ if (ret && option) {
+ gf_asprintf(op_errstr,
+ "ganesha.enable "
+ "is already 'on'.");
+ ret = -1;
+ goto out;
+
+ } else if (!option && !ret) {
+ gf_asprintf(op_errstr,
+ "ganesha.enable "
+ "is already 'off'.");
+ ret = -1;
+ goto out;
+ }
+
+ /* Check if global option is enabled, proceed only then */
+ ret = dict_get_str_boolean(priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL,
+ _gf_false);
+ if (ret == -1) {
+ gf_msg_debug(this->name, 0,
+ "Failed to get "
+ "global option dict.");
+ gf_asprintf(op_errstr,
+ "The option "
+ "nfs-ganesha should be "
+ "enabled before setting ganesha.enable.");
+ goto out;
+ }
+ if (!ret) {
+ gf_asprintf(op_errstr,
+ "The option "
+ "nfs-ganesha should be "
+ "enabled before setting ganesha.enable.");
+ ret = -1;
+ goto out;
+ }
+
+ /* *
+ * Create the export file from the node where ganesha.enable "on"
+ * is executed
+ */
+ if (option && is_origin_glusterd(dict)) {
+ ret = manage_export_config(volname, "on", op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Failed to create"
+ "export file for NFS-Ganesha\n");
+ goto out;
+ }
+ }
+ ret = gd_ganesha_send_dbus(volname, value);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "Dynamic export addition/deletion failed."
+ " Please see log file for details");
+ goto out;
+ }
+ if (update_cache_invalidation) {
+ vol_opts = volinfo->dict;
+ ret = dict_set_dynstr_with_alloc(vol_opts,
+ "features.cache-invalidation", value);
+ if (ret)
+ gf_asprintf(op_errstr,
+ "Cache-invalidation could not"
+ " be set to %s.",
+ value);
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ gf_asprintf(op_errstr, "failed to store volinfo for %s",
+ volinfo->volname);
+ }
+out:
+ return ret;
+}
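+
+/* Illustrative: this path is reached via `gluster volume set
+ * <volname> ganesha.enable on|off`; the cluster-wide option must have
+ * been enabled first with `gluster nfs-ganesha enable`, otherwise the
+ * global-option check above fails. */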
+
+int
+tear_down_cluster(gf_boolean_t run_teardown)
+{
+ int ret = 0;
+ runner_t runner = {
+ 0,
+ };
+ struct stat st = {
+ 0,
+ };
+ DIR *dir = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+ char path[PATH_MAX] = {
+ 0,
+ };
+
+ if (run_teardown) {
+ runinit(&runner);
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh", "teardown",
+ CONFDIR, NULL);
+ ret = runner_run(&runner);
+ /* *
+ * Remove all the entries in CONFDIR except ganesha.conf and
+ * ganesha-ha.conf
+ */
+ dir = sys_opendir(CONFDIR);
+ if (!dir) {
+ gf_msg_debug(THIS->name, 0,
+ "Failed to open directory %s. "
+ "Reason : %s",
+ CONFDIR, strerror(errno));
+ ret = 0;
+ goto out;
+ }
+
+ while ((entry = sys_readdir(dir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
+ snprintf(path, PATH_MAX, "%s/%s", CONFDIR, entry->d_name);
+ ret = sys_lstat(path, &st);
+ if (ret == -1) {
+ gf_msg_debug(THIS->name, 0,
+ "Failed to stat entry %s :"
+ " %s",
+ path, strerror(errno));
+ goto out;
+ }
+
+ if (strcmp(entry->d_name, "ganesha.conf") == 0 ||
+ strcmp(entry->d_name, "ganesha-ha.conf") == 0)
+ gf_msg_debug(THIS->name, 0,
+ " %s is not required"
+ " to remove",
+ path);
+ else if (S_ISDIR(st.st_mode))
+ ret = recursive_rmdir(path);
+ else
+ ret = sys_unlink(path);
+
+ if (ret) {
+ gf_msg_debug(THIS->name, 0,
+ " Failed to remove %s. "
+ "Reason : %s",
+ path, strerror(errno));
+ }
+
+ gf_msg_debug(THIS->name, 0, "%s %s",
+ ret ? "Failed to remove" : "Removed", entry->d_name);
+ }
+
+ ret = sys_closedir(dir);
+ if (ret) {
+ gf_msg_debug(THIS->name, 0,
+ "Failed to close dir %s. Reason :"
+ " %s",
+ CONFDIR, strerror(errno));
+ }
+ goto exit;
+ }
+
+out:
+ if (dir && sys_closedir(dir)) {
+ gf_msg_debug(THIS->name, 0,
+ "Failed to close dir %s. Reason :"
+ " %s",
+ CONFDIR, strerror(errno));
+ }
+exit:
+ return ret;
+}
+
+int
+setup_cluster(gf_boolean_t run_setup)
+{
+ int ret = 0;
+ runner_t runner = {
+ 0,
+ };
+
+ if (run_setup) {
+ runinit(&runner);
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh", "setup",
+ CONFDIR, NULL);
+ ret = runner_run(&runner);
+ }
+ return ret;
+}
+
+static int
+teardown(gf_boolean_t run_teardown, char **op_errstr)
+{
+ runner_t runner = {
+ 0,
+ };
+ int ret = 1;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ dict_t *vol_opts = NULL;
+
+ priv = THIS->private;
+
+ ret = tear_down_cluster(run_teardown);
+ if (ret == -1) {
+ gf_asprintf(op_errstr,
+ "Cleanup of NFS-Ganesha"
+ " HA config failed.");
+ goto out;
+ }
+
+ runinit(&runner);
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh", "cleanup",
+ CONFDIR, NULL);
+ ret = runner_run(&runner);
+ if (ret)
+ gf_msg_debug(THIS->name, 0,
+ "Could not clean up"
+ " NFS-Ganesha related config");
+
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ vol_opts = volinfo->dict;
+ /* All the volumes exported via NFS-Ganesha will be
+ unexported, hence setting the appropriate keys */
+ ret = dict_set_str(vol_opts, "features.cache-invalidation", "off");
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_WARNING, errno, GD_MSG_DICT_SET_FAILED,
+ "Could not set features.cache-invalidation "
+ "to off for %s",
+ volinfo->volname);
+
+ ret = dict_set_str(vol_opts, "ganesha.enable", "off");
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_WARNING, errno, GD_MSG_DICT_SET_FAILED,
+ "Could not set ganesha.enable to off for %s",
+ volinfo->volname);
+
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL,
+ "failed to store volinfo for %s", volinfo->volname);
+ }
+out:
+ return ret;
+}
+
+int
+stop_ganesha(char **op_errstr)
+{
+ int ret = 0;
+ runner_t runner = {
+ 0,
+ };
+
+ if (check_host_list()) {
+ runinit(&runner);
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh",
+ "--setup-ganesha-conf-files", CONFDIR, "no", NULL);
+ ret = runner_run(&runner);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "removal of symlink ganesha.conf "
+ "in /etc/ganesha failed");
+ }
+ ret = manage_service("stop");
+ if (ret)
+ gf_asprintf(op_errstr,
+ "NFS-Ganesha service could not"
+ "be stopped.");
+ }
+ return ret;
+}
+
+int
+start_ganesha(char **op_errstr)
+{
+ int ret = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ runner_t runner = {
+ 0,
+ };
+
+ priv = THIS->private;
+ GF_ASSERT(priv);
+
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+#ifdef BUILD_GNFS
+ /* Gluster-nfs has to be disabled across the trusted pool */
+ /* before attempting to start nfs-ganesha */
+ ret = dict_set_str_sizen(volinfo->dict, NFS_DISABLE_MAP_KEY, "on");
+ if (ret)
+ goto out;
+#endif
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ *op_errstr = gf_strdup(
+ "Failed to store the "
+ "Volume information");
+ goto out;
+ }
+ }
+
+ /* If the nfs svc is not initialized it means that the service is not
+ * running, hence we can skip the process of stopping gluster-nfs
+ * service
+ */
+#ifdef BUILD_GNFS
+ if (priv->nfs_svc.inited) {
+ ret = priv->nfs_svc.stop(&(priv->nfs_svc), SIGKILL);
+ if (ret) {
+ ret = -1;
+ gf_asprintf(op_errstr,
+ "Gluster-NFS service could"
+ "not be stopped, exiting.");
+ goto out;
+ }
+ }
+#endif
+
+ if (check_host_list()) {
+ runinit(&runner);
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh",
+ "--setup-ganesha-conf-files", CONFDIR, "yes", NULL);
+ ret = runner_run(&runner);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "creation of symlink ganesha.conf "
+ "in /etc/ganesha failed");
+ goto out;
+ }
+ ret = manage_service("start");
+ if (ret)
+ gf_asprintf(op_errstr,
+ "NFS-Ganesha failed to start."
+ "Please see log file for details");
+ }
+
+out:
+ return ret;
+}
+
+static int
+pre_setup(gf_boolean_t run_setup, char **op_errstr)
+{
+ int ret = 0;
+ if (run_setup) {
+ if (!check_host_list()) {
+ gf_asprintf(op_errstr,
+ "Running nfs-ganesha setup command "
+ "from node which is not part of ganesha cluster");
+ return -1;
+ }
+ }
+ ret = setup_cluster(run_setup);
+ if (ret == -1)
+ gf_asprintf(op_errstr,
+ "Failed to set up HA "
+ "config for NFS-Ganesha. "
+ "Please check the log file for details");
+ return ret;
+}
+
+int
+glusterd_handle_ganesha_op(dict_t *dict, char **op_errstr, char *key,
+ char *value)
+{
+ int32_t ret = -1;
+ gf_boolean_t option = _gf_false;
+
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(key);
+ GF_ASSERT(value);
+
+ if (strcmp(key, "ganesha.enable") == 0) {
+ ret = ganesha_manage_export(dict, value, _gf_true, op_errstr);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* It is possible that the key might not be set */
+ ret = gf_string2boolean(value, &option);
+ if (ret == -1) {
+ gf_asprintf(op_errstr, "Invalid value in key-value pair.");
+ goto out;
+ }
+
+ if (strcmp(key, GLUSTERD_STORE_KEY_GANESHA_GLOBAL) == 0) {
+ /* *
+ * The set up/teardown of pcs cluster should be performed only
+ * once. This will be done on the node on which the cli command
+ * 'gluster nfs-ganesha <enable/disable>' was executed, so that
+ * node should be part of the ganesha HA cluster
+ */
+ if (option) {
+ ret = pre_setup(is_origin_glusterd(dict), op_errstr);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = teardown(is_origin_glusterd(dict), op_errstr);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
index 39617be827b..bf062c87060 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
@@ -7,7 +7,7 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
@@ -17,8 +17,8 @@
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-svc-helper.h"
-#include "run.h"
-#include "syscall.h"
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
#include "glusterd-messages.h"
#include <signal.h>
@@ -76,6 +76,19 @@ static char *gsync_reserved_opts[] = {"gluster-command",
static char *gsync_no_restart_opts[] = {"checkpoint", "log_rsync_performance",
"log-rsync-performance", NULL};
+void
+set_gsyncd_inet6_arg(runner_t *runner)
+{
+ xlator_t *this = NULL;
+ char *af;
+ int ret;
+
+ this = THIS;
+ ret = dict_get_str(this->options, "transport.address-family", &af);
+ if (ret == 0)
+ runner_argprintf(runner, "--%s", af);
+}
+
int
__glusterd_handle_sys_exec(rpcsvc_request_t *req)
{
@@ -102,13 +115,18 @@ __glusterd_handle_sys_exec(rpcsvc_request_t *req)
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ snprintf(err_str, sizeof(err_str), "Garbage args received");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
if (cli_req.dict.dict_len) {
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
goto out;
+ }
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
&dict);
@@ -129,13 +147,18 @@ __glusterd_handle_sys_exec(rpcsvc_request_t *req)
snprintf(err_str, sizeof(err_str),
"Failed to get "
"the uuid of local glusterd");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_UUID_GET_FAIL,
+ NULL);
ret = -1;
goto out;
}
ret = dict_set_dynstr(dict, "host-uuid", host_uuid);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=host-uuid", NULL);
goto out;
+ }
}
ret = glusterd_op_begin_synctask(req, cli_op, dict);
@@ -175,13 +198,18 @@ __glusterd_handle_copy_file(rpcsvc_request_t *req)
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ snprintf(err_str, sizeof(err_str), "Garbage args received");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
if (cli_req.dict.dict_len) {
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
goto out;
+ }
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
&dict);
@@ -202,6 +230,8 @@ __glusterd_handle_copy_file(rpcsvc_request_t *req)
snprintf(err_str, sizeof(err_str),
"Failed to get "
"the uuid of local glusterd");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_UUID_GET_FAIL,
+ NULL);
ret = -1;
goto out;
}
@@ -254,13 +284,18 @@ __glusterd_handle_gsync_set(rpcsvc_request_t *req)
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ snprintf(err_str, sizeof(err_str), "Garbage args received");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
if (cli_req.dict.dict_len) {
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
goto out;
+ }
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
&dict);
@@ -281,6 +316,8 @@ __glusterd_handle_gsync_set(rpcsvc_request_t *req)
snprintf(err_str, sizeof(err_str),
"Failed to get "
"the uuid of local glusterd");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_UUID_GET_FAIL,
+ NULL);
ret = -1;
goto out;
}
@@ -384,6 +421,7 @@ glusterd_urltransform_init(runner_t *runner, const char *transname)
{
runinit(runner);
runner_add_arg(runner, GSYNCD_PREFIX "/gsyncd");
+ set_gsyncd_inet6_arg(runner);
runner_argprintf(runner, "--%s-url", transname);
}
@@ -588,7 +626,7 @@ struct slave_vol_config {
char old_slvhost[_POSIX_HOST_NAME_MAX + 1];
char old_slvuser[LOGIN_NAME_MAX];
unsigned old_slvidx;
- char slave_voluuid[GF_UUID_BUF_SIZE];
+ char slave_voluuid[UUID_CANONICAL_FORM_LEN + 1];
};
static int
@@ -725,6 +763,7 @@ glusterd_get_slave_voluuid(char *slave_host, char *slave_vol, char *vol_uuid)
runinit(&runner);
runner_add_arg(&runner, GSYNCD_PREFIX "/gsyncd");
+ set_gsyncd_inet6_arg(&runner);
runner_add_arg(&runner, "--slavevoluuid-get");
runner_argprintf(&runner, "%s::%s", slave_host, slave_vol);
@@ -745,7 +784,7 @@ _fcbk_conftodict(char *resbuf, size_t blen, FILE *fp, void *data)
for (;;) {
errno = 0;
- ptr = fgets(resbuf, blen, fp);
+ ptr = fgets(resbuf, blen - 2, fp);
if (!ptr)
break;
v = resbuf + strlen(resbuf) - 1;
@@ -788,6 +827,7 @@ glusterd_gsync_get_config(char *master, char *slave, char *conf_path,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--config-get-all", NULL);
@@ -803,12 +843,13 @@ _fcbk_statustostruct(char *resbuf, size_t blen, FILE *fp, void *data)
char *v = NULL;
char *k = NULL;
gf_gsync_status_t *sts_val = NULL;
+ size_t len = 0;
sts_val = (gf_gsync_status_t *)data;
for (;;) {
errno = 0;
- ptr = fgets(resbuf, blen, fp);
+ ptr = fgets(resbuf, blen - 2, fp);
if (!ptr)
break;
@@ -836,47 +877,63 @@ _fcbk_statustostruct(char *resbuf, size_t blen, FILE *fp, void *data)
}
if (strcmp(k, "worker_status") == 0) {
- memcpy(sts_val->worker_status, v, strlen(v));
- sts_val->worker_status[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->worker_status) - 1));
+ memcpy(sts_val->worker_status, v, len);
+ sts_val->worker_status[len] = '\0';
} else if (strcmp(k, "slave_node") == 0) {
- memcpy(sts_val->slave_node, v, strlen(v));
- sts_val->slave_node[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->slave_node) - 1));
+ memcpy(sts_val->slave_node, v, len);
+ sts_val->slave_node[len] = '\0';
} else if (strcmp(k, "crawl_status") == 0) {
- memcpy(sts_val->crawl_status, v, strlen(v));
- sts_val->crawl_status[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->crawl_status) - 1));
+ memcpy(sts_val->crawl_status, v, len);
+ sts_val->crawl_status[len] = '\0';
} else if (strcmp(k, "last_synced") == 0) {
- memcpy(sts_val->last_synced, v, strlen(v));
- sts_val->last_synced[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->last_synced) - 1));
+ memcpy(sts_val->last_synced, v, len);
+ sts_val->last_synced[len] = '\0';
} else if (strcmp(k, "last_synced_utc") == 0) {
- memcpy(sts_val->last_synced_utc, v, strlen(v));
- sts_val->last_synced_utc[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->last_synced_utc) - 1));
+ memcpy(sts_val->last_synced_utc, v, len);
+ sts_val->last_synced_utc[len] = '\0';
} else if (strcmp(k, "entry") == 0) {
- memcpy(sts_val->entry, v, strlen(v));
- sts_val->entry[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->entry) - 1));
+ memcpy(sts_val->entry, v, len);
+ sts_val->entry[len] = '\0';
} else if (strcmp(k, "data") == 0) {
- memcpy(sts_val->data, v, strlen(v));
- sts_val->data[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->data) - 1));
+ memcpy(sts_val->data, v, len);
+ sts_val->data[len] = '\0';
} else if (strcmp(k, "meta") == 0) {
- memcpy(sts_val->meta, v, strlen(v));
- sts_val->meta[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->meta) - 1));
+ memcpy(sts_val->meta, v, len);
+ sts_val->meta[len] = '\0';
} else if (strcmp(k, "failures") == 0) {
- memcpy(sts_val->failures, v, strlen(v));
- sts_val->failures[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->failures) - 1));
+ memcpy(sts_val->failures, v, len);
+ sts_val->failures[len] = '\0';
} else if (strcmp(k, "checkpoint_time") == 0) {
- memcpy(sts_val->checkpoint_time, v, strlen(v));
- sts_val->checkpoint_time[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->checkpoint_time) - 1));
+ memcpy(sts_val->checkpoint_time, v, len);
+ sts_val->checkpoint_time[len] = '\0';
} else if (strcmp(k, "checkpoint_time_utc") == 0) {
- memcpy(sts_val->checkpoint_time_utc, v, strlen(v));
- sts_val->checkpoint_time_utc[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->checkpoint_time_utc) - 1));
+ memcpy(sts_val->checkpoint_time_utc, v, len);
+ sts_val->checkpoint_time_utc[len] = '\0';
} else if (strcmp(k, "checkpoint_completed") == 0) {
- memcpy(sts_val->checkpoint_completed, v, strlen(v));
- sts_val->checkpoint_completed[strlen(v)] = '\0';
+ len = min(strlen(v), (sizeof(sts_val->checkpoint_completed) - 1));
+ memcpy(sts_val->checkpoint_completed, v, len);
+ sts_val->checkpoint_completed[len] = '\0';
} else if (strcmp(k, "checkpoint_completion_time") == 0) {
- memcpy(sts_val->checkpoint_completion_time, v, strlen(v));
- sts_val->checkpoint_completion_time[strlen(v)] = '\0';
+ len = min(strlen(v),
+ (sizeof(sts_val->checkpoint_completion_time) - 1));
+ memcpy(sts_val->checkpoint_completion_time, v, len);
+ sts_val->checkpoint_completion_time[len] = '\0';
} else if (strcmp(k, "checkpoint_completion_time_utc") == 0) {
- memcpy(sts_val->checkpoint_completion_time_utc, v, strlen(v));
- sts_val->checkpoint_completion_time_utc[strlen(v)] = '\0';
+ len = min(strlen(v),
+ (sizeof(sts_val->checkpoint_completion_time_utc) - 1));
+ memcpy(sts_val->checkpoint_completion_time_utc, v, len);
+ sts_val->checkpoint_completion_time_utc[len] = '\0';
}
GF_FREE(v);
GF_FREE(k);
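+ /* Every copy above is clamped to the size of its destination
+ * field, so an over-long value from gsyncd is truncated instead
+ * of overflowing the fixed-width gf_gsync_status_t members. */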
@@ -900,6 +957,7 @@ glusterd_gsync_get_status(char *master, char *slave, char *conf_path,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--status-get", NULL);
@@ -920,6 +978,7 @@ glusterd_gsync_get_param_file(char *prmfile, const char *param, char *master,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--config-get", NULL);
@@ -1342,7 +1401,7 @@ out:
if (slave_host)
GF_FREE(slave_host);
- gf_msg_debug(this->name, 0, "Returning %d.", ret);
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d.", ret);
return ret;
}
@@ -1695,9 +1754,10 @@ glusterd_store_slave_in_info(glusterd_volinfo_t *volinfo, char *slave,
char *value = NULL;
char *slavekey = NULL;
char *slaveentry = NULL;
- char key[512] = {
+ char key[32] = {
0,
};
+ int keylen;
char *t = NULL;
xlator_t *this = NULL;
struct slave_vol_config slave1 = {
@@ -1769,21 +1829,21 @@ glusterd_store_slave_in_info(glusterd_volinfo_t *volinfo, char *slave,
goto out;
/* Given the slave volume uuid, check and get any existing slave */
- memcpy(slave1.slave_voluuid, slave_voluuid, GF_UUID_BUF_SIZE);
+ memcpy(slave1.slave_voluuid, slave_voluuid, UUID_CANONICAL_FORM_LEN);
ret = dict_foreach(volinfo->gsync_slaves, _get_slave_idx_slave_voluuid,
&slave1);
if (ret == 0) { /* New slave */
dict_foreach(volinfo->gsync_slaves, _get_max_gsync_slave_num, &maxslv);
- snprintf(key, sizeof(key), "slave%d", maxslv + 1);
+ keylen = snprintf(key, sizeof(key), "slave%d", maxslv + 1);
- ret = dict_set_dynstr(volinfo->gsync_slaves, key, value);
+ ret = dict_set_dynstrn(volinfo->gsync_slaves, key, keylen, value);
if (ret) {
GF_FREE(value);
goto out;
}
} else if (ret == -1) { /* Existing slave */
- snprintf(key, sizeof(key), "slave%d", slave1.old_slvidx);
+ keylen = snprintf(key, sizeof(key), "slave%d", slave1.old_slvidx);
gf_msg_debug(this->name, 0,
"Replacing key:%s with new value"
@@ -1791,7 +1851,7 @@ glusterd_store_slave_in_info(glusterd_volinfo_t *volinfo, char *slave,
key, value);
/* Add new slave's value, with the same slave index */
- ret = dict_set_dynstr(volinfo->gsync_slaves, key, value);
+ ret = dict_set_dynstrn(volinfo->gsync_slaves, key, keylen, value);
if (ret) {
GF_FREE(value);
goto out;
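Capturing snprintf's return value as keylen and switching to dict_set_dynstrn() lets the dict code skip its own strlen() on every insertion. A tiny standalone demonstration of why that is sound:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        char key[32];
        /* snprintf returns the number of bytes written (excluding the
         * NUL), so the caller already holds the key length. */
        int keylen = snprintf(key, sizeof(key), "slave%d", 7);

        assert(keylen == (int)strlen(key));
        return 0;
    }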
@@ -2215,6 +2275,9 @@ glusterd_op_verify_gsync_running(glusterd_volinfo_t *volinfo, char *slave,
"Volume %s needs to be started "
"before " GEOREP " start",
volinfo->volname);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_GEO_REP_START_FAILED,
+ "Volume is not in a started state, Volname=%s",
+ volinfo->volname, NULL);
goto out;
}
@@ -2271,7 +2334,6 @@ glusterd_verify_gsync_status_opts(dict_t *dict, char **op_errstr)
char errmsg[PATH_MAX] = {
0,
};
- gf_boolean_t exists = _gf_false;
glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
char *conf_path = NULL;
@@ -2299,9 +2361,8 @@ glusterd_verify_gsync_status_opts(dict_t *dict, char **op_errstr)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
ret = glusterd_volinfo_find(volname, &volinfo);
- if ((ret) || (!exists)) {
+ if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
"volume name does not exist");
snprintf(errmsg, sizeof(errmsg),
@@ -2309,7 +2370,6 @@ glusterd_verify_gsync_status_opts(dict_t *dict, char **op_errstr)
" exist",
volname);
*op_errstr = gf_strdup(errmsg);
- ret = -1;
goto out;
}
@@ -2522,6 +2582,7 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr)
len = snprintf(abs_filename, sizeof(abs_filename), "%s/%s",
priv->workdir, filename);
if ((len < 0) || (len >= sizeof(abs_filename))) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -2534,6 +2595,9 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr)
if (len < 0) {
strcpy(errmsg, "<error>");
}
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_REALPATH_GET_FAIL,
+ "Realpath=%s, Reason=%s", priv->workdir, strerror(errno),
+ NULL);
*op_errstr = gf_strdup(errmsg);
ret = -1;
goto out;
@@ -2544,6 +2608,8 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr)
"Failed to get "
"realpath of %s: %s",
filename, strerror(errno));
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_REALPATH_GET_FAIL,
+ "Filename=%s, Reason=%s", filename, strerror(errno), NULL);
*op_errstr = gf_strdup(errmsg);
ret = -1;
goto out;
@@ -2553,6 +2619,7 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr)
will succeed for /var/lib/glusterd_bad */
len = snprintf(workdir, sizeof(workdir), "%s/", realpath_workdir);
if ((len < 0) || (len >= sizeof(workdir))) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -2566,6 +2633,8 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr)
if (len < 0) {
strcpy(errmsg, "<error>");
}
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, errmsg,
+ NULL);
*op_errstr = gf_strdup(errmsg);
ret = -1;
goto out;
@@ -2580,6 +2649,8 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr)
if (len < 0) {
strcpy(errmsg, "<error>");
}
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, errmsg,
+ NULL);
*op_errstr = gf_strdup(errmsg);
goto out;
}
@@ -2588,9 +2659,9 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr)
snprintf(errmsg, sizeof(errmsg),
"Source file"
" is not a regular file.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, errmsg,
+ NULL);
*op_errstr = gf_strdup(errmsg);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, "%s",
- errmsg);
ret = -1;
goto out;
}
@@ -2794,6 +2865,7 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
char *slave_ip = NULL;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
+ char *af = NULL;
this = THIS;
GF_ASSERT(this);
@@ -2808,8 +2880,11 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
*/
if (strstr(slave_url, "@")) {
slave_url_buf = gf_strdup(slave_url);
- if (!slave_url_buf)
+ if (!slave_url_buf) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_STRDUP_FAILED,
+ "Slave_url=%s", slave_url, NULL);
goto out;
+ }
slave_user = strtok_r(slave_url_buf, "@", &save_ptr);
slave_ip = strtok_r(NULL, "@", &save_ptr);
@@ -2824,8 +2899,8 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
goto out;
}
- snprintf(log_file_path, sizeof(log_file_path),
- DEFAULT_LOG_FILE_DIRECTORY "/create_verify_log");
+ snprintf(log_file_path, sizeof(log_file_path), "%s/create_verify_log",
+ priv->logdir);
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gverify.sh", NULL);
@@ -2835,9 +2910,16 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
runner_argprintf(&runner, "%s", slave_vol);
runner_argprintf(&runner, "%d", ssh_port);
runner_argprintf(&runner, "%s", log_file_path);
- gf_msg_debug(this->name, 0, "gverify Args = %s %s %s %s %s %s %s",
+ ret = dict_get_str(this->options, "transport.address-family", &af);
+ if (ret)
+ af = "-";
+
+ runner_argprintf(&runner, "%s", af);
+
+ gf_msg_debug(this->name, 0, "gverify Args = %s %s %s %s %s %s %s %s",
runner.argv[0], runner.argv[1], runner.argv[2], runner.argv[3],
- runner.argv[4], runner.argv[5], runner.argv[6]);
+ runner.argv[4], runner.argv[5], runner.argv[6],
+ runner.argv[7]);
runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
synclock_unlock(&priv->big_lock);
ret = runner_run(&runner);
@@ -3097,7 +3179,6 @@ glusterd_op_stage_gsync_create(dict_t *dict, char **op_errstr)
gf_boolean_t is_force = -1;
gf_boolean_t is_no_verify = -1;
gf_boolean_t is_force_blocker = -1;
- gf_boolean_t exists = _gf_false;
gf_boolean_t is_template_in_use = _gf_false;
glusterd_conf_t *conf = NULL;
glusterd_volinfo_t *volinfo = NULL;
@@ -3147,18 +3228,15 @@ glusterd_op_stage_gsync_create(dict_t *dict, char **op_errstr)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
ret = glusterd_volinfo_find(volname, &volinfo);
- if ((ret) || (!exists)) {
+ if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
"volume name does not exist");
snprintf(errmsg, sizeof(errmsg),
"Volume name %s does not"
" exist",
volname);
- *op_errstr = gf_strdup(errmsg);
- gf_msg_debug(this->name, 0, "Returning %d", ret);
- return -1;
+ goto out;
}
ret = glusterd_get_slave_details_confpath(volinfo, dict, &slave_url,
@@ -3399,6 +3477,12 @@ glusterd_op_stage_gsync_create(dict_t *dict, char **op_errstr)
goto out;
}
+ /* There is a remote possibility that slave_host can be NULL when
+ control reaches here. Add a check so we don't crash on the next
+ line. */
+ if (!slave_host)
+ goto out;
+
/* Now, check whether session is already started.If so, warn!*/
is_different_slavehost = (strcmp(slave_host, slave1.old_slvhost) != 0)
? _gf_true
@@ -3493,7 +3577,6 @@ out:
if (slave_url_buf)
GF_FREE(slave_url_buf);
- gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -3572,7 +3655,6 @@ glusterd_op_stage_gsync_set(dict_t *dict, char **op_errstr)
char *statedir = NULL;
char *path_list = NULL;
char *conf_path = NULL;
- gf_boolean_t exists = _gf_false;
glusterd_volinfo_t *volinfo = NULL;
char errmsg[PATH_MAX] = {
0,
@@ -3623,14 +3705,12 @@ glusterd_op_stage_gsync_set(dict_t *dict, char **op_errstr)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
ret = glusterd_volinfo_find(volname, &volinfo);
- if ((ret) || (!exists)) {
+ if (ret) {
snprintf(errmsg, sizeof(errmsg),
"Volume name %s does not"
" exist",
volname);
- ret = -1;
goto out;
}
@@ -3914,7 +3994,7 @@ gd_pause_or_resume_gsync(dict_t *dict, char *master, char *slave,
{
int32_t ret = 0;
int pfd = -1;
- pid_t pid = 0;
+ long pid = 0;
char pidfile[PATH_MAX] = {
0,
};
@@ -3979,8 +4059,9 @@ gd_pause_or_resume_gsync(dict_t *dict, char *master, char *slave,
goto out;
}
- ret = sys_read(pfd, buf, sizeof(buf));
+ ret = sys_read(pfd, buf, sizeof(buf) - 1);
if (ret > 0) {
+ buf[ret] = '\0';
pid = strtol(buf, NULL, 10);
if (is_pause) {
ret = kill(-pid, SIGSTOP);
@@ -4063,6 +4144,7 @@ gd_pause_or_resume_gsync(dict_t *dict, char *master, char *slave,
out:
sys_close(pfd);
+ /* coverity[INTEGER_OVERFLOW] */
return ret;
}
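The read is now capped at sizeof(buf) - 1 and the buffer is NUL-terminated before strtol() parses it; pid is also widened to long to match strtol's return type, and kill(-pid, ...) then signals the whole gsyncd process group. A minimal sketch of the corrected pidfile read:

    #include <stdlib.h>
    #include <unistd.h>

    /* Reserve one byte for the terminator, NUL-terminate exactly what
     * was read, then parse. Without the terminator, strtol() could scan
     * past the end of the buffer. */
    static long
    read_pidfile(int fd)
    {
        char buf[32];
        ssize_t n = read(fd, buf, sizeof(buf) - 1);

        if (n <= 0)
            return -1;
        buf[n] = '\0';
        return strtol(buf, NULL, 10);
    }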
@@ -4072,7 +4154,7 @@ stop_gsync(char *master, char *slave, char **msg, char *conf_path,
{
int32_t ret = 0;
int pfd = -1;
- pid_t pid = 0;
+ long pid = 0;
char pidfile[PATH_MAX] = {
0,
};
@@ -4111,8 +4193,9 @@ stop_gsync(char *master, char *slave, char **msg, char *conf_path,
if (pfd < 0)
goto out;
- ret = sys_read(pfd, buf, sizeof(buf));
+ ret = sys_read(pfd, buf, sizeof(buf) - 1);
if (ret > 0) {
+ buf[ret] = '\0';
pid = strtol(buf, NULL, 10);
ret = kill(-pid, SIGTERM);
if (ret && !is_force) {
@@ -4126,10 +4209,10 @@ stop_gsync(char *master, char *slave, char **msg, char *conf_path,
* still be alive, give some more time
* before SIGKILL (hack)
*/
- usleep(50000);
+ gf_nanosleep(50000 * GF_US_IN_NS);
break;
}
- usleep(50000);
+ gf_nanosleep(50000 * GF_US_IN_NS);
}
kill(-pid, SIGKILL);
sys_unlink(pidfile);
@@ -4138,7 +4221,7 @@ stop_gsync(char *master, char *slave, char **msg, char *conf_path,
out:
sys_close(pfd);
-
+ /* coverity[INTEGER_OVERFLOW] */
return ret;
}
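usleep() is obsolescent and simply returns early on signal delivery; gf_nanosleep() (with GF_US_IN_NS as the microsecond-to-nanosecond factor) keeps the 50 ms grace period intact across interruptions. A hedged sketch of such a wrapper, assuming it resumes after EINTR the way nanosleep() allows:

    #include <errno.h>
    #include <time.h>

    #define US_IN_NS 1000L /* assumption: GF_US_IN_NS is this factor */

    /* nanosleep() reports the remaining time on EINTR, so the sleep can
     * be resumed instead of being silently cut short by a signal. */
    static void
    sleep_us(long us)
    {
        long ns = us * US_IN_NS;
        struct timespec req = {ns / 1000000000L, ns % 1000000000L};
        struct timespec rem;

        while (nanosleep(&req, &rem) == -1 && errno == EINTR)
            req = rem;
    }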
@@ -4209,8 +4292,8 @@ glusterd_gsync_op_already_set(char *master, char *slave, char *conf_path,
}
if (is_bool) {
- if (!strcmp(op_value, "true") || !strcmp(op_value, "1") ||
- !strcmp(op_value, "yes")) {
+ if (op_value && (!strcmp(op_value, "true") || !strcmp(op_value, "1") ||
+ !strcmp(op_value, "yes"))) {
op_val_cli = 1;
} else {
op_val_cli = 0;
@@ -4221,7 +4304,7 @@ glusterd_gsync_op_already_set(char *master, char *slave, char *conf_path,
goto out;
}
} else {
- if (!strcmp(op_val_buf, op_value)) {
+ if (op_value && !strcmp(op_val_buf, op_value)) {
ret = 0;
goto out;
}
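Both branches now test op_value before dereferencing it; the && operator's short-circuit evaluation guarantees strcmp() never sees a NULL pointer:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *op_value = NULL; /* e.g. the option was never set */

        /* && evaluates left to right and stops early, so strcmp() is
         * never called with a NULL argument. */
        if (op_value && strcmp(op_value, "true") == 0)
            puts("boolean option enabled");
        else
            puts("option unset or disabled");
        return 0;
    }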
@@ -5073,7 +5156,6 @@ glusterd_get_gsync_status(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
char errmsg[PATH_MAX] = {
0,
};
- gf_boolean_t exists = _gf_false;
glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
char my_hostname[256] = {
@@ -5096,9 +5178,8 @@ glusterd_get_gsync_status(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
ret = glusterd_volinfo_find(volname, &volinfo);
- if ((ret) || (!exists)) {
+ if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
"volume name does not exist");
snprintf(errmsg, sizeof(errmsg),
@@ -5106,7 +5187,6 @@ glusterd_get_gsync_status(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
" exist",
volname);
*op_errstr = gf_strdup(errmsg);
- ret = -1;
goto out;
}
@@ -5479,8 +5559,9 @@ glusterd_op_copy_file(dict_t *dict, char **op_errstr)
}
do {
- ret = sys_read(fd, buf, sizeof(buf));
+ ret = sys_read(fd, buf, sizeof(buf) - 1);
if (ret > 0) {
+ buf[ret] = '\0';
memcpy(contents + bytes_read, buf, ret);
bytes_read += ret;
}
@@ -5935,7 +6016,7 @@ glusterd_get_slave_info(char *slave, char **slave_url, char **hostname,
GF_ASSERT(this);
ret = glusterd_urltransform_single(slave, "normalize", &linearr);
- if (ret == -1) {
+ if ((ret == -1) || (linearr[0] == NULL)) {
ret = snprintf(errmsg, sizeof(errmsg) - 1, "Invalid Url: %s", slave);
errmsg[ret] = '\0';
*op_errstr = gf_strdup(errmsg);
@@ -5946,7 +6027,10 @@ glusterd_get_slave_info(char *slave, char **slave_url, char **hostname,
tmp = strtok_r(linearr[0], "/", &save_ptr);
tmp = strtok_r(NULL, "/", &save_ptr);
- slave = strtok_r(tmp, ":", &save_ptr);
+ slave = NULL;
+ if (tmp != NULL) {
+ slave = strtok_r(tmp, ":", &save_ptr);
+ }
if (slave) {
ret = glusterd_geo_rep_parse_slave(slave, hostname, op_errstr);
if (ret) {
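strtok_r() returns NULL when the normalized URL has no second path component, and the old code fed that result straight back in as the string to tokenize. A standalone illustration of the guarded flow, using a deliberately degenerate URL:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        char url[] = "gluster://"; /* illustrative bad input */
        char *save = NULL;

        char *tmp = strtok_r(url, "/", &save); /* "gluster:" */
        tmp = strtok_r(NULL, "/", &save);      /* NULL: no second token */

        /* The guard makes the missing-token case explicit instead of
         * relying on strtok_r's continue-from-save behavior when the
         * first argument is NULL. */
        char *slave = (tmp != NULL) ? strtok_r(tmp, ":", &save) : NULL;
        printf("%s\n", slave ? slave : "Invalid Url");
        return 0;
    }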
@@ -6200,26 +6284,28 @@ create_conf_file(glusterd_conf_t *conf, char *conf_path)
/* log-file */
runinit_gsyncd_setrx(&runner, conf_path);
- runner_add_args(&runner, "log-file",
- DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
- "/${mastervol}/${eSlave}.log",
- ".", ".", NULL);
+ runner_add_arg(&runner, "log-file");
+ runner_argprintf(&runner, "%s/%s/${mastervol}/${eSlave}.log", conf->logdir,
+ GEOREP);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* changelog-log-file */
runinit_gsyncd_setrx(&runner, conf_path);
- runner_add_args(&runner, "changelog-log-file",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP "/${mastervol}/${eSlave}${local_id}-changes.log",
- ".", ".", NULL);
+ runner_add_arg(&runner, "changelog-log-file");
+ runner_argprintf(&runner,
+ "%s/%s/${mastervol}/${eSlave}${local_id}-changes.log",
+ conf->logdir, GEOREP);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* gluster-log-file */
runinit_gsyncd_setrx(&runner, conf_path);
- runner_add_args(&runner, "gluster-log-file",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP "/${mastervol}/${eSlave}${local_id}.gluster.log",
- ".", ".", NULL);
+ runner_add_arg(&runner, "gluster-log-file");
+ runner_argprintf(&runner,
+ "%s/%s/${mastervol}/${eSlave}${local_id}.gluster.log",
+ conf->logdir, GEOREP);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* ignore-deletes */
@@ -6261,33 +6347,35 @@ create_conf_file(glusterd_conf_t *conf, char *conf_path)
/* log-file */
runinit_gsyncd_setrx(&runner, conf_path);
- runner_add_args(
- &runner, "log-file",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP
- "-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.log",
- ".", NULL);
+ runner_add_arg(&runner, "log-file");
+ runner_argprintf(&runner,
+ "%s/%s-slaves/"
+ "${session_owner}:${local_node}${local_id}.${slavevol}."
+ "log",
+ conf->logdir, GEOREP);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* MountBroker log-file */
runinit_gsyncd_setrx(&runner, conf_path);
- runner_add_args(
- &runner, "log-file-mbr",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP
- "-slaves/mbr/${session_owner}:${local_node}${local_id}.${slavevol}.log",
- ".", NULL);
+ runner_add_arg(&runner, "log-file-mbr");
+ runner_argprintf(&runner,
+ "%s/%s-slaves/mbr/"
+ "${session_owner}:${local_node}${local_id}.${slavevol}."
+ "log",
+ conf->logdir, GEOREP);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* gluster-log-file */
runinit_gsyncd_setrx(&runner, conf_path);
- runner_add_args(
- &runner, "gluster-log-file",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP
- "-slaves/"
- "${session_owner}:${local_node}${local_id}.${slavevol}.gluster.log",
- ".", NULL);
+ runner_add_arg(&runner, "gluster-log-file");
+ runner_argprintf(&runner,
+ "%s/%s-slaves/"
+ "${session_owner}:${local_node}${local_id}.${slavevol}."
+ "gluster.log",
+ conf->logdir, GEOREP);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
out:
@@ -6338,7 +6426,7 @@ glusterd_create_essential_dir_files(glusterd_volinfo_t *volinfo, dict_t *dict,
ret = -1;
goto out;
}
- ret = mkdir_p(buf, 0777, _gf_true);
+ ret = mkdir_p(buf, 0755, _gf_true);
if (ret) {
len = snprintf(errmsg, sizeof(errmsg),
"Unable to create %s"
@@ -6353,13 +6441,13 @@ glusterd_create_essential_dir_files(glusterd_volinfo_t *volinfo, dict_t *dict,
goto out;
}
- ret = snprintf(buf, PATH_MAX, DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP "/%s",
+ ret = snprintf(buf, PATH_MAX, "%s/" GEOREP "/%s", conf->logdir,
volinfo->volname);
if ((ret < 0) || (ret >= PATH_MAX)) {
ret = -1;
goto out;
}
- ret = mkdir_p(buf, 0777, _gf_true);
+ ret = mkdir_p(buf, 0755, _gf_true);
if (ret) {
len = snprintf(errmsg, sizeof(errmsg),
"Unable to create %s"
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.h b/xlators/mgmt/glusterd/src/glusterd-geo-rep.h
index 5f5fe344406..7d1318f522c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.h
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.h
@@ -30,8 +30,8 @@ typedef struct glusterd_gsync_status_temp {
} glusterd_gsync_status_temp_t;
typedef struct gsync_status_param {
- int is_active;
glusterd_volinfo_t *volinfo;
+ int is_active;
} gsync_status_param_t;
int
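This swap, like the glusterd_gfproxydsvc_ one further down, follows the usual padding rule: order members by decreasing alignment so the compiler inserts fewer holes. A three-member example where the effect is visible (not from the tree):

    #include <stdio.h>

    struct before { /* int, pointer, int: a 4-byte hole before the pointer */
        int a;
        void *p;
        int b;
    };              /* typically 24 bytes on LP64 */

    struct after {  /* widest member first: the two ints pack together */
        void *p;
        int a;
        int b;
    };              /* typically 16 bytes on LP64 */

    int
    main(void)
    {
        printf("before=%zu after=%zu\n", sizeof(struct before),
               sizeof(struct after));
        return 0;
    }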
diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
index 0b56a0eb45a..319bfa140f3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
@@ -12,7 +12,7 @@
#include "glusterd-utils.h"
#include "glusterd-gfproxyd-svc-helper.h"
#include "glusterd-messages.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
#include "glusterd-volgen.h"
void
@@ -81,7 +81,8 @@ glusterd_svc_build_gfproxyd_volfile_path(glusterd_volinfo_t *volinfo,
void
glusterd_svc_build_gfproxyd_logdir(char *logdir, char *volname, size_t len)
{
- snprintf(logdir, len, "%s/gfproxy/%s", DEFAULT_LOG_FILE_DIRECTORY, volname);
+ glusterd_conf_t *conf = THIS->private;
+ snprintf(logdir, len, "%s/gfproxy/%s", conf->logdir, volname);
}
void
@@ -111,7 +112,7 @@ glusterd_svc_get_gfproxyd_volfile(glusterd_volinfo_t *volinfo, char *svc_name,
goto out;
}
- /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
+ /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
tmp_fd = mkstemp(*tmpvol);
if (tmp_fd < 0) {
gf_msg("glusterd", GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
index 0a78d4d1fd0..a0bfea41f0f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
@@ -8,8 +8,8 @@
cases as published by the Free Software Foundation.
*/
-#include "globals.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
@@ -18,7 +18,7 @@
#include "glusterd-svc-helper.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-gfproxyd-svc-helper.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
void
glusterd_gfproxydsvc_build(glusterd_svc_t *svc)
@@ -310,7 +310,7 @@ glusterd_gfproxydsvc_start(glusterd_svc_t *svc, int flags)
}
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s",
svc->proc.logdir, svc->proc.logfile);
if ((len < 0) || (len >= PATH_MAX)) {
@@ -318,8 +318,13 @@ glusterd_gfproxydsvc_start(glusterd_svc_t *svc, int flags)
goto out;
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
+
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
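cmd_args.valgrind, a plain boolean, becomes cmd_args.vgtool, letting the daemon start under either memcheck or drd. _gf_none and _gf_memcheck appear in the hunk itself; the enum's exact shape is an assumption in this standalone sketch:

    #include <stdio.h>

    typedef enum { _gf_none = 0, _gf_memcheck, _gf_drd } vgtool_sketch_t;

    static const char *
    valgrind_argv(vgtool_sketch_t tool)
    {
        switch (tool) {
            case _gf_memcheck:
                return "valgrind --leak-check=full --trace-children=yes "
                       "--track-origins=yes";
            case _gf_drd:
                return "valgrind --tool=drd";
            default:
                return NULL; /* _gf_none: start the daemon directly */
        }
    }

    int
    main(void)
    {
        printf("%s\n", valgrind_argv(_gf_memcheck));
        return 0;
    }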
@@ -370,6 +375,7 @@ int
glusterd_gfproxydsvc_restart()
{
glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
int ret = -1;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
@@ -380,7 +386,7 @@ glusterd_gfproxydsvc_restart()
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
{
/* Start per volume gfproxyd svc */
if (volinfo->status == GLUSTERD_STATUS_STARTED) {
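The _safe variant keeps a lookahead cursor, so restarting a volume's gfproxyd (which may modify the volume list) cannot invalidate the iterator. A generic, runnable version of the idiom (the macro here is illustrative, not URCU's):

    #include <stdlib.h>

    struct node {
        int v;
        struct node *next;
    };

    /* Fetch the successor before visiting the current element, so the
     * body may unlink or free it. */
    #define list_for_each_safe(pos, tmp, head)                             \
        for ((pos) = (head); (pos) && ((tmp) = (pos)->next, 1); (pos) = (tmp))

    int
    main(void)
    {
        struct node *head = NULL, *pos, *tmp;
        int i;

        for (i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            if (!n)
                return 1;
            n->v = i;
            n->next = head;
            head = n;
        }
        list_for_each_safe(pos, tmp, head)
            free(pos); /* safe only because the successor was saved first */
        return 0;
    }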
diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.h b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.h
index db1c8b1e7b0..d396b4015f3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.h
+++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.h
@@ -17,8 +17,8 @@
struct glusterd_gfproxydsvc_ {
glusterd_svc_t svc;
- int port;
gf_store_handle_t *handle;
+ int port;
};
typedef struct glusterd_gfproxydsvc_ glusterd_gfproxydsvc_t;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 12e7d30320e..1b21c40596d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -9,24 +9,25 @@
*/
#include <inttypes.h>
-#include "glusterfs.h"
-#include "compat.h"
-#include "dict.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/dict.h>
#include "protocol-common.h"
-#include "xlator.h"
-#include "logging.h"
-#include "syscall.h"
-#include "timer.h"
-#include "defaults.h"
-#include "compat.h"
-#include "compat-errno.h"
-#include "statedump.h"
-#include "run.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/timer.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/statedump.h>
+#include <glusterfs/run.h>
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
+#include "glusterd-mgmt.h"
#include "glusterd-server-quorum.h"
#include "glusterd-store.h"
#include "glusterd-locks.h"
@@ -45,15 +46,11 @@
#include <sys/resource.h>
#include <inttypes.h>
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "glusterd-syncop.h"
#include "glusterd-messages.h"
-#ifdef HAVE_BD_XLATOR
-#include <lvm2app.h>
-#endif
-
extern glusterd_op_info_t opinfo;
static int volcount;
@@ -94,16 +91,17 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
glusterd_friend_sm_event_t *event = NULL;
glusterd_friend_req_ctx_t *ctx = NULL;
char rhost[UNIX_PATH_MAX + 1] = {0};
- uuid_t friend_uuid = {0};
dict_t *dict = NULL;
- gf_uuid_parse(uuid_utoa(uuid), friend_uuid);
if (!port)
port = GF_DEFAULT_BASE_PORT;
ret = glusterd_remote_hostname_get(req, rhost, sizeof(rhost));
- rcu_read_lock();
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
+ dict = dict_new();
+
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, rhost);
@@ -129,8 +127,6 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
event->peername = gf_strdup(peerinfo->hostname);
gf_uuid_copy(event->peerid, peerinfo->uuid);
- ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
-
if (!ctx) {
gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
"Unable to allocate memory");
@@ -143,8 +139,8 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ctx->hostname = gf_strdup(hostname);
ctx->req = req;
- dict = dict_new();
if (!dict) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -152,9 +148,11 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = dict_unserialize(friend_req->vols.vols_val, friend_req->vols.vols_len,
&dict);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
- else
+ } else
dict->extra_stdfree = friend_req->vols.vols_val;
ctx->vols = dict;
@@ -174,7 +172,7 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = GLUSTERD_CONNECTION_AWAITED;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
if (ctx && ctx->hostname)
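From here on, the file converts every bare rcu_read_lock()/rcu_read_unlock() pair to the RCU_READ_LOCK/RCU_READ_UNLOCK macros and reworks the control flow so the read-side section is closed on each early-exit path before the function logs and returns. The macro bodies are not part of this diff; the sketch below is only a guess at the shape, showing why a single macro pair is easier to audit than scattered raw calls:

    #include <urcu.h>

    /* Assumption: the real macros in glusterd-rcu.h forward to liburcu
     * and give one central place to add lock-state tracking or
     * static-analysis annotations. These bodies are illustrative. */
    #define RCU_READ_LOCK                                                  \
        do {                                                               \
            rcu_read_lock();                                               \
        } while (0)

    #define RCU_READ_UNLOCK                                                \
        do {                                                               \
            rcu_read_unlock();                                             \
        } while (0)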
@@ -207,11 +205,14 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
if (!port)
port = GF_DEFAULT_BASE_PORT;
- rcu_read_lock();
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
+
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hostname);
if (peerinfo == NULL) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
"Received remove-friend from unknown peer %s", hostname);
ret = glusterd_xfer_friend_remove_resp(req, hostname, port);
@@ -222,6 +223,7 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
&event);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
"event generation failed: %d", ret);
goto out;
@@ -232,12 +234,11 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
gf_uuid_copy(event->peerid, uuid);
- ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
-
if (!ctx) {
+ RCU_READ_UNLOCK;
+ ret = -1;
gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
"Unable to allocate memory");
- ret = -1;
goto out;
}
@@ -251,6 +252,7 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = glusterd_friend_sm_inject_event(event);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Unable to inject event %d, "
"ret = %d",
@@ -258,10 +260,11 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
goto out;
}
- ret = 0;
+ RCU_READ_UNLOCK;
+
+ return 0;
out:
- rcu_read_unlock();
if (0 != ret) {
if (ctx && ctx->hostname)
@@ -326,81 +329,6 @@ _build_option_key(dict_t *d, char *k, data_t *v, void *tmp)
}
int
-glusterd_add_tier_volume_detail_to_dict(glusterd_volinfo_t *volinfo,
- dict_t *dict, int count)
-{
- int ret = -1;
- char key[64] = {
- 0,
- };
- int keylen;
-
- GF_ASSERT(volinfo);
- GF_ASSERT(dict);
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_type", count);
- ret = dict_set_int32n(dict, key, keylen, volinfo->tier_info.cold_type);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_brick_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_dist_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_dist_leaf_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_replica_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_replica_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_arbiter_count", count);
- ret = dict_set_int32n(dict, key, keylen, volinfo->arbiter_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_disperse_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_disperse_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_redundancy_count",
- count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_redundancy_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.hot_type", count);
- ret = dict_set_int32n(dict, key, keylen, volinfo->tier_info.hot_type);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.hot_brick_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.hot_brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.hot_replica_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.hot_replica_count);
- if (ret)
- goto out;
-
-out:
- return ret;
-}
-
-int
glusterd_add_arbiter_info_to_bricks(glusterd_volinfo_t *volinfo,
dict_t *volumes, int count)
{
@@ -409,41 +337,18 @@ glusterd_add_arbiter_info_to_bricks(glusterd_volinfo_t *volinfo,
};
int keylen;
int i = 0;
- int start_index = 0;
int ret = 0;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- /*TODO: Add info for hot tier once attach tier of arbiter
- * volumes is supported. */
-
- /* cold tier */
- if (volinfo->tier_info.cold_replica_count == 1 ||
- volinfo->arbiter_count != 1)
- return 0;
-
- i = start_index = volinfo->tier_info.hot_brick_count + 1;
- for (; i <= volinfo->brick_count; i++) {
- if ((i - start_index + 1) % volinfo->tier_info.cold_replica_count !=
- 0)
- continue;
- keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter",
- count, i);
- ret = dict_set_int32n(volumes, key, keylen, 1);
- if (ret)
- return ret;
- }
- } else {
- if (volinfo->replica_count == 1 || volinfo->arbiter_count != 1)
- return 0;
- for (i = 1; i <= volinfo->brick_count; i++) {
- if (i % volinfo->replica_count != 0)
- continue;
- keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter",
- count, i);
- ret = dict_set_int32n(volumes, key, keylen, 1);
- if (ret)
- return ret;
- }
+ if (volinfo->replica_count == 1 || volinfo->arbiter_count != 1)
+ return 0;
+ for (i = 1; i <= volinfo->brick_count; i++) {
+ if (i % volinfo->replica_count != 0)
+ continue;
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter", count,
+ i);
+ ret = dict_set_int32n(volumes, key, keylen, 1);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -458,6 +363,7 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
};
int keylen;
glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *ta_brickinfo = NULL;
char *buf = NULL;
int i = 1;
dict_t *dict = NULL;
@@ -467,9 +373,12 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
0,
};
xlator_t *this = NULL;
- GF_UNUSED int caps = 0;
int32_t len = 0;
+ char ta_brick[4096] = {
+ 0,
+ };
+
GF_ASSERT(volinfo);
GF_ASSERT(volumes);
@@ -480,172 +389,129 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
keylen = snprintf(key, sizeof(key), "volume%d.name", count);
ret = dict_set_strn(volumes, key, keylen, volinfo->volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.type", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.status", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->status);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.brick_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.hot_brick_count", count);
- ret = dict_set_int32n(volumes, key, keylen,
- volinfo->tier_info.hot_brick_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_add_tier_volume_detail_to_dict(volinfo, volumes, count);
- if (ret)
- goto out;
}
keylen = snprintf(key, sizeof(key), "volume%d.dist_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->dist_leaf_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.stripe_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->stripe_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.replica_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->replica_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->disperse_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->redundancy_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->arbiter_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.transport", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->transport_type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->thin_arbiter_count);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
+ goto out;
+ }
volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id));
- if (!volume_id_str)
+ if (!volume_id_str) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.volume_id", count);
ret = dict_set_dynstrn(volumes, key, keylen, volume_id_str);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.rebalance", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->rebal.defrag_cmd);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.snap_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->snap_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
-
-#ifdef HAVE_BD_XLATOR
- if (volinfo->caps) {
- caps = 0;
- keylen = snprintf(key, sizeof(key), "volume%d.xlator0", count);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- if (volinfo->caps & CAPS_BD)
- snprintf(buf, 256, "BD");
- ret = dict_set_dynstrn(volumes, key, keylen, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
-
- if (volinfo->caps & CAPS_THIN) {
- snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
- caps++);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf(buf, 256, "thin");
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_COPY) {
- snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
- caps++);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf(buf, 256, "offload_copy");
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
- snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
- caps++);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf(buf, 256, "offload_snapshot");
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_ZERO) {
- snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
- caps++);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf(buf, 256, "offload_zerofill");
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
- }
}
-#endif
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
@@ -658,42 +524,67 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
len = snprintf(brick, sizeof(brick), "%s:%s", brickinfo->hostname,
brickinfo->path);
if ((len < 0) || (len >= sizeof(brick))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
buf = gf_strdup(brick);
keylen = snprintf(key, sizeof(key), "volume%d.brick%d", count, i);
ret = dict_set_dynstrn(volumes, key, keylen, buf);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.brick%d.uuid", count, i);
snprintf(brick_uuid, sizeof(brick_uuid), "%s",
uuid_utoa(brickinfo->uuid));
buf = gf_strdup(brick_uuid);
- if (!buf)
+ if (!buf) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "brick_uuid=%s", brick_uuid, NULL);
goto out;
+ }
ret = dict_set_dynstrn(volumes, key, keylen, buf);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
-
-#ifdef HAVE_BD_XLATOR
- if (volinfo->caps & CAPS_BD) {
- snprintf(key, sizeof(key), "volume%d.vg%d", count, i);
- snprintf(brick, sizeof(brick), "%s", brickinfo->vg);
- buf = gf_strdup(brick);
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret)
- goto out;
}
-#endif
+
i++;
}
+ if (volinfo->thin_arbiter_count == 1) {
+ ta_brickinfo = list_first_entry(&volinfo->ta_bricks,
+ glusterd_brickinfo_t, brick_list);
+ len = snprintf(ta_brick, sizeof(ta_brick), "%s:%s",
+ ta_brickinfo->hostname, ta_brickinfo->path);
+ if ((len < 0) || (len >= sizeof(ta_brick))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
+ ret = -1;
+ goto out;
+ }
+ buf = gf_strdup(ta_brick);
+ keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_brick",
+ count);
+ ret = dict_set_dynstrn(volumes, key, keylen, buf);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
+ goto out;
+ }
+ }
+
ret = glusterd_add_arbiter_info_to_bricks(volinfo, volumes, count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ARBITER_BRICK_SET_INFO_FAIL, NULL);
goto out;
+ }
dict = volinfo->dict;
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = 0;
goto out;
}
@@ -904,9 +795,9 @@ __glusterd_handle_cluster_lock(rpcsvc_request_t *req)
gf_msg_debug(this->name, 0, "Received LOCK from uuid: %s",
uuid_utoa(lock_req.uuid));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -929,6 +820,7 @@ __glusterd_handle_cluster_lock(rpcsvc_request_t *req)
op_ctx = dict_new();
if (!op_ctx) {
+ ret = -1;
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
"Unable to set new dict");
goto out;
@@ -955,6 +847,9 @@ out:
glusterd_friend_sm();
glusterd_op_sm();
+ if (ret)
+ GF_FREE(ctx);
+
return ret;
}
@@ -985,11 +880,14 @@ glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid,
gf_msg_debug(this->name, 0, "Received op from uuid %s", str);
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
req_ctx = GF_CALLOC(1, sizeof(*req_ctx), mem_type);
if (!req_ctx) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
}
@@ -997,8 +895,8 @@ glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid,
req_ctx->op = op;
ret = dict_unserialize(buf_val, buf_len, &dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
}
@@ -1063,9 +961,9 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -1077,7 +975,11 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
/* In cases where there is no volname, the receivers won't have a
* transaction opinfo created, as for those operations, the locking
- * phase where the transaction opinfos are created, won't be called. */
+ * phase where the transaction opinfos are created, won't be called.
+ * skip_locking will be true for all such transactions, and we clear
+ * the txn_opinfo after the staging phase, except for geo-replication
+ * operations, where we need to access the txn_opinfo in later phases too.
+ */
ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
if (ret) {
gf_msg_debug(this->name, 0, "No transaction's opinfo set");
@@ -1086,7 +988,8 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
glusterd_txn_opinfo_init(&txn_op_info, &state, &op_req.op,
req_ctx->dict, req);
- txn_op_info.skip_locking = _gf_true;
+ if (req_ctx->op != GD_OP_GSYNC_SET)
+ txn_op_info.skip_locking = _gf_true;
ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
@@ -1144,9 +1047,9 @@ __glusterd_handle_commit_op(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -1267,12 +1170,12 @@ __glusterd_handle_cli_probe(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_hostname(hostname);
ret = (peerinfo && gd_peer_has_address(peerinfo, hostname));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg_debug("glusterd", 0,
@@ -1336,6 +1239,7 @@ __glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
glusterd_volinfo_t *tmp = NULL;
glusterd_snap_t *snapinfo = NULL;
glusterd_snap_t *tmpsnap = NULL;
+ gf_boolean_t need_free = _gf_false;
this = THIS;
GF_ASSERT(this);
@@ -1356,6 +1260,13 @@ __glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
if (cli_req.dict.dict_len) {
dict = dict_new();
+ if (dict) {
+ need_free = _gf_true;
+ } else {
+ ret = -1;
+ goto out;
+ }
+
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
&dict);
if (ret < 0) {
@@ -1451,12 +1362,17 @@ __glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
&op_errno);
}
+ need_free = _gf_false;
+
out:
free(cli_req.dict.dict_val);
if (ret) {
ret = glusterd_xfer_cli_deprobe_resp(req, ret, op_errno, NULL, hostname,
dict);
+ if (need_free) {
+ dict_unref(dict);
+ }
}
glusterd_friend_sm();
@@ -1554,7 +1470,7 @@ __glusterd_handle_cli_get_volume(rpcsvc_request_t *req)
goto out;
}
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_GET_VOL_REQ_RCVD,
+ gf_msg(this->name, GF_LOG_DEBUG, 0, GD_MSG_GET_VOL_REQ_RCVD,
"Received get vol req");
if (cli_req.dict.dict_len) {
@@ -1756,6 +1672,8 @@ __glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
if (cli_req.dict.dict_len) {
dict = dict_new();
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
ret = -1;
goto out;
}
@@ -1778,6 +1696,7 @@ __glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
rsp_dict = dict_new();
if (!rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -1794,9 +1713,8 @@ __glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "Failed to serialize "
- "dictionary.");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
ret = 0;
@@ -1815,6 +1733,10 @@ out:
glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+ GF_FREE(rsp.dict.dict_val);
+
return 0;
}
int
@@ -1845,8 +1767,10 @@ __glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
GF_ASSERT(priv);
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
{
@@ -1858,8 +1782,11 @@ __glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
}
ret = dict_set_int32n(dict, "count", SLEN("count"), count);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
@@ -1881,6 +1808,8 @@ out:
if (dict)
dict_unref(dict);
+ GF_FREE(rsp.dict.dict_val);
+
glusterd_friend_sm();
glusterd_op_sm();
@@ -1904,6 +1833,85 @@ glusterd_op_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
return ret;
}
+int
+__glusterd_handle_ganesha_cmd(rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_GANESHA;
+ char *op_errstr = NULL;
+ char err_str[2048] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to decode "
+ "request received from cli");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
+ err_str);
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ gf_msg_trace(this->name, 0, "Received global option request");
+
+ ret = glusterd_op_begin_synctask(req, GD_OP_GANESHA, dict);
+out:
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
+ if (op_errstr)
+ GF_FREE(op_errstr);
+ if (dict)
+ dict_unref(dict);
+
+ return ret;
+}
+
+int
+glusterd_handle_ganesha_cmd(rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler(req, __glusterd_handle_ganesha_cmd);
+}
+
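The new handler follows the file's standard pattern: a __prefixed worker plus a thin wrapper that runs it under glusterd's big lock, so RPC actors never interleave. A simplified, portable rendering of that wrapper pattern (glusterd's actual types are synclock_t and rpcsvc_request_t):

    #include <pthread.h>

    typedef int (*actor_fn_t)(void *req);

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Every __handler body runs with the daemon-wide lock held. */
    static int
    big_locked_handler(void *req, actor_fn_t actor)
    {
        int ret;

        pthread_mutex_lock(&big_lock);
        ret = actor(req);
        pthread_mutex_unlock(&big_lock);
        return ret;
    }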
static int
__glusterd_handle_reset_volume(rpcsvc_request_t *req)
{
@@ -2230,9 +2238,8 @@ glusterd_fsm_log_send_resp(rpcsvc_request_t *req, int op_ret, char *op_errstr,
ret = dict_allocate_and_serialize(dict, &rsp.fsm_log.fsm_log_val,
&rsp.fsm_log.fsm_log_len);
if (ret < 0) {
- gf_msg("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
return ret;
}
}
@@ -2278,6 +2285,7 @@ __glusterd_handle_fsm_log(rpcsvc_request_t *req)
dict = dict_new();
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -2286,17 +2294,17 @@ __glusterd_handle_fsm_log(rpcsvc_request_t *req)
conf = this->private;
ret = glusterd_sm_tr_log_add_to_dict(dict, &conf->op_sm_log);
} else {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_hostname(cli_req.name);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
ret = -1;
snprintf(msg, sizeof(msg), "%s is not a peer", cli_req.name);
} else {
ret = glusterd_sm_tr_log_add_to_dict(dict, &peerinfo->sm_log);
+ RCU_READ_UNLOCK;
}
-
- rcu_read_unlock();
}
out:
@@ -2440,9 +2448,9 @@ __glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
gf_msg_debug(this->name, 0, "Received UNLOCK from uuid: %s",
uuid_utoa(unlock_req.uuid));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(unlock_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -2504,8 +2512,8 @@ glusterd_op_stage_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
return ret;
}
@@ -2544,9 +2552,8 @@ glusterd_op_commit_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
}
@@ -2721,7 +2728,7 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
{0},
};
dict_t *dict = NULL;
- char key[100] = {
+ char key[32] = {
0,
};
int keylen;
@@ -2753,11 +2760,11 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
}
ret = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
if (glusterd_peerinfo_find(friend_req.uuid, NULL) == NULL) {
ret = -1;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
"Received friend update request "
@@ -2787,12 +2794,18 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
}
ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
ret = dict_get_int32n(dict, "op", SLEN("op"), &op);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=op", NULL);
goto out;
+ }
if (GD_FRIEND_UPDATE_DEL == op) {
(void)glusterd_handle_friend_update_delete(dict);
@@ -2816,7 +2829,7 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
snprintf(key, sizeof(key), "friend%d", i);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, NULL);
if (peerinfo == NULL) {
/* Create a new peer and add it to the list as there is
@@ -2861,7 +2874,7 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
break;
@@ -2964,7 +2977,7 @@ __glusterd_handle_probe_query(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(probe_req.uuid, remote_hostname);
if ((peerinfo == NULL) && (!cds_list_empty(&conf->peers))) {
rsp.op_ret = -1;
@@ -2984,7 +2997,7 @@ __glusterd_handle_probe_query(rpcsvc_request_t *req)
rsp.op_errno = GF_PROBE_ADD_FAILED;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
respond:
gf_uuid_copy(rsp.uuid, MY_UUID);
@@ -3031,10 +3044,13 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
0,
};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
GF_ASSERT(req);
this = THIS;
GF_ASSERT(this);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
@@ -3048,8 +3064,11 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
if (cli_req.dict.dict_len > 0) {
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
goto out;
+ }
dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict);
}
@@ -3075,12 +3094,21 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
+ if (conf->op_version < GD_OP_VERSION_6_0) {
+ gf_msg_debug(this->name, 0,
+ "The cluster is operating at "
+ "version less than %d. Falling back "
+ "to op-sm framework.",
+ GD_OP_VERSION_6_0);
+ ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ } else {
+ ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(
+ req, cli_op, dict);
+ }
out:
- glusterd_friend_sm();
- glusterd_op_sm();
-
free(cli_req.dict.dict_val);
if (ret) {
@@ -3267,6 +3295,7 @@ __glusterd_handle_umount(rpcsvc_request_t *req)
/* check if it is allowed to umount path */
path = gf_strdup(umnt_req.path);
if (!path) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL);
rsp.op_errno = ENOMEM;
goto out;
}
@@ -3334,25 +3363,26 @@ glusterd_friend_remove(uuid_t uuid, char *hostname)
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hostname);
if (peerinfo == NULL) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
ret = glusterd_friend_remove_cleanup_vols(peerinfo->uuid);
+ RCU_READ_UNLOCK;
if (ret)
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
"Volumes cleanup failed");
- rcu_read_unlock();
/* Giving up the critical section here as glusterd_peerinfo_cleanup must
* be called from outside a critical section
*/
ret = glusterd_peerinfo_cleanup(peerinfo);
out:
gf_msg_debug(THIS->name, 0, "returning %d", ret);
+ /* coverity[LOCK] */
return ret;
}
@@ -3369,6 +3399,7 @@ glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
GF_ASSERT(this);
GF_ASSERT(options);
+ GF_VALIDATE_OR_GOTO(this->name, rpc, out);
if (force && rpc && *rpc) {
(void)rpc_clnt_unref(*rpc);
@@ -3381,7 +3412,6 @@ glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
goto out;
ret = rpc_clnt_register_notify(new_rpc, notify_fn, notify_data);
- *rpc = new_rpc;
if (ret)
goto out;
ret = rpc_clnt_start(new_rpc);
@@ -3390,6 +3420,8 @@ out:
if (new_rpc) {
(void)rpc_clnt_unref(new_rpc);
}
+ } else {
+ *rpc = new_rpc;
}
gf_msg_debug(this->name, 0, "returning %d", ret);
@@ -3397,11 +3429,10 @@ out:
}
int
-glusterd_transport_inet_options_build(dict_t **options, const char *hostname,
- int port)
+glusterd_transport_inet_options_build(dict_t *dict, const char *hostname,
+ int port, char *af)
{
xlator_t *this = NULL;
- dict_t *dict = NULL;
int32_t interval = -1;
int32_t time = -1;
int32_t timeout = -1;
@@ -3409,14 +3440,14 @@ glusterd_transport_inet_options_build(dict_t **options, const char *hostname,
this = THIS;
GF_ASSERT(this);
- GF_ASSERT(options);
+ GF_ASSERT(dict);
GF_ASSERT(hostname);
if (!port)
port = GLUSTERD_DEFAULT_PORT;
/* Build default transport options */
- ret = rpc_transport_inet_options_build(&dict, hostname, port);
+ ret = rpc_transport_inet_options_build(dict, hostname, port, af);
if (ret)
goto out;
@@ -3456,7 +3487,6 @@ glusterd_transport_inet_options_build(dict_t **options, const char *hostname,
if ((interval > 0) || (time > 0))
ret = rpc_transport_keepalive_options_set(dict, interval, time,
timeout);
- *options = dict;
out:
gf_msg_debug("glusterd", 0, "Returning %d", ret);
return ret;
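The builder now fills a caller-supplied dict (and takes the address family explicitly) instead of allocating one and publishing it through an out-parameter, which removes the leak on its error paths: allocation and release both live in the caller. A generic, runnable version of that ownership pattern:

    #include <stdio.h>
    #include <stdlib.h>

    /* The callee fills a caller-owned object instead of allocating its
     * own, so there is exactly one allocation site and one release site
     * for every code path. */
    static int
    build_options(char *buf, size_t len, const char *host, int port)
    {
        int n = snprintf(buf, len, "remote-host=%s;remote-port=%d", host,
                         port);
        return (n < 0 || (size_t)n >= len) ? -1 : 0;
    }

    int
    main(void)
    {
        char *options = malloc(128); /* caller allocates ... */
        int ret = options ? build_options(options, 128, "peer1", 24007) : -1;

        if (!ret)
            puts(options);
        free(options); /* ... and the caller frees, on every path */
        return ret ? 1 : 0;
    }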
@@ -3470,10 +3500,19 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
int ret = -1;
glusterd_peerctx_t *peerctx = NULL;
data_t *data = NULL;
+ char *af = NULL;
peerctx = GF_CALLOC(1, sizeof(*peerctx), gf_gld_mt_peerctx_t);
- if (!peerctx)
+ if (!peerctx) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
+
+ options = dict_new();
+ if (!options) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
if (args)
peerctx->args = *args;
@@ -3485,8 +3524,12 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
uniquely identify a
peerinfo */
- ret = glusterd_transport_inet_options_build(&options, peerinfo->hostname,
- peerinfo->port);
+ ret = dict_get_str(this->options, "transport.address-family", &af);
+ if (ret)
+ gf_log(this->name, GF_LOG_TRACE,
+ "option transport.address-family is not set in xlator options");
+ ret = glusterd_transport_inet_options_build(options, peerinfo->hostname,
+ peerinfo->port, af);
if (ret)
goto out;
@@ -3495,17 +3538,16 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
* create our RPC endpoint with the same address that the peer would
* use to reach us.
*/
+
if (this->options) {
data = dict_getn(this->options, "transport.socket.bind-address",
SLEN("transport.socket.bind-address"));
if (data) {
- ret = dict_setn(options, "transport.socket.source-addr",
- SLEN("transport.socket.source-addr"), data);
+ ret = dict_set_sizen(options, "transport.socket.source-addr", data);
}
data = dict_getn(this->options, "ping-timeout", SLEN("ping-timeout"));
if (data) {
- ret = dict_setn(options, "ping-timeout", SLEN("ping-timeout"),
- data);
+ ret = dict_set_sizen(options, "ping-timeout", data);
}
}
@@ -3538,6 +3580,9 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
peerctx = NULL;
ret = 0;
out:
+ if (options)
+ dict_unref(options);
+
GF_FREE(peerctx);
return ret;
}
@@ -3561,6 +3606,7 @@ glusterd_friend_add(const char *hoststr, int port,
*friend = glusterd_peerinfo_new(state, uuid, hoststr, port);
if (*friend == NULL) {
ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADD_FAIL, NULL);
goto out;
}
@@ -3659,7 +3705,7 @@ glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT(hoststr);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, hoststr);
if (peerinfo == NULL) {
@@ -3704,7 +3750,7 @@ glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg_debug("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -3721,7 +3767,7 @@ glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT(hoststr);
GF_ASSERT(req);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hoststr);
if (peerinfo == NULL) {
@@ -3782,7 +3828,7 @@ glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo->detaching = _gf_true;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -4029,8 +4075,11 @@ set_deprobe_error_str(int op_ret, int op_errno, char *op_errstr, char *errstr,
case GF_DEPROBE_BRICK_EXIST:
snprintf(errstr, len,
- "Brick(s) with the peer "
- "%s exist in cluster",
+                 "Peer %s hosts one or more bricks. If the peer is in an "
+                 "unrecoverable state, use the replace-brick or "
+                 "remove-brick command with force to remove all bricks "
+                 "from the peer, then attempt the peer detach again.",
hostname);
break;
@@ -4135,19 +4184,21 @@ glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
};
int keylen;
- priv = THIS->private;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
GF_ASSERT(priv);
friends = dict_new();
if (!friends) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "Out of Memory");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
}
/* Reset ret to 0, needed to prevent failure in case no peers exist */
ret = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
if (!cds_list_empty(&priv->peers)) {
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
@@ -4158,7 +4209,7 @@ glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
goto out;
@@ -4167,24 +4218,36 @@ unlock:
keylen = snprintf(key, sizeof(key), "friend%d.uuid", count);
uuid_utoa_r(MY_UUID, my_uuid_str);
ret = dict_set_strn(friends, key, keylen, my_uuid_str);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.hostname", count);
ret = dict_set_nstrn(friends, key, keylen, "localhost",
SLEN("localhost"));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.connected", count);
ret = dict_set_int32n(friends, key, keylen, 1);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
}
ret = dict_set_int32n(friends, "count", SLEN("count"), count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
ret = dict_allocate_and_serialize(friends, &rsp.friends.friends_val,
&rsp.friends.friends_len);
@@ -4356,8 +4419,11 @@ __glusterd_handle_status_volume(rpcsvc_request_t *req)
if (cli_req.dict.dict_len > 0) {
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
goto out;
+ }
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
&dict);
if (ret < 0) {
@@ -4431,17 +4497,6 @@ __glusterd_handle_status_volume(rpcsvc_request_t *req)
goto out;
}
- if ((cmd & GF_CLI_STATUS_TIERD) &&
- (conf->op_version < GD_OP_VERSION_3_10_0)) {
- snprintf(err_str, sizeof(err_str),
- "The cluster is operating "
- "at a lesser version than %d. Getting the status of "
- "tierd is not allowed in this state",
- GD_OP_VERSION_3_6_0);
- ret = -1;
- goto out;
- }
-
if ((cmd & GF_CLI_STATUS_SCRUB) &&
(conf->op_version < GD_OP_VERSION_3_7_0)) {
snprintf(err_str, sizeof(err_str),
@@ -4636,6 +4691,7 @@ __glusterd_handle_barrier(rpcsvc_request_t *req)
dict = dict_new();
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -4988,6 +5044,8 @@ out:
&rsp.dict.dict_len);
glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
+ GF_FREE(rsp.dict.dict_val);
+ GF_FREE(key_fixed);
return ret;
}
@@ -5168,12 +5226,17 @@ glusterd_print_gsync_status_by_vol(FILE *fp, glusterd_volinfo_t *volinfo)
0,
};
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+
GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
GF_VALIDATE_OR_GOTO(THIS->name, fp, out);
gsync_rsp_dict = dict_new();
- if (!gsync_rsp_dict)
+ if (!gsync_rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = gethostname(my_hostname, sizeof(my_hostname));
if (ret) {
@@ -5200,7 +5263,7 @@ glusterd_print_snapinfo_by_vol(FILE *fp, glusterd_volinfo_t *volinfo,
glusterd_volinfo_t *tmp_vol = NULL;
glusterd_snap_t *snapinfo = NULL;
int snapcount = 0;
- char timestr[64] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char snap_status_str[STATUS_STRLEN] = {
@@ -5313,19 +5376,30 @@ glusterd_print_client_details(FILE *fp, dict_t *dict,
brick_req->op = GLUSTERD_BRICK_STATUS;
brick_req->name = "";
+ brick_req->dict.dict_val = NULL;
+ brick_req->dict.dict_len = 0;
ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"),
brickinfo->path);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=brick-name", NULL);
goto out;
+ }
ret = dict_set_int32n(dict, "cmd", SLEN("cmd"), GF_CLI_STATUS_CLIENTS);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cmd", NULL);
goto out;
+ }
ret = dict_set_strn(dict, "volname", SLEN("volname"), volinfo->volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=volname", NULL);
goto out;
+ }
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
&brick_req->input.input_len);
@@ -5456,14 +5530,11 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
uint32_t get_state_cmd = 0;
uint64_t memtotal = 0;
uint64_t memfree = 0;
- int start_index = 0;
char id_str[64] = {
0,
};
char *vol_type_str = NULL;
- char *hot_tier_type_str = NULL;
- char *cold_tier_type_str = NULL;
char transport_type_str[STATUS_STRLEN] = {
0,
@@ -5477,7 +5548,9 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
char vol_status_str[STATUS_STRLEN] = {
0,
};
-
+ char brick_status_str[STATUS_STRLEN] = {
+ 0,
+ };
this = THIS;
GF_VALIDATE_OR_GOTO(THIS->name, this, out);
@@ -5520,7 +5593,7 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
ret = dict_get_strn(dict, "filename", SLEN("filename"), &tmp_str);
if (ret) {
- now = time(NULL);
+ now = gf_time();
strftime(timestamp, sizeof(timestamp), "%Y%m%d_%H%M%S",
localtime(&now));
gf_asprintf(&filename, "%s_%s", "glusterd_state", timestamp);
@@ -5531,10 +5604,9 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
gf_asprintf(&filename, "%s", tmp_str);
}
- if (odir[odirlen - 1] != '/')
- strcat(odir, "/");
+ ret = gf_asprintf(&ofilepath, "%s%s%s", odir,
+ ((odir[odirlen - 1] != '/') ? "/" : ""), filename);
- ret = gf_asprintf(&ofilepath, "%s%s", odir, filename);
if (ret < 0) {
GF_FREE(odir);
GF_FREE(filename);
@@ -5586,6 +5658,8 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
"fetch the value of all volume options "
"for volume %s",
volinfo->volname);
+ if (vol_all_opts)
+ dict_unref(vol_all_opts);
continue;
}
@@ -5610,8 +5684,8 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
if (priv->opts)
dict_foreach(priv->opts, glusterd_print_global_options, fp);
- rcu_read_lock();
fprintf(fp, "\n[Peers]\n");
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
@@ -5640,7 +5714,7 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
count_bkp = 0;
fprintf(fp, "\n");
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
count = 0;
fprintf(fp, "\n[Volumes]\n");
@@ -5709,26 +5783,11 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
brickinfo->hostname);
/* Determine which one is the arbiter brick */
if (volinfo->arbiter_count == 1) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- if (volinfo->tier_info.cold_replica_count != 1) {
- start_index = volinfo->tier_info.hot_brick_count + 1;
- if (count >= start_index &&
- ((count - start_index + 1) %
- volinfo->tier_info.cold_replica_count ==
- 0)) {
- fprintf(fp,
- "Volume%d.Brick%d."
- "is_arbiter: 1\n",
- count_bkp, count);
- }
- }
- } else {
- if (count % volinfo->replica_count == 0) {
- fprintf(fp,
- "Volume%d.Brick%d."
- "is_arbiter: 1\n",
- count_bkp, count);
- }
+ if (count % volinfo->replica_count == 0) {
+ fprintf(fp,
+ "Volume%d.Brick%d."
+ "is_arbiter: 1\n",
+ count_bkp, count);
}
}
/* Add following information only for bricks
@@ -5741,27 +5800,21 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
brickinfo->rdma_port);
fprintf(fp, "Volume%d.Brick%d.port_registered: %d\n", count_bkp,
count, brickinfo->port_registered);
+ glusterd_brick_get_status_str(brickinfo, brick_status_str);
fprintf(fp, "Volume%d.Brick%d.status: %s\n", count_bkp, count,
- brickinfo->status ? "Started" : "Stopped");
-
- /*FIXME: This is a hacky way of figuring out whether a
- * brick belongs to the hot or cold tier */
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- fprintf(fp, "Volume%d.Brick%d.tier: %s\n", count_bkp, count,
- count <= volinfo->tier_info.hot_brick_count ? "Hot"
- : "Cold");
- }
+ brick_status_str);
ret = sys_statvfs(brickinfo->path, &brickstat);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
"statfs error: %s ", strerror(errno));
- goto out;
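+            /* Do not abort the whole state dump because one brick's
+             * filesystem could not be examined; report zero sizes instead. */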
+ memfree = 0;
+ memtotal = 0;
+ } else {
+ memfree = brickstat.f_bfree * brickstat.f_bsize;
+ memtotal = brickstat.f_blocks * brickstat.f_bsize;
}
- memfree = brickstat.f_bfree * brickstat.f_bsize;
- memtotal = brickstat.f_blocks * brickstat.f_bsize;
-
fprintf(fp, "Volume%d.Brick%d.spacefree: %" PRIu64 "Bytes\n",
count_bkp, count, memfree);
fprintf(fp, "Volume%d.Brick%d.spacetotal: %" PRIu64 "Bytes\n",
@@ -5827,50 +5880,10 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
GF_FREE(rebal_data);
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_volume_get_hot_tier_type_str(volinfo,
- &hot_tier_type_str);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get hot tier type for "
- "volume: %s",
- volinfo->volname);
- goto out;
- }
-
- ret = glusterd_volume_get_cold_tier_type_str(volinfo,
- &cold_tier_type_str);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get cold tier type for "
- "volume: %s",
- volinfo->volname);
- goto out;
- }
-
- fprintf(fp, "Volume%d.tier_info.cold_tier_type: %s\n", count,
- cold_tier_type_str);
- fprintf(fp, "Volume%d.tier_info.cold_brick_count: %d\n", count,
- volinfo->tier_info.cold_brick_count);
- fprintf(fp, "Volume%d.tier_info.cold_replica_count: %d\n", count,
- volinfo->tier_info.cold_replica_count);
- fprintf(fp, "Volume%d.tier_info.cold_disperse_count: %d\n", count,
- volinfo->tier_info.cold_disperse_count);
- fprintf(fp, "Volume%d.tier_info.cold_dist_leaf_count: %d\n", count,
- volinfo->tier_info.cold_dist_leaf_count);
- fprintf(fp, "Volume%d.tier_info.cold_redundancy_count: %d\n", count,
- volinfo->tier_info.cold_redundancy_count);
- fprintf(fp, "Volume%d.tier_info.hot_tier_type: %s\n", count,
- hot_tier_type_str);
- fprintf(fp, "Volume%d.tier_info.hot_brick_count: %d\n", count,
- volinfo->tier_info.hot_brick_count);
- fprintf(fp, "Volume%d.tier_info.hot_replica_count: %d\n", count,
- volinfo->tier_info.hot_replica_count);
- fprintf(fp, "Volume%d.tier_info.promoted: %d\n", count,
- volinfo->tier_info.promoted);
- fprintf(fp, "Volume%d.tier_info.demoted: %d\n", count,
- volinfo->tier_info.demoted);
- }
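+        /* shd now runs as a per-volume service, so its status is reported
+         * here with the volume instead of in the [Services] section. */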
+ fprintf(fp, "Volume%d.shd_svc.online_status: %s\n", count,
+ volinfo->shd.svc.online ? "Online" : "Offline");
+ fprintf(fp, "Volume%d.shd_svc.inited: %s\n", count,
+ volinfo->shd.svc.inited ? "True" : "False");
if (volinfo->rep_brick.src_brick && volinfo->rep_brick.dst_brick) {
fprintf(fp, "Volume%d.replace_brick.src: %s:%s\n", count,
@@ -5895,19 +5908,13 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
count = 0;
fprintf(fp, "\n[Services]\n");
-
- if (priv->shd_svc.inited) {
- fprintf(fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name);
- fprintf(fp, "svc%d.online_status: %s\n\n", count,
- priv->shd_svc.online ? "Online" : "Offline");
- }
-
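+    /* gNFS is optional at build time; only report the NFS service when it
+     * is compiled in. */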
+#ifdef BUILD_GNFS
if (priv->nfs_svc.inited) {
fprintf(fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
fprintf(fp, "svc%d.online_status: %s\n\n", count,
priv->nfs_svc.online ? "Online" : "Offline");
}
-
+#endif
if (priv->bitd_svc.inited) {
fprintf(fp, "svc%d.name: %s\n", ++count, priv->bitd_svc.name);
fprintf(fp, "svc%d.online_status: %s\n\n", count,
@@ -5943,6 +5950,7 @@ out:
ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
+ GF_FREE(rsp.dict.dict_val);
return ret;
}
@@ -6029,14 +6037,27 @@ get_brickinfo_from_brickid(char *brickid, glusterd_brickinfo_t **brickinfo)
uuid_t volid = {0};
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+
brickid_dup = gf_strdup(brickid);
- if (!brickid_dup)
+ if (!brickid_dup) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "brick_id=%s", brickid, NULL);
goto out;
+ }
volid_str = brickid_dup;
brick = strchr(brickid_dup, ':');
- if (!volid_str || !brick)
+ if (!volid_str) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
+ goto out;
+ }
+
+ if (!brick) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
goto out;
+ }
*brick = '\0';
brick++;
@@ -6062,43 +6083,6 @@ out:
static int gd_stale_rpc_disconnect_log;
-static int
-glusterd_mark_bricks_stopped_by_proc(glusterd_brick_proc_t *brick_proc)
-{
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *brickinfo_tmp = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- int ret = -1;
-
- cds_list_for_each_entry(brickinfo, &brick_proc->bricks, brick_list)
- {
- ret = glusterd_get_volinfo_from_brick(brickinfo->path, &volinfo);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
- "Failed to get volinfo"
- " from brick(%s)",
- brickinfo->path);
- goto out;
- }
- cds_list_for_each_entry(brickinfo_tmp, &volinfo->bricks, brick_list)
- {
- if (strcmp(brickinfo->path, brickinfo_tmp->path) == 0) {
- glusterd_set_brick_status(brickinfo_tmp, GF_BRICK_STOPPED);
- brickinfo_tmp->start_triggered = _gf_false;
- /* When bricks are stopped, ports also need to
- * be cleaned up
- */
- pmap_registry_remove(THIS, brickinfo_tmp->port,
- brickinfo_tmp->path,
- GF_PMAP_PORT_BRICKSERVER, NULL, _gf_true);
- }
- }
- }
- return 0;
-out:
- return ret;
-}
-
int
__glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data)
@@ -6109,11 +6093,12 @@ __glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_volinfo_t *volinfo = NULL;
xlator_t *this = NULL;
- int brick_proc_found = 0;
int32_t pid = -1;
glusterd_brickinfo_t *brickinfo_tmp = NULL;
glusterd_brick_proc_t *brick_proc = NULL;
char pidfile[PATH_MAX] = {0};
+ char *brickpath = NULL;
+ gf_boolean_t is_service_running = _gf_true;
brickid = mydata;
if (!brickid)
@@ -6207,10 +6192,16 @@ __glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
/* In case of an abrupt shutdown of a brick PMAP_SIGNOUT
* event is not received by glusterd which can lead to a
* stale port entry in glusterd, so forcibly clean up
- * the same if the process is not running
+         * the same if the process is not running. gf_is_service_running()
+         * can sometimes report the process as running even though the brick
+         * is gone, so call search_brick_path_from_proc() to confirm that
+         * the brick instance is actually present.
*/
GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo, conf);
- if (!gf_is_service_running(pidfile, &pid)) {
+ is_service_running = gf_is_service_running(pidfile, &pid);
+ if (pid > 0)
+ brickpath = search_brick_path_from_proc(pid,
+ brickinfo->path);
+ if (!is_service_running || !brickpath) {
ret = pmap_registry_remove(
THIS, brickinfo->port, brickinfo->path,
GF_PMAP_PORT_BRICKSERVER, NULL, _gf_true);
@@ -6226,32 +6217,24 @@ __glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
}
}
- if (is_brick_mx_enabled()) {
- cds_list_for_each_entry(brick_proc, &conf->brick_procs,
- brick_proc_list)
+ if (brickpath)
+ GF_FREE(brickpath);
+
+ if (is_brick_mx_enabled() && glusterd_is_brick_started(brickinfo)) {
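+            /* With brick multiplexing, every brick attached to this process
+             * shares its fate, so mark each of them stopped below. */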
+ brick_proc = brickinfo->brick_proc;
+ if (!brick_proc)
+ break;
+ cds_list_for_each_entry(brickinfo_tmp, &brick_proc->bricks,
+ mux_bricks)
{
- cds_list_for_each_entry(brickinfo_tmp, &brick_proc->bricks,
- brick_list)
- {
- if (strcmp(brickinfo_tmp->path, brickinfo->path) == 0) {
- ret = glusterd_mark_bricks_stopped_by_proc(
- brick_proc);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_STOP_FAIL,
- "Unable to stop "
- "bricks of process"
- " to which brick(%s)"
- " belongs",
- brickinfo->path);
- goto out;
- }
- brick_proc_found = 1;
- break;
- }
- }
- if (brick_proc_found == 1)
- break;
+ glusterd_set_brick_status(brickinfo_tmp, GF_BRICK_STOPPED);
+ brickinfo_tmp->start_triggered = _gf_false;
+ /* When bricks are stopped, ports also need to
+ * be cleaned up
+ */
+ pmap_registry_remove(
+ THIS, brickinfo_tmp->port, brickinfo_tmp->path,
+ GF_PMAP_PORT_BRICKSERVER, NULL, _gf_true);
}
} else {
glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPED);
@@ -6292,7 +6275,7 @@ glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
GF_ASSERT(peerctx);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
gf_msg_debug(THIS->name, 0,
@@ -6332,7 +6315,7 @@ glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -6348,6 +6331,8 @@ __glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
glusterd_peerctx_t *peerctx = NULL;
gf_boolean_t quorum_action = _gf_false;
glusterd_volinfo_t *volinfo = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+
uuid_t uuid;
peerctx = mydata;
@@ -6368,8 +6353,16 @@ __glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
default:
break;
}
-
- rcu_read_lock();
+ ctx = this->ctx;
+ GF_VALIDATE_OR_GOTO(this->name, ctx, out);
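+    /* During shutdown, peer RPC events can race with the teardown of
+     * peerinfo structures; drop them once cleanup has started. */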
+ if (ctx->cleanup_started) {
+ gf_log(this->name, GF_LOG_INFO,
+ "glusterd already received a SIGTERM, "
+ "dropping the event %d for peer %s",
+ event, peerctx->peername);
+ return 0;
+ }
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -6482,7 +6475,7 @@ __glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
glusterd_friend_sm();
glusterd_op_sm();
@@ -6505,20 +6498,26 @@ glusterd_null(rpcsvc_request_t *req)
return 0;
}
-rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
- [GLUSTERD_MGMT_NULL] = {"NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0,
- DRC_NA},
- [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK,
- glusterd_handle_cluster_lock, NULL, 0,
- DRC_NA},
+static rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
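+    /* Initializer order follows the reworked rpcsvc_actor_t layout:
+     * procname, actor, vector sizer, procnum, DRC op type, unprivileged. */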
+ [GLUSTERD_MGMT_NULL] = {"NULL", glusterd_null, NULL, GLUSTERD_MGMT_NULL,
+ DRC_NA, 0},
+ [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK",
+ glusterd_handle_cluster_lock, NULL,
+ GLUSTERD_MGMT_CLUSTER_LOCK, DRC_NA, 0},
[GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK",
- GLUSTERD_MGMT_CLUSTER_UNLOCK,
- glusterd_handle_cluster_unlock, NULL, 0,
- DRC_NA},
- [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", GLUSTERD_MGMT_STAGE_OP,
- glusterd_handle_stage_op, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP,
- glusterd_handle_commit_op, NULL, 0, DRC_NA},
+ glusterd_handle_cluster_unlock, NULL,
+ GLUSTERD_MGMT_CLUSTER_UNLOCK, DRC_NA, 0},
+ [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd_handle_stage_op, NULL,
+ GLUSTERD_MGMT_STAGE_OP, DRC_NA, 0},
+ [GLUSTERD_MGMT_COMMIT_OP] =
+ {
+ "COMMIT_OP",
+ glusterd_handle_commit_op,
+ NULL,
+ GLUSTERD_MGMT_COMMIT_OP,
+ DRC_NA,
+ 0,
+ },
};
struct rpcsvc_program gd_svc_mgmt_prog = {
@@ -6530,19 +6529,18 @@ struct rpcsvc_program gd_svc_mgmt_prog = {
.synctask = _gf_true,
};
-rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
- [GLUSTERD_FRIEND_NULL] = {"NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL,
- 0, DRC_NA},
- [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", GLUSTERD_PROBE_QUERY,
- glusterd_handle_probe_query, NULL, 0, DRC_NA},
- [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", GLUSTERD_FRIEND_ADD,
- glusterd_handle_incoming_friend_req, NULL, 0,
- DRC_NA},
- [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE,
- glusterd_handle_incoming_unfriend_req, NULL, 0,
- DRC_NA},
- [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE,
- glusterd_handle_friend_update, NULL, 0, DRC_NA},
+static rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
+ [GLUSTERD_FRIEND_NULL] = {"NULL", glusterd_null, NULL, GLUSTERD_MGMT_NULL,
+ DRC_NA, 0},
+ [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", glusterd_handle_probe_query, NULL,
+ GLUSTERD_PROBE_QUERY, DRC_NA, 0},
+ [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", glusterd_handle_incoming_friend_req,
+ NULL, GLUSTERD_FRIEND_ADD, DRC_NA, 0},
+ [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE",
+ glusterd_handle_incoming_unfriend_req, NULL,
+ GLUSTERD_FRIEND_REMOVE, DRC_NA, 0},
+ [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", glusterd_handle_friend_update,
+ NULL, GLUSTERD_FRIEND_UPDATE, DRC_NA, 0},
};
struct rpcsvc_program gd_svc_peer_prog = {
@@ -6554,116 +6552,109 @@ struct rpcsvc_program gd_svc_peer_prog = {
.synctask = _gf_false,
};
-rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
- [GLUSTER_CLI_PROBE] = {"CLI_PROBE", GLUSTER_CLI_PROBE,
- glusterd_handle_cli_probe, NULL, 0, DRC_NA},
+static rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
+ [GLUSTER_CLI_PROBE] = {"CLI_PROBE", glusterd_handle_cli_probe, NULL,
+ GLUSTER_CLI_PROBE, DRC_NA, 0},
[GLUSTER_CLI_CREATE_VOLUME] = {"CLI_CREATE_VOLUME",
- GLUSTER_CLI_CREATE_VOLUME,
- glusterd_handle_create_volume, NULL, 0,
- DRC_NA},
+ glusterd_handle_create_volume, NULL,
+ GLUSTER_CLI_CREATE_VOLUME, DRC_NA, 0},
[GLUSTER_CLI_DEFRAG_VOLUME] = {"CLI_DEFRAG_VOLUME",
- GLUSTER_CLI_DEFRAG_VOLUME,
- glusterd_handle_defrag_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_DEPROBE] = {"FRIEND_REMOVE", GLUSTER_CLI_DEPROBE,
- glusterd_handle_cli_deprobe, NULL, 0, DRC_NA},
- [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS,
- glusterd_handle_cli_list_friends, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", GLUSTER_CLI_UUID_RESET,
- glusterd_handle_cli_uuid_reset, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_UUID_GET] = {"UUID_GET", GLUSTER_CLI_UUID_GET,
- glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
- [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", GLUSTER_CLI_START_VOLUME,
- glusterd_handle_cli_start_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME,
- glusterd_handle_cli_stop_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME,
- glusterd_handle_cli_delete_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", GLUSTER_CLI_GET_VOLUME,
- glusterd_handle_cli_get_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", GLUSTER_CLI_ADD_BRICK,
- glusterd_handle_add_brick, NULL, 0, DRC_NA},
- [GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", GLUSTER_CLI_ATTACH_TIER,
- glusterd_handle_attach_tier, NULL, 0, DRC_NA},
- [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK,
- glusterd_handle_replace_brick, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK,
- glusterd_handle_remove_brick, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_LOG_ROTATE] = {"LOG FILENAME", GLUSTER_CLI_LOG_ROTATE,
- glusterd_handle_log_rotate, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", GLUSTER_CLI_SET_VOLUME,
- glusterd_handle_set_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME,
- glusterd_handle_sync_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME,
- glusterd_handle_reset_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", GLUSTER_CLI_FSM_LOG,
- glusterd_handle_fsm_log, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", GLUSTER_CLI_GSYNC_SET,
- glusterd_handle_gsync_set, NULL, 0, DRC_NA},
- [GLUSTER_CLI_PROFILE_VOLUME] = {"STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME,
- glusterd_handle_cli_profile_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_QUOTA] = {"QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota,
- NULL, 0, DRC_NA},
- [GLUSTER_CLI_GETWD] = {"GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd,
- NULL, 1, DRC_NA},
- [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME,
- glusterd_handle_status_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_MOUNT] = {"MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount,
- NULL, 1, DRC_NA},
- [GLUSTER_CLI_UMOUNT] = {"UMOUNT", GLUSTER_CLI_UMOUNT,
- glusterd_handle_umount, NULL, 1, DRC_NA},
- [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME,
- glusterd_handle_cli_heal_volume, NULL, 0,
- DRC_NA},
+ glusterd_handle_defrag_volume, NULL,
+ GLUSTER_CLI_DEFRAG_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_DEPROBE] = {"FRIEND_REMOVE", glusterd_handle_cli_deprobe, NULL,
+ GLUSTER_CLI_DEPROBE, DRC_NA, 0},
+ [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS",
+ glusterd_handle_cli_list_friends, NULL,
+ GLUSTER_CLI_LIST_FRIENDS, DRC_NA, 0},
+ [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", glusterd_handle_cli_uuid_reset,
+ NULL, GLUSTER_CLI_UUID_RESET, DRC_NA, 0},
+ [GLUSTER_CLI_UUID_GET] = {"UUID_GET", glusterd_handle_cli_uuid_get, NULL,
+ GLUSTER_CLI_UUID_GET, DRC_NA, 0},
+ [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME",
+ glusterd_handle_cli_start_volume, NULL,
+ GLUSTER_CLI_START_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", glusterd_handle_cli_stop_volume,
+ NULL, GLUSTER_CLI_STOP_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME",
+ glusterd_handle_cli_delete_volume, NULL,
+ GLUSTER_CLI_DELETE_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", glusterd_handle_cli_get_volume,
+ NULL, GLUSTER_CLI_GET_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", glusterd_handle_add_brick, NULL,
+ GLUSTER_CLI_ADD_BRICK, DRC_NA, 0},
+ [GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", glusterd_handle_attach_tier,
+ NULL, GLUSTER_CLI_ATTACH_TIER, DRC_NA, 0},
+ [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK",
+ glusterd_handle_replace_brick, NULL,
+ GLUSTER_CLI_REPLACE_BRICK, DRC_NA, 0},
+ [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", glusterd_handle_remove_brick,
+ NULL, GLUSTER_CLI_REMOVE_BRICK, DRC_NA, 0},
+ [GLUSTER_CLI_LOG_ROTATE] = {"LOG FILENAME", glusterd_handle_log_rotate,
+ NULL, GLUSTER_CLI_LOG_ROTATE, DRC_NA, 0},
+ [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", glusterd_handle_set_volume, NULL,
+ GLUSTER_CLI_SET_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", glusterd_handle_sync_volume,
+ NULL, GLUSTER_CLI_SYNC_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", glusterd_handle_reset_volume,
+ NULL, GLUSTER_CLI_RESET_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", glusterd_handle_fsm_log, NULL,
+ GLUSTER_CLI_FSM_LOG, DRC_NA, 0},
+ [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", glusterd_handle_gsync_set, NULL,
+ GLUSTER_CLI_GSYNC_SET, DRC_NA, 0},
+ [GLUSTER_CLI_PROFILE_VOLUME] = {"STATS_VOLUME",
+ glusterd_handle_cli_profile_volume, NULL,
+ GLUSTER_CLI_PROFILE_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_QUOTA] = {"QUOTA", glusterd_handle_quota, NULL,
+ GLUSTER_CLI_QUOTA, DRC_NA, 0},
+ [GLUSTER_CLI_GETWD] = {"GETWD", glusterd_handle_getwd, NULL,
+ GLUSTER_CLI_GETWD, DRC_NA, 1},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME",
+ glusterd_handle_status_volume, NULL,
+ GLUSTER_CLI_STATUS_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_MOUNT] = {"MOUNT", glusterd_handle_mount, NULL,
+ GLUSTER_CLI_MOUNT, DRC_NA, 1},
+ [GLUSTER_CLI_UMOUNT] = {"UMOUNT", glusterd_handle_umount, NULL,
+ GLUSTER_CLI_UMOUNT, DRC_NA, 1},
+ [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", glusterd_handle_cli_heal_volume,
+ NULL, GLUSTER_CLI_HEAL_VOLUME, DRC_NA, 0},
[GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME",
- GLUSTER_CLI_STATEDUMP_VOLUME,
glusterd_handle_cli_statedump_volume,
- NULL, 0, DRC_NA},
- [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME,
- glusterd_handle_cli_list_volume, NULL, 0,
- DRC_NA},
+ NULL, GLUSTER_CLI_STATEDUMP_VOLUME,
+ DRC_NA, 0},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", glusterd_handle_cli_list_volume,
+ NULL, GLUSTER_CLI_LIST_VOLUME, DRC_NA, 0},
[GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME",
- GLUSTER_CLI_CLRLOCKS_VOLUME,
glusterd_handle_cli_clearlocks_volume,
- NULL, 0, DRC_NA},
- [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE,
- glusterd_handle_copy_file, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC,
- glusterd_handle_sys_exec, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot,
- NULL, 0, DRC_NA},
- [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME",
- GLUSTER_CLI_BARRIER_VOLUME,
- glusterd_handle_barrier, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT,
- glusterd_handle_get_vol_opt, NULL, 0, DRC_NA},
- [GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT,
- glusterd_handle_bitrot, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_STATE] = {"GET_STATE", GLUSTER_CLI_GET_STATE,
- glusterd_handle_get_state, NULL, 0, DRC_NA},
- [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", GLUSTER_CLI_RESET_BRICK,
- glusterd_handle_reset_brick, NULL, 0, DRC_NA},
- [GLUSTER_CLI_TIER] = {"TIER", GLUSTER_CLI_TIER, glusterd_handle_tier, NULL,
- 0, DRC_NA},
+ NULL, GLUSTER_CLI_CLRLOCKS_VOLUME, DRC_NA,
+ 0},
+ [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", glusterd_handle_copy_file, NULL,
+ GLUSTER_CLI_COPY_FILE, DRC_NA, 0},
+ [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", glusterd_handle_sys_exec, NULL,
+ GLUSTER_CLI_SYS_EXEC, DRC_NA, 0},
+ [GLUSTER_CLI_SNAP] = {"SNAP", glusterd_handle_snapshot, NULL,
+ GLUSTER_CLI_SNAP, DRC_NA, 0},
+ [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", glusterd_handle_barrier,
+ NULL, GLUSTER_CLI_BARRIER_VOLUME, DRC_NA,
+ 0},
+ [GLUSTER_CLI_GANESHA] = {"GANESHA", glusterd_handle_ganesha_cmd, NULL,
+ GLUSTER_CLI_GANESHA, DRC_NA, 0},
+ [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", glusterd_handle_get_vol_opt,
+                                 NULL, GLUSTER_CLI_GET_VOL_OPT, DRC_NA, 0},
+ [GLUSTER_CLI_BITROT] = {"BITROT", glusterd_handle_bitrot, NULL,
+ GLUSTER_CLI_BITROT, DRC_NA, 0},
+ [GLUSTER_CLI_GET_STATE] = {"GET_STATE", glusterd_handle_get_state, NULL,
+ GLUSTER_CLI_GET_STATE, DRC_NA, 0},
+ [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", glusterd_handle_reset_brick,
+ NULL, GLUSTER_CLI_RESET_BRICK, DRC_NA, 0},
+ [GLUSTER_CLI_TIER] = {"TIER", glusterd_handle_tier, NULL, GLUSTER_CLI_TIER,
+ DRC_NA, 0},
[GLUSTER_CLI_REMOVE_TIER_BRICK] = {"REMOVE_TIER_BRICK",
- GLUSTER_CLI_REMOVE_TIER_BRICK,
- glusterd_handle_tier, NULL, 0, DRC_NA},
+ glusterd_handle_tier, NULL,
+ GLUSTER_CLI_REMOVE_TIER_BRICK, DRC_NA,
+ 0},
[GLUSTER_CLI_ADD_TIER_BRICK] = {"ADD_TIER_BRICK",
- GLUSTER_CLI_ADD_TIER_BRICK,
- glusterd_handle_add_tier_brick, NULL, 0,
- DRC_NA},
+ glusterd_handle_add_tier_brick, NULL,
+ GLUSTER_CLI_ADD_TIER_BRICK, DRC_NA, 0},
};
struct rpcsvc_program gd_svc_cli_prog = {
@@ -6680,27 +6671,25 @@ struct rpcsvc_program gd_svc_cli_prog = {
* read only queries, the only exception being MOUNT/UMOUNT which is required
* by geo-replication to support unprivileged master -> slave sessions.
*/
-rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
- [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS,
- glusterd_handle_cli_list_friends, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_UUID_GET] = {"UUID_GET", GLUSTER_CLI_UUID_GET,
- glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", GLUSTER_CLI_GET_VOLUME,
- glusterd_handle_cli_get_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_GETWD] = {"GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd,
- NULL, 1, DRC_NA},
- [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME,
- glusterd_handle_status_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME,
- glusterd_handle_cli_list_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_MOUNT] = {"MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount,
- NULL, 1, DRC_NA},
- [GLUSTER_CLI_UMOUNT] = {"UMOUNT", GLUSTER_CLI_UMOUNT,
- glusterd_handle_umount, NULL, 1, DRC_NA},
+static rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
+ [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS",
+ glusterd_handle_cli_list_friends, NULL,
+ GLUSTER_CLI_LIST_FRIENDS, DRC_NA, 0},
+ [GLUSTER_CLI_UUID_GET] = {"UUID_GET", glusterd_handle_cli_uuid_get, NULL,
+ GLUSTER_CLI_UUID_GET, DRC_NA, 0},
+ [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", glusterd_handle_cli_get_volume,
+ NULL, GLUSTER_CLI_GET_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_GETWD] = {"GETWD", glusterd_handle_getwd, NULL,
+ GLUSTER_CLI_GETWD, DRC_NA, 1},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME",
+ glusterd_handle_status_volume, NULL,
+ GLUSTER_CLI_STATUS_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", glusterd_handle_cli_list_volume,
+ NULL, GLUSTER_CLI_LIST_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_MOUNT] = {"MOUNT", glusterd_handle_mount, NULL,
+ GLUSTER_CLI_MOUNT, DRC_NA, 1},
+ [GLUSTER_CLI_UMOUNT] = {"UMOUNT", glusterd_handle_umount, NULL,
+ GLUSTER_CLI_UMOUNT, DRC_NA, 1},
};
struct rpcsvc_program gd_svc_cli_trusted_progs = {
@@ -6711,3 +6700,14 @@ struct rpcsvc_program gd_svc_cli_trusted_progs = {
.actors = gd_svc_cli_trusted_actors,
.synctask = _gf_true,
};
+
+/* The tier handlers cannot be removed from the RPC actor tables, so they
+ * are kept here as no-ops now that glusterd-tier.c and the other tier
+ * source files are gone.
+ */
+
+int
+glusterd_handle_tier(rpcsvc_request_t *req)
+{
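+    /* Tier support has been removed; treat the request as a no-op. */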
+ return 0;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 53b500f4986..d96e35503dd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -8,11 +8,11 @@
cases as published by the Free Software Foundation.
*/
-#include "xlator.h"
-#include "defaults.h"
-#include "glusterfs.h"
-#include "syscall.h"
-#include "compat-errno.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/compat-errno.h>
#include "glusterd.h"
#include "glusterd-utils.h"
@@ -21,7 +21,6 @@
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-snapd-svc-helper.h"
-#include "glusterd-tierd-svc-helper.h"
#include "glusterd-volgen.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-messages.h"
@@ -30,6 +29,7 @@
#include "rpcsvc.h"
#include "rpc-common-xdr.h"
#include "glusterd-gfproxyd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
extern struct rpc_clnt_program gd_peer_prog;
extern struct rpc_clnt_program gd_mgmt_prog;
@@ -111,6 +111,8 @@ get_snap_volname_and_volinfo(const char *volpath, char **volname,
volfile_token = strtok_r(NULL, "/", &save_ptr);
*volname = gf_strdup(volfile_token);
if (NULL == *volname) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "Volname=%s", volfile_token, NULL);
ret = -1;
goto out;
}
@@ -202,7 +204,7 @@ out:
size_t
build_volfile_path(char *volume_id, char *path, size_t path_len,
- char *trusted_str)
+ char *trusted_str, dict_t *dict)
{
struct stat stbuf = {
0,
@@ -236,6 +238,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
if (volid_ptr) {
volid_ptr = strchr(volid_ptr, '/');
if (!volid_ptr) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
@@ -252,45 +255,49 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
goto out;
}
- volid_ptr = strstr(volume_id, "tierd/");
+ volid_ptr = strstr(volume_id, "gluster/");
if (volid_ptr) {
volid_ptr = strchr(volid_ptr, '/');
if (!volid_ptr) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
volid_ptr++;
- ret = glusterd_volinfo_find(volid_ptr, &volinfo);
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
- "Couldn't find volinfo");
- goto out;
- }
- glusterd_svc_build_tierd_volfile_path(volinfo, path, path_len);
+ glusterd_svc_build_volfile_path(volid_ptr, priv->workdir, path,
+ path_len);
ret = 0;
goto out;
}
- volid_ptr = strstr(volume_id, "gluster/");
+ volid_ptr = strstr(volume_id, "gfproxy-client/");
if (volid_ptr) {
volid_ptr = strchr(volid_ptr, '/');
if (!volid_ptr) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
volid_ptr++;
- glusterd_svc_build_volfile_path(volid_ptr, priv->workdir, path,
- path_len);
+ ret = glusterd_volinfo_find(volid_ptr, &volinfo);
+ if (ret == -1) {
+ gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo");
+ goto out;
+ }
+
+ glusterd_get_gfproxy_client_volfile(volinfo, path, path_len);
+
ret = 0;
goto out;
}
- volid_ptr = strstr(volume_id, "gfproxy-client/");
+ volid_ptr = strstr(volume_id, "gfproxyd/");
if (volid_ptr) {
volid_ptr = strchr(volid_ptr, '/');
if (!volid_ptr) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
@@ -302,16 +309,16 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
goto out;
}
- glusterd_get_gfproxy_client_volfile(volinfo, path, path_len);
-
+ glusterd_svc_build_gfproxyd_volfile_path(volinfo, path, path_len);
ret = 0;
goto out;
}
- volid_ptr = strstr(volume_id, "gfproxyd/");
+ volid_ptr = strstr(volume_id, "shd/");
if (volid_ptr) {
volid_ptr = strchr(volid_ptr, '/');
if (!volid_ptr) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
@@ -319,11 +326,19 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
ret = glusterd_volinfo_find(volid_ptr, &volinfo);
if (ret == -1) {
- gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Couldn't find volinfo for volid=%s", volid_ptr);
goto out;
}
- glusterd_svc_build_gfproxyd_volfile_path(volinfo, path, path_len);
+ glusterd_svc_build_shd_volfile_path(volinfo, path, path_len);
+
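+        /* Pass the shd pidfile back through the response dict so that the
+         * volfile client knows where the daemon records its pid. */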
+ ret = glusterd_svc_set_shd_pidfile(volinfo, dict);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Couldn't set pidfile in dict for volid=%s", volid_ptr);
+ goto out;
+ }
ret = 0;
goto out;
}
@@ -358,6 +373,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
if (volid_ptr) {
volid_ptr = strchr(volid_ptr, '/');
if (!volid_ptr) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
@@ -378,6 +394,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
if (volid_ptr) {
volid_ptr = strchr(volid_ptr, '/');
if (!volid_ptr) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
@@ -394,6 +411,8 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
/* Split the volume name */
vol = strtok_r(dup_volname, ".", &save_ptr);
if (!vol) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SPLIT_FAIL,
+ "Volume name=%s", dup_volname, NULL);
ret = -1;
goto out;
}
@@ -438,18 +457,25 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
if (ret) {
dup_volname = gf_strdup(volid_ptr);
if (!dup_volname) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "Volume name=%s", volid_ptr, NULL);
ret = -1;
goto out;
}
/* Split the volume name */
vol = strtok_r(dup_volname, ".", &save_ptr);
if (!vol) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SPLIT_FAIL,
+ "Volume name=%s", dup_volname, NULL);
ret = -1;
goto out;
}
ret = glusterd_volinfo_find(vol, &volinfo);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL,
+ NULL);
goto out;
+ }
}
gotvolinfo:
@@ -458,8 +484,10 @@ gotvolinfo:
ret = snprintf(path, path_len, "%s/%s/%s.vol", path_prefix,
volinfo->volname, volid_ptr);
- if (ret == -1)
+ if (ret == -1) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
+ }
ret = sys_stat(path, &stbuf);
@@ -514,12 +542,14 @@ glusterd_get_args_from_dict(gf_getspec_req *args, peer_info_t *peerinfo,
GF_ASSERT(peerinfo);
if (!args->xdata.xdata_len) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
ret = 0;
goto out;
}
dict = dict_new();
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -553,6 +583,8 @@ glusterd_get_args_from_dict(gf_getspec_req *args, peer_info_t *peerinfo,
}
*brick_name = gf_strdup(name);
if (*brick_name == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "Brick_name=%s", name, NULL);
ret = -1;
goto out;
}
@@ -898,14 +930,27 @@ __server_getspec(rpcsvc_request_t *req)
char addrstr[RPCSVC_PEER_STRLEN] = {0};
peer_info_t *peerinfo = NULL;
xlator_t *this = NULL;
+ dict_t *dict = NULL;
+ glusterd_peerinfo_t *peer = NULL;
+ glusterd_conf_t *conf = NULL;
+ int peer_cnt = 0;
+ char *peer_hosts = NULL;
+ char *tmp_str = NULL;
+ char portstr[10] = {
+ 0,
+ };
+ int len = 0;
this = THIS;
GF_ASSERT(this);
+ conf = this->private;
ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_getspec_req);
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode the message");
goto fail;
}
@@ -920,6 +965,9 @@ __server_getspec(rpcsvc_request_t *req)
goto fail;
}
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_MOUNT_REQ_RCVD,
+ "Received mount request for volume %s", volume);
+
/* Need to strip leading '/' from volnames. This was introduced to
* support nfs style mount parameters for native gluster mount
*/
@@ -931,7 +979,7 @@ __server_getspec(rpcsvc_request_t *req)
volume);
if (ret < 0 || ret >= sizeof(peerinfo->volname)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
- "peerinfo->volname %s truncated or error occured: "
+ "peerinfo->volname %s truncated or error occurred: "
"(ret: %d)",
peerinfo->volname, ret);
ret = -1;
@@ -950,11 +998,22 @@ __server_getspec(rpcsvc_request_t *req)
goto fail;
}
+ dict = dict_new();
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
trans = req->trans;
/* addrstr will be empty for cli socket connections */
ret = rpcsvc_transport_peername(trans, (char *)&addrstr, sizeof(addrstr));
- if (ret)
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_RPC_TRANSPORT_GET_PEERNAME_FAIL,
+ "Failed to get the peername");
goto fail;
+ }
tmp = strrchr(addrstr, ':');
if (tmp)
@@ -968,12 +1027,61 @@ __server_getspec(rpcsvc_request_t *req)
*/
if (strlen(addrstr) == 0 || gf_is_local_addr(addrstr)) {
ret = build_volfile_path(volume, filename, sizeof(filename),
- TRUSTED_PREFIX);
+ TRUSTED_PREFIX, dict);
} else {
- ret = build_volfile_path(volume, filename, sizeof(filename), NULL);
+ ret = build_volfile_path(volume, filename, sizeof(filename), NULL,
+ dict);
+ }
+
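+    /* Collect "host:port" for every connected peer so that clients can
+     * fall back to other servers when fetching volfiles. */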
+ RCU_READ_LOCK;
+ cds_list_for_each_entry_rcu(peer, &conf->peers, uuid_list)
+ {
+ if (!peer->connected)
+ continue;
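+        /* Build "host:port" for this peer, defaulting to the standard
+         * glusterd port when none is recorded. */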
+        if (peer->port) {
+            snprintf(portstr, sizeof(portstr), "%d", peer->port);
+        } else {
+            snprintf(portstr, sizeof(portstr), "%d", GLUSTERD_DEFAULT_PORT);
+        }
+        if (!peer_hosts) {
+            len = strlen(peer->hostname) + strlen(portstr) + 3;
+            tmp_str = GF_CALLOC(1, len, gf_gld_mt_char);
+            snprintf(tmp_str, len, "%s:%s ", peer->hostname, portstr);
+            peer_hosts = tmp_str;
+        } else {
+            len = strlen(peer_hosts) + strlen(peer->hostname) +
+                  strlen(portstr) + 3;
+            tmp_str = GF_CALLOC(1, len, gf_gld_mt_char);
+            snprintf(tmp_str, len, "%s%s:%s ", peer_hosts, peer->hostname,
+                     portstr);
+            GF_FREE(peer_hosts);
+            peer_hosts = tmp_str;
+        }
+ peer_cnt++;
+ }
+ RCU_READ_UNLOCK;
+ if (peer_cnt) {
+ op_ret = dict_set_str(dict, GLUSTERD_BRICK_SERVERS, peer_hosts);
+ if (op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set peer_host in dict");
+ ret = op_ret;
+ goto fail;
+ }
}
if (ret == 0) {
+ if (dict->count > 0) {
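+            /* Ship everything gathered above (peer list, shd pidfile)
+             * back to the client in the reply's xdata. */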
+ ret = dict_allocate_and_serialize(dict, &rsp.xdata.xdata_val,
+ &rsp.xdata.xdata_len);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
+ goto fail;
+ }
+ }
+
/* to allocate the proper buffer to hold the file data */
ret = sys_stat(filename, &stbuf);
if (ret < 0) {
@@ -990,6 +1098,7 @@ __server_getspec(rpcsvc_request_t *req)
}
ret = file_len = stbuf.st_size;
} else {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND, NULL);
op_errno = ENOENT;
goto fail;
}
@@ -997,6 +1106,7 @@ __server_getspec(rpcsvc_request_t *req)
if (file_len) {
rsp.spec = CALLOC(file_len + 1, sizeof(char));
if (!rsp.spec) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
ret = -1;
op_errno = ENOMEM;
goto fail;
@@ -1015,7 +1125,6 @@ __server_getspec(rpcsvc_request_t *req)
goto fail;
}
}
-
/* convert to XDR */
fail:
if (spec_fd >= 0)
@@ -1024,6 +1133,9 @@ fail:
GF_FREE(brick_name);
rsp.op_ret = ret;
+ if (rsp.op_ret < 0)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MOUNT_REQ_FAIL,
+ "Failed to mount the volume");
if (op_errno)
rsp.op_errno = gf_errno_to_error(op_errno);
@@ -1035,9 +1147,18 @@ fail:
(xdrproc_t)xdr_gf_getspec_rsp);
free(args.key); // malloced by xdr
free(rsp.spec);
+
+ if (peer_hosts)
+ GF_FREE(peer_hosts);
+ if (dict)
+ dict_unref(dict);
+
if (args.xdata.xdata_val)
free(args.xdata.xdata_val);
+ if (rsp.xdata.xdata_val)
+ GF_FREE(rsp.xdata.xdata_val);
+
return 0;
}
@@ -1064,13 +1185,17 @@ __server_event_notify(rpcsvc_request_t *req)
(xdrproc_t)xdr_gf_event_notify_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto fail;
}
if (args.dict.dict_len) {
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
return ret;
+ }
ret = dict_unserialize(args.dict.dict_val, args.dict.dict_len, &dict);
if (ret) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
@@ -1189,9 +1314,9 @@ gd_validate_mgmt_hndsk_req(rpcsvc_request_t *req, dict_t *dict)
*/
if (!ret) {
gf_uuid_parse(uuid_str, peer_uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(peer_uuid, NULL) != NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
return _gf_true;
}
@@ -1207,7 +1332,7 @@ gd_validate_mgmt_hndsk_req(rpcsvc_request_t *req, dict_t *dict)
* is available in the peerinfo list but the uuid has changed of the
* node due to a reinstall, in that case the validation should fail!
*/
- rcu_read_lock();
+ RCU_READ_LOCK;
if (!uuid_str) {
ret = (glusterd_peerinfo_find(NULL, hostname) == NULL);
} else {
@@ -1225,7 +1350,7 @@ gd_validate_mgmt_hndsk_req(rpcsvc_request_t *req, dict_t *dict)
ret = -1;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDSHAKE_REQ_REJECTED,
"Rejecting management "
@@ -1263,6 +1388,7 @@ __glusterd_mgmt_hndsk_versions(rpcsvc_request_t *req)
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -1276,8 +1402,10 @@ __glusterd_mgmt_hndsk_versions(rpcsvc_request_t *req)
}
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = dict_set_int32(dict, GD_OP_VERSION_KEY, conf->op_version);
if (ret) {
@@ -1363,6 +1491,7 @@ __glusterd_mgmt_hndsk_versions_ack(rpcsvc_request_t *req)
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -1435,22 +1564,25 @@ __server_get_volume_info(rpcsvc_request_t *req)
char *volume_id_str = NULL;
int32_t flags = 0;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+
ret = xdr_to_generic(req->msg[0], &vol_info_req,
(xdrproc_t)xdr_gf_get_volume_info_req);
if (ret < 0) {
/* failed to decode msg */
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
- gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_VOL_INFO_REQ_RECVD,
- "Received get volume info req");
+ gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_INFO_REQ_RECVD, NULL);
if (vol_info_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new();
if (!dict) {
- gf_msg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_NO_MEMORY,
- "Out of Memory");
+ gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
op_errno = ENOMEM;
ret = -1;
goto out;
@@ -1459,9 +1591,8 @@ __server_get_volume_info(rpcsvc_request_t *req)
ret = dict_unserialize(vol_info_req.dict.dict_val,
vol_info_req.dict.dict_len, &dict);
if (ret < 0) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
op_errno = -ret;
ret = -1;
goto out;
@@ -1472,8 +1603,8 @@ __server_get_volume_info(rpcsvc_request_t *req)
ret = dict_get_int32(dict, "flags", &flags);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
- "failed to get flags");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=flags", NULL);
op_errno = -ret;
ret = -1;
goto out;
@@ -1481,13 +1612,15 @@ __server_get_volume_info(rpcsvc_request_t *req)
if (!flags) {
/* Nothing to query about. Just return success */
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_NO_FLAG_SET, "No flags set");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_FLAG_SET, NULL);
ret = 0;
goto out;
}
ret = dict_get_str(dict, "volname", &volname);
if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
op_errno = EINVAL;
ret = -1;
goto out;
@@ -1495,6 +1628,8 @@ __server_get_volume_info(rpcsvc_request_t *req)
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL,
+ "Volname=%s", volname, NULL);
op_errno = EINVAL;
ret = -1;
goto out;
@@ -1503,6 +1638,8 @@ __server_get_volume_info(rpcsvc_request_t *req)
if (flags & (int32_t)GF_GET_VOLUME_UUID) {
volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id));
if (!volume_id_str) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ NULL);
op_errno = ENOMEM;
ret = -1;
goto out;
@@ -1510,8 +1647,8 @@ __server_get_volume_info(rpcsvc_request_t *req)
dict_rsp = dict_new();
if (!dict_rsp) {
- gf_msg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_NO_MEMORY,
- "Out of Memory");
+ gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
op_errno = ENOMEM;
GF_FREE(volume_id_str);
ret = -1;
@@ -1519,6 +1656,8 @@ __server_get_volume_info(rpcsvc_request_t *req)
}
ret = dict_set_dynstr(dict_rsp, "volume_id", volume_id_str);
if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=volume_id", NULL);
op_errno = -ret;
ret = -1;
goto out;
@@ -1527,6 +1666,8 @@ __server_get_volume_info(rpcsvc_request_t *req)
ret = dict_allocate_and_serialize(dict_rsp, &vol_info_rsp.dict.dict_val,
&vol_info_rsp.dict.dict_len);
if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
op_errno = -ret;
ret = -1;
goto out;
@@ -1592,6 +1733,8 @@ __server_get_snap_info(rpcsvc_request_t *req)
if (snap_info_req.dict.dict_len) {
dict = dict_new();
if (!dict) {
+ gf_smsg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
op_errno = ENOMEM;
ret = -1;
goto out;
@@ -1622,6 +1765,8 @@ __server_get_snap_info(rpcsvc_request_t *req)
dict_rsp = dict_new();
if (!dict_rsp) {
+ gf_smsg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
op_errno = ENOMEM;
ret = -1;
goto out;
@@ -1664,16 +1809,16 @@ server_get_snap_info(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __server_get_snap_info);
}
-rpcsvc_actor_t gluster_handshake_actors[GF_HNDSK_MAXVALUE] = {
- [GF_HNDSK_NULL] = {"NULL", GF_HNDSK_NULL, NULL, NULL, 0, DRC_NA},
- [GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, 0,
- DRC_NA},
- [GF_HNDSK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_HNDSK_EVENT_NOTIFY,
- server_event_notify, NULL, 0, DRC_NA},
- [GF_HNDSK_GET_VOLUME_INFO] = {"GETVOLUMEINFO", GF_HNDSK_GET_VOLUME_INFO,
- server_get_volume_info, NULL, 0, DRC_NA},
- [GF_HNDSK_GET_SNAPSHOT_INFO] = {"GETSNAPINFO", GF_HNDSK_GET_SNAPSHOT_INFO,
- server_get_snap_info, NULL, 0, DRC_NA},
+static rpcsvc_actor_t gluster_handshake_actors[GF_HNDSK_MAXVALUE] = {
+ [GF_HNDSK_NULL] = {"NULL", NULL, NULL, GF_HNDSK_NULL, DRC_NA, 0},
+ [GF_HNDSK_GETSPEC] = {"GETSPEC", server_getspec, NULL, GF_HNDSK_GETSPEC,
+ DRC_NA, 0},
+ [GF_HNDSK_EVENT_NOTIFY] = {"EVENTNOTIFY", server_event_notify, NULL,
+ GF_HNDSK_EVENT_NOTIFY, DRC_NA, 0},
+ [GF_HNDSK_GET_VOLUME_INFO] = {"GETVOLUMEINFO", server_get_volume_info, NULL,
+ GF_HNDSK_GET_VOLUME_INFO, DRC_NA, 0},
+ [GF_HNDSK_GET_SNAPSHOT_INFO] = {"GETSNAPINFO", server_get_snap_info, NULL,
+ GF_HNDSK_GET_SNAPSHOT_INFO, DRC_NA, 0},
};
struct rpcsvc_program gluster_handshake_prog = {
@@ -1685,9 +1830,9 @@ struct rpcsvc_program gluster_handshake_prog = {
};
/* A minimal RPC program just for the cli getspec command */
-rpcsvc_actor_t gluster_cli_getspec_actors[GF_HNDSK_MAXVALUE] = {
- [GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, 0,
- DRC_NA},
+static rpcsvc_actor_t gluster_cli_getspec_actors[GF_HNDSK_MAXVALUE] = {
+ [GF_HNDSK_GETSPEC] = {"GETSPEC", server_getspec, NULL, GF_HNDSK_GETSPEC,
+ DRC_NA, 0},
};
struct rpcsvc_program gluster_cli_getspec_prog = {
@@ -1698,26 +1843,26 @@ struct rpcsvc_program gluster_cli_getspec_prog = {
.numactors = GF_HNDSK_MAXVALUE,
};
-char *glusterd_dump_proc[GF_DUMP_MAXVALUE] = {
+static char *glusterd_dump_proc[GF_DUMP_MAXVALUE] = {
[GF_DUMP_NULL] = "NULL",
[GF_DUMP_DUMP] = "DUMP",
[GF_DUMP_PING] = "PING",
};
-rpc_clnt_prog_t glusterd_dump_prog = {
+static rpc_clnt_prog_t glusterd_dump_prog = {
.progname = "GLUSTERD-DUMP",
.prognum = GLUSTER_DUMP_PROGRAM,
.progver = GLUSTER_DUMP_VERSION,
.procnames = glusterd_dump_proc,
};
-rpcsvc_actor_t glusterd_mgmt_hndsk_actors[GD_MGMT_HNDSK_MAXVALUE] = {
- [GD_MGMT_HNDSK_NULL] = {"NULL", GD_MGMT_HNDSK_NULL, NULL, NULL, 0, DRC_NA},
- [GD_MGMT_HNDSK_VERSIONS] = {"MGMT-VERS", GD_MGMT_HNDSK_VERSIONS,
- glusterd_mgmt_hndsk_versions, NULL, 0, DRC_NA},
- [GD_MGMT_HNDSK_VERSIONS_ACK] = {"MGMT-VERS-ACK", GD_MGMT_HNDSK_VERSIONS_ACK,
- glusterd_mgmt_hndsk_versions_ack, NULL, 0,
- DRC_NA},
+static rpcsvc_actor_t glusterd_mgmt_hndsk_actors[GD_MGMT_HNDSK_MAXVALUE] = {
+ [GD_MGMT_HNDSK_NULL] = {"NULL", NULL, NULL, GD_MGMT_HNDSK_NULL, DRC_NA, 0},
+ [GD_MGMT_HNDSK_VERSIONS] = {"MGMT-VERS", glusterd_mgmt_hndsk_versions, NULL,
+ GD_MGMT_HNDSK_VERSIONS, DRC_NA, 0},
+ [GD_MGMT_HNDSK_VERSIONS_ACK] = {"MGMT-VERS-ACK",
+ glusterd_mgmt_hndsk_versions_ack, NULL,
+ GD_MGMT_HNDSK_VERSIONS_ACK, DRC_NA, 0},
};
struct rpcsvc_program glusterd_mgmt_hndsk_prog = {
@@ -1728,13 +1873,13 @@ struct rpcsvc_program glusterd_mgmt_hndsk_prog = {
.numactors = GD_MGMT_HNDSK_MAXVALUE,
};
-char *glusterd_mgmt_hndsk_proc[GD_MGMT_HNDSK_MAXVALUE] = {
+static char *glusterd_mgmt_hndsk_proc[GD_MGMT_HNDSK_MAXVALUE] = {
[GD_MGMT_HNDSK_NULL] = "NULL",
[GD_MGMT_HNDSK_VERSIONS] = "MGMT-VERS",
[GD_MGMT_HNDSK_VERSIONS_ACK] = "MGMT-VERS-ACK",
};
-rpc_clnt_prog_t gd_clnt_mgmt_hndsk_prog = {
+static rpc_clnt_prog_t gd_clnt_mgmt_hndsk_prog = {
.progname = "Gluster MGMT Handshake",
.prognum = GD_MGMT_HNDSK_PROGRAM,
.progver = GD_MGMT_HNDSK_VERSION,
@@ -1768,16 +1913,17 @@ glusterd_event_connected_inject(glusterd_peerctx_t *peerctx)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", peerctx->peername,
uuid_utoa(peerctx->peerid));
GF_FREE(ctx);
- goto unlock;
+ goto out;
}
ctx->hostname = gf_strdup(peerinfo->hostname);
ctx->port = peerinfo->port;
@@ -1790,13 +1936,13 @@ glusterd_event_connected_inject(glusterd_peerctx_t *peerctx)
ret = glusterd_friend_sm_inject_event(event);
+ RCU_READ_UNLOCK;
+
if (ret)
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Unable to inject "
"EVENT_CONNECTED ret = %d",
ret);
-unlock:
- rcu_read_unlock();
out:
gf_msg_debug("glusterd", 0, "returning %d", ret);
@@ -1813,22 +1959,45 @@ gd_validate_peer_op_version(xlator_t *this, glusterd_peerinfo_t *peerinfo,
int32_t peer_min_op_version = 0;
int32_t peer_max_op_version = 0;
- if (!dict || !this || !peerinfo)
+ if (!dict) {
+ gf_smsg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
+ goto out;
+ }
+
+ if (!this) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_XLATOR_NOT_DEFINED,
+ NULL);
+ goto out;
+ }
+
+ if (!peerinfo) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
conf = this->private;
ret = dict_get_int32(dict, GD_OP_VERSION_KEY, &peer_op_version);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", GD_OP_VERSION_KEY, NULL);
goto out;
+ }
ret = dict_get_int32(dict, GD_MAX_OP_VERSION_KEY, &peer_max_op_version);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", GD_MAX_OP_VERSION_KEY, NULL);
goto out;
+ }
ret = dict_get_int32(dict, GD_MIN_OP_VERSION_KEY, &peer_min_op_version);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", GD_MIN_OP_VERSION_KEY, NULL);
goto out;
+ }
ret = -1;
/* Check if peer can support our op_version */
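
The three op-version keys fetched above feed a range check: the peer is compatible only when its [min, max] window overlaps ours. The actual comparison lies past the end of this hunk, so the sketch below only captures the overlap rule, with stand-in bounds rather than the real GD_OP_VERSION macros:

#include <stdio.h>

static int op_versions_compatible(int our_min, int our_max,
                                  int peer_min, int peer_max)
{
    /* both sides must share at least one runnable version */
    return peer_max >= our_min && peer_min <= our_max;
}

int main(void)
{
    printf("%d\n", op_versions_compatible(1, 5, 3, 7)); /* 1: overlap */
    printf("%d\n", op_versions_compatible(1, 2, 4, 7)); /* 0: disjoint */
    return 0;
}
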
@@ -1870,7 +2039,7 @@ __glusterd_mgmt_hndsk_version_ack_cbk(struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
gf_msg_debug(this->name, 0, "Could not find peer %s(%s)",
@@ -1930,7 +2099,7 @@ out:
if (ret != 0 && peerinfo)
rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
frame->local = NULL;
STACK_DESTROY(frame->root);
@@ -1979,7 +2148,7 @@ __glusterd_mgmt_hndsk_version_cbk(struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2055,7 +2224,7 @@ out:
rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (rsp.hndsk.hndsk_val)
free(rsp.hndsk.hndsk_val);
@@ -2094,14 +2263,20 @@ glusterd_mgmt_handshake(xlator_t *this, glusterd_peerctx_t *peerctx)
int ret = -1;
frame = create_frame(this, this->ctx->pool);
- if (!frame)
+ if (!frame) {
+ gf_smsg("glusterd", GF_LOG_WARNING, errno, GD_MSG_FRAME_CREATE_FAIL,
+ NULL);
goto out;
+ }
frame->local = peerctx;
req_dict = dict_new();
- if (!req_dict)
+ if (!req_dict) {
+ gf_smsg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
goto out;
+ }
ret = dict_set_dynstr(req_dict, GD_PEER_ID_KEY,
gf_strdup(uuid_utoa(MY_UUID)));
@@ -2114,23 +2289,29 @@ glusterd_mgmt_handshake(xlator_t *this, glusterd_peerctx_t *peerctx)
GF_PROTOCOL_DICT_SERIALIZE(this, req_dict, (&req.hndsk.hndsk_val),
req.hndsk.hndsk_len, ret, out);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
gf_msg_debug(THIS->name, 0, "Could not find peer %s(%s)",
peerctx->peername, uuid_utoa(peerctx->peerid));
- goto unlock;
+ goto out;
}
ret = glusterd_submit_request(
peerinfo->rpc, &req, frame, &gd_clnt_mgmt_hndsk_prog,
GD_MGMT_HNDSK_VERSIONS, NULL, this, glusterd_mgmt_hndsk_version_cbk,
(xdrproc_t)xdr_gf_mgmt_hndsk_req);
+
+ RCU_READ_UNLOCK;
+
ret = 0;
-unlock:
- rcu_read_unlock();
+
out:
+ if (req_dict)
+ dict_unref(req_dict);
+
if (ret && frame)
STACK_DESTROY(frame->root);
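
The cleanup change above drops the unlock: label and adds an unconditional dict_unref(req_dict) at out:, which is safe on every path because GF_PROTOCOL_DICT_SERIALIZE has already copied the dict contents into the request buffer. A toy refcount sketch of the single-cleanup-point shape (dict_t here is a stand-in, not the glusterfs type):

#include <stdlib.h>

typedef struct { int refs; } dict_t;

static dict_t *dict_new(void)
{
    dict_t *d = calloc(1, sizeof(*d));
    if (d)
        d->refs = 1;
    return d;
}

static void dict_unref(dict_t *d)
{
    if (d && --d->refs == 0)
        free(d);
}

static int send_handshake(int fail_early)
{
    int ret = -1;
    dict_t *req_dict = dict_new();
    if (!req_dict)
        goto out;
    if (fail_early)
        goto out; /* previously this path could skip the unref */
    /* serialize into the request and submit here */
    ret = 0;
out:
    if (req_dict)
        dict_unref(req_dict); /* one cleanup point for all paths */
    return ret;
}

int main(void)
{
    send_handshake(1);
    return send_handshake(0);
}
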
@@ -2244,7 +2425,7 @@ __glusterd_peer_dump_version_cbk(struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2320,7 +2501,7 @@ out:
if (ret != 0 && peerinfo)
rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
glusterd_friend_sm();
glusterd_op_sm();
@@ -2362,20 +2543,26 @@ glusterd_peer_dump_version(xlator_t *this, struct rpc_clnt *rpc,
int ret = -1;
frame = create_frame(this, this->ctx->pool);
- if (!frame)
+ if (!frame) {
+ gf_smsg(this->name, GF_LOG_WARNING, errno, GD_MSG_FRAME_CREATE_FAIL,
+ NULL);
goto out;
+ }
frame->local = peerctx;
- if (!peerctx)
+ if (!peerctx) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
gf_msg_debug(this->name, 0, "Couldn't find peer %s(%s)",
peerctx->peername, uuid_utoa(peerctx->peerid));
- goto unlock;
+ goto out;
}
req.gfs_id = 0xcafe;
@@ -2383,8 +2570,8 @@ glusterd_peer_dump_version(xlator_t *this, struct rpc_clnt *rpc,
ret = glusterd_submit_request(
peerinfo->rpc, &req, frame, &glusterd_dump_prog, GF_DUMP_DUMP, NULL,
this, glusterd_peer_dump_version_cbk, (xdrproc_t)xdr_gf_dump_req);
-unlock:
- rcu_read_unlock();
+
+ RCU_READ_UNLOCK;
out:
if (ret && frame)
STACK_DESTROY(frame->root);
diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c
index 4a482d5cfb7..61c0f1c946f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-hooks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c
@@ -8,15 +8,15 @@
cases as published by the Free Software Foundation.
*/
-#include "glusterfs.h"
-#include "dict.h"
-#include "xlator.h"
-#include "logging.h"
-#include "run.h"
-#include "defaults.h"
-#include "syscall.h"
-#include "compat.h"
-#include "compat-errno.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/run.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/compat-errno.h>
#include "glusterd.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
@@ -87,21 +87,24 @@ glusterd_hooks_create_hooks_directory(char *basedir)
glusterd_conf_t *priv = NULL;
int32_t len = 0;
- priv = THIS->private;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
snprintf(path, sizeof(path), "%s/hooks", basedir);
- ret = mkdir_p(path, 0777, _gf_true);
+ ret = mkdir_p(path, 0755, _gf_true);
if (ret) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
- "Unable to create %s", path);
+ gf_smsg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Path=%s", path, NULL);
goto out;
}
GLUSTERD_GET_HOOKS_DIR(version_dir, GLUSTERD_HOOK_VER, priv);
- ret = mkdir_p(version_dir, 0777, _gf_true);
+ ret = mkdir_p(version_dir, 0755, _gf_true);
if (ret) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
- "Unable to create %s", version_dir);
+ gf_smsg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Directory=%s", version_dir, NULL);
goto out;
}
@@ -112,13 +115,14 @@ glusterd_hooks_create_hooks_directory(char *basedir)
len = snprintf(path, sizeof(path), "%s/%s", version_dir, cmd_subdir);
if ((len < 0) || (len >= sizeof(path))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
- ret = mkdir_p(path, 0777, _gf_true);
+ ret = mkdir_p(path, 0755, _gf_true);
if (ret) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
- "Unable to create %s", path);
+ gf_smsg(this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED, "Path=%s", path, NULL);
goto out;
}
@@ -126,13 +130,15 @@ glusterd_hooks_create_hooks_directory(char *basedir)
len = snprintf(path, sizeof(path), "%s/%s/%s", version_dir,
cmd_subdir, type_subdir[type]);
if ((len < 0) || (len >= sizeof(path))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL,
+ NULL);
ret = -1;
goto out;
}
- ret = mkdir_p(path, 0777, _gf_true);
+ ret = mkdir_p(path, 0755, _gf_true);
if (ret) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, errno,
- GD_MSG_CREATE_DIR_FAILED, "Unable to create %s", path);
+ gf_smsg(this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED, "Path=%s", path, NULL);
goto out;
}
}
@@ -200,20 +206,31 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
int i = 0;
int count = 0;
int ret = -1;
+ int flag = 0;
char query[1024] = {
0,
};
char *key = NULL;
char *value = NULL;
+ char *inet_family = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
ret = dict_get_int32(dict, "count", &count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
/* This will not happen unless op_ctx
* is corrupted*/
- if (!count)
+ if (!count) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, "count",
+ NULL);
goto out;
+ }
runner_add_arg(runner, "-o");
for (i = 1; ret == 0; i++) {
@@ -228,9 +245,23 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
continue;
runner_argprintf(runner, "%s=%s", key, value);
+ if ((strncmp(key, "cluster.enable-shared-storage",
+ SLEN("cluster.enable-shared-storage")) == 0 ||
+ strncmp(key, "enable-shared-storage",
+ SLEN("enable-shared-storage")) == 0) &&
+ strncmp(value, "enable", SLEN("enable")) == 0)
+ flag = 1;
}
glusterd_hooks_add_custom_args(dict, runner);
+ if (flag == 1) {
+ ret = dict_get_str_sizen(this->options, "transport.address-family",
+ &inet_family);
+ if (!ret) {
+ runner_argprintf(runner, "transport.address-family=%s",
+ inet_family);
+ }
+ }
ret = 0;
out:
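
The new flag/inet_family logic above watches the option stream for enable-shared-storage being switched on and, if so, forwards glusterd's own transport.address-family option to the hook script, so the shared-storage volume gets mounted with a matching address family. The prefix match itself is simple enough to test standalone (SLEN is the glusterfs compile-time strlen, reproduced here):

#include <stdio.h>
#include <string.h>

#define SLEN(s) (sizeof(s) - 1)

static int shared_storage_enabled(const char *key, const char *value)
{
    return (strncmp(key, "cluster.enable-shared-storage",
                    SLEN("cluster.enable-shared-storage")) == 0 ||
            strncmp(key, "enable-shared-storage",
                    SLEN("enable-shared-storage")) == 0) &&
           strncmp(value, "enable", SLEN("enable")) == 0;
}

int main(void)
{
    /* 1: the option is being enabled */
    printf("%d\n", shared_storage_enabled("enable-shared-storage",
                                          "enable"));
    /* 0: the option is being disabled */
    printf("%d\n", shared_storage_enabled("cluster.enable-shared-storage",
                                          "disable"));
    return 0;
}
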
@@ -357,27 +388,31 @@ glusterd_hooks_run_hooks(char *hooks_path, glusterd_op_t op, dict_t *op_ctx,
lines = GF_CALLOC(1, N * sizeof(*lines), gf_gld_mt_charptr);
if (!lines) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
ret = -1;
goto out;
}
ret = -1;
line_count = 0;
- GF_SKIP_IRRELEVANT_ENTRIES(entry, hookdir, scratch);
- while (entry) {
+
+ while ((entry = sys_readdir(hookdir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
if (line_count == N - 1) {
N *= 2;
lines = GF_REALLOC(lines, N * sizeof(char *));
- if (!lines)
+ if (!lines) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
}
if (glusterd_is_hook_enabled(entry->d_name)) {
lines[line_count] = gf_strdup(entry->d_name);
line_count++;
}
-
- GF_SKIP_IRRELEVANT_ENTRIES(entry, hookdir, scratch);
}
lines[line_count] = NULL;
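
The rewritten loop above replaces the GF_SKIP_IRRELEVANT_ENTRIES macro (called once before the loop and again at its bottom) with a single sys_readdir()-driven while loop and an explicit continue on gf_irrelevant_entry(). A standalone sketch of the same shape using plain readdir; the filter below assumes gf_irrelevant_entry() at least skips "." and "..":

#include <dirent.h>
#include <stdio.h>
#include <string.h>

static int irrelevant_entry(const struct dirent *e)
{
    return strcmp(e->d_name, ".") == 0 || strcmp(e->d_name, "..") == 0;
}

int main(void)
{
    DIR *dir = opendir(".");
    struct dirent *entry;

    if (!dir)
        return 1;
    /* readdir drives the loop; filtering is one visible continue */
    while ((entry = readdir(dir))) {
        if (irrelevant_entry(entry))
            continue;
        printf("%s\n", entry->d_name);
    }
    closedir(dir);
    return 0;
}
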
@@ -461,31 +496,40 @@ glusterd_hooks_stub_init(glusterd_hooks_stub_t **stub, char *scriptdir,
int ret = -1;
glusterd_hooks_stub_t *hooks_stub = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
GF_ASSERT(stub);
if (!stub)
goto out;
hooks_stub = GF_CALLOC(1, sizeof(*hooks_stub), gf_gld_mt_hooks_stub_t);
- if (!hooks_stub)
+ if (!hooks_stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
CDS_INIT_LIST_HEAD(&hooks_stub->all_hooks);
hooks_stub->op = op;
hooks_stub->scriptdir = gf_strdup(scriptdir);
- if (!hooks_stub->scriptdir)
+ if (!hooks_stub->scriptdir) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "scriptdir=%s", scriptdir, NULL);
goto out;
+ }
hooks_stub->op_ctx = dict_copy_with_ref(op_ctx, hooks_stub->op_ctx);
- if (!hooks_stub->op_ctx)
+ if (!hooks_stub->op_ctx) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_COPY_FAIL, NULL);
goto out;
+ }
*stub = hooks_stub;
ret = 0;
out:
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_POST_HOOK_STUB_INIT_FAIL,
- "Failed to initialize "
- "post hooks stub");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_HOOK_STUB_INIT_FAIL,
+ NULL);
glusterd_hooks_stub_cleanup(hooks_stub);
}
@@ -547,12 +591,20 @@ glusterd_hooks_priv_init(glusterd_hooks_private_t **new)
int ret = -1;
glusterd_hooks_private_t *hooks_priv = NULL;
- if (!new)
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!new) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
hooks_priv = GF_CALLOC(1, sizeof(*hooks_priv), gf_gld_mt_hooks_priv_t);
- if (!hooks_priv)
+ if (!hooks_priv) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
pthread_mutex_init(&hooks_priv->mutex, NULL);
pthread_cond_init(&hooks_priv->cond, NULL);
diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.h b/xlators/mgmt/glusterd/src/glusterd-hooks.h
index 3813c18e989..f8b887b9bd7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-hooks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-hooks.h
@@ -34,17 +34,17 @@ typedef enum glusterd_commit_hook_type {
typedef struct hooks_private {
struct cds_list_head list;
- int waitcount; // debug purposes
pthread_mutex_t mutex;
pthread_cond_t cond;
pthread_t worker;
+ int waitcount; // debug purposes
} glusterd_hooks_private_t;
typedef struct hooks_stub {
struct cds_list_head all_hooks;
char *scriptdir;
- glusterd_op_t op;
dict_t *op_ctx;
+ glusterd_op_t op;
} glusterd_hooks_stub_t;
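
Both structs above move the small int member behind the pointer-sized ones, following the usual largest-alignment-first layout rule so no interior padding hole opens before the mutex/cond/dict members. A tiny demonstration of the effect on LP64 (stand-in structs; whether the real glusterd structs actually shrink depends on their full member lists):

#include <stdio.h>

struct sparse { char a; void *p; char b; }; /* typically 24 bytes */
struct dense  { void *p; char a; char b; }; /* typically 16 bytes */

int main(void)
{
    printf("sparse=%zu dense=%zu\n", sizeof(struct sparse),
           sizeof(struct dense));
    return 0;
}
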
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index 4a7a5ba4479..11523f2854b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -7,7 +7,7 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
@@ -17,8 +17,8 @@
#include "glusterd-volgen.h"
#include "glusterd-locks.h"
#include "glusterd-errno.h"
-#include "run.h"
-#include "syscall.h"
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
#include "glusterd-messages.h"
#include <signal.h>
@@ -36,22 +36,20 @@ glusterd_valid_entities valid_types[] = {
};
/* Checks if the lock request is for a valid entity */
-gf_boolean_t
+static gf_boolean_t
glusterd_mgmt_v3_is_type_valid(char *type)
{
- int32_t i = 0;
- gf_boolean_t ret = _gf_false;
+ int i = 0;
GF_ASSERT(type);
for (i = 0; valid_types[i].type; i++) {
if (!strcmp(type, valid_types[i].type)) {
- ret = _gf_true;
- break;
+ return _gf_true;
}
}
- return ret;
+ return _gf_false;
}
/* Initialize the global mgmt_v3 lock list(dict) when
@@ -138,15 +136,12 @@ out:
return;
}
-int32_t
+static int32_t
glusterd_get_mgmt_v3_lock_owner(char *key, uuid_t *uuid)
{
int32_t ret = -1;
glusterd_mgmt_v3_lock_obj *lock_obj = NULL;
glusterd_conf_t *priv = NULL;
- uuid_t no_owner = {
- 0,
- };
xlator_t *this = NULL;
this = THIS;
@@ -164,8 +159,6 @@ glusterd_get_mgmt_v3_lock_owner(char *key, uuid_t *uuid)
ret = dict_get_bin(priv->mgmt_v3_lock, key, (void **)&lock_obj);
if (!ret)
gf_uuid_copy(*uuid, lock_obj->lock_owner);
- else
- gf_uuid_copy(*uuid, no_owner);
ret = 0;
out:
@@ -199,11 +192,11 @@ glusterd_release_multiple_locks_per_entity(dict_t *dict, uuid_t uuid,
/* Release all the locks held */
for (i = 0; i < locked_count; i++) {
- snprintf(name_buf, sizeof(name_buf), "%sname%d", type, i + 1);
+ ret = snprintf(name_buf, sizeof(name_buf), "%sname%d", type, i + 1);
/* Looking for volname1, volname2 or snapname1, *
 * snapname2 as key in the dict */
- ret = dict_get_str(dict, name_buf, &name);
+ ret = dict_get_strn(dict, name_buf, ret, &name);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Unable to get %s locked_count = %d", name_buf,
@@ -248,11 +241,11 @@ glusterd_acquire_multiple_locks_per_entity(dict_t *dict, uuid_t uuid,
/* Locking one element after other */
for (i = 0; i < count; i++) {
- snprintf(name_buf, sizeof(name_buf), "%sname%d", type, i + 1);
+ ret = snprintf(name_buf, sizeof(name_buf), "%sname%d", type, i + 1);
/* Looking for volname1, volname2 or snapname1, *
 * snapname2 as key in the dict */
- ret = dict_get_str(dict, name_buf, &name);
+ ret = dict_get_strn(dict, name_buf, ret, &name);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Unable to get %s count = %d", name_buf, count);
@@ -321,13 +314,13 @@ glusterd_mgmt_v3_unlock_entity(dict_t *dict, uuid_t uuid, char *type,
}
/* Looking for volcount or snapcount in the dict */
- snprintf(name_buf, sizeof(name_buf), "%scount", type);
- ret = dict_get_int32(dict, name_buf, &count);
+ ret = snprintf(name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32n(dict, name_buf, ret, &count);
if (ret) {
/* count is not present. Only one *
* element name needs to be unlocked */
- snprintf(name_buf, sizeof(name_buf), "%sname", type);
- ret = dict_get_str(dict, name_buf, &name);
+ ret = snprintf(name_buf, sizeof(name_buf), "%sname", type);
+ ret = dict_get_strn(dict, name_buf, ret, &name);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Unable to fetch %sname", type);
@@ -390,13 +383,13 @@ glusterd_mgmt_v3_lock_entity(dict_t *dict, uuid_t uuid, uint32_t *op_errno,
}
/* Looking for volcount or snapcount in the dict */
- snprintf(name_buf, sizeof(name_buf), "%scount", type);
- ret = dict_get_int32(dict, name_buf, &count);
+ ret = snprintf(name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32n(dict, name_buf, ret, &count);
if (ret) {
/* count is not present. Only one *
* element name needs to be locked */
- snprintf(name_buf, sizeof(name_buf), "%sname", type);
- ret = dict_get_str(dict, name_buf, &name);
+ ret = snprintf(name_buf, sizeof(name_buf), "%sname", type);
+ ret = dict_get_strn(dict, name_buf, ret, &name);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Unable to fetch %sname", type);
@@ -569,8 +562,8 @@ glusterd_mgmt_v3_lock(const char *name, uuid_t uuid, uint32_t *op_errno,
goto out;
}
- gf_msg_debug(this->name, 0, "Trying to acquire lock of %s %s for %s as %s",
- type, name, uuid_utoa(uuid), key);
+ gf_msg_debug(this->name, 0, "Trying to acquire lock of %s for %s", key,
+ uuid_utoa(uuid));
ret = glusterd_get_mgmt_v3_lock_owner(key, &owner);
if (ret) {
@@ -589,7 +582,7 @@ glusterd_mgmt_v3_lock(const char *name, uuid_t uuid, uint32_t *op_errno,
goto out;
}
- lock_obj = GF_CALLOC(1, sizeof(glusterd_mgmt_v3_lock_obj),
+ lock_obj = GF_MALLOC(sizeof(glusterd_mgmt_v3_lock_obj),
gf_common_mt_mgmt_v3_lock_obj_t);
if (!lock_obj) {
ret = -1;
@@ -616,18 +609,25 @@ glusterd_mgmt_v3_lock(const char *name, uuid_t uuid, uint32_t *op_errno,
}
mgmt_lock_timer->xl = THIS;
- key_dup = gf_strdup(key);
- delay.tv_sec = priv->mgmt_v3_lock_timeout;
- delay.tv_nsec = 0;
/*changing to default timeout value*/
priv->mgmt_v3_lock_timeout = GF_LOCK_TIMER;
ret = -1;
mgmt_lock_timer_xl = mgmt_lock_timer->xl;
- GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_xl, out);
+ if (!mgmt_lock_timer_xl) {
+ GF_FREE(mgmt_lock_timer);
+ goto out;
+ }
mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx;
- GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_ctx, out);
+ if (!mgmt_lock_timer_ctx) {
+ GF_FREE(mgmt_lock_timer);
+ goto out;
+ }
+
+ key_dup = gf_strdup(key);
+ delay.tv_sec = priv->mgmt_v3_lock_timeout;
+ delay.tv_nsec = 0;
mgmt_lock_timer->timer = gf_timer_call_after(
mgmt_lock_timer_ctx, delay, gd_mgmt_v3_unlock_timer_cbk, key_dup);
@@ -637,24 +637,25 @@ glusterd_mgmt_v3_lock(const char *name, uuid_t uuid, uint32_t *op_errno,
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Unable to set timer in mgmt_v3 lock");
+ GF_FREE(key_dup);
GF_FREE(mgmt_lock_timer);
goto out;
}
/* Saving the backtrace into the pre-allocated buffer, ctx->btbuf*/
if ((bt = gf_backtrace_save(NULL))) {
- snprintf(key, sizeof(key), "debug.last-success-bt-%s-%s", name, type);
+ snprintf(key, sizeof(key), "debug.last-success-bt-%s", key_dup);
ret = dict_set_dynstr_with_alloc(priv->mgmt_v3_lock, key, bt);
if (ret)
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_SET_FAILED,
"Failed to save "
- "the back trace for lock %s-%s granted to %s",
- name, type, uuid_utoa(uuid));
+ "the back trace for lock %s granted to %s",
+ key_dup, uuid_utoa(uuid));
ret = 0;
}
- gf_msg_debug(this->name, 0, "Lock for %s %s successfully held by %s", type,
- name, uuid_utoa(uuid));
+ gf_msg_debug(this->name, 0, "Lock for %s successfully held by %s", key_dup,
+ uuid_utoa(uuid));
ret = 0;
out:
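
The reordering above is a leak fix: gf_strdup(key) used to run before the xl/ctx validation, so a failed GF_VALIDATE_OR_GOTO leaked both key_dup and mgmt_lock_timer; now validation happens first, each failure path frees the timer object explicitly, and key_dup is allocated only once the timer can actually be armed (and is freed if the later dict_set_bin fails). A compact sketch of the validate-then-allocate ordering with stand-in types:

#include <stdlib.h>
#include <string.h>

struct timer_obj { void *xl; char *key; };

static int arm_timer(void *xl, const char *key)
{
    struct timer_obj *t = calloc(1, sizeof(*t));
    if (!t)
        return -1;
    t->xl = xl;
    if (!t->xl) {         /* validate before any further allocation */
        free(t);
        return -1;
    }
    t->key = strdup(key); /* allocated only after validation passed */
    if (!t->key) {
        free(t);
        return -1;
    }
    /* the gf_timer_call_after() equivalent would fire here */
    free(t->key);
    free(t);
    return 0;
}

int main(void)
{
    return arm_timer((void *)1, "myvol_vol");
}
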
@@ -672,9 +673,9 @@ gd_mgmt_v3_unlock_timer_cbk(void *data)
glusterd_conf_t *conf = NULL;
glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL;
char *key = NULL;
- char *type = NULL;
+ int keylen;
char bt_key[PATH_MAX] = "";
- char name[PATH_MAX] = "";
+ int bt_key_len = 0;
int32_t ret = -1;
glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
xlator_t *mgmt_lock_timer_xl = NULL;
@@ -686,25 +687,21 @@ gd_mgmt_v3_unlock_timer_cbk(void *data)
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- gf_log(THIS->name, GF_LOG_INFO, "In gd_mgmt_v3_unlock_timer_cbk");
GF_ASSERT(NULL != data);
key = (char *)data;
- dict_del(conf->mgmt_v3_lock, key);
+ keylen = strlen(key);
+ dict_deln(conf->mgmt_v3_lock, key, keylen);
- type = strrchr(key, '_');
- strncpy(name, key, strlen(key) - strlen(type) - 1);
-
- ret = snprintf(bt_key, PATH_MAX, "debug.last-success-bt-%s-%s", name,
- type + 1);
- if (ret != SLEN("debug.last-success-bt-") + strlen(name) + strlen(type)) {
+ bt_key_len = snprintf(bt_key, PATH_MAX, "debug.last-success-bt-%s", key);
+ if (bt_key_len != SLEN("debug.last-success-bt-") + keylen) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CREATE_KEY_FAIL,
"Unable to create backtrace "
"key");
goto out;
}
- dict_del(conf->mgmt_v3_lock, bt_key);
+ dict_deln(conf->mgmt_v3_lock, bt_key, bt_key_len);
ret = dict_get_bin(conf->mgmt_v3_lock_timer, key,
(void **)&mgmt_lock_timer);
@@ -724,8 +721,12 @@ out:
timer = mgmt_lock_timer->timer;
GF_FREE(timer->data);
gf_timer_call_cancel(mgmt_lock_timer_ctx, mgmt_lock_timer->timer);
- dict_del(conf->mgmt_v3_lock_timer, bt_key);
+ dict_deln(conf->mgmt_v3_lock_timer, bt_key, bt_key_len);
mgmt_lock_timer->timer = NULL;
+ gf_log(this->name, GF_LOG_INFO,
+ "unlock timer is cancelled for volume_type"
+ " %s",
+ key);
}
ret_function:
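
The callback above no longer re-splits the key into name and type with strrchr/strncpy; the "name_type" string is treated as opaque and the backtrace key is derived by prefixing it, with the same length check the lock path uses. A standalone sketch of the key construction (SLEN reproduced from glusterfs):

#include <stdio.h>
#include <string.h>

#define SLEN(s) (sizeof(s) - 1)

int main(void)
{
    char key[64], bt_key[128];
    int keylen = snprintf(key, sizeof(key), "%s_%s", "myvol", "vol");
    int bt_len = snprintf(bt_key, sizeof(bt_key),
                          "debug.last-success-bt-%s", key);

    /* the same sanity check gd_mgmt_v3_unlock_timer_cbk performs */
    if (bt_len != (int)SLEN("debug.last-success-bt-") + keylen)
        return 1;
    printf("%s\n%s\n", key, bt_key);
    return 0;
}
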
@@ -738,6 +739,7 @@ glusterd_mgmt_v3_unlock(const char *name, uuid_t uuid, char *type)
{
char key[PATH_MAX] = "";
char key_dup[PATH_MAX] = "";
+ int keylen;
int32_t ret = -1;
gf_boolean_t is_valid = _gf_true;
glusterd_conf_t *priv = NULL;
@@ -772,14 +774,13 @@ glusterd_mgmt_v3_unlock(const char *name, uuid_t uuid, char *type)
goto out;
}
- ret = snprintf(key, sizeof(key), "%s_%s", name, type);
- if (ret != strlen(name) + 1 + strlen(type)) {
+ keylen = snprintf(key, sizeof(key), "%s_%s", name, type);
+ if (keylen != strlen(name) + 1 + strlen(type)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CREATE_KEY_FAIL,
"Unable to create key");
ret = -1;
goto out;
}
- strncpy(key_dup, key, strlen(key));
gf_msg_debug(this->name, 0, "Trying to release lock of %s %s for %s as %s",
type, name, uuid_utoa(uuid), key);
@@ -808,7 +809,7 @@ glusterd_mgmt_v3_unlock(const char *name, uuid_t uuid, char *type)
}
/* Removing the mgmt_v3 lock from the global list */
- dict_del(priv->mgmt_v3_lock, key);
+ dict_deln(priv->mgmt_v3_lock, key, keylen);
ret = dict_get_bin(priv->mgmt_v3_lock_timer, key,
(void **)&mgmt_lock_timer);
@@ -818,23 +819,24 @@ glusterd_mgmt_v3_unlock(const char *name, uuid_t uuid, char *type)
goto out;
}
+ (void)snprintf(key_dup, sizeof(key_dup), "%s", key);
+
/* Remove the backtrace key as well */
- ret = snprintf(key, sizeof(key), "debug.last-success-bt-%s-%s", name, type);
- if (ret !=
- SLEN("debug.last-success-bt-") + strlen(name) + strlen(type) + 1) {
+ ret = snprintf(key, sizeof(key), "debug.last-success-bt-%s", key_dup);
+ if (ret != SLEN("debug.last-success-bt-") + keylen) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CREATE_KEY_FAIL,
"Unable to create backtrace "
"key");
ret = -1;
goto out;
}
- dict_del(priv->mgmt_v3_lock, key);
+ dict_deln(priv->mgmt_v3_lock, key, ret);
gf_msg_debug(this->name, 0, "Lock for %s %s successfully released", type,
name);
/* Release owner reference which was held during lock */
- if (mgmt_lock_timer->timer) {
+ if (mgmt_lock_timer && mgmt_lock_timer->timer) {
ret = -1;
mgmt_lock_timer_xl = mgmt_lock_timer->xl;
GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_xl, out);
@@ -846,7 +848,7 @@ glusterd_mgmt_v3_unlock(const char *name, uuid_t uuid, char *type)
timer = mgmt_lock_timer->timer;
GF_FREE(timer->data);
gf_timer_call_cancel(mgmt_lock_timer_ctx, mgmt_lock_timer->timer);
- dict_del(priv->mgmt_v3_lock_timer, key_dup);
+ dict_deln(priv->mgmt_v3_lock_timer, key_dup, keylen);
}
ret = glusterd_volinfo_find(name, &volinfo);
if (volinfo && volinfo->stage_deleted) {
@@ -855,6 +857,10 @@ glusterd_mgmt_v3_unlock(const char *name, uuid_t uuid, char *type)
* stage_deleted flag is set back to false
*/
volinfo->stage_deleted = _gf_false;
+ gf_log(this->name, GF_LOG_INFO,
+ "Volume %s still exist, setting "
+ "stage deleted flag to false for the volume",
+ volinfo->volname);
}
ret = 0;
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index 8878a30d0bf..44667cebd3d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -40,9 +40,6 @@ void
glusterd_mgmt_v3_lock_timer_fini();
int32_t
-glusterd_get_mgmt_v3_lock_owner(char *volname, uuid_t *uuid);
-
-int32_t
glusterd_mgmt_v3_lock(const char *key, uuid_t uuid, uint32_t *op_errno,
char *type);
diff --git a/xlators/mgmt/glusterd/src/glusterd-log-ops.c b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
index 4742225beb5..34abf35cb00 100644
--- a/xlators/mgmt/glusterd/src/glusterd-log-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
@@ -7,7 +7,7 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
@@ -16,7 +16,7 @@
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
#include <signal.h>
@@ -43,6 +43,7 @@ __glusterd_handle_log_rotate(rpcsvc_request_t *req)
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -75,7 +76,7 @@ __glusterd_handle_log_rotate(rpcsvc_request_t *req)
"for volume %s",
volname);
- ret = dict_set_uint64(dict, "rotate-key", (uint64_t)time(NULL));
+ ret = dict_set_uint64(dict, "rotate-key", (uint64_t)gf_time());
if (ret)
goto out;
@@ -105,7 +106,6 @@ glusterd_op_stage_log_rotate(dict_t *dict, char **op_errstr)
int ret = -1;
char *volname = NULL;
glusterd_volinfo_t *volinfo = NULL;
- gf_boolean_t exists = _gf_false;
char msg[2048] = {0};
char *brick = NULL;
@@ -116,13 +116,11 @@ glusterd_op_stage_log_rotate(dict_t *dict, char **op_errstr)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
ret = glusterd_volinfo_find(volname, &volinfo);
- if (!exists) {
+ if (ret) {
snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
*op_errstr = gf_strdup(msg);
- ret = -1;
goto out;
}
@@ -141,6 +139,8 @@ glusterd_op_stage_log_rotate(dict_t *dict, char **op_errstr)
/* If no brick is specified, do log-rotate for
all the bricks in the volume */
if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=brick", NULL);
ret = 0;
goto out;
}
@@ -207,8 +207,11 @@ glusterd_op_log_rotate(dict_t *dict)
ret = dict_get_str(dict, "brick", &brick);
/* If no brick is specified, do log-rotate for
all the bricks in the volume */
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=brick", NULL);
goto cont;
+ }
ret = glusterd_brickinfo_new_from_brick(brick, &tmpbrkinfo, _gf_false,
NULL);
@@ -229,8 +232,9 @@ cont:
if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
continue;
- if (brick && (strcmp(tmpbrkinfo->hostname, brickinfo->hostname) ||
- strcmp(tmpbrkinfo->path, brickinfo->path)))
+ if (tmpbrkinfo && brick &&
+ (strcmp(tmpbrkinfo->hostname, brickinfo->hostname) ||
+ strcmp(tmpbrkinfo->path, brickinfo->path)))
continue;
valid_brick = 1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
index 210d0f8658c..d7257e1a7b5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mem-types.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
@@ -11,67 +11,48 @@
#ifndef __GLUSTERD_MEM_TYPES_H__
#define __GLUSTERD_MEM_TYPES_H__
-#include "mem-types.h"
+#include <glusterfs/mem-types.h>
typedef enum gf_gld_mem_types_ {
- gf_gld_mt_dir_entry_t = gf_common_mt_end + 1,
- gf_gld_mt_volfile_ctx = gf_common_mt_end + 2,
- gf_gld_mt_glusterd_state_t = gf_common_mt_end + 3,
- gf_gld_mt_glusterd_conf_t = gf_common_mt_end + 4,
- gf_gld_mt_locker = gf_common_mt_end + 5,
- gf_gld_mt_string = gf_common_mt_end + 6,
- gf_gld_mt_lock_table = gf_common_mt_end + 7,
- gf_gld_mt_char = gf_common_mt_end + 8,
- gf_gld_mt_glusterd_connection_t = gf_common_mt_end + 9,
- gf_gld_mt_resolve_comp = gf_common_mt_end + 10,
- gf_gld_mt_peerinfo_t = gf_common_mt_end + 11,
- gf_gld_mt_friend_sm_event_t = gf_common_mt_end + 12,
- gf_gld_mt_friend_req_ctx_t = gf_common_mt_end + 13,
- gf_gld_mt_friend_update_ctx_t = gf_common_mt_end + 14,
- gf_gld_mt_op_sm_event_t = gf_common_mt_end + 15,
- gf_gld_mt_op_lock_ctx_t = gf_common_mt_end + 16,
- gf_gld_mt_op_stage_ctx_t = gf_common_mt_end + 17,
- gf_gld_mt_op_commit_ctx_t = gf_common_mt_end + 18,
- gf_gld_mt_mop_stage_req_t = gf_common_mt_end + 19,
- gf_gld_mt_probe_ctx_t = gf_common_mt_end + 20,
- gf_gld_mt_create_volume_ctx_t = gf_common_mt_end + 21,
- gf_gld_mt_start_volume_ctx_t = gf_common_mt_end + 22,
- gf_gld_mt_stop_volume_ctx_t = gf_common_mt_end + 23,
- gf_gld_mt_delete_volume_ctx_t = gf_common_mt_end + 24,
- gf_gld_mt_glusterd_volinfo_t = gf_common_mt_end + 25,
- gf_gld_mt_glusterd_brickinfo_t = gf_common_mt_end + 26,
- gf_gld_mt_peer_hostname_t = gf_common_mt_end + 27,
- gf_gld_mt_ifreq = gf_common_mt_end + 28,
- gf_gld_mt_store_handle_t = gf_common_mt_end + 29,
- gf_gld_mt_store_iter_t = gf_common_mt_end + 30,
- gf_gld_mt_defrag_info = gf_common_mt_end + 31,
- gf_gld_mt_log_filename_ctx_t = gf_common_mt_end + 32,
- gf_gld_mt_log_locate_ctx_t = gf_common_mt_end + 33,
- gf_gld_mt_log_rotate_ctx_t = gf_common_mt_end + 34,
- gf_gld_mt_peerctx_t = gf_common_mt_end + 35,
- gf_gld_mt_sm_tr_log_t = gf_common_mt_end + 36,
- gf_gld_mt_pending_node_t = gf_common_mt_end + 37,
- gf_gld_mt_brick_rsp_ctx_t = gf_common_mt_end + 38,
- gf_gld_mt_mop_brick_req_t = gf_common_mt_end + 39,
- gf_gld_mt_op_allack_ctx_t = gf_common_mt_end + 40,
- gf_gld_mt_linearr = gf_common_mt_end + 41,
- gf_gld_mt_linebuf = gf_common_mt_end + 42,
- gf_gld_mt_mount_pattern = gf_common_mt_end + 43,
- gf_gld_mt_mount_comp_container = gf_common_mt_end + 44,
- gf_gld_mt_mount_component = gf_common_mt_end + 45,
- gf_gld_mt_mount_spec = gf_common_mt_end + 46,
- gf_gld_mt_georep_meet_spec = gf_common_mt_end + 47,
- gf_gld_mt_nodesrv_t = gf_common_mt_end + 48,
- gf_gld_mt_charptr = gf_common_mt_end + 49,
- gf_gld_mt_hooks_stub_t = gf_common_mt_end + 50,
- gf_gld_mt_hooks_priv_t = gf_common_mt_end + 51,
- gf_gld_mt_mop_commit_req_t = gf_common_mt_end + 52,
- gf_gld_mt_int = gf_common_mt_end + 53,
- gf_gld_mt_snap_t = gf_common_mt_end + 54,
- gf_gld_mt_missed_snapinfo_t = gf_common_mt_end + 55,
- gf_gld_mt_snap_create_args_t = gf_common_mt_end + 56,
- gf_gld_mt_local_peers_t = gf_common_mt_end + 57,
- gf_gld_mt_glusterd_brick_proc_t = gf_common_mt_end + 58,
- gf_gld_mt_end = gf_common_mt_end + 59,
+ gf_gld_mt_glusterd_conf_t = gf_common_mt_end + 1,
+ gf_gld_mt_char,
+ gf_gld_mt_peerinfo_t,
+ gf_gld_mt_friend_sm_event_t,
+ gf_gld_mt_friend_req_ctx_t,
+ gf_gld_mt_friend_update_ctx_t,
+ gf_gld_mt_op_sm_event_t,
+ gf_gld_mt_op_lock_ctx_t,
+ gf_gld_mt_op_stage_ctx_t,
+ gf_gld_mt_op_commit_ctx_t,
+ gf_gld_mt_mop_stage_req_t,
+ gf_gld_mt_probe_ctx_t,
+ gf_gld_mt_glusterd_volinfo_t,
+ gf_gld_mt_volinfo_dict_data_t,
+ gf_gld_mt_glusterd_brickinfo_t,
+ gf_gld_mt_peer_hostname_t,
+ gf_gld_mt_defrag_info,
+ gf_gld_mt_peerctx_t,
+ gf_gld_mt_sm_tr_log_t,
+ gf_gld_mt_pending_node_t,
+ gf_gld_mt_brick_rsp_ctx_t,
+ gf_gld_mt_mop_brick_req_t,
+ gf_gld_mt_op_allack_ctx_t,
+ gf_gld_mt_linearr,
+ gf_gld_mt_linebuf,
+ gf_gld_mt_mount_pattern,
+ gf_gld_mt_mount_comp_container,
+ gf_gld_mt_mount_spec,
+ gf_gld_mt_georep_meet_spec,
+ gf_gld_mt_charptr,
+ gf_gld_mt_hooks_stub_t,
+ gf_gld_mt_hooks_priv_t,
+ gf_gld_mt_mop_commit_req_t,
+ gf_gld_mt_int,
+ gf_gld_mt_snap_t,
+ gf_gld_mt_missed_snapinfo_t,
+ gf_gld_mt_snap_create_args_t,
+ gf_gld_mt_glusterd_brick_proc_t,
+ gf_gld_mt_glusterd_svc_proc_t,
+ gf_gld_mt_end,
} gf_gld_mem_types_t;
#endif
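
The rewritten enum above pins only the first member to gf_common_mt_end + 1 and lets the rest auto-increment, so dropping the dozens of unused types did not require renumbering every survivor. The trade-off is that values shift when entries are removed, which is fine for in-process memory accounting but would not be for on-wire constants. A small illustration:

#include <stdio.h>

enum { common_end = 100 }; /* stand-in for gf_common_mt_end */

typedef enum {
    mt_conf_t = common_end + 1, /* 101: only this one is pinned */
    mt_char,                    /* 102 */
    mt_peerinfo_t,              /* 103 */
    mt_end,                     /* 104 */
} mem_types_t;

int main(void)
{
    printf("%d %d %d %d\n", mt_conf_t, mt_char, mt_peerinfo_t, mt_end);
    return 0;
}
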
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index 41fedf9e288..3a1e600fb03 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -11,7 +11,7 @@
#ifndef _GLUSTERD_MESSAGES_H_
#define _GLUSTERD_MESSAGES_H_
-#include "glfs-message-id.h"
+#include <glusterfs/glfs-message-id.h>
/* To add new message IDs, append new identifiers at the end of the list.
*
@@ -46,7 +46,7 @@ GLFS_MSGID(
GD_MSG_SNAP_STATUS_FAIL, GD_MSG_SNAP_INIT_FAIL, GD_MSG_VOLINFO_SET_FAIL,
GD_MSG_VOLINFO_GET_FAIL, GD_MSG_BRICK_CREATION_FAIL,
GD_MSG_BRICK_GET_INFO_FAIL, GD_MSG_BRICK_NEW_INFO_FAIL, GD_MSG_LVS_FAIL,
- GD_MSG_SETXATTR_FAIL, GD_MSG_UMOUNTING_SNAP_BRICK, GD_MSG_OP_UNSUPPORTED,
+ GD_MSG_SET_XATTR_FAIL, GD_MSG_UMOUNTING_SNAP_BRICK, GD_MSG_OP_UNSUPPORTED,
GD_MSG_SNAP_NOT_FOUND, GD_MSG_FS_LABEL_UPDATE_FAIL, GD_MSG_LVM_MOUNT_FAILED,
GD_MSG_DICT_SET_FAILED, GD_MSG_CANONICALIZE_FAIL, GD_MSG_DICT_GET_FAILED,
GD_MSG_SNAP_INFO_FAIL, GD_MSG_SNAP_VOL_CONFIG_FAIL,
@@ -78,7 +78,7 @@ GLFS_MSGID(
GD_MSG_COMMIT_OP_FAIL, GD_MSG_PEER_LIST_CREATE_FAIL, GD_MSG_BRICK_OP_FAIL,
GD_MSG_OPINFO_SET_FAIL, GD_MSG_OP_EVENT_UNLOCK_FAIL,
GD_MSG_MGMTV3_OP_RESP_FAIL, GD_MSG_PEER_NOT_FOUND, GD_MSG_REQ_DECODE_FAIL,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL, GD_MSG_ALREADY_STOPPED,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, GD_MSG_ALREADY_STOPPED,
GD_MSG_PRE_VALD_RESP_FAIL, GD_MSG_SVC_GET_FAIL, GD_MSG_VOLFILE_NOT_FOUND,
GD_MSG_OP_EVENT_LOCK_FAIL, GD_MSG_NON_STRIPE_VOL, GD_MSG_SNAPD_OBJ_GET_FAIL,
GD_MSG_QUOTA_DISABLED, GD_MSG_CACHE_MINMAX_SIZE_INVALID,
@@ -116,7 +116,7 @@ GLFS_MSGID(
GD_MSG_PARSE_BRICKINFO_FAIL, GD_MSG_VERS_STORE_FAIL, GD_MSG_HEADER_ADD_FAIL,
GD_MSG_QUOTA_CONF_WRITE_FAIL, GD_MSG_QUOTA_CONF_CORRUPT, GD_MSG_FORK_FAIL,
GD_MSG_CKSUM_COMPUTE_FAIL, GD_MSG_VERS_CKSUM_STORE_FAIL,
- GD_MSG_GETXATTR_FAIL, GD_MSG_CONVERSION_FAILED, GD_MSG_VOL_NOT_DISTRIBUTE,
+ GD_MSG_GET_XATTR_FAIL, GD_MSG_CONVERSION_FAILED, GD_MSG_VOL_NOT_DISTRIBUTE,
GD_MSG_VOL_STOPPED, GD_MSG_OPCTX_GET_FAIL, GD_MSG_TASKID_GEN_FAIL,
GD_MSG_REBALANCE_ID_MISSING, GD_MSG_NO_REBALANCE_PFX_IN_VOLNAME,
GD_MSG_DEFRAG_STATUS_UPDATE_FAIL, GD_MSG_UUID_GEN_STORE_FAIL,
@@ -298,6 +298,154 @@ GLFS_MSGID(
GD_MSG_LOCALTIME_LOGGING_ENABLE, GD_MSG_LOCALTIME_LOGGING_DISABLE,
GD_MSG_PORTS_EXHAUSTED, GD_MSG_CHANGELOG_GET_FAIL,
GD_MSG_MANAGER_FUNCTION_FAILED,
- GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL);
+ GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL, GD_MSG_SHD_START_FAIL,
+ GD_MSG_SHD_OBJ_GET_FAIL, GD_MSG_SVC_ATTACH_FAIL, GD_MSG_ATTACH_INFO,
+ GD_MSG_DETACH_INFO, GD_MSG_SVC_DETACH_FAIL,
+ GD_MSG_RPC_TRANSPORT_GET_PEERNAME_FAIL, GD_MSG_CLUSTER_RC_ENABLE,
+ GD_MSG_NFS_GANESHA_DISABLED, GD_MSG_GANESHA_NOT_RUNNING, GD_MSG_SNAP_WARN,
+ GD_MSG_BRICK_SUBVOL_VERIFY_FAIL, GD_MSG_REMOVE_ARBITER_BRICK,
+ GD_MSG_BRICK_NOT_DECOM, GD_MSG_BRICK_STOPPED, GD_MSG_BRICK_DEAD,
+ GD_MSG_BRICK_HOST_NOT_FOUND, GD_MSG_BRICK_HOST_DOWN, GD_MSG_BRICK_DELETE,
+ GD_MSG_BRICK_NO_REMOVE_CMD, GD_MSG_MIGRATION_PROG, GD_MSG_MIGRATION_FAIL,
+ GD_MSG_COPY_FAIL, GD_MSG_REALPATH_GET_FAIL,
+ GD_MSG_ARBITER_BRICK_SET_INFO_FAIL, GD_MSG_STRCHR_FAIL, GD_MSG_SPLIT_FAIL,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, GD_MSG_VOL_SHD_NOT_COMP,
+ GD_MSG_BITROT_NOT_ENABLED, GD_MSG_CREATE_BRICK_DIR_FAILED,
+ GD_MSG_CREATE_GLUSTER_DIR_FAILED, GD_MSG_BRICK_CREATE_MNTPNT,
+ GD_MSG_BRICK_CREATE_ROOT, GD_MSG_SET_XATTR_BRICK_FAIL,
+ GD_MSG_REMOVE_XATTR_FAIL, GD_MSG_XLATOR_NOT_DEFINED,
+ GD_MSG_BRICK_NOT_RUNNING, GD_MSG_INCORRECT_BRICK, GD_MSG_UUID_GET_FAIL,
+ GD_MSG_INVALID_ARGUMENT, GD_MSG_FRAME_CREATE_FAIL,
+ GD_MSG_SNAPSHOT_NOT_THIN_PROVISIONED, GD_MSG_VOL_STOP_ARGS_GET_FAILED,
+ GD_MSG_LSTAT_FAIL, GD_MSG_VOLUME_NOT_IMPORTED,
+ GD_MSG_ADD_BRICK_MNT_INFO_FAIL, GD_MSG_GET_MNT_ENTRY_INFO_FAIL,
+ GD_MSG_QUORUM_CLUSTER_COUNT_GET_FAIL, GD_MSG_POST_COMMIT_OP_FAIL,
+ GD_MSG_POST_COMMIT_FROM_UUID_REJCT, GD_MSG_POST_COMMIT_REQ_SEND_FAIL);
+
+#define GD_MSG_INVALID_ENTRY_STR "Invalid data entry"
+#define GD_MSG_INVALID_ARGUMENT_STR \
+ "Invalid arguments have been given to function"
+#define GD_MSG_GARBAGE_ARGS_STR "Garbage args received"
+#define GD_MSG_BRICK_SUBVOL_VERIFY_FAIL_STR "Brick's subvol verification failed"
+#define GD_MSG_REMOVE_ARBITER_BRICK_STR "Failed to remove arbiter bricks"
+#define GD_MSG_DICT_GET_FAILED_STR "Dict get failed"
+#define GD_MSG_DICT_SET_FAILED_STR "Dict set failed"
+#define GD_MSG_BRICK_NOT_FOUND_STR "Brick not found in volume"
+#define GD_MSG_BRICK_NOT_DECOM_STR "Brick is not decommissioned"
+#define GD_MSG_BRICK_STOPPED_STR "Found stopped brick"
+#define GD_MSG_BRICK_DEAD_STR "Found dead brick"
+#define GD_MSG_BRICK_HOST_NOT_FOUND_STR \
+ "Host node of the brick is not a part of cluster"
+#define GD_MSG_BRICK_HOST_DOWN_STR "Host node of the brick is down"
+#define GD_MSG_BRICK_DELETE_STR \
+ "Deleting all the bricks of the volume is not allowed"
+#define GD_MSG_BRICK_NO_REMOVE_CMD_STR "No remove-brick command issued"
+#define GD_MSG_INCORRECT_BRICK_STR "Incorrect brick for volume"
+#define GD_MSG_MIGRATION_PROG_STR "Migration is in progress"
+#define GD_MSG_MIGRATION_FAIL_STR "Migration has failed"
+#define GD_MSG_XLATOR_NOT_DEFINED_STR "Xlator not defined"
+#define GD_MSG_DICT_CREATE_FAIL_STR "Failed to create dictionary"
+#define GD_MSG_COPY_FAIL_STR "Failed to copy"
+#define GD_MSG_UUID_GET_FAIL_STR "Failed to get the uuid of local glusterd"
+#define GD_MSG_GEO_REP_START_FAILED_STR "Georep start failed for volume"
+#define GD_MSG_REALPATH_GET_FAIL_STR "Failed to get realpath"
+#define GD_MSG_FILE_NOT_FOUND_STR "File not found in directory"
+#define GD_MSG_SRC_FILE_ERROR_STR "Error in source file"
+#define GD_MSG_DICT_UNSERIALIZE_FAIL_STR "Failed to unserialize dict"
+#define GD_MSG_VOL_ID_SET_FAIL_STR "Failed to set volume id"
+#define GD_MSG_ARBITER_BRICK_SET_INFO_FAIL_STR \
+ "Failed to add arbiter info to brick"
+#define GD_MSG_NO_MEMORY_STR "Out of memory"
+#define GD_MSG_GLUSTERD_UMOUNT_FAIL_STR "Failed to unmount path"
+#define GD_MSG_PEER_ADD_FAIL_STR "Failed to add new peer"
+#define GD_MSG_BRICK_GET_INFO_FAIL_STR "Failed to get brick info"
+#define GD_MSG_STRCHR_FAIL_STR "Failed to get the character"
+#define GD_MSG_SPLIT_FAIL_STR "Failed to split"
+#define GD_MSG_VOLINFO_GET_FAIL_STR "Failed to get volinfo"
+#define GD_MSG_PEER_NOT_FOUND_STR "Failed to find peer info"
+#define GD_MSG_DICT_COPY_FAIL_STR "Failed to copy values from dictionary"
+#define GD_MSG_ALLOC_AND_COPY_UUID_FAIL_STR \
+ "Failed to allocate memory or copy uuid"
+#define GD_MSG_VOL_NOT_FOUND_STR "Volume not found"
+#define GD_MSG_PEER_DISCONNECTED_STR "Peer is disconnected"
+#define GD_MSG_QUOTA_GET_STAT_FAIL_STR "Failed to get quota status"
+#define GD_MSG_SNAP_STATUS_FAIL_STR "Failed to get status of snapd"
+#define GD_MSG_VALIDATE_FAILED_STR "Failed to validate volume"
+#define GD_MSG_VOL_NOT_STARTED_STR "Volume is not started"
+#define GD_MSG_VOL_SHD_NOT_COMP_STR "Volume is not Self-heal compatible"
+#define GD_MSG_SELF_HEALD_DISABLED_STR "Self-heal daemon is disabled"
+#define GD_MSG_NFS_GANESHA_DISABLED_STR "NFS server is disabled"
+#define GD_MSG_QUOTA_DISABLED_STR "Quota is disabled"
+#define GD_MSG_BITROT_NOT_RUNNING_STR "Bitrot is not enabled"
+#define GD_MSG_BITROT_NOT_ENABLED_STR "Volume does not have bitrot enabled"
+#define GD_MSG_SNAPD_NOT_RUNNING_STR "Snapd is not enabled"
+#define GD_MSG_STRDUP_FAILED_STR "Strdup operation failed"
+#define GD_MSG_QUORUM_CLUSTER_COUNT_GET_FAIL_STR \
+ "Failed to get quorum cluster counts"
+#define GD_MSG_GLUSTER_SERVICE_START_FAIL_STR "Failed to start glusterd service"
+#define GD_MSG_PEER_ADDRESS_GET_FAIL_STR "Failed to get the address of peer"
+#define GD_MSG_INVALID_SLAVE_STR "Volume is not a slave volume"
+#define GD_MSG_BRICK_NOT_RUNNING_STR "One or more bricks are not running"
+#define GD_MSG_BRK_MNTPATH_GET_FAIL_STR "Failed to get brick mount device"
+#define GD_MSG_SNAPSHOT_NOT_THIN_PROVISIONED_STR \
+ "Snapshot is supported only for thin provisioned LV."
+#define GD_MSG_SNAP_DEVICE_NAME_GET_FAIL_STR \
+ "Failed to copy snapshot device name"
+#define GD_MSG_SNAP_NOT_FOUND_STR "Snapshot does not exist"
+#define GD_MSG_CREATE_BRICK_DIR_FAILED_STR "Failed to create brick directory"
+#define GD_MSG_LSTAT_FAIL_STR "Lstat operation failed"
+#define GD_MSG_DIR_OP_FAILED_STR \
+ "The provided path is already present. It is not a directory"
+#define GD_MSG_BRICK_CREATION_FAIL_STR \
+ "Brick isn't allowed to be created inside glusterd's working directory."
+#define GD_MSG_BRICK_CREATE_ROOT_STR \
+ "The brick is being created in the root partition. It is recommended " \
+ "that you don't use the system's root partition for storage backend."
+#define GD_MSG_BRICK_CREATE_MNTPNT_STR \
+ "The brick is a mount point. Please create a sub-directory under the " \
+ "mount point and use that as the brick directory."
+#define GD_MSG_CREATE_GLUSTER_DIR_FAILED_STR \
+ "Failed to create glusterfs directory"
+#define GD_MSG_VOLINFO_IMPORT_FAIL_STR "Volume is not yet imported"
+#define GD_MSG_BRICK_SET_INFO_FAIL_STR \
+ "Failed to add brick mount details to dict"
+#define GD_MSG_SET_XATTR_BRICK_FAIL_STR \
+ "Glusterfs is not supported on brick. Setting extended attribute failed"
+#define GD_MSG_SET_XATTR_FAIL_STR "Failed to set extended attribute"
+#define GD_MSG_REMOVE_XATTR_FAIL_STR "Failed to remove extended attribute"
+#define GD_MSG_XLATOR_SET_OPT_FAIL_STR "Failed to set xlator type"
+#define GD_MSG_XLATOR_LINK_FAIL_STR \
+ "Failed to do the link of xlator with children"
+#define GD_MSG_READ_ERROR_STR "Failed to read directory"
+#define GD_MSG_INCOMPATIBLE_VALUE_STR "Incompatible transport type"
+#define GD_MSG_VOL_STOP_ARGS_GET_FAILED_STR "Failed to get volume stop args"
+#define GD_MSG_FRAME_CREATE_FAIL_STR "Failed to create frame"
+#define GD_MSG_VOLUME_NOT_IMPORTED_STR "Volume has not been imported"
+#define GD_MSG_ADD_BRICK_MNT_INFO_FAIL_STR \
+ "Failed to add brick mount details to dict"
+#define GD_MSG_GET_MNT_ENTRY_INFO_FAIL_STR "Failed to get mount entry details"
+#define GD_MSG_BRICKPATH_ROOT_GET_FAIL_STR "Failed to get brick root details"
+#define GD_MSG_VOL_INFO_REQ_RECVD_STR "Received get volume info req"
+#define GD_MSG_NO_FLAG_SET_STR "No flags set"
+#define GD_MSG_CREATE_DIR_FAILED_STR "Failed to create directory"
+#define GD_MSG_POST_HOOK_STUB_INIT_FAIL_STR \
+ "Failed to initialize post hooks stub"
+#define GD_MSG_FILE_OP_FAILED_STR "File operation failed"
+#define GD_MSG_INODE_SIZE_GET_FAIL_STR "Failed to get inode size"
+#define GD_MSG_CMD_EXEC_FAIL_STR "Command execution failed"
+#define GD_MSG_XLATOR_CREATE_FAIL_STR "Failed to create xlator"
+#define GD_MSG_CLRCLK_VOL_REQ_RCVD_STR "Received clear-locks request for volume"
+#define GD_MSG_BRK_PORT_NUM_GET_FAIL_STR \
+ "Couldn't get port number of local bricks"
+#define GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL_STR \
+ "Creating mount directory for clear-locks failed"
+#define GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL_STR \
+ "Failed to mount clear-locks maintenance client"
+#define GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL_STR \
+ "Failed to unmount clear-locks mount point"
+#define GD_MSG_CLRCLK_SND_CMD_FAIL_STR "Failed to send command for clear-locks"
+#define GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL_STR \
+ "Failed to allocate memory or get serialized length of dict"
+#define GD_MSG_GET_XATTR_FAIL_STR "Failed to get extended attribute"
#endif /* !_GLUSTERD_MESSAGES_H_ */
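
The *_STR macros above give every message ID a fixed text so callers can switch from free-form gf_msg() to structured gf_smsg() with trailing "Key=value" fields and a NULL sentinel. A rough stand-in for that calling convention; the real gf_smsg() signature is not shown in this diff, so the helper below is illustrative only:

#include <stdarg.h>
#include <stdio.h>

#define GD_MSG_DICT_GET_FAILED_STR "Dict get failed"

static void smsg(const char *domain, const char *msgstr, ...)
{
    va_list ap;
    const char *field;

    fprintf(stderr, "[%s] %s", domain, msgstr);
    va_start(ap, msgstr);
    while ((field = va_arg(ap, const char *)) != NULL)
        fprintf(stderr, " {%s}", field);
    va_end(ap);
    fputc('\n', stderr);
}

int main(void)
{
    /* mirrors gf_smsg(this->name, ..., GD_MSG_DICT_GET_FAILED,
     *                 "Key=count", NULL) from the hunks above */
    smsg("glusterd", GD_MSG_DICT_GET_FAILED_STR, "Key=count", NULL);
    return 0;
}
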
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index c8b080cc0ca..1069688a89d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -165,6 +165,7 @@ glusterd_handle_mgmt_v3_lock_fn(rpcsvc_request_t *req)
ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
if (!ctx) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
ret = -1;
goto out;
}
@@ -174,6 +175,7 @@ glusterd_handle_mgmt_v3_lock_fn(rpcsvc_request_t *req)
ctx->dict = dict_new();
if (!ctx->dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -181,8 +183,8 @@ glusterd_handle_mgmt_v3_lock_fn(rpcsvc_request_t *req)
ret = dict_unserialize(lock_req.dict.dict_val, lock_req.dict.dict_len,
&ctx->dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
}
@@ -264,8 +266,8 @@ glusterd_mgmt_v3_pre_validate_send_resp(rpcsvc_request_t *req, int32_t op,
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
@@ -315,20 +317,21 @@ glusterd_handle_pre_validate_fn(rpcsvc_request_t *req)
}
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
}
rsp_dict = dict_new();
if (!rsp_dict) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
return -1;
}
@@ -391,8 +394,8 @@ glusterd_mgmt_v3_brick_op_send_resp(rpcsvc_request_t *req, int32_t op,
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
@@ -441,20 +444,21 @@ glusterd_handle_brick_op_fn(rpcsvc_request_t *req)
}
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
}
rsp_dict = dict_new();
if (!rsp_dict) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
return -1;
}
@@ -518,8 +522,8 @@ glusterd_mgmt_v3_commit_send_resp(rpcsvc_request_t *req, int32_t op,
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
@@ -569,20 +573,21 @@ glusterd_handle_commit_fn(rpcsvc_request_t *req)
}
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
}
rsp_dict = dict_new();
if (!rsp_dict) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
return -1;
}
@@ -621,6 +626,136 @@ out:
}
static int
+glusterd_mgmt_v3_post_commit_send_resp(rpcsvc_request_t *req, int32_t op,
+ int32_t status, char *op_errstr,
+ uint32_t op_errno, dict_t *rsp_dict)
+{
+ gd1_mgmt_v3_post_commit_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op = op;
+ rsp.op_errno = op_errno;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
+ goto out;
+ }
+
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_rsp);
+
+ GF_FREE(rsp.dict.dict_val);
+out:
+ gf_msg_debug(this->name, 0, "Responded to post commit, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_post_commit_fn(rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_post_commit_req op_req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode post commit "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
+
+ ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
+ goto out;
+ }
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
+ return -1;
+ }
+
+ ret = gd_mgmt_v3_post_commit_fn(op_req.op, dict, &op_errstr, &op_errno,
+ rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "post commit failed on operation %s", gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_post_commit_send_resp(req, op_req.op, ret, op_errstr,
+ op_errno, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
+ "Failed to send post commit "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
+
+ free(op_req.dict.dict_val);
+
+ if (dict)
+ dict_unref(dict);
+
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+
+ /* Return 0 from handler to avoid double deletion of req obj */
+ return 0;
+}
+
+static int
glusterd_mgmt_v3_post_validate_send_resp(rpcsvc_request_t *req, int32_t op,
int32_t status, char *op_errstr,
dict_t *rsp_dict)
@@ -646,8 +781,8 @@ glusterd_mgmt_v3_post_validate_send_resp(rpcsvc_request_t *req, int32_t op,
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
@@ -696,20 +831,21 @@ glusterd_handle_post_validate_fn(rpcsvc_request_t *req)
}
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
}
rsp_dict = dict_new();
if (!rsp_dict) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
return -1;
}
@@ -867,6 +1003,7 @@ glusterd_handle_mgmt_v3_unlock_fn(rpcsvc_request_t *req)
ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
if (!ctx) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL);
ret = -1;
goto out;
}
@@ -876,6 +1013,7 @@ glusterd_handle_mgmt_v3_unlock_fn(rpcsvc_request_t *req)
ctx->dict = dict_new();
if (!ctx->dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -883,8 +1021,8 @@ glusterd_handle_mgmt_v3_unlock_fn(rpcsvc_request_t *req)
ret = dict_unserialize(lock_req.dict.dict_val, lock_req.dict.dict_len,
&ctx->dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
}
@@ -955,6 +1093,12 @@ glusterd_handle_commit(rpcsvc_request_t *req)
}
static int
+glusterd_handle_post_commit(rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler(req, glusterd_handle_post_commit_fn);
+}
+
+static int
glusterd_handle_post_validate(rpcsvc_request_t *req)
{
return glusterd_big_locked_handler(req, glusterd_handle_post_validate_fn);
@@ -966,25 +1110,28 @@ glusterd_handle_mgmt_v3_unlock(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, glusterd_handle_mgmt_v3_unlock_fn);
}
-rpcsvc_actor_t gd_svc_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
- [GLUSTERD_MGMT_V3_NULL] = {"NULL", GLUSTERD_MGMT_V3_NULL,
- glusterd_mgmt_v3_null, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", GLUSTERD_MGMT_V3_LOCK,
- glusterd_handle_mgmt_v3_lock, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_PRE_VALIDATE] = {"PRE_VAL", GLUSTERD_MGMT_V3_PRE_VALIDATE,
- glusterd_handle_pre_validate, NULL, 0,
- DRC_NA},
- [GLUSTERD_MGMT_V3_BRICK_OP] = {"BRCK_OP", GLUSTERD_MGMT_V3_BRICK_OP,
- glusterd_handle_brick_op, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_COMMIT] = {"COMMIT", GLUSTERD_MGMT_V3_COMMIT,
- glusterd_handle_commit, NULL, 0, DRC_NA},
+static rpcsvc_actor_t gd_svc_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
+ [GLUSTERD_MGMT_V3_NULL] = {"NULL", glusterd_mgmt_v3_null, NULL,
+ GLUSTERD_MGMT_V3_NULL, DRC_NA, 0},
+ [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", glusterd_handle_mgmt_v3_lock,
+ NULL, GLUSTERD_MGMT_V3_LOCK, DRC_NA, 0},
+ [GLUSTERD_MGMT_V3_PRE_VALIDATE] = {"PRE_VAL", glusterd_handle_pre_validate,
+ NULL, GLUSTERD_MGMT_V3_PRE_VALIDATE,
+ DRC_NA, 0},
+ [GLUSTERD_MGMT_V3_BRICK_OP] = {"BRCK_OP", glusterd_handle_brick_op, NULL,
+ GLUSTERD_MGMT_V3_BRICK_OP, DRC_NA, 0},
+ [GLUSTERD_MGMT_V3_COMMIT] = {"COMMIT", glusterd_handle_commit, NULL,
+ GLUSTERD_MGMT_V3_COMMIT, DRC_NA, 0},
+ [GLUSTERD_MGMT_V3_POST_COMMIT] = {"POST_COMMIT",
+ glusterd_handle_post_commit, NULL,
+ GLUSTERD_MGMT_V3_POST_COMMIT, DRC_NA, 0},
[GLUSTERD_MGMT_V3_POST_VALIDATE] = {"POST_VAL",
- GLUSTERD_MGMT_V3_POST_VALIDATE,
- glusterd_handle_post_validate, NULL, 0,
- DRC_NA},
- [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK", GLUSTERD_MGMT_V3_UNLOCK,
- glusterd_handle_mgmt_v3_unlock, NULL, 0,
- DRC_NA},
+ glusterd_handle_post_validate, NULL,
+ GLUSTERD_MGMT_V3_POST_VALIDATE, DRC_NA,
+ 0},
+ [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK",
+ glusterd_handle_mgmt_v3_unlock, NULL,
+ GLUSTERD_MGMT_V3_UNLOCK, DRC_NA, 0},
};
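The actor table above is rebuilt with the new rpcsvc_actor_t field order (procname, actor, vector sizer, procnum, DRC type, unprivileged flag) and gains a POST_COMMIT entry. Positional initializers like these must be rewritten whenever the struct layout changes; a sketch of one entry with designated initializers (field names are assumed from the positional order used above and may not match rpcsvc.h exactly) would be immune to such reordering:

    /* Hypothetical designated-initializer form of one table entry. */
    static rpcsvc_actor_t post_commit_actor = {
        .procname = "POST_COMMIT",
        .actor = glusterd_handle_post_commit,
        .vector_actor = NULL,
        .procnum = GLUSTERD_MGMT_V3_POST_COMMIT,
        .drc_type = DRC_NA,
        .unprivileged = 0,
    };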
struct rpcsvc_program gd_svc_mgmt_v3_prog = {
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 6534530b52f..bca7221062b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -19,6 +19,7 @@
#include "glusterd-locks.h"
#include "glusterd-mgmt.h"
#include "glusterd-op-sm.h"
+#include "glusterd-server-quorum.h"
#include "glusterd-volgen.h"
#include "glusterd-store.h"
#include "glusterd-snapshot-utils.h"
@@ -51,14 +52,14 @@ gd_mgmt_v3_collate_errors(struct syncargs *args, int op_ret, int op_errno,
args->op_ret = op_ret;
args->op_errno = op_errno;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(peerid, NULL);
if (peerinfo)
peer_str = gf_strdup(peerinfo->hostname);
else
peer_str = gf_strdup(uuid_utoa(uuid));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
is_operrstr_blk = (op_errstr && strcmp(op_errstr, ""));
err_string = (is_operrstr_blk) ? op_errstr : err_str;
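Throughout this change, raw rcu_read_lock()/rcu_read_unlock() calls are replaced with RCU_READ_LOCK/RCU_READ_UNLOCK. The macro bodies live in glusterd-rcu.h and are not shown in this diff; a plausible minimal sketch, assuming the intent is to centralize the URCU calls so tracing or lock-state checks can be added in one place:

    /* Hypothetical wrapper sketch; the real definitions in glusterd-rcu.h
     * may differ. Centralizing the calls lets debug hooks be added once. */
    #define RCU_READ_LOCK                                                  \
        do {                                                               \
            gf_msg_trace("glusterd", 0, "Acquiring RCU read lock");        \
            rcu_read_lock();                                               \
        } while (0)

    #define RCU_READ_UNLOCK                                                \
        do {                                                               \
            rcu_read_unlock();                                             \
            gf_msg_trace("glusterd", 0, "Released RCU read lock");         \
        } while (0)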
@@ -85,6 +86,11 @@ gd_mgmt_v3_collate_errors(struct syncargs *args, int op_ret, int op_errno,
peer_str, err_string);
break;
}
+ case GLUSTERD_MGMT_V3_POST_COMMIT: {
+ snprintf(op_err, sizeof(op_err), "Post commit failed on %s. %s",
+ peer_str, err_string);
+ break;
+ }
case GLUSTERD_MGMT_V3_POST_VALIDATE: {
snprintf(op_err, sizeof(op_err),
"Post Validation failed on %s. %s", peer_str,
@@ -159,7 +165,6 @@ gd_mgmt_v3_pre_validate_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
goto out;
}
break;
- case GD_OP_ADD_TIER_BRICK:
case GD_OP_ADD_BRICK:
ret = glusterd_op_stage_add_brick(dict, op_errstr, rsp_dict);
if (ret) {
@@ -187,15 +192,12 @@ gd_mgmt_v3_pre_validate_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
goto out;
}
break;
- case GD_OP_TIER_START_STOP:
- case GD_OP_TIER_STATUS:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_REMOVE_TIER_BRICK:
- ret = glusterd_op_stage_tier(dict, op_errstr, rsp_dict);
+ case GD_OP_REMOVE_BRICK:
+ ret = glusterd_op_stage_remove_brick(dict, op_errstr);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMAND_NOT_FOUND,
- "tier "
- "prevalidation failed");
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "Remove brick prevalidation failed.");
goto out;
}
break;
@@ -210,6 +212,25 @@ gd_mgmt_v3_pre_validate_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
}
break;
+ case GD_OP_PROFILE_VOLUME:
+ ret = glusterd_op_stage_stats_volume(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "prevalidation failed for profile operation.");
+ goto out;
+ }
+ break;
+ case GD_OP_REBALANCE:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+ ret = glusterd_mgmt_v3_op_stage_rebalance(dict, op_errstr);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Rebalance Prevalidate Failed");
+ goto out;
+ }
+ break;
+
case GD_OP_MAX_OPVERSION:
ret = 0;
break;
@@ -247,6 +268,19 @@ gd_mgmt_v3_brick_op_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
}
break;
}
+ case GD_OP_PROFILE_VOLUME:
+ case GD_OP_REBALANCE:
+ case GD_OP_DEFRAG_BRICK_VOLUME: {
+ ret = gd_brick_op_phase(op, rsp_dict, dict, op_errstr);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "%s brickop "
+ "failed",
+ gd_op_list[op]);
+ goto out;
+ }
+ break;
+ }
default:
break;
}
@@ -263,7 +297,6 @@ gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
{
int32_t ret = -1;
xlator_t *this = NULL;
- int32_t cmd = 0;
this = THIS;
GF_ASSERT(this);
@@ -319,6 +352,15 @@ gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
}
break;
}
+ case GD_OP_REMOVE_BRICK: {
+ ret = glusterd_op_remove_brick(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Remove-brick commit failed.");
+ goto out;
+ }
+ break;
+ }
case GD_OP_RESET_BRICK: {
ret = glusterd_op_reset_brick(dict, rsp_dict);
if (ret) {
@@ -337,52 +379,67 @@ gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
}
break;
}
- case GD_OP_TIER_START_STOP: {
- ret = glusterd_op_tier_start_stop(dict, op_errstr, rsp_dict);
+ case GD_OP_PROFILE_VOLUME: {
+ ret = glusterd_op_stats_volume(dict, op_errstr, rsp_dict);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
- "tier commit failed.");
+ "commit failed for volume profile operation.");
goto out;
}
break;
}
- case GD_OP_REMOVE_TIER_BRICK: {
- ret = glusterd_op_remove_tier_brick(dict, op_errstr, rsp_dict);
+ case GD_OP_REBALANCE:
+ case GD_OP_DEFRAG_BRICK_VOLUME: {
+ ret = glusterd_mgmt_v3_op_rebalance(dict, op_errstr, rsp_dict);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
- "tier detach commit failed.");
+ "Rebalance Commit Failed");
goto out;
}
- ret = dict_get_int32n(dict, "rebalance-command",
- SLEN("rebalance-command"), &cmd);
- if (ret) {
- gf_msg_debug(this->name, 0, "cmd not found");
- goto out;
- }
-
- if (cmd != GF_DEFRAG_CMD_DETACH_STOP)
- break;
+ break;
}
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_TIER_STATUS: {
- ret = glusterd_op_tier_status(dict, op_errstr, rsp_dict, op);
+
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_post_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ uint32_t *op_errno, dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
+ GF_ASSERT(rsp_dict);
+
+ switch (op) {
+ case GD_OP_ADD_BRICK:
+ ret = glusterd_post_commit_add_brick(dict, op_errstr);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_COMMIT_OP_FAIL,
- "tier status commit failed");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Add-brick post commit failed.");
goto out;
}
break;
- }
- case GD_OP_ADD_TIER_BRICK: {
- ret = glusterd_op_add_tier_brick(dict, op_errstr);
+ case GD_OP_REPLACE_BRICK:
+ ret = glusterd_post_commit_replace_brick(dict, op_errstr);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
- "tier add-brick commit failed.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Replace-brick post commit failed.");
goto out;
}
break;
- }
-
default:
break;
}
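gd_mgmt_v3_post_commit_fn above is the local dispatcher for the new post-commit phase; only add-brick and replace-brick hook into it so far. The per-op handlers are declared later in glusterd-mgmt.h and defined elsewhere in this change; a hedged sketch of their expected shape, assuming they follow the usual dict-in/op_errstr-out convention of the other phase handlers:

    /* Hypothetical shape only; the real glusterd_post_commit_add_brick is
     * defined in glusterd-brick-ops.c and may differ in detail. */
    int
    glusterd_post_commit_add_brick(dict_t *dict, char **op_errstr)
    {
        char *volname = NULL;
        int ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);

        if (ret) {
            gf_asprintf(op_errstr, "volname missing in post-commit ctx");
            return ret;
        }
        /* op-specific bookkeeping for the already-committed add-brick */
        return 0;
    }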
@@ -401,7 +458,6 @@ gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict,
xlator_t *this = NULL;
char *volname = NULL;
glusterd_volinfo_t *volinfo = NULL;
- glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT(this);
@@ -466,12 +522,6 @@ gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict,
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
break;
}
case GD_OP_STOP_VOLUME: {
@@ -492,49 +542,6 @@ gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict,
}
break;
}
- case GD_OP_ADD_TIER_BRICK: {
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get"
- " volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
- "Unable to "
- "allocate memory");
- goto out;
- }
- ret = glusterd_create_volfiles_and_notify_services(volinfo);
- if (ret)
- goto out;
- ret = glusterd_store_volinfo(volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- goto out;
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get"
- " volume name");
- goto out;
- }
-
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "dict set "
- "failed");
- goto out;
- }
- ret = -1;
- svc = &(volinfo->tierd.svc);
- ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
default:
break;
@@ -640,15 +647,21 @@ gd_mgmt_v3_lock(glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
&req.dict.dict_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, my_uuid);
req.op = op;
GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL);
goto out;
+ }
ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
&gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_LOCK,
@@ -705,10 +718,13 @@ glusterd_mgmt_v3_initiate_lockdown(glusterd_op_t op, dict_t *dict,
/* Sending mgmt_v3 lock req to other nodes in the cluster */
gd_syncargs_init(&args, NULL);
- synctask_barrier_init((&args));
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -726,7 +742,7 @@ glusterd_mgmt_v3_initiate_lockdown(glusterd_op_t op, dict_t *dict,
gd_mgmt_v3_lock(op, dict, peerinfo, &args, MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -796,7 +812,6 @@ glusterd_pre_validate_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp)
break;
case GD_OP_START_VOLUME:
case GD_OP_ADD_BRICK:
- case GD_OP_ADD_TIER_BRICK:
ret = glusterd_aggr_brick_mount_dirs(aggr, rsp);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -815,10 +830,10 @@ glusterd_pre_validate_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp)
goto out;
}
case GD_OP_STOP_VOLUME:
- case GD_OP_TIER_STATUS:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_TIER_START_STOP:
- case GD_OP_REMOVE_TIER_BRICK:
+ case GD_OP_REMOVE_BRICK:
+ case GD_OP_PROFILE_VOLUME:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+ case GD_OP_REBALANCE:
break;
case GD_OP_MAX_OPVERSION:
break;
@@ -954,15 +969,21 @@ gd_mgmt_v3_pre_validate_req(glusterd_op_t op, dict_t *op_ctx,
ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
&req.dict.dict_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, my_uuid);
req.op = op;
GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL);
goto out;
+ }
ret = gd_syncop_submit_request(
peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog,
@@ -1004,6 +1025,16 @@ glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
goto out;
}
+ if (op == GD_OP_PROFILE_VOLUME || op == GD_OP_STOP_VOLUME ||
+ op == GD_OP_REBALANCE || op == GD_OP_REMOVE_BRICK) {
+ ret = glusterd_validate_quorum(this, op, req_dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
+ "Server quorum not met. Rejecting operation.");
+ goto out;
+ }
+ }
+
/* Pre Validation on local node */
ret = gd_mgmt_v3_pre_validate_fn(op, req_dict, op_errstr, rsp_dict,
op_errno);
@@ -1043,10 +1074,13 @@ glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
/* Sending Pre Validation req to other nodes in the cluster */
gd_syncargs_init(&args, req_dict);
- synctask_barrier_init((&args));
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1065,7 +1099,7 @@ glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1120,9 +1154,11 @@ glusterd_mgmt_v3_build_payload(dict_t **req, char **op_errstr, dict_t *dict,
case GD_OP_START_VOLUME:
case GD_OP_STOP_VOLUME:
case GD_OP_ADD_BRICK:
+ case GD_OP_REMOVE_BRICK:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_REPLACE_BRICK:
case GD_OP_RESET_BRICK:
- case GD_OP_ADD_TIER_BRICK: {
+ case GD_OP_PROFILE_VOLUME: {
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, errno,
@@ -1139,12 +1175,29 @@ glusterd_mgmt_v3_build_payload(dict_t **req, char **op_errstr, dict_t *dict,
}
dict_copy(dict, req_dict);
} break;
- case GD_OP_TIER_START_STOP:
- case GD_OP_REMOVE_TIER_BRICK:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_TIER_STATUS:
+
+ case GD_OP_REBALANCE: {
+ if (gd_set_commit_hash(dict) != 0) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_DICT_GET_FAILED,
+ "volname is not present in "
+ "operation ctx");
+ goto out;
+ }
+
+ if (strcasecmp(volname, "all")) {
+ ret = glusterd_dict_set_volid(dict, volname, op_errstr);
+ if (ret)
+ goto out;
+ }
dict_copy(dict, req_dict);
- break;
+ } break;
+
default:
break;
}
@@ -1167,6 +1220,7 @@ gd_mgmt_v3_brick_op_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
call_frame_t *frame = NULL;
int32_t op_ret = -1;
int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
xlator_t *this = NULL;
uuid_t *peerid = NULL;
@@ -1196,20 +1250,53 @@ gd_mgmt_v3_brick_op_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
if (ret < 0)
goto out;
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new();
+
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
+ if (ret < 0) {
+ goto out;
+ } else {
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
gf_uuid_copy(args->uuid, rsp.uuid);
+ pthread_mutex_lock(&args->lock_dict);
+ {
+ if (rsp.op == GD_OP_DEFRAG_BRICK_VOLUME ||
+ rsp.op == GD_OP_PROFILE_VOLUME)
+ ret = glusterd_syncop_aggr_rsp_dict(rsp.op, args->dict, rsp_dict);
+ }
+ pthread_mutex_unlock(&args->lock_dict);
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
out:
+
gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
GLUSTERD_MGMT_V3_BRICK_OP, *peerid, rsp.uuid);
if (rsp.op_errstr)
free(rsp.op_errstr);
- if (rsp.dict.dict_val)
- free(rsp.dict.dict_val);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+
GF_FREE(peerid);
/* req->rpc_status set to -1 means, STACK_DESTROY will be called from
* the caller function.
@@ -1250,15 +1337,21 @@ gd_mgmt_v3_brick_op_req(glusterd_op_t op, dict_t *op_ctx,
ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
&req.dict.dict_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, my_uuid);
req.op = op;
GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL);
goto out;
+ }
ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
&gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_BRICK_OP,
@@ -1271,8 +1364,8 @@ out:
}
int
-glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *req_dict, char **op_errstr,
- uint32_t txn_generation)
+glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, uint32_t txn_generation)
{
int32_t ret = -1;
int32_t peer_cnt = 0;
@@ -1319,16 +1412,28 @@ glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *req_dict, char **op_errstr,
}
goto out;
}
+ if (op == GD_OP_DEFRAG_BRICK_VOLUME || op == GD_OP_PROFILE_VOLUME) {
+ ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ goto out;
+ }
+ }
dict_unref(rsp_dict);
rsp_dict = NULL;
/* Sending brick op req to other nodes in the cluster */
- gd_syncargs_init(&args, NULL);
- synctask_barrier_init((&args));
+ gd_syncargs_init(&args, op_ctx);
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1347,7 +1452,7 @@ glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *req_dict, char **op_errstr,
peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1495,15 +1600,21 @@ gd_mgmt_v3_commit_req(glusterd_op_t op, dict_t *op_ctx,
ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
&req.dict.dict_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, my_uuid);
req.op = op;
GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL);
goto out;
+ }
ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
&gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_COMMIT,
@@ -1528,7 +1639,6 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
- int32_t count = 0;
this = THIS;
GF_ASSERT(this);
@@ -1540,6 +1650,26 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
GF_ASSERT(op_errstr);
GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
+ switch (op) {
+ case GD_OP_REBALANCE:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+
+ ret = glusterd_set_rebalance_id_in_rsp_dict(req_dict, op_ctx);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Failed to set rebalance id in dict.");
+ }
+ break;
+ case GD_OP_REMOVE_BRICK:
+ ret = glusterd_set_rebalance_id_for_remove_brick(req_dict, op_ctx);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Failed to set rebalance id for remove-brick in dict.");
+ }
+ break;
+ default:
+ break;
+ }
rsp_dict = dict_new();
if (!rsp_dict) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
@@ -1582,10 +1712,12 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
/* Sending commit req to other nodes in the cluster */
gd_syncargs_init(&args, op_ctx);
- synctask_barrier_init((&args));
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1593,22 +1725,9 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
*/
if (peerinfo->generation > txn_generation)
continue;
-
- if (!peerinfo->connected) {
- if (op == GD_OP_TIER_STATUS || op == GD_OP_DETACH_TIER_STATUS) {
- ret = dict_get_int32n(args.dict, "count", SLEN("count"),
- &count);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "failed to get index");
- count++;
- ret = dict_set_int32n(args.dict, "count", SLEN("count"), count);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "failed to set index");
- }
+ if (!peerinfo->connected)
continue;
- }
+
if (op != GD_OP_SYNC_VOLUME &&
peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
continue;
@@ -1617,7 +1736,7 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1647,6 +1766,274 @@ out:
}
int32_t
+gd_mgmt_v3_post_commit_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ gd1_mgmt_v3_post_commit_rsp rsp = {
+ {0},
+ };
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerid = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
+
+ ret = xdr_to_generic(*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_rsp);
+ if (ret < 0)
+ goto out;
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new();
+
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
+ if (ret < 0) {
+ free(rsp.dict.dict_val);
+ goto out;
+ } else {
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
+ gf_uuid_copy(args->uuid, rsp.uuid);
+ pthread_mutex_lock(&args->lock_dict);
+ {
+ ret = glusterd_syncop_aggr_rsp_dict(rsp.op, args->dict, rsp_dict);
+ }
+ pthread_mutex_unlock(&args->lock_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+
+out:
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+
+ gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
+ GLUSTERD_MGMT_V3_POST_COMMIT, *peerid, rsp.uuid);
+ GF_FREE(peerid);
+
+ if (rsp.op_errstr)
+ free(rsp.op_errstr);
+
+ /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
+ * the caller function.
+ */
+ if (req->rpc_status != -1)
+ STACK_DESTROY(frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
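Note the ownership handoff in the two callbacks above: once rsp_dict->extra_stdfree is set to the XDR-allocated buffer, the dict frees that buffer (with free(), not GF_FREE) on the final unref, which is why the old unconditional free(rsp.dict.dict_val) disappears and dict_unref(rsp_dict) suffices. The contract, sketched:

    /* Ownership sketch: buf is malloc'd by the XDR layer. */
    dict_t *d = dict_new();
    if (dict_unserialize(buf, buflen, &d) == 0) {
        d->extra_stdfree = buf; /* dict now owns buf */
        /* ... use d ... */
        dict_unref(d);          /* frees buf as well */
    } else {
        free(buf);              /* unserialize failed: still ours */
        dict_unref(d);
    }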
+
+int32_t
+gd_mgmt_v3_post_commit_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ gd_mgmt_v3_post_commit_cbk_fn);
+}
+
+int
+gd_mgmt_v3_post_commit_req(glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo, struct syncargs *args,
+ uuid_t my_uuid, uuid_t recv_uuid)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_post_commit_req req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
+ goto out;
+ }
+
+ gf_uuid_copy(req.uuid, my_uuid);
+ req.op = op;
+
+ GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL);
+ goto out;
+ }
+
+ ret = gd_syncop_submit_request(
+ peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_POST_COMMIT, gd_mgmt_v3_post_commit_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_req);
+out:
+ GF_FREE(req.dict.dict_val);
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_post_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, uint32_t *op_errno,
+ uint32_t txn_generation)
+{
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(req_dict);
+ GF_ASSERT(op_errstr);
+ GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Post commit on local node */
+ ret = gd_mgmt_v3_post_commit_fn(op, req_dict, op_errstr, op_errno,
+ rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Post commit failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf(op_errstr,
+ "Post commit failed "
+ "on localhost. Please "
+ "check log file for details.");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ goto out;
+ }
+
+ dict_unref(rsp_dict);
+ rsp_dict = NULL;
+
+ /* Sending post commit req to other nodes in the cluster */
+ gd_syncargs_init(&args, op_ctx);
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+ peer_cnt = 0;
+
+ RCU_READ_LOCK;
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+ if (!peerinfo->connected)
+ continue;
+
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ gd_mgmt_v3_post_commit_req(op, req_dict, peerinfo, &args, MY_UUID,
+ peer_uuid);
+ peer_cnt++;
+ }
+ RCU_READ_UNLOCK;
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Post commit failed on peers");
+
+ if (args.errstr)
+ *op_errstr = gf_strdup(args.errstr);
+ }
+
+ ret = args.op_ret;
+ *op_errno = args.op_errno;
+
+ gf_msg_debug(this->name, 0,
+ "Sent post commit req for %s to %d "
+ "peers. Returning %d",
+ gd_op_list[op], peer_cnt, ret);
+out:
+ glusterd_op_modify_op_ctx(op, op_ctx);
+ return ret;
+}
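glusterd_mgmt_v3_post_commit follows the same fan-out idiom as the other phases: run the phase locally, aggregate into op_ctx, then broadcast to every connected, befriended peer whose generation predates the transaction and wait on a synctask barrier. Note also that synctask_barrier_init() is now checked everywhere it is called. The idiom, condensed (simplified restatement of the code above, error paths elided):

    gd_syncargs_init(&args, op_ctx);
    if (synctask_barrier_init((&args)))
        goto out; /* waiting on an uninitialized barrier would hang */
    peer_cnt = 0;
    RCU_READ_LOCK;
    cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
    {
        if (peerinfo->generation > txn_generation || !peerinfo->connected)
            continue; /* joined mid-transaction or down: skip */
        gd_mgmt_v3_post_commit_req(op, req_dict, peerinfo, &args, MY_UUID,
                                   peer_uuid);
        peer_cnt++;
    }
    RCU_READ_UNLOCK;
    if (peer_cnt)
        gd_synctask_barrier_wait((&args), peer_cnt);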
+
+int32_t
gd_mgmt_v3_post_validate_cbk_fn(struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
@@ -1736,16 +2123,22 @@ gd_mgmt_v3_post_validate_req(glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
&req.dict.dict_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, my_uuid);
req.op = op;
req.op_ret = op_ret;
GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL);
goto out;
+ }
ret = gd_syncop_submit_request(
peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog,
@@ -1787,14 +2180,6 @@ glusterd_mgmt_v3_post_validate(glusterd_op_t op, int32_t op_ret, dict_t *dict,
goto out;
}
- /* Copy the contents of dict like missed snaps info to req_dict */
- if (op != GD_OP_REMOVE_TIER_BRICK)
- /* dict and req_dict has the same values during remove tier
- * brick (detach start) So this rewrite make the remove brick
- * id to become empty.
- * Avoiding to copy it retains the value. */
- dict_copy(dict, req_dict);
-
/* Post Validation on local node */
ret = gd_mgmt_v3_post_validate_fn(op, op_ret, req_dict, op_errstr,
rsp_dict);
@@ -1823,10 +2208,13 @@ glusterd_mgmt_v3_post_validate(glusterd_op_t op, int32_t op_ret, dict_t *dict,
/* Sending Post Validation req to other nodes in the cluster */
gd_syncargs_init(&args, req_dict);
- synctask_barrier_init((&args));
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1845,7 +2233,7 @@ glusterd_mgmt_v3_post_validate(glusterd_op_t op, int32_t op_ret, dict_t *dict,
MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1957,15 +2345,21 @@ gd_mgmt_v3_unlock(glusterd_op_t op, dict_t *op_ctx,
ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
&req.dict.dict_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, my_uuid);
req.op = op;
GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL);
goto out;
+ }
ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
&gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_UNLOCK,
@@ -2010,7 +2404,7 @@ glusterd_mgmt_v3_release_peer_locks(glusterd_op_t op, dict_t *dict,
if (ret)
goto out;
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -2028,7 +2422,7 @@ glusterd_mgmt_v3_release_peer_locks(glusterd_op_t op, dict_t *dict,
gd_mgmt_v3_unlock(op, dict, peerinfo, &args, MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -2057,6 +2451,173 @@ out:
}
int32_t
+glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(rpcsvc_request_t *req,
+ glusterd_op_t op,
+ dict_t *dict)
+{
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ dict_t *req_dict = NULL;
+ dict_t *tmp_dict = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *op_errstr = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_acquired = _gf_false;
+ uuid_t *originator_uuid = NULL;
+ uint32_t txn_generation = 0;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(dict);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ /* Save the peer list generation */
+ txn_generation = conf->generation;
+ cmm_smp_rmb();
+ /* This read memory barrier ensures the assignment above is not
+ * reordered past it by either the compiler or the processor.
+ */
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_uuid_copy(*originator_uuid, MY_UUID);
+ ret = dict_set_bin(dict, "originator_uuid", originator_uuid,
+ sizeof(uuid_t));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set originator_uuid.");
+ GF_FREE(originator_uuid);
+ goto out;
+ }
+
+ /* Marking the operation as complete synctasked */
+ ret = dict_set_int32(dict, "is_synctasked", _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set synctasked flag.");
+ goto out;
+ }
+
+ /* Use a copy at local unlock as cli response will be sent before
+ * the unlock and the volname in the dict might be removed */
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Unable to create dict");
+ goto out;
+ }
+ dict_copy(dict, tmp_dict);
+
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
+ ret = glusterd_mgmt_v3_initiate_lockdown(op, dict, &op_errstr, &op_errno,
+ &is_acquired, txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCKDOWN_FAIL,
+ "mgmt_v3 lockdown failed.");
+ goto out;
+ }
+
+ /* BUILD PAYLOAD */
+ ret = glusterd_mgmt_v3_build_payload(&req_dict, &op_errstr, dict, op);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL,
+ LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ goto out;
+ }
+
+ /* PRE-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_pre_validate(op, req_dict, &op_errstr, &op_errno,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Pre Validation Failed");
+ goto out;
+ }
+
+ /* BRICK-OPS */
+ ret = glusterd_mgmt_v3_brick_op(op, dict, req_dict, &op_errstr,
+ txn_generation);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "Brick Op Failed");
+ goto out;
+ }
+
+ /* COMMIT OP PHASE */
+ ret = glusterd_mgmt_v3_commit(op, dict, req_dict, &op_errstr, &op_errno,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Commit Op Failed");
+ goto out;
+ }
+
+ /* POST-COMMIT VALIDATE PHASE */
+ /* post_validate does not currently attempt to clean up after failed
+ commands, so op_ret is passed as 0 here.
+ */
+ ret = glusterd_mgmt_v3_post_validate(op, 0, dict, req_dict, &op_errstr,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
+ "Post Validation Failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ op_ret = ret;
+ /* UNLOCK PHASE FOR PEERS*/
+ (void)glusterd_mgmt_v3_release_peer_locks(op, dict, op_ret, &op_errstr,
+ is_acquired, txn_generation);
+
+ /* LOCAL VOLUME(S) UNLOCK */
+ if (is_acquired) {
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock(tmp_dict, MY_UUID);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release mgmt_v3 locks on localhost");
+ op_ret = ret;
+ }
+ }
+
+ if (op_ret && (op_errno == 0))
+ op_errno = EG_INTRNL;
+
+ if (op != GD_OP_MAX_OPVERSION) {
+ /* SEND CLI RESPONSE */
+ glusterd_op_send_cli_response(op, op_ret, op_errno, req, dict,
+ op_errstr);
+ }
+
+ if (req_dict)
+ dict_unref(req_dict);
+
+ if (tmp_dict)
+ dict_unref(tmp_dict);
+
+ if (op_errstr) {
+ GF_FREE(op_errstr);
+ op_errstr = NULL;
+ }
+
+ return 0;
+}
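The new entry point runs the full v3 pipeline with a brick-op phase wedged between pre-validation and commit (lock, payload, pre-validate, brick op, commit, post-validate, unlock), which rebalance and profile need so that brick-side state is collected before the commit. A hedged caller-side sketch (hypothetical handler; the real CLI handlers live in glusterd-handler.c and the op-specific files):

    /* Hypothetical wiring for an op that needs the brick-op phase. */
    static int
    glusterd_handle_example_profile(rpcsvc_request_t *req, dict_t *dict)
    {
        return glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(
            req, GD_OP_PROFILE_VOLUME, dict);
    }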
+
+int32_t
glusterd_mgmt_v3_initiate_all_phases(rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict)
{
@@ -2161,6 +2722,15 @@ glusterd_mgmt_v3_initiate_all_phases(rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
+ /* POST COMMIT OP PHASE */
+ ret = glusterd_mgmt_v3_post_commit(op, dict, req_dict, &op_errstr,
+ &op_errno, txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Post commit Op Failed");
+ goto out;
+ }
+
/* POST-COMMIT VALIDATE PHASE */
/* post_validate does not currently attempt to clean up after failed
commands, so op_ret is passed as 0 here.
@@ -2398,7 +2968,8 @@ glusterd_mgmt_v3_initiate_snap_phases(rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
- ret = glusterd_mgmt_v3_brick_op(op, req_dict, &op_errstr, txn_generation);
+ ret = glusterd_mgmt_v3_brick_op(op, dict, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
"Brick Ops Failed");
@@ -2458,7 +3029,8 @@ unbarrier:
goto out;
}
- ret = glusterd_mgmt_v3_brick_op(op, req_dict, &op_errstr, txn_generation);
+ ret = glusterd_mgmt_v3_brick_op(op, dict, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
@@ -2489,7 +3061,7 @@ out:
ret = glusterd_mgmt_v3_post_validate(op, op_ret, dict, req_dict, &op_errstr,
txn_generation);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
"Post Validation Failed");
op_ret = -1;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
index 9b57f4cb833..27dd1849519 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
@@ -28,6 +28,10 @@ gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
uint32_t *op_errno, dict_t *rsp_dict);
int32_t
+gd_mgmt_v3_post_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ uint32_t *op_errno, dict_t *rsp_dict);
+
+int32_t
gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict,
char **op_errstr, dict_t *rsp_dict);
@@ -36,6 +40,11 @@ glusterd_mgmt_v3_initiate_all_phases(rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict);
int32_t
+glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(rpcsvc_request_t *req,
+ glusterd_op_t op,
+ dict_t *dict);
+
+int32_t
glusterd_mgmt_v3_initiate_snap_phases(rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict);
@@ -79,4 +88,10 @@ glusterd_reset_brick_prevalidate(dict_t *dict, char **op_errstr,
dict_t *rsp_dict);
int
glusterd_op_reset_brick(dict_t *dict, dict_t *rsp_dict);
+
+int
+glusterd_post_commit_add_brick(dict_t *dict, char **op_errstr);
+
+int
+glusterd_post_commit_replace_brick(dict_t *dict, char **op_errstr);
#endif /* _GLUSTERD_MGMT_H_ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
index 356a4bcca67..645d845ee76 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
@@ -11,21 +11,21 @@
#include <fnmatch.h>
#include <pwd.h>
-#include "globals.h"
-#include "glusterfs.h"
-#include "compat.h"
-#include "dict.h"
-#include "list.h"
-#include "logging.h"
-#include "syscall.h"
-#include "defaults.h"
-#include "compat.h"
-#include "compat-errno.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/list.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/run.h>
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-utils.h"
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "glusterd-mountbroker.h"
#include "glusterd-op-sm.h"
#include "glusterd-messages.h"
@@ -81,6 +81,7 @@ parse_mount_pattern_desc(gf_mount_spec_t *mspec, char *pdesc)
mspec->patterns = GF_CALLOC(mspec->len, sizeof(*mspec->patterns),
gf_gld_mt_mount_pattern);
if (!mspec->patterns) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
ret = -1;
goto out;
}
@@ -233,7 +234,7 @@ const char *georep_mnt_desc_template =
"user-map-root=%s "
")"
"SUB+("
- "log-file=" DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
+ "log-file=%s/" GEOREP
"*/* "
"log-level=* "
"volfile-id=* "
@@ -242,21 +243,9 @@ const char *georep_mnt_desc_template =
"%s"
")";
-const char *hadoop_mnt_desc_template =
- "SUP("
- "volfile-server=%s "
- "client-pid=%d "
- "volfile-id=%s "
- "user-map-root=%s "
- ")"
- "SUB+("
- "log-file=" DEFAULT_LOG_FILE_DIRECTORY "/" GHADOOP
- "*/* "
- "log-level=* "
- ")";
-
int
-make_georep_mountspec(gf_mount_spec_t *mspec, const char *volnames, char *user)
+make_georep_mountspec(gf_mount_spec_t *mspec, const char *volnames, char *user,
+ char *logdir)
{
char *georep_mnt_desc = NULL;
char *meetspec = NULL;
@@ -273,8 +262,11 @@ make_georep_mountspec(gf_mount_spec_t *mspec, const char *volnames, char *user)
int ret = 0;
vols = gf_strdup((char *)volnames);
- if (!vols)
+ if (!vols) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "Volume name=%s", volnames, NULL);
goto out;
+ }
for (vc = 1, p = vols; *p; p++) {
if (*p == ',')
@@ -282,8 +274,10 @@ make_georep_mountspec(gf_mount_spec_t *mspec, const char *volnames, char *user)
}
siz = strlen(volnames) + vc * SLEN("volfile-id=");
meetspec = GF_CALLOC(1, siz + 1, gf_gld_mt_georep_meet_spec);
- if (!meetspec)
+ if (!meetspec) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
for (p = vols;;) {
vol = strtok_r(p, ",", &savetok);
@@ -299,7 +293,7 @@ make_georep_mountspec(gf_mount_spec_t *mspec, const char *volnames, char *user)
}
ret = gf_asprintf(&georep_mnt_desc, georep_mnt_desc_template,
- GF_CLIENT_PID_GSYNCD, user, meetspec);
+ GF_CLIENT_PID_GSYNCD, user, logdir, meetspec);
if (ret == -1) {
georep_mnt_desc = NULL;
goto out;
@@ -322,21 +316,6 @@ out:
return ret;
}
-int
-make_ghadoop_mountspec(gf_mount_spec_t *mspec, const char *volname, char *user,
- char *server)
-{
- char *hadoop_mnt_desc = NULL;
- int ret = 0;
-
- ret = gf_asprintf(&hadoop_mnt_desc, hadoop_mnt_desc_template, server,
- GF_CLIENT_PID_HADOOP, volname, user);
- if (ret == -1)
- return ret;
-
- return parse_mount_pattern_desc(mspec, hadoop_mnt_desc);
-}
-
static gf_boolean_t
match_comp(char *str, char *patcomp)
{
diff --git a/xlators/mgmt/glusterd/src/glusterd-mountbroker.h b/xlators/mgmt/glusterd/src/glusterd-mountbroker.h
index 319e05188b4..20c1347f52f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mountbroker.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mountbroker.h
@@ -30,10 +30,8 @@ int
parse_mount_pattern_desc(gf_mount_spec_t *mspec, char *pdesc);
int
-make_georep_mountspec(gf_mount_spec_t *mspec, const char *volname, char *user);
-int
-make_ghadoop_mountspec(gf_mount_spec_t *mspec, const char *volname, char *user,
- char *server);
+make_georep_mountspec(gf_mount_spec_t *mspec, const char *volname, char *user,
+ char *logdir);
int
glusterd_do_mount(char *label, dict_t *argdict, char **path, int *op_errno);
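make_georep_mountspec() now takes the log directory as a parameter, which the template embeds via %s instead of hard-coding the compile-time DEFAULT_LOG_FILE_DIRECTORY; the ghadoop variant is dropped entirely. A hedged usage sketch (the account name and logdir literal here are assumptions; the real caller on the mountbroker setup path derives both from glusterd configuration):

    /* Hypothetical call site for the new signature. */
    gf_mount_spec_t mspec = {0};
    int ret = make_georep_mountspec(&mspec, volnames, "geoaccount",
                                    "/var/log/glusterfs" /* assumed */);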
diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
index c153719545b..4908dbbc213 100644
--- a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
@@ -8,9 +8,11 @@
cases as published by the Free Software Foundation.
*/
-#include "globals.h"
-#include "run.h"
-#include "syscall.h"
+#ifdef BUILD_GNFS
+
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
@@ -150,7 +152,7 @@ glusterd_nfssvc_reconfigure()
glusterd_volinfo_t *volinfo = NULL;
this = THIS;
- GF_VALIDATE_OR_GOTO(this->name, this, out);
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
priv = this->private;
GF_VALIDATE_OR_GOTO(this->name, priv, out);
@@ -220,6 +222,7 @@ glusterd_nfssvc_reconfigure()
ret = priv->nfs_svc.manager(&(priv->nfs_svc), NULL, PROC_START_NO_WAIT);
out:
- gf_msg_debug(this->name, 0, "Returning %d", ret);
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
return ret;
}
+#endif
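With the whole of glusterd-nfs-svc.c now wrapped in BUILD_GNFS, the legacy gNFS service is compiled out unless enabled at configure time, and the header below guards its declarations the same way. Call sites referencing these symbols must therefore be guarded too; the pattern:

    /* Guarding a caller of the conditionally built gNFS service. */
    #ifdef BUILD_GNFS
        ret = glusterd_nfssvc_reconfigure();
    #else
        ret = 0; /* gNFS compiled out: nothing to reconfigure */
    #endif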
diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.h b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.h
index 47e89830f55..6bfdde95749 100644
--- a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.h
+++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.h
@@ -13,6 +13,7 @@
#include "glusterd-svc-mgmt.h"
+#ifdef BUILD_GNFS
void
glusterd_nfssvc_build(glusterd_svc_t *svc);
@@ -22,4 +23,5 @@ glusterd_nfssvc_init(glusterd_svc_t *svc);
int
glusterd_nfssvc_reconfigure();
+#endif /* BUILD_GNFS */
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index f52e9d3bc41..c537fc33a85 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -14,46 +14,40 @@
#include <sys/mount.h>
#include <libgen.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
#include "fnmatch.h"
-#include "xlator.h"
+#include <glusterfs/xlator.h>
#include "protocol-common.h"
#include "glusterd.h"
-#include "call-stub.h"
-#include "defaults.h"
-#include "list.h"
-#include "dict.h"
-#include "compat.h"
-#include "compat-errno.h"
-#include "statedump.h"
-#include "glusterd-sm.h"
+#include <glusterfs/call-stub.h>
+#include <glusterfs/list.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/statedump.h>
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
-#include "glusterd-hooks.h"
-#include "glusterd-volgen.h"
#include "glusterd-locks.h"
-#include "glusterd-messages.h"
-#include "glusterd-utils.h"
#include "glusterd-quota.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
#include "cli1-xdr.h"
-#include "common-utils.h"
-#include "run.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-shd-svc.h"
-#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-server-quorum.h"
-#include "glusterd-volgen.h"
#include <sys/types.h>
#include <signal.h>
#include <sys/wait.h>
#include "glusterd-gfproxyd-svc-helper.h"
+#define len_strcmp(key, len, str) \
+ ((len == SLEN(str)) && (strcmp(key, str) == 0))
+
extern char local_node_hostname[PATH_MAX];
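The len_strcmp macro above replaces the strncmp(key, pat, SLEN(pat)) tests used below in the bitrot and quota checks. This is a correctness fix as much as a micro-optimization: strncmp bounded by the pattern's own length is a prefix test, so it also accepts longer look-alike keys, while len_strcmp compares the lengths first and then the full strings:

    /* Prefix test: matches even though the keys differ. */
    strncmp("scrubber", "scrub", SLEN("scrub"));       /* == 0, false match */
    /* Exact test: the length check rejects the look-alike. */
    len_strcmp("scrubber", SLEN("scrubber"), "scrub"); /* false */
    len_strcmp("scrub", SLEN("scrub"), "scrub");       /* true  */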
static int
glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
@@ -67,7 +61,7 @@ glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
* It's important that every value have a default, or have a special handler
* in glusterd_get_global_options_for_all_vols, or else we might crash there.
*/
-glusterd_all_vol_opts valid_all_vol_opts[] = {
+const glusterd_all_vol_opts valid_all_vol_opts[] = {
{GLUSTERD_QUORUM_RATIO_KEY, "51"},
{GLUSTERD_SHARED_STORAGE_KEY, "disable"},
/* This one actually gets filled in dynamically. */
@@ -85,7 +79,8 @@ glusterd_all_vol_opts valid_all_vol_opts[] = {
* can be attached per process.
* TBD: Discuss the default value for this. Maybe this should be a
* dynamic value depending on the memory specifications per node */
- {GLUSTERD_BRICKMUX_LIMIT_KEY, "0"},
+ {GLUSTERD_BRICKMUX_LIMIT_KEY, GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE},
+ {GLUSTERD_VOL_CNT_PER_THRD, GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE},
{GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},
{GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
{NULL},
@@ -97,14 +92,6 @@ glusterd_op_info_t opinfo = {
{0},
};
-int
-glusterd_bricks_select_rebalance_volume(dict_t *dict, char **op_errstr,
- struct cds_list_head *selected);
-
-int
-glusterd_bricks_select_tier_volume(dict_t *dict, char **op_errstr,
- struct cds_list_head *selected);
-
int32_t
glusterd_txn_opinfo_dict_init()
{
@@ -119,6 +106,7 @@ glusterd_txn_opinfo_dict_init()
priv->glusterd_txn_opinfo = dict_new();
if (!priv->glusterd_txn_opinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -191,8 +179,10 @@ glusterd_generate_txn_id(dict_t *dict, uuid_t **txn_id)
GF_ASSERT(dict);
*txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
- if (!*txn_id)
+ if (!*txn_id) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
if (priv->op_version < GD_OP_VERSION_3_6_0)
gf_uuid_copy(**txn_id, priv->global_txn_id);
@@ -403,7 +393,7 @@ glusterd_op_sm_event_name_get(int event)
return glusterd_op_sm_event_names[event];
}
-void
+static void
glusterd_destroy_lock_ctx(glusterd_op_lock_ctx_t *ctx)
{
if (!ctx)
@@ -422,56 +412,49 @@ glusterd_set_volume_status(glusterd_volinfo_t *volinfo,
static int
glusterd_op_sm_inject_all_acc(uuid_t *txn_id)
{
- int32_t ret = -1;
+ int ret = -1;
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, txn_id, NULL);
gf_msg_debug("glusterd", 0, "Returning %d", ret);
return ret;
}
static int
-glusterd_check_bitrot_cmd(char *key, char *value, char *errstr, size_t size)
+glusterd_check_bitrot_cmd(char *key, const int keylen, char *errstr,
+ const size_t size)
{
int ret = -1;
- if ((!strncmp(key, "bitrot", SLEN("bitrot"))) ||
- (!strncmp(key, "features.bitrot", SLEN("features.bitrot")))) {
+ if (len_strcmp(key, keylen, "bitrot") ||
+ len_strcmp(key, keylen, "features.bitrot")) {
snprintf(errstr, size,
- " 'gluster volume set <VOLNAME> %s' "
- "is invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> {enable|disable}' instead.",
+ " 'gluster volume set <VOLNAME> %s' is invalid command."
+ " Use 'gluster volume bitrot <VOLNAME> {enable|disable}'"
+ " instead.",
key);
- ret = -1;
goto out;
- } else if ((!strncmp(key, "scrub-freq", SLEN("scrub-freq"))) ||
- (!strncmp(key, "features.scrub-freq",
- SLEN("features.scrub-freq")))) {
+ } else if (len_strcmp(key, keylen, "scrub-freq") ||
+ len_strcmp(key, keylen, "features.scrub-freq")) {
snprintf(errstr, size,
- " 'gluster volume "
- "set <VOLNAME> %s' is invalid command. Use 'gluster "
- "volume bitrot <VOLNAME> scrub-frequency"
+ " 'gluster volume set <VOLNAME> %s' is invalid command."
+ " Use 'gluster volume bitrot <VOLNAME> scrub-frequency"
" {hourly|daily|weekly|biweekly|monthly}' instead.",
key);
- ret = -1;
goto out;
- } else if ((!strncmp(key, "scrub", SLEN("scrub"))) ||
- (!strncmp(key, "features.scrub", SLEN("features.scrub")))) {
+ } else if (len_strcmp(key, keylen, "scrub") ||
+ len_strcmp(key, keylen, "features.scrub")) {
snprintf(errstr, size,
- " 'gluster volume set <VOLNAME> %s' is "
- "invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> scrub {pause|resume}' instead.",
+ " 'gluster volume set <VOLNAME> %s' is invalid command."
+ " Use 'gluster volume bitrot <VOLNAME> scrub {pause|resume}'"
+ " instead.",
key);
- ret = -1;
goto out;
- } else if ((!strncmp(key, "scrub-throttle", SLEN("scrub-throttle"))) ||
- (!strncmp(key, "features.scrub-throttle",
- SLEN("features.scrub-throttle")))) {
+ } else if (len_strcmp(key, keylen, "scrub-throttle") ||
+ len_strcmp(key, keylen, "features.scrub-throttle")) {
snprintf(errstr, size,
- " 'gluster volume set <VOLNAME> %s' is "
- "invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> scrub-throttle {lazy|normal|aggressive}' "
- "instead.",
+ " 'gluster volume set <VOLNAME> %s' is invalid command."
+ " Use 'gluster volume bitrot <VOLNAME> scrub-throttle "
+ " {lazy|normal|aggressive}' instead.",
key);
- ret = -1;
goto out;
}
@@ -481,61 +464,52 @@ out:
}
static int
-glusterd_check_quota_cmd(char *key, char *value, char *errstr, size_t size)
+glusterd_check_quota_cmd(char *key, const int keylen, char *value, char *errstr,
+ size_t size)
{
int ret = -1;
gf_boolean_t b = _gf_false;
- if ((strcmp(key, "quota") == 0) || (strcmp(key, "features.quota") == 0)) {
+ if (len_strcmp(key, keylen, "quota") ||
+ len_strcmp(key, keylen, "features.quota")) {
ret = gf_string2boolean(value, &b);
if (ret)
goto out;
+ ret = -1;
if (b) {
snprintf(errstr, size,
- " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> enable' instead.",
+ " 'gluster volume set <VOLNAME> %s %s' is deprecated."
+ " Use 'gluster volume quota <VOLNAME> enable' instead.",
key, value);
- ret = -1;
- goto out;
} else {
snprintf(errstr, size,
- " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> disable' instead.",
+ " 'gluster volume set <VOLNAME> %s %s' is deprecated."
+ " Use 'gluster volume quota <VOLNAME> disable' instead.",
key, value);
- ret = -1;
- goto out;
}
- } else if ((strcmp(key, "inode-quota") == 0) ||
- (strcmp(key, "features.inode-quota") == 0)) {
+ goto out;
+ } else if (len_strcmp(key, keylen, "inode-quota") ||
+ len_strcmp(key, keylen, "features.inode-quota")) {
ret = gf_string2boolean(value, &b);
if (ret)
goto out;
+ ret = -1;
if (b) {
- snprintf(errstr, size,
- " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "inode-quota <VOLNAME> enable' instead.",
- key, value);
- ret = -1;
- goto out;
+ snprintf(
+ errstr, size,
+ " 'gluster volume set <VOLNAME> %s %s' is deprecated."
+ " Use 'gluster volume inode-quota <VOLNAME> enable' instead.",
+ key, value);
} else {
/* inode-quota disable not supported,
* use quota disable
*/
snprintf(errstr, size,
- " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> disable' instead.",
+ " 'gluster volume set <VOLNAME> %s %s' is deprecated."
+ " Use 'gluster volume quota <VOLNAME> disable' instead.",
key, value);
- ret = -1;
- goto out;
}
+ goto out;
}
ret = 0;
@@ -570,8 +544,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
case GD_OP_STOP_VOLUME:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_TERMINATE;
brick_req->name = brickinfo->path;
glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPING);
@@ -580,8 +557,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_XLATOR_INFO;
brick_req->name = brickinfo->path;
@@ -590,51 +570,70 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
case GD_OP_HEAL_VOLUME: {
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_XLATOR_OP;
brick_req->name = "";
ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"),
(int32_t *)&heal_op);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=heal-op", NULL);
goto out;
+ }
ret = dict_set_int32n(dict, "xl-op", SLEN("xl-op"), heal_op);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=xl-op", NULL);
goto out;
+ }
} break;
case GD_OP_STATUS_VOLUME: {
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_STATUS;
brick_req->name = "";
ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"),
brickinfo->path);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=brick-name", NULL);
goto out;
+ }
} break;
case GD_OP_REBALANCE:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_XLATOR_DEFRAG;
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
+ }
ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_VOLINFO_GET_FAIL, "Volume=%s", volname, NULL);
goto out;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- snprintf(name, sizeof(name), "%s-tier-dht", volname);
- else
- snprintf(name, sizeof(name), "%s-dht", volname);
+ }
+ snprintf(name, sizeof(name), "%s-dht", volname);
brick_req->name = gf_strdup(name);
break;
@@ -642,8 +641,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
case GD_OP_BARRIER:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_BARRIER;
brick_req->name = brickinfo->path;
break;
@@ -653,10 +655,15 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
break;
}
+ brick_req->dict.dict_len = 0;
+ brick_req->dict.dict_val = NULL;
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
&brick_req->input.input_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
*req = brick_req;
ret = 0;
@@ -678,13 +685,19 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
GF_ASSERT(op < GD_OP_MAX);
GF_ASSERT(op > GD_OP_NONE);
GF_ASSERT(req);
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
switch (op) {
case GD_OP_PROFILE_VOLUME:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_NODE_PROFILE;
brick_req->name = "";
@@ -694,8 +707,11 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
case GD_OP_STATUS_VOLUME:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_NODE_STATUS;
brick_req->name = "";
@@ -706,14 +722,20 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
case GD_OP_SCRUB_ONDEMAND:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_NODE_BITROT;
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
+ }
brick_req->name = gf_strdup(volname);
break;
@@ -721,11 +743,16 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
goto out;
}
+ brick_req->dict.dict_len = 0;
+ brick_req->dict.dict_val = NULL;
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
&brick_req->input.input_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
*req = brick_req;
ret = 0;
@@ -733,7 +760,7 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
out:
if (ret && brick_req)
GF_FREE(brick_req);
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -749,12 +776,14 @@ glusterd_validate_quorum_options(xlator_t *this, char *fullkey, char *value,
goto out;
key = strchr(fullkey, '.');
if (key == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
key++;
opt = xlator_volume_option_get(this, key);
if (!opt) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, NULL);
ret = -1;
goto out;
}
@@ -775,16 +804,16 @@ glusterd_validate_brick_mx_options(xlator_t *this, char *fullkey, char *value,
}
static int
-glusterd_validate_shared_storage(char *key, char *value, char *errstr)
+glusterd_validate_shared_storage(char *value, char *errstr)
{
int32_t ret = -1;
- int32_t exists = -1;
int32_t count = -1;
char *op = NULL;
char hook_script[PATH_MAX] = "";
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
int32_t len = 0;
+ glusterd_volinfo_t *volinfo = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO("glusterd", this, out);
@@ -792,16 +821,9 @@ glusterd_validate_shared_storage(char *key, char *value, char *errstr)
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- GF_VALIDATE_OR_GOTO(this->name, key, out);
GF_VALIDATE_OR_GOTO(this->name, value, out);
GF_VALIDATE_OR_GOTO(this->name, errstr, out);
- ret = 0;
-
- if (strcmp(key, GLUSTERD_SHARED_STORAGE_KEY)) {
- goto out;
- }
-
if ((strcmp(value, "enable")) && (strcmp(value, "disable"))) {
snprintf(errstr, PATH_MAX,
"Invalid option(%s). Valid options "
@@ -852,8 +874,8 @@ glusterd_validate_shared_storage(char *key, char *value, char *errstr)
goto out;
}
- exists = glusterd_check_volume_exists(GLUSTER_SHARED_STORAGE);
- if (exists) {
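+ /* glusterd_volinfo_find() returns 0 when the volume exists, so !ret
+ * means the shared-storage volume is already present. */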
+ ret = glusterd_volinfo_find(GLUSTER_SHARED_STORAGE, &volinfo);
+ if (!ret) {
snprintf(errstr, PATH_MAX,
"Shared storage volume(" GLUSTER_SHARED_STORAGE
") already exists.");
@@ -887,7 +909,7 @@ out:
}
static int
-glusterd_validate_localtime_logging(char *key, char *value, char *errstr)
+glusterd_validate_localtime_logging(char *value, char *errstr)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -899,29 +921,11 @@ glusterd_validate_localtime_logging(char *key, char *value, char *errstr)
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
-
- GF_VALIDATE_OR_GOTO(this->name, key, out);
GF_VALIDATE_OR_GOTO(this->name, value, out);
- GF_VALIDATE_OR_GOTO(this->name, errstr, out);
-
- ret = 0;
-
- if (strcmp(key, GLUSTERD_LOCALTIME_LOGGING_KEY)) {
- goto out;
- }
-
- if ((strcmp(value, "enable")) && (strcmp(value, "disable"))) {
- snprintf(errstr, PATH_MAX,
- "Invalid option(%s). Valid options "
- "are 'enable' and 'disable'",
- value);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
- errstr);
- ret = -1;
- }
already_enabled = gf_log_get_localtime();
+ ret = 0;
if (strcmp(value, "enable") == 0) {
gf_log_set_localtime(1);
if (!already_enabled)
@@ -932,6 +936,15 @@ glusterd_validate_localtime_logging(char *key, char *value, char *errstr)
if (already_enabled)
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOCALTIME_LOGGING_DISABLE,
"localtime logging disable");
+ } else {
+ ret = -1;
+ GF_VALIDATE_OR_GOTO(this->name, errstr, out);
+ snprintf(errstr, PATH_MAX,
+ "Invalid option(%s). Valid options "
+ "are 'enable' and 'disable'",
+ value);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
+ errstr);
}
out:
@@ -939,7 +952,7 @@ out:
}
static int
-glusterd_validate_daemon_log_level(char *key, char *value, char *errstr)
+glusterd_validate_daemon_log_level(char *value, char *errstr)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -951,19 +964,15 @@ glusterd_validate_daemon_log_level(char *key, char *value, char *errstr)
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- GF_VALIDATE_OR_GOTO(this->name, key, out);
GF_VALIDATE_OR_GOTO(this->name, value, out);
- GF_VALIDATE_OR_GOTO(this->name, errstr, out);
ret = 0;
- if (strcmp(key, GLUSTERD_DAEMON_LOG_LEVEL_KEY)) {
- goto out;
- }
-
if ((strcmp(value, "INFO")) && (strcmp(value, "WARNING")) &&
(strcmp(value, "DEBUG")) && (strcmp(value, "TRACE")) &&
(strcmp(value, "ERROR"))) {
+ ret = -1;
+ GF_VALIDATE_OR_GOTO(this->name, errstr, out);
snprintf(errstr, PATH_MAX,
"Invalid option(%s). Valid options "
"are 'INFO' or 'WARNING' or 'ERROR' or 'DEBUG' or "
@@ -971,7 +980,6 @@ glusterd_validate_daemon_log_level(char *key, char *value, char *errstr)
value);
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
errstr);
- ret = -1;
}
out:
@@ -991,6 +999,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
char keystr[100] = {
0,
};
+ int keystr_len;
int keylen;
char *trash_path = NULL;
int trash_path_len = 0;
@@ -1003,6 +1012,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *brickinfo = NULL;
dict_t *val_dict = NULL;
gf_boolean_t global_opt = _gf_false;
+ gf_boolean_t key_matched = _gf_false; /* whether the key has already been handled */
glusterd_volinfo_t *voliter = NULL;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
@@ -1015,6 +1025,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
gf_boolean_t check_op_version = _gf_true;
gf_boolean_t trash_enabled = _gf_false;
gf_boolean_t all_vol = _gf_false;
+ struct volopt_map_entry *vmep = NULL;
GF_ASSERT(dict);
this = THIS;
@@ -1022,10 +1033,6 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
priv = this->private;
GF_ASSERT(priv);
- val_dict = dict_new();
- if (!val_dict)
- goto out;
-
/* Check if we can support the required op-version
* This check is not done on the originator glusterd. The originator
* glusterd sets this value.
@@ -1040,8 +1047,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (check_op_version) {
ret = dict_get_uint32(dict, "new-op-version", &new_op_version);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get new_op_version");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=new-op-version", NULL);
goto out;
}
@@ -1049,9 +1056,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
(new_op_version < GD_OP_VERSION_MIN)) {
ret = -1;
snprintf(errstr, sizeof(errstr),
- "Required op_version (%d) is not "
- "supported. Max supported op version "
- "is %d",
+ "Required op_version (%d) is not supported."
+ " Max supported op version is %d",
new_op_version, priv->op_version);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
"%s", errstr);
@@ -1060,7 +1066,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
}
}
- ret = dict_get_int32n(dict, "count", SLEN("count"), &dict_count);
+ ret = dict_get_int32_sizen(dict, "count", &dict_count);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Count(dict),not set in Volume-Set");
@@ -1069,12 +1075,12 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (dict_count == 0) {
/*No options would be specified of volume set help */
- if (dict_getn(dict, "help", SLEN("help"))) {
+ if (dict_get_sizen(dict, "help")) {
ret = 0;
goto out;
}
- if (dict_getn(dict, "help-xml", SLEN("help-xml"))) {
+ if (dict_get_sizen(dict, "help-xml")) {
#if (HAVE_LIB_XML)
ret = 0;
goto out;
@@ -1083,8 +1089,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MODULE_NOT_INSTALLED,
"libxml not present in the system");
*op_errstr = gf_strdup(
- "Error: xml libraries not "
- "present to produce xml-output");
+ "Error: xml libraries not present to produce xml-output");
goto out;
#endif
}
@@ -1095,25 +1100,17 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
goto out;
}
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
}
if (strcasecmp(volname, "all") != 0) {
- exists = glusterd_check_volume_exists(volname);
- if (!exists) {
- snprintf(errstr, sizeof(errstr), FMTSTR_CHECK_VOL_EXISTS, volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
- errstr);
- ret = -1;
- goto out;
- }
-
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
+ snprintf(errstr, sizeof(errstr), FMTSTR_CHECK_VOL_EXISTS, volname);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
FMTSTR_CHECK_VOL_EXISTS, volname);
goto out;
@@ -1130,15 +1127,23 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
all_vol = _gf_true;
}
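+ /* Allocate val_dict only after the volname checks, so early failures
+ * skip the allocation. */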
+ val_dict = dict_new();
+ if (!val_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
+
for (count = 1; ret != 1; count++) {
- global_opt = _gf_false;
- keylen = sprintf(keystr, "key%d", count);
- ret = dict_get_strn(dict, keystr, keylen, &key);
- if (ret)
+ keystr_len = sprintf(keystr, "key%d", count);
+ ret = dict_get_strn(dict, keystr, keystr_len, &key);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", keystr, NULL);
break;
+ }
- keylen = sprintf(keystr, "value%d", count);
- ret = dict_get_strn(dict, keystr, keylen, &value);
+ keystr_len = sprintf(keystr, "value%d", count);
+ ret = dict_get_strn(dict, keystr, keystr_len, &value);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"invalid key,value pair in 'volume set'");
@@ -1146,13 +1151,15 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
goto out;
}
- if (strcmp(key, "config.memory-accounting") == 0) {
+ key_matched = _gf_false;
+ keylen = strlen(key);
+ if (len_strcmp(key, keylen, "config.memory-accounting")) {
+ key_matched = _gf_true;
gf_msg_debug(this->name, 0,
"enabling memory accounting for volume %s", volname);
ret = 0;
- }
-
- if (strcmp(key, "config.transport") == 0) {
+ } else if (len_strcmp(key, keylen, "config.transport")) {
+ key_matched = _gf_true;
gf_msg_debug(this->name, 0, "changing transport-type for volume %s",
volname);
ret = 0;
@@ -1162,23 +1169,31 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
(strcasecmp(value, "tcp,rdma") == 0) ||
(strcasecmp(value, "rdma,tcp") == 0))) {
ret = snprintf(errstr, sizeof(errstr),
- "transport-type %s does "
- "not exist",
- value);
+ "transport-type %s does not exist", value);
/* lets not bother about above return value,
its a failure anyways */
ret = -1;
goto out;
}
+ } else if (len_strcmp(key, keylen, "ganesha.enable")) {
+ key_matched = _gf_true;
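+ /* Turning ganesha.enable off must unexport the volume from the
+ * NFS-Ganesha server at staging time. */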
+ if (strcmp(value, "off") == 0) {
+ ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
+ if (ret)
+ goto out;
+ }
}
- ret = glusterd_check_bitrot_cmd(key, value, errstr, sizeof(errstr));
- if (ret)
- goto out;
-
- ret = glusterd_check_quota_cmd(key, value, errstr, sizeof(errstr));
- if (ret)
- goto out;
+ if (!key_matched) {
+ ret = glusterd_check_bitrot_cmd(key, keylen, errstr,
+ sizeof(errstr));
+ if (ret)
+ goto out;
+ ret = glusterd_check_quota_cmd(key, keylen, value, errstr,
+ sizeof(errstr));
+ if (ret)
+ goto out;
+ }
if (is_key_glusterd_hooks_friendly(key))
continue;
@@ -1205,42 +1220,36 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
goto out;
}
- if (key_fixed)
+ if (key_fixed) {
key = key_fixed;
+ keylen = strlen(key_fixed);
+ }
- if (strcmp(key, "cluster.granular-entry-heal") == 0) {
+ if (len_strcmp(key, keylen, "cluster.granular-entry-heal")) {
/* For granular entry-heal, if the set command was
* invoked through volume-set CLI, then allow the
* command only if the volume is still in 'Created'
* state
*/
- if ((dict_getn(dict, "is-special-key", SLEN("is-special-key")) ==
- NULL) &&
- (volinfo->status != GLUSTERD_STATUS_NONE)) {
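+ /* volinfo is NULL when volname is "all"; check it before
+ * dereferencing. */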
+ if (volinfo && volinfo->status != GLUSTERD_STATUS_NONE &&
+ (dict_get_sizen(dict, "is-special-key") == NULL)) {
snprintf(errstr, sizeof(errstr),
- " 'gluster "
- "volume set <VOLNAME> %s {enable, "
- "disable}' is not supported. Use "
- "'gluster volume heal <VOLNAME> "
- "granular-entry-heal {enable, "
- "disable}' instead.",
+ " 'gluster volume set <VOLNAME> %s {enable, disable}'"
+ " is not supported."
+ " Use 'gluster volume heal <VOLNAME> "
+ "granular-entry-heal {enable, disable}' instead.",
key);
ret = -1;
goto out;
}
- }
-
- /* Check if the key is cluster.op-version and set
- * local_new_op_version to the value given if possible.
- */
- if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
+ } else if (len_strcmp(key, keylen, GLUSTERD_GLOBAL_OP_VERSION_KEY)) {
+ /* Check if the key is cluster.op-version and set
+ * local_new_op_version to the value given if possible.
+ */
if (!all_vol) {
ret = -1;
snprintf(errstr, sizeof(errstr),
- "Option \""
- "%s\" is not valid for a single "
- "volume",
- key);
+ "Option \"%s\" is not valid for a single volume", key);
goto out;
}
/* Check if cluster.op-version is the only option being
@@ -1249,9 +1258,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (count != 1) {
ret = -1;
snprintf(errstr, sizeof(errstr),
- "Option \""
- "%s\" cannot be set along with other "
- "options",
+ "Option \"%s\" cannot be set along with other options",
key);
goto out;
}
@@ -1261,10 +1268,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
ret = gf_string2uint(value, &local_key_op_version);
if (ret) {
snprintf(errstr, sizeof(errstr),
- "invalid "
- "number format \"%s\" in option "
- "\"%s\"",
- value, key);
+ "invalid number format \"%s\" in option \"%s\"", value,
+ key);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
errstr);
goto out;
@@ -1274,9 +1279,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
local_key_op_version < GD_OP_VERSION_MIN) {
ret = -1;
snprintf(errstr, sizeof(errstr),
- "Required op_version (%d) is not "
- "supported. Max supported op version "
- "is %d",
+ "Required op_version (%d) is not supported."
+ " Max supported op version is %d",
local_key_op_version, priv->op_version);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
"%s", errstr);
@@ -1308,10 +1312,11 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (ret)
goto out;
- local_key_op_version = glusterd_get_op_version_for_key(key);
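+ /* Look up the volopt map entry once; it serves both the op-version
+ * lookup and the client-option check below. */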
+ vmep = gd_get_vmep(key);
+ local_key_op_version = glusterd_get_op_version_from_vmep(vmep);
if (local_key_op_version > local_new_op_version)
local_new_op_version = local_key_op_version;
- if (gd_is_client_option(key) &&
+ if (gd_is_client_option(vmep) &&
(local_key_op_version > local_new_client_op_version))
local_new_client_op_version = local_key_op_version;
@@ -1327,8 +1332,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
ret = dict_get_uint32(dict, keystr, &key_op_version);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get key-op-version from"
- " dict");
+ "Failed to get key-op-version from dict");
goto out;
}
if (local_key_op_version != key_op_version) {
@@ -1337,60 +1341,63 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
"option: %s op-version mismatch", key);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
"%s, required op-version = %" PRIu32
- ", "
- "available op-version = %" PRIu32,
+ ", available op-version = %" PRIu32,
errstr, key_op_version, local_key_op_version);
goto out;
}
}
- if (glusterd_check_globaloption(key))
- global_opt = _gf_true;
+ global_opt = glusterd_check_globaloption(key);
- ret = glusterd_validate_shared_storage(key, value, errstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate shared "
- "storage volume options");
- goto out;
- }
-
- ret = glusterd_validate_localtime_logging(key, value, errstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate localtime "
- "logging volume options");
- goto out;
- }
-
- ret = glusterd_validate_daemon_log_level(key, value, errstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate daemon-log-level volume "
- "options");
- goto out;
- }
-
- if (volinfo) {
- ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_TRASH, &val_dup);
- if (val_dup) {
- ret = gf_string2boolean(val_dup, &trash_enabled);
- if (ret)
- goto out;
+ if (len_strcmp(key, keylen, GLUSTERD_SHARED_STORAGE_KEY)) {
+ ret = glusterd_validate_shared_storage(value, errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate shared storage volume options");
+ goto out;
+ }
+ } else if (len_strcmp(key, keylen, GLUSTERD_LOCALTIME_LOGGING_KEY)) {
+ ret = glusterd_validate_localtime_logging(value, errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate localtime logging volume options");
+ goto out;
+ }
+ } else if (len_strcmp(key, keylen, GLUSTERD_DAEMON_LOG_LEVEL_KEY)) {
+ ret = glusterd_validate_daemon_log_level(value, errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate daemon-log-level volume options");
+ goto out;
+ }
+ } else if (len_strcmp(key, keylen, "features.trash-dir")) {
+ if (volinfo) {
+ ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_TRASH,
+ &val_dup);
+ if (!ret && val_dup) {
+ ret = gf_string2boolean(val_dup, &trash_enabled);
+ if (ret)
+ goto out;
+ }
+ }
+ if (!trash_enabled) {
+ snprintf(errstr, sizeof(errstr),
+ "Trash translator is not enabled. "
+ "Use volume set %s trash on",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
+ "Unable to set the options in 'volume set': %s", errstr);
+ ret = -1;
+ goto out;
}
- }
-
- if (!strcmp(key, "features.trash-dir") && trash_enabled) {
if (strchr(value, '/')) {
snprintf(errstr, sizeof(errstr),
"Path is not allowed as option");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the options in 'volume "
- "set': %s",
- errstr);
+ "Unable to set the options in 'volume set': %s", errstr);
ret = -1;
goto out;
}
@@ -1411,16 +1418,13 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
snprintf(errstr, sizeof(errstr), "Path %s exists",
value);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the "
- "options in "
- "'volume set': %s",
+ "Unable to set the options in 'volume set': %s",
errstr);
ret = -1;
goto out;
} else {
gf_msg_debug(this->name, 0,
- "Directory with given "
- "name does not exists,"
+ "Directory with given name does not exist,"
" continuing");
}
@@ -1431,9 +1435,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
snprintf(errstr, sizeof(errstr),
"One or more bricks are down");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the "
- "options in "
- "'volume set': %s",
+ "Unable to set the options in 'volume set': %s",
errstr);
ret = -1;
goto out;
@@ -1442,22 +1444,11 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (trash_path) {
GF_FREE(trash_path);
trash_path = NULL;
- trash_path_len = 0;
}
}
- } else if (!strcmp(key, "features.trash-dir") && !trash_enabled) {
- snprintf(errstr, sizeof(errstr),
- "Trash translator is not enabled. Use "
- "volume set %s trash on",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the options in 'volume "
- "set': %s",
- errstr);
- ret = -1;
- goto out;
}
- ret = dict_set_str(val_dict, key, value);
+
+ ret = dict_set_strn(val_dict, key, keylen, value);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
@@ -1482,12 +1473,11 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
- "Could not create "
- "temp volfile, some option failed: %s",
+ "Could not create temp volfile, some option failed: %s",
*op_errstr);
goto out;
}
- dict_del(val_dict, key);
+ dict_deln(val_dict, key, keylen);
if (key_fixed) {
GF_FREE(key_fixed);
@@ -1501,7 +1491,6 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
volname, local_new_client_op_version, op_errstr);
if (ret)
goto out;
-
cont:
if (origin_glusterd) {
ret = dict_set_uint32(dict, "new-op-version", local_new_op_version);
@@ -1516,8 +1505,7 @@ cont:
* TODO: Remove this and the other places this is referred once
* 3.3.x compatibility is not required
*/
- ret = dict_set_int32n(dict, "check-op-version",
- SLEN("check-op-version"), 1);
+ ret = dict_set_int32_sizen(dict, "check-op-version", 1);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Failed to set check-op-version in dict");
@@ -1549,81 +1537,6 @@ out:
}
return ret;
}
-static int
-glusterd_water_limit_check(glusterd_volinfo_t *volinfo, gf_boolean_t is_hi,
- char **op_errstr)
-{
- int ret = -1;
- char *default_value = NULL;
- char *temp = NULL;
- uint64_t wm = 0;
- uint64_t default_wm = 0;
- struct volopt_map_entry *vmap = NULL;
- xlator_t *this = NULL;
- extern struct volopt_map_entry glusterd_volopt_map[];
- char msg[2048] = {0};
-
- this = THIS;
- GF_ASSERT(this);
-
- if (is_hi)
- ret = glusterd_volinfo_get(volinfo, "cluster.watermark-low", &temp);
- else
- ret = glusterd_volinfo_get(volinfo, "cluster.watermark-hi", &temp);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
- "failed to get watermark");
- goto out;
- }
-
- gf_string2bytesize_uint64(temp, &wm);
-
- if (is_hi)
- for (vmap = glusterd_volopt_map; vmap->key; vmap++) {
- if (strcmp(vmap->key, "cluster.watermark-hi") == 0)
- default_value = vmap->value;
- }
- else
- for (vmap = glusterd_volopt_map; vmap->key; vmap++) {
- if (strcmp(vmap->key, "cluster.watermark-low") == 0)
- default_value = vmap->value;
- }
-
- gf_string2bytesize_uint64(default_value, &default_wm);
-
- if (is_hi) {
- if (default_wm <= wm) {
- snprintf(msg, sizeof(msg),
- "Resetting hi-watermark "
- "to default will make it lower or equal to "
- "the low-watermark, which is an invalid "
- "configuration state. Please lower the "
- "low-watermark first to the desired value "
- "and then reset the hi-watermark.");
- ret = -1;
- goto out;
- }
- } else {
- if (default_wm >= wm) {
- snprintf(msg, sizeof(msg),
- "Resetting low-watermark "
- "to default will make it higher or equal to "
- "the hi-watermark, which is an invalid "
- "configuration state. Please raise the "
- "hi-watermark first to the desired value "
- "and then reset the low-watermark.");
- ret = -1;
- goto out;
- }
- }
-out:
- if (msg[0] != '\0') {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TIER_WATERMARK_RESET_FAIL,
- "%s", msg);
- *op_errstr = gf_strdup(msg);
- }
- return ret;
-}
static int
glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
@@ -1653,12 +1566,6 @@ glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
}
if (strcasecmp(volname, "all") != 0) {
- exists = glusterd_check_volume_exists(volname);
- if (!exists) {
- snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
- ret = -1;
- goto out;
- }
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
@@ -1677,22 +1584,25 @@ glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
goto out;
}
+ /*
+ * If the key ganesha.enable is set, the volume should be unexported from
+ * the ganesha server. It is also a volume-level option, so perform this
+ * only when the volume name is not "all" (in other words, when
+ * volinfo != NULL).
+ */
+ if (volinfo && (!strcmp(key, "all") || !strcmp(key, "ganesha.enable"))) {
+ if (glusterd_check_ganesha_export(volinfo)) {
+ ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
+ "Could not reset ganesha.enable key");
+ }
+ }
+
if (strcmp(key, "all")) {
exists = glusterd_check_option_exists(key, &key_fixed);
if (exists == -1) {
ret = -1;
goto out;
- } else if (strcmp(key, "cluster.watermark-low") == 0) {
- ret = glusterd_water_limit_check(volinfo, _gf_false, op_errstr);
- if (ret)
- return ret;
- } else if (strcmp(key, "cluster.watermark-hi") == 0) {
- ret = glusterd_water_limit_check(volinfo, _gf_true, op_errstr);
- if (ret) {
- if (key_fixed)
- GF_FREE(key_fixed);
- return ret;
- }
}
if (!exists) {
@@ -1752,18 +1662,22 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
int ret = -1;
char *volname = NULL;
char *hostname = NULL;
- gf_boolean_t exists = _gf_false;
glusterd_peerinfo_t *peerinfo = NULL;
char msg[2048] = {
0,
};
glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
if (ret) {
snprintf(msg, sizeof(msg),
"hostname couldn't be "
"retrieved from msg");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=hostname", NULL);
*op_errstr = gf_strdup(msg);
goto out;
}
@@ -1772,42 +1686,45 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
// volname is not present in case of sync all
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (!ret) {
- exists = glusterd_check_volume_exists(volname);
- if (!exists) {
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
snprintf(msg, sizeof(msg),
"Volume %s "
"does not exist",
volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_FOUND,
+ "Volume=%s", volname, NULL);
*op_errstr = gf_strdup(msg);
- ret = -1;
goto out;
}
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret)
- goto out;
-
- } else {
- ret = 0;
}
} else {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, hostname);
if (peerinfo == NULL) {
+ RCU_READ_UNLOCK;
ret = -1;
snprintf(msg, sizeof(msg), "%s, is not a friend", hostname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND,
+ "Peer_name=%s", hostname, NULL);
*op_errstr = gf_strdup(msg);
+ goto out;
} else if (!peerinfo->connected) {
+ RCU_READ_UNLOCK;
+ ret = -1;
snprintf(msg, sizeof(msg),
"%s, is not connected at "
"the moment",
hostname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_DISCONNECTED,
+ "Peer_name=%s", hostname, NULL);
*op_errstr = gf_strdup(msg);
- ret = -1;
+ goto out;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
out:
@@ -1831,7 +1748,9 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_volinfo_t *volinfo = NULL;
dict_t *vol_opts = NULL;
+#ifdef BUILD_GNFS
gf_boolean_t nfs_disabled = _gf_false;
+#endif
gf_boolean_t shd_enabled = _gf_false;
GF_ASSERT(dict);
@@ -1841,8 +1760,11 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
GF_ASSERT(priv);
ret = dict_get_uint32(dict, "cmd", &cmd);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=cmd", NULL);
goto out;
+ }
if (cmd & GF_CLI_STATUS_ALL)
goto out;
@@ -1853,17 +1775,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"The cluster is operating at "
"version 1. Getting the status of quotad is not "
"allowed in this state.");
- ret = -1;
- goto out;
- }
-
- if ((cmd & GF_CLI_STATUS_TIERD) &&
- (priv->op_version < GD_OP_VERSION_3_10_0)) {
- snprintf(msg, sizeof(msg),
- "The cluster is operating at "
- "version less than %d. Getting the "
- "status of tierd is not allowed in this state.",
- GD_OP_VERSION_3_10_0);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_GET_STAT_FAIL,
+ msg, NULL);
ret = -1;
goto out;
}
@@ -1875,6 +1788,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"version less than %d. Getting the "
"status of snapd is not allowed in this state.",
GD_OP_VERSION_3_6_0);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SNAP_STATUS_FAIL, msg,
+ NULL);
ret = -1;
goto out;
}
@@ -1889,47 +1804,61 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL,
+ "Volume=%s", volname, NULL);
ret = -1;
goto out;
}
ret = glusterd_validate_volume_id(dict, volinfo);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VALIDATE_FAILED, NULL);
goto out;
+ }
ret = glusterd_is_volume_started(volinfo);
if (!ret) {
snprintf(msg, sizeof(msg), "Volume %s is not started", volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_STARTED,
+ "Volume=%s", volname, NULL);
ret = -1;
goto out;
}
vol_opts = volinfo->dict;
- if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
- _gf_false);
- if (nfs_disabled) {
- ret = -1;
- snprintf(msg, sizeof(msg), "NFS server is disabled for volume %s",
- volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ if ((cmd & GF_CLI_STATUS_SHD) != 0) {
if (glusterd_is_shd_compatible_volume(volinfo)) {
shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
} else {
ret = -1;
snprintf(msg, sizeof(msg), "Volume %s is not Self-heal compatible",
volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_SHD_NOT_COMP,
+ "Volume=%s", volname, NULL);
goto out;
}
if (!shd_enabled) {
ret = -1;
snprintf(msg, sizeof(msg),
"Self-heal Daemon is disabled for volume %s", volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SELF_HEALD_DISABLED,
+ "Volume=%s", volname, NULL);
goto out;
}
+#ifdef BUILD_GNFS
+ } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+ nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
+ _gf_false);
+ if (nfs_disabled) {
+ ret = -1;
+ snprintf(msg, sizeof(msg), "NFS server is disabled for volume %s",
+ volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_NFS_GANESHA_DISABLED, "Volume=%s", volname, NULL);
+ goto out;
+ }
+#endif
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
if (!glusterd_is_volume_quota_enabled(volinfo)) {
ret = -1;
@@ -1937,6 +1866,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"Volume %s does not have "
"quota enabled",
volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_DISABLED,
+ "Volume=%s", volname, NULL);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
@@ -1946,15 +1877,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"Volume %s does not have "
"bitrot enabled",
volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- if (!glusterd_is_tierd_enabled(volinfo)) {
- ret = -1;
- snprintf(msg, sizeof(msg),
- "Volume %s does not have "
- "tierd enabled.",
- volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED,
+ "Volume=%s", volname, NULL);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
@@ -1965,6 +1889,10 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"bitrot enabled. Scrubber will be enabled "
"automatically if bitrot is enabled",
volname);
+ gf_smsg(
+ this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED,
+ "Scrubber will be enabled automatically if bitrot is enabled",
+ "Volume=%s", volname, NULL);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
@@ -1974,12 +1902,17 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"Volume %s does not have "
"uss enabled",
volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SNAPD_NOT_RUNNING,
+ "Volume=%s", volname, NULL);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=brick", NULL);
goto out;
+ }
ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
_gf_false);
@@ -1988,6 +1921,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"No brick %s in"
" volume %s",
brick, volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_NOT_FOUND,
+ "Brick=%s, Volume=%s", brick, volname, NULL);
ret = -1;
goto out;
}
@@ -2007,12 +1942,11 @@ out:
return ret;
}
-static int
+int
glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
{
int ret = -1;
char *volname = NULL;
- gf_boolean_t exists = _gf_false;
char msg[2048] = {
0,
};
@@ -2025,14 +1959,12 @@ glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
ret = glusterd_volinfo_find(volname, &volinfo);
- if ((!exists) || (ret < 0)) {
+ if (ret) {
snprintf(msg, sizeof(msg),
"Volume %s, "
"doesn't exist",
volname);
- ret = -1;
goto out;
}
@@ -2055,8 +1987,8 @@ glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
ret = -1;
goto out;
}
- }
- if ((GF_CLI_STATS_STOP == stats_op) || (GF_CLI_STATS_INFO == stats_op)) {
+ } else if ((GF_CLI_STATS_STOP == stats_op) ||
+ (GF_CLI_STATS_INFO == stats_op)) {
if (_gf_false == glusterd_is_profile_on(volinfo)) {
snprintf(msg, sizeof(msg),
"Profile on Volume %s is"
@@ -2127,7 +2059,7 @@ _delete_reconfig_opt(dict_t *this, char *key, data_t *value, void *data)
* option is going to be reset
* */
if (!strncmp(key, VKEY_FEATURES_BITROT, strlen(VKEY_FEATURES_BITROT))) {
- dict_deln(this, VKEY_FEATURES_SCRUB, SLEN(VKEY_FEATURES_SCRUB));
+ dict_del_sizen(this, VKEY_FEATURES_SCRUB);
}
out:
return 0;
@@ -2196,17 +2128,16 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
if (ret)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure(volinfo);
- if (ret)
- goto out;
- }
svc = &(volinfo->gfproxyd.svc);
ret = svc->reconfigure(volinfo);
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -2221,7 +2152,7 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret)
goto out;
}
@@ -2278,8 +2209,10 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
ret = -1;
dup_opt = dict_new();
- if (!dup_opt)
+ if (!dup_opt) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
if (!all) {
dict_copy(conf->opts, dup_opt);
dict_del(dup_opt, key);
@@ -2290,8 +2223,11 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
ret = dict_set_strn(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
+ }
ret = glusterd_store_options(this, dup_opt);
if (ret)
@@ -2302,9 +2238,11 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
ret = dict_set_dynstrn(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
- else
+ } else
next_version = NULL;
if (!all) {
@@ -2398,6 +2336,16 @@ glusterd_op_reset_volume(dict_t *dict, char **op_rspstr)
}
}
+ if (!strcmp(key, "ganesha.enable") || !strcmp(key, "all")) {
+ if (glusterd_check_ganesha_export(volinfo) &&
+ is_origin_glusterd(dict)) {
+ ret = manage_export_config(volname, "off", op_rspstr);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
+ "Could not reset ganesha.enable key");
+ }
+ }
+
out:
GF_FREE(key_fixed);
if (quorum_action)
@@ -2440,6 +2388,7 @@ glusterd_start_bricks(glusterd_volinfo_t *volinfo)
if (!brickinfo->start_triggered) {
pthread_mutex_lock(&brickinfo->restart_mutex);
{
+ /* coverity[SLEEP] */
ret = glusterd_brick_start(volinfo, brickinfo, _gf_false,
_gf_false);
}
@@ -2573,11 +2522,15 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
uint32_t op_version = 0;
glusterd_volinfo_t *volinfo = NULL;
glusterd_svc_t *svc = NULL;
+ gf_boolean_t svcs_reconfigure = _gf_false;
conf = this->private;
ret = dict_get_strn(dict, "key1", SLEN("key1"), &key);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=key1", NULL);
goto out;
+ }
ret = dict_get_strn(dict, "value1", SLEN("value1"), &value);
if (ret) {
@@ -2652,18 +2605,16 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure(volinfo);
- if (ret)
- goto out;
- }
-
svc = &(volinfo->gfproxyd.svc);
ret = svc->reconfigure(volinfo);
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -2673,16 +2624,19 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
goto out;
}
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_SVC_RESTART_FAIL,
- "Unable to restart "
- "services");
- goto out;
- }
+ svcs_reconfigure = _gf_true;
+ }
+ }
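+ /* Reconfigure services once, after the loop, instead of once per
+ * started volume. */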
+ if (svcs_reconfigure) {
+ ret = glusterd_svcs_reconfigure(NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
+ "Unable to restart "
+ "services");
+ goto out;
}
}
+
ret = glusterd_store_global_info(this);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
@@ -2695,12 +2649,17 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
}
ret = -1;
dup_opt = dict_new();
- if (!dup_opt)
+ if (!dup_opt) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
dict_copy(conf->opts, dup_opt);
ret = dict_set_str(dup_opt, key, value);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version);
if (ret)
@@ -2708,8 +2667,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
ret = dict_set_strn(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
+ }
ret = glusterd_store_options(this, dup_opt);
if (ret)
@@ -2720,9 +2682,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
ret = dict_set_dynstrn(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
- else
+ } else
next_version = NULL;
dup_value = gf_strdup(value);
@@ -2730,9 +2694,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
goto out;
ret = dict_set_dynstr(conf->opts, key, dup_value);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
- else
+ } else
dup_value = NULL; /* Protect the allocation from GF_FREE */
out:
@@ -2811,7 +2777,7 @@ glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
goto out;
}
- ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0777, _gf_true);
+ ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0755, _gf_true);
if (-1 == ret) {
snprintf(errstr, PATH_MAX,
"Failed to create shared "
@@ -2945,6 +2911,11 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
if (strcmp(key, "config.memory-accounting") == 0) {
ret = gf_string2boolean(value, &volinfo->memory_accounting);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid value in key-value pair.");
+ goto out;
+ }
}
if (strcmp(key, "config.transport") == 0) {
@@ -2965,6 +2936,10 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
}
}
+ ret = glusterd_check_ganesha_cmd(key, value, errstr, dict);
+ if (ret == -1)
+ goto out;
+
if (!is_key_glusterd_hooks_friendly(key)) {
ret = glusterd_check_option_exists(key, &key_fixed);
GF_ASSERT(ret);
@@ -3044,17 +3019,16 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
if (ret)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure(volinfo);
- if (ret)
- goto out;
- }
svc = &(volinfo->gfproxyd.svc);
ret = svc->reconfigure(volinfo);
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -3070,7 +3044,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
"Unable to restart services");
@@ -3091,18 +3065,16 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure(volinfo);
- if (ret)
- goto out;
- }
-
svc = &(volinfo->gfproxyd.svc);
ret = svc->reconfigure(volinfo);
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -3118,7 +3090,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
"Unable to restart services");
@@ -3161,6 +3133,8 @@ glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
snprintf(msg, sizeof(msg),
"hostname couldn't be "
"retrieved from msg");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=hostname", NULL);
*op_errstr = gf_strdup(msg);
goto out;
}
@@ -3185,6 +3159,7 @@ glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (!rsp_dict) {
// this should happen only on source
+ gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_INVALID_ARGUMENT, NULL);
ret = 0;
goto out;
}
@@ -3249,13 +3224,11 @@ glusterd_remove_profile_volume_options(glusterd_volinfo_t *volinfo)
{
GF_ASSERT(volinfo);
- dict_deln(volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT,
- SLEN(VKEY_DIAG_LAT_MEASUREMENT));
- dict_deln(volinfo->dict, VKEY_DIAG_CNT_FOP_HITS,
- SLEN(VKEY_DIAG_CNT_FOP_HITS));
+ dict_del_sizen(volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT);
+ dict_del_sizen(volinfo->dict, VKEY_DIAG_CNT_FOP_HITS);
}
-static int
+int
glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
int ret = -1;
@@ -3327,7 +3300,7 @@ glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret)
goto out;
}
@@ -3347,10 +3320,11 @@ _add_remove_bricks_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo,
int ret = -1;
int count = 0;
int i = 0;
- char brick_key[1024] = {
+ char brick_key[16] = {
0,
};
- char dict_key[1024] = {
+ char dict_key[64] = {
+ /* dict_key is small since the prefix is at most 32 chars */
0,
};
int keylen;
@@ -3415,7 +3389,7 @@ static int
_add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
{
int ret = -1;
- char key[64] = {
+ char key[32] = {
0,
};
int keylen;
@@ -3430,7 +3404,6 @@ _add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
GF_ASSERT(this);
switch (op) {
- case GD_OP_REMOVE_TIER_BRICK:
case GD_OP_REMOVE_BRICK:
snprintf(key, sizeof(key), "task%d", index);
ret = _add_remove_bricks_to_dict(dict, volinfo, key);
@@ -3440,7 +3413,6 @@ _add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
"Failed to add remove bricks to dict");
goto out;
}
- case GD_OP_TIER_MIGRATE:
case GD_OP_REBALANCE:
uuid_str = gf_strdup(uuid_utoa(volinfo->rebal.rebalance_id));
status = volinfo->rebal.defrag_status;
@@ -3495,25 +3467,12 @@ glusterd_aggregate_task_status(dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
int ret = -1;
int tasks = 0;
xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
this = THIS;
GF_ASSERT(this);
- conf = this->private;
if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- if (conf->op_version > GD_OP_VERSION_3_10_0)
- goto done;
- if (volinfo->rebal.op == GD_OP_REMOVE_BRICK)
- ret = _add_task_to_dict(rsp_dict, volinfo,
- GD_OP_REMOVE_TIER_BRICK, tasks);
- else if (volinfo->rebal.op == GD_OP_REBALANCE)
- ret = _add_task_to_dict(rsp_dict, volinfo, GD_OP_TIER_MIGRATE,
- tasks);
- } else
- ret = _add_task_to_dict(rsp_dict, volinfo, volinfo->rebal.op,
- tasks);
+ ret = _add_task_to_dict(rsp_dict, volinfo, volinfo->rebal.op, tasks);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
@@ -3522,15 +3481,12 @@ glusterd_aggregate_task_status(dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
}
tasks++;
}
-done:
ret = dict_set_int32n(rsp_dict, "tasks", SLEN("tasks"), tasks);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Error setting tasks count in dict");
goto out;
}
- ret = 0;
-
out:
return ret;
}
@@ -3542,7 +3498,6 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
int node_count = 0;
int brick_index = -1;
int other_count = 0;
- int hot_brick_count = -1;
int other_index = 0;
uint32_t cmd = 0;
char *volname = NULL;
@@ -3552,9 +3507,12 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_conf_t *priv = NULL;
dict_t *vol_opts = NULL;
+#ifdef BUILD_GNFS
gf_boolean_t nfs_disabled = _gf_false;
+#endif
gf_boolean_t shd_enabled = _gf_false;
gf_boolean_t origin_glusterd = _gf_false;
+ int snapd_enabled, bitrot_enabled, volume_quota_enabled;
this = THIS;
GF_ASSERT(this);
@@ -3602,29 +3560,22 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
vol_opts = volinfo->dict;
- if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict, 0,
- vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
-
- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict, 0,
+ if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
-
- } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
- ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
+#ifdef BUILD_GNFS
+ } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+ ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict, 0,
vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
+#endif
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict, 0,
vol_opts);
@@ -3639,14 +3590,14 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- ret = glusterd_add_tierd_to_dict(volinfo, rsp_dict, other_index);
+ } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+ ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict, other_index);
if (ret)
goto out;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
- ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict, other_index);
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ ret = glusterd_add_shd_to_dict(volinfo, rsp_dict, other_index);
if (ret)
goto out;
other_count++;
@@ -3675,6 +3626,15 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
} else {
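+ /* Evaluate the per-volume service flags once, outside the per-brick
+ * loop. */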
+ snapd_enabled = glusterd_is_snapd_enabled(volinfo);
+ shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
+#ifdef BUILD_GNFS
+ nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
+ _gf_false);
+#endif
+ volume_quota_enabled = glusterd_is_volume_quota_enabled(volinfo);
+ bitrot_enabled = glusterd_is_bitrot_enabled(volinfo);
+
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
brick_index++;
@@ -3693,7 +3653,7 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
other_index = brick_index + 1;
- if (glusterd_is_snapd_enabled(volinfo)) {
+ if (snapd_enabled) {
ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict,
other_index);
if (ret)
@@ -3703,18 +3663,18 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
node_count++;
}
- if (glusterd_is_tierd_enabled(volinfo)) {
- ret = glusterd_add_tierd_to_dict(volinfo, rsp_dict,
- other_index);
- if (ret)
- goto out;
- other_count++;
- other_index++;
- node_count++;
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+ if (shd_enabled) {
+ ret = glusterd_add_shd_to_dict(volinfo, rsp_dict,
+ other_index);
+ if (ret)
+ goto out;
+ other_count++;
+ other_index++;
+ node_count++;
+ }
}
-
- nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
- _gf_false);
+#ifdef BUILD_GNFS
if (!nfs_disabled) {
ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict,
other_index, vol_opts);
@@ -3724,20 +3684,8 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
other_count++;
node_count++;
}
-
- if (glusterd_is_shd_compatible_volume(volinfo))
- shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
- if (shd_enabled) {
- ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict,
- other_index, vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
- other_index++;
- }
-
- if (glusterd_is_volume_quota_enabled(volinfo)) {
+#endif
+ if (volume_quota_enabled) {
ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict,
other_index, vol_opts);
if (ret)
@@ -3747,7 +3695,7 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
other_index++;
}
- if (glusterd_is_bitrot_enabled(volinfo)) {
+ if (bitrot_enabled) {
ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict,
other_index, vol_opts);
if (ret)
@@ -3755,11 +3703,8 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
other_count++;
node_count++;
other_index++;
- }
-
- /* For handling scrub status. Scrub daemon will be
- * running automatically when bitrot is enable*/
- if (glusterd_is_bitrot_enabled(volinfo)) {
+ /* For handling scrub status. The scrub daemon runs
+ * automatically when bitrot is enabled. */
ret = glusterd_add_node_to_dict(priv->scrub_svc.name, rsp_dict,
other_index, vol_opts);
if (ret)
@@ -3770,35 +3715,31 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- hot_brick_count = volinfo->tier_info.hot_brick_count;
- ret = dict_set_int32n(rsp_dict, "hot_brick_count", SLEN("hot_brick_count"),
- hot_brick_count);
- if (ret)
- goto out;
-
ret = dict_set_int32n(rsp_dict, "type", SLEN("type"), volinfo->type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=type", NULL);
goto out;
+ }
ret = dict_set_int32n(rsp_dict, "brick-index-max", SLEN("brick-index-max"),
brick_index);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Error setting brick-index-max to dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=brick-index-max", NULL);
goto out;
}
ret = dict_set_int32n(rsp_dict, "other-count", SLEN("other-count"),
other_count);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Error setting other-count to dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=other-count", NULL);
goto out;
}
ret = dict_set_int32n(rsp_dict, "count", SLEN("count"), node_count);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Error setting node count to dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
}
@@ -3864,7 +3805,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -3885,7 +3826,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = proc->fn(NULL, this, peerinfo);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_WARNING, 0,
GD_MSG_LOCK_REQ_SEND_FAIL,
"Failed to send lock request "
@@ -3906,7 +3847,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
dict_unref(dict);
@@ -3915,7 +3856,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn(NULL, this, dict);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_WARNING, 0,
GD_MSG_MGMTV3_LOCK_REQ_SEND_FAIL,
"Failed to send mgmt_v3 lock "
@@ -3931,7 +3872,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
}
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
@@ -3967,7 +3908,7 @@ glusterd_op_ac_send_unlock(glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -4039,7 +3980,7 @@ glusterd_op_ac_send_unlock(glusterd_op_sm_event_t *event, void *ctx)
}
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
@@ -4275,8 +4216,10 @@ glusterd_dict_set_volid(dict_t *dict, char *volname, char **op_errstr)
this = THIS;
GF_ASSERT(this);
- if (!dict || !volname)
+ if (!dict || !volname) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
@@ -4426,9 +4369,7 @@ glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
req_dict = dict_ref(dict);
} break;
- case GD_OP_REMOVE_BRICK:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_REMOVE_TIER_BRICK: {
+ case GD_OP_REMOVE_BRICK: {
dict_t *dict = ctx;
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
@@ -4480,8 +4421,6 @@ glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_BARRIER:
case GD_OP_BITROT:
- case GD_OP_TIER_START_STOP:
- case GD_OP_TIER_STATUS:
case GD_OP_SCRUB_STATUS:
case GD_OP_SCRUB_ONDEMAND:
case GD_OP_RESET_BRICK: {
@@ -4497,7 +4436,8 @@ glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
case GD_OP_SYNC_VOLUME:
case GD_OP_COPY_FILE:
- case GD_OP_SYS_EXEC: {
+ case GD_OP_SYS_EXEC:
+ case GD_OP_GANESHA: {
dict_copy(dict, req_dict);
} break;
@@ -4592,7 +4532,7 @@ glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -4612,7 +4552,7 @@ glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to "
"set peerinfo");
@@ -4632,7 +4572,7 @@ glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
pending_count++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
out:
@@ -4677,7 +4617,7 @@ glusterd_op_volume_dict_uuid_to_hostname(dict_t *dict, const char *key_fmt,
{
int ret = -1;
int i = 0;
- char key[1024];
+ char key[128];
int keylen;
char *uuid_str = NULL;
uuid_t uuid = {
@@ -5045,9 +4985,6 @@ glusterd_op_modify_op_ctx(glusterd_op_t op, void *ctx)
* same
*/
case GD_OP_DEFRAG_BRICK_VOLUME:
- case GD_OP_TIER_STATUS:
- case GD_OP_REMOVE_TIER_BRICK:
- case GD_OP_DETACH_TIER_STATUS:
case GD_OP_SCRUB_STATUS:
case GD_OP_SCRUB_ONDEMAND:
ret = dict_get_int32n(op_ctx, "count", SLEN("count"), &count);
@@ -5219,7 +5156,7 @@ glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -5239,7 +5176,7 @@ glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
goto out;
@@ -5257,7 +5194,7 @@ glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
pending_count++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
gf_msg_debug(this->name, 0,
@@ -5658,9 +5595,14 @@ glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
glusterd_op_info_t txn_op_info = {
{0},
};
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
GF_ASSERT(ctx);
req_ctx = ctx;
@@ -5699,6 +5641,7 @@ glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Failed to set transaction id.");
GF_FREE(txn_id);
+ txn_id = NULL;
goto out;
}
@@ -5712,9 +5655,13 @@ out:
gf_msg_debug(this->name, 0, "Returning with %d", ret);
/* for no volname transactions, the txn_opinfo needs to be cleaned up
- * as there's no unlock event triggered
+ * as there's no unlock event triggered. However, if the originator node of
+ * this transaction is still running at an op-version lower than
+ * GD_OP_VERSION_6_0 (60000), txn_opinfo can't be cleared here, as that would
+ * lead to a race where op_ctx is referred to after it has been freed.
*/
- if (txn_op_info.skip_locking)
+ if (txn_op_info.skip_locking && priv->op_version >= GD_OP_VERSION_6_0 &&
+ txn_id)
ret = glusterd_clear_txn_opinfo(txn_id);
if (rsp_dict)
@@ -5733,8 +5680,6 @@ glusterd_need_brick_op(glusterd_op_t op)
switch (op) {
case GD_OP_PROFILE_VOLUME:
case GD_OP_STATUS_VOLUME:
- case GD_OP_TIER_STATUS:
- case GD_OP_DETACH_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_HEAL_VOLUME:
case GD_OP_SCRUB_STATUS:
@@ -5947,6 +5892,10 @@ glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_op_stage_set_volume(dict, op_errstr);
break;
+ case GD_OP_GANESHA:
+ ret = glusterd_op_stage_set_ganesha(dict, op_errstr);
+ break;
+
case GD_OP_RESET_VOLUME:
ret = glusterd_op_stage_reset_volume(dict, op_errstr);
break;
@@ -6028,10 +5977,8 @@ glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr,
static void
glusterd_wait_for_blockers(glusterd_conf_t *priv)
{
- while (priv->blockers) {
- synclock_unlock(&priv->big_lock);
- sleep(1);
- synclock_lock(&priv->big_lock);
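+ /* synccond_wait() atomically drops big_lock while waiting and re-acquires
+ * it on wakeup, replacing the old unlock/sleep/lock polling loop. */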
+ while (GF_ATOMIC_GET(priv->blockers)) {
+ synccond_wait(&priv->cond_blockers, &priv->big_lock);
}
}
@@ -6053,7 +6000,6 @@ glusterd_op_commit_perform(glusterd_op_t op, dict_t *dict, char **op_errstr,
break;
case GD_OP_STOP_VOLUME:
- glusterd_wait_for_blockers(this->private);
ret = glusterd_op_stop_volume(dict);
break;
@@ -6075,7 +6021,9 @@ glusterd_op_commit_perform(glusterd_op_t op, dict_t *dict, char **op_errstr,
case GD_OP_SET_VOLUME:
ret = glusterd_op_set_volume(dict, op_errstr);
break;
-
+ case GD_OP_GANESHA:
+ ret = glusterd_op_set_ganesha(dict, op_errstr);
+ break;
case GD_OP_RESET_VOLUME:
ret = glusterd_op_reset_volume(dict, op_errstr);
break;
@@ -6260,9 +6208,6 @@ glusterd_bricks_select_remove_brick(dict_t *dict, char **op_errstr,
goto out;
}
- if (command == GF_DEFRAG_CMD_DETACH_START)
- return glusterd_bricks_select_tier_volume(dict, op_errstr, selected);
-
ret = dict_get_int32n(dict, "force", SLEN("force"), &force);
if (ret) {
gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
@@ -6366,6 +6311,7 @@ glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
goto out;
break;
case GF_CLI_STATS_INFO:
+#ifdef BUILD_GNFS
ret = dict_get_str_boolean(dict, "nfs", _gf_false);
if (ret) {
if (!priv->nfs_svc.online) {
@@ -6390,6 +6336,7 @@ glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
ret = 0;
goto out;
}
+#endif
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
if (glusterd_is_brick_started(brickinfo)) {
@@ -6421,6 +6368,7 @@ glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
break;
case GF_CLI_STATS_TOP:
+#ifdef BUILD_GNFS
ret = dict_get_str_boolean(dict, "nfs", _gf_false);
if (ret) {
if (!priv->nfs_svc.online) {
@@ -6445,6 +6393,7 @@ glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
ret = 0;
goto out;
}
+#endif
ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
if (!ret) {
ret = glusterd_volume_brickinfo_get_by_brick(
@@ -6664,6 +6613,10 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo = NULL;
int hxl_children = 0;
uuid_t candidate = {0};
+ int brick_index = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int delta = 0;
+ uuid_t candidate_max = {0};
if ((*index) == 0)
(*index)++;
@@ -6675,13 +6628,40 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
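+ /* First pass: among bricks hosted by this node or by connected peers,
+ * remember the highest UUID as candidate_max; it is used below to decide
+ * which node takes over replica sets whose preferred peer is down. */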
+ if (gf_uuid_compare(brickinfo->uuid, candidate_max) > 0) {
+ if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
+ gf_uuid_copy(candidate_max, brickinfo->uuid);
+ } else {
+ peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL);
+ if (peerinfo && peerinfo->connected) {
+ gf_uuid_copy(candidate_max, brickinfo->uuid);
+ }
+ }
+ }
+ }
+
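+ /* Second pass: walk the bricks replica-set by replica-set and pick one
+ * candidate per set; fall back to this node when the chosen peer is
+ * unreachable and this node holds candidate_max. */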
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
if (gf_uuid_is_null(brickinfo->uuid))
(void)glusterd_resolve_brick(brickinfo);
- if (gf_uuid_compare(brickinfo->uuid, candidate) > 0)
- gf_uuid_copy(candidate, brickinfo->uuid);
+ delta %= hxl_children;
+ if ((*index + delta) == (brick_index + hxl_children)) {
+ if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
+ gf_uuid_copy(candidate, brickinfo->uuid);
+ } else {
+ peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL);
+ if (peerinfo && peerinfo->connected) {
+ gf_uuid_copy(candidate, brickinfo->uuid);
+ } else if (peerinfo &&
+ (!gf_uuid_compare(candidate_max, MY_UUID))) {
+ _add_hxlator_to_dict(dict, volinfo,
+ ((*index) - 1) / hxl_children,
+ (*hxlator_count));
+ (*hxlator_count)++;
+ }
+ }
- if ((*index) % hxl_children == 0) {
if (!gf_uuid_compare(MY_UUID, candidate)) {
_add_hxlator_to_dict(dict, volinfo,
((*index) - 1) / hxl_children,
@@ -6689,6 +6669,8 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
(*hxlator_count)++;
}
gf_uuid_clear(candidate);
+ brick_index += hxl_children;
+ delta++;
}
(*index)++;
@@ -6758,12 +6740,12 @@ fill_shd_status_for_local_bricks(dict_t *dict, glusterd_volinfo_t *volinfo,
dict_t *req_dict)
{
glusterd_brickinfo_t *brickinfo = NULL;
- char *msg = "self-heal-daemon is not running on";
- char key[1024] = {
+ static char *msg = "self-heal-daemon is not running on";
+ char key[32] = {
0,
};
int keylen;
- char value[1024] = {
+ char value[128] = {
0,
};
int ret = 0;
@@ -6832,16 +6814,18 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
int ret = -1;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
+ glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT(this);
priv = this->private;
GF_ASSERT(priv);
+ svc = &(volinfo->shd.svc);
switch (heal_op) {
case GF_SHD_OP_INDEX_SUMMARY:
case GF_SHD_OP_STATISTICS_HEAL_COUNT:
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
if (!rsp_dict) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
"Received "
@@ -6862,7 +6846,7 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
break;
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
if (!rsp_dict) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
"Received "
@@ -6913,7 +6897,6 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
char *volname = NULL;
glusterd_conf_t *priv = NULL;
glusterd_volinfo_t *volinfo = NULL;
- glusterd_volinfo_t *dup_volinfo = NULL;
xlator_t *this = NULL;
char msg[2048] = {
0,
@@ -6951,31 +6934,10 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
"heal op invalid");
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, _gf_false,
- volname);
- if (ret < 0)
- goto out;
-
- ret = glusterd_shd_select_brick_xlator(
- dict, heal_op, dup_volinfo, &index, &hxlator_count, rsp_dict);
- glusterd_volinfo_delete(dup_volinfo);
- if (ret < 0)
- goto out;
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, _gf_true,
- volname);
- if (ret < 0)
- goto out;
- ret = glusterd_shd_select_brick_xlator(
- dict, heal_op, dup_volinfo, &index, &hxlator_count, rsp_dict);
- glusterd_volinfo_delete(dup_volinfo);
- if (ret < 0)
- goto out;
- } else {
- ret = glusterd_shd_select_brick_xlator(dict, heal_op, volinfo, &index,
- &hxlator_count, rsp_dict);
- if (ret < 0)
- goto out;
+ ret = glusterd_shd_select_brick_xlator(dict, heal_op, volinfo, &index,
+ &hxlator_count, rsp_dict);
+ if (ret < 0) {
+ goto out;
}
if (!hxlator_count)
@@ -6997,7 +6959,7 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
ret = -1;
goto out;
} else {
- pending_node->node = &(priv->shd_svc);
+ pending_node->node = &(volinfo->shd.svc);
pending_node->type = GD_NODE_SHD;
cds_list_add_tail(&pending_node->list, selected);
pending_node = NULL;
@@ -7008,69 +6970,7 @@ out:
return ret;
}
-int
-glusterd_bricks_select_tier_volume(dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
-{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char msg[2048] = {
- 0,
- };
- glusterd_pending_node_t *pending_node = NULL;
- glusterd_brickinfo_t *brick = NULL;
- gf_boolean_t retval = _gf_false;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "volume name get failed");
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
-
- *op_errstr = gf_strdup(msg);
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
- /*check if this node needs tierd*/
- cds_list_for_each_entry(brick, &volinfo->bricks, brick_list)
- {
- if (gf_uuid_compare(MY_UUID, brick->uuid) == 0) {
- retval = _gf_true;
- break;
- }
- }
-
- if (!retval)
- goto out;
-
- pending_node = GF_CALLOC(1, sizeof(*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = volinfo;
- pending_node->type = GD_NODE_TIERD;
- cds_list_add_tail(&pending_node->list, selected);
- pending_node = NULL;
- }
- ret = 0;
-
-out:
- return ret;
-}
-
-int
+static int
glusterd_bricks_select_rebalance_volume(dict_t *dict, char **op_errstr,
struct cds_list_head *selected)
{
@@ -7131,6 +7031,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
glusterd_pending_node_t *pending_node = NULL;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
+ glusterd_svc_t *svc = NULL;
GF_ASSERT(dict);
@@ -7159,7 +7060,6 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_SHD:
case GF_CLI_STATUS_QUOTAD:
case GF_CLI_STATUS_SNAPD:
- case GF_CLI_STATUS_TIERD:
case GF_CLI_STATUS_BITD:
case GF_CLI_STATUS_SCRUB:
case GF_CLI_STATUS_CLIENT_LIST:
@@ -7206,6 +7106,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
cds_list_add_tail(&pending_node->list, selected);
ret = 0;
+#ifdef BUILD_GNFS
} else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
if (!priv->nfs_svc.online) {
ret = -1;
@@ -7225,8 +7126,10 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
cds_list_add_tail(&pending_node->list, selected);
ret = 0;
+#endif
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- if (!priv->shd_svc.online) {
+ svc = &(volinfo->shd.svc);
+ if (!svc->online) {
ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SELF_HEALD_DISABLED,
"Self-heal daemon is not running");
@@ -7238,7 +7141,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- pending_node->node = &(priv->shd_svc);
+ pending_node->node = svc;
pending_node->type = GD_NODE_SHD;
pending_node->index = 0;
cds_list_add_tail(&pending_node->list, selected);
@@ -7304,30 +7207,6 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
cds_list_add_tail(&pending_node->list, selected);
ret = 0;
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- if (!volinfo->tierd.svc.online) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_NOT_RUNNING,
- "tierd is not "
- "running");
- ret = -1;
- goto out;
- }
- pending_node = GF_CALLOC(1, sizeof(*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "failed to allocate "
- "memory for pending node");
- ret = -1;
- goto out;
- }
-
- pending_node->node = (void *)(&volinfo->tierd);
- pending_node->type = GD_NODE_TIERD;
- pending_node->index = 0;
- cds_list_add_tail(&pending_node->list, selected);
-
- ret = 0;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!volinfo->snapd.svc.online) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_NOT_RUNNING,
@@ -7503,6 +7382,7 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
glusterd_op_t op = GD_OP_NONE;
glusterd_req_ctx_t *req_ctx = NULL;
char *op_errstr = NULL;
+ gf_boolean_t free_req_ctx = _gf_false;
this = THIS;
priv = this->private;
@@ -7511,6 +7391,9 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
req_ctx = ctx;
} else {
req_ctx = GF_CALLOC(1, sizeof(*req_ctx), gf_gld_mt_op_allack_ctx_t);
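+ /* req_ctx allocated here is owned by this function; mark it so the
+ * error path at 'out' can free it exactly once. */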
+ if (!req_ctx)
+ goto out;
+ free_req_ctx = _gf_true;
op = glusterd_op_get_op();
req_ctx->op = op;
gf_uuid_copy(req_ctx->uuid, MY_UUID);
@@ -7522,7 +7405,6 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
if (op_errstr == NULL)
gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
opinfo.op_errstr = op_errstr;
- GF_FREE(req_ctx);
goto out;
}
}
@@ -7541,6 +7423,8 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
}
out:
+ if (ret && free_req_ctx)
+ GF_FREE(req_ctx);
gf_msg_debug(this->name, 0, "Returning with %d", ret);
return ret;
@@ -7642,11 +7526,6 @@ glusterd_op_bricks_select(glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_bricks_select_status_volume(dict, op_errstr,
selected);
break;
- case GD_OP_TIER_STATUS:
- ret = glusterd_bricks_select_tier_volume(dict, op_errstr, selected);
- break;
-
- case GD_OP_DETACH_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
ret = glusterd_bricks_select_rebalance_volume(dict, op_errstr,
selected);
@@ -8090,9 +7969,12 @@ glusterd_op_sm()
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
xlator_t *this = NULL;
glusterd_op_info_t txn_op_info;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
ret = synclock_trylock(&gd_op_sm_lock);
if (ret) {
@@ -8170,7 +8052,8 @@ glusterd_op_sm()
"Unable to clear "
"transaction's opinfo");
} else {
- if (!(event_type == GD_OP_EVENT_STAGE_OP &&
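+ /* Clusters below op-version GD_OP_VERSION_6_0 always persist the txn
+ * opinfo here; newer clusters skip it for staged skip_locking
+ * transactions, whose opinfo is cleared in the stage-op handler. */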
+ if ((priv->op_version < GD_OP_VERSION_6_0) ||
+ !(event_type == GD_OP_EVENT_STAGE_OP &&
opinfo.state.state == GD_OP_STATE_STAGED &&
opinfo.skip_locking)) {
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
@@ -8249,13 +8132,11 @@ glusterd_op_free_ctx(glusterd_op_t op, void *ctx)
case GD_OP_PROFILE_VOLUME:
case GD_OP_STATUS_VOLUME:
case GD_OP_REBALANCE:
- case GD_OP_TIER_START_STOP:
case GD_OP_HEAL_VOLUME:
case GD_OP_STATEDUMP_VOLUME:
case GD_OP_CLEARLOCKS_VOLUME:
case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_MAX_OPVERSION:
- case GD_OP_TIER_STATUS:
dict_unref(ctx);
break;
default:
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 4fcaff1c8ba..8a24b16612a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -11,13 +11,13 @@
#define _GLUSTERD_OP_SM_H_
#include <pthread.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
-#include "glusterfs.h"
-#include "xlator.h"
-#include "logging.h"
-#include "call-stub.h"
-#include "byte-order.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/byte-order.h>
#include "glusterd.h"
#include "protocol-common.h"
#include "glusterd-hooks.h"
@@ -259,9 +259,6 @@ glusterd_op_init_commit_rsp_dict(glusterd_op_t op);
void
glusterd_op_modify_op_ctx(glusterd_op_t op, void *op_ctx);
-void
-glusterd_op_perform_detach_tier(glusterd_volinfo_t *volinfo);
-
int
glusterd_set_detach_bricks(dict_t *dict, glusterd_volinfo_t *volinfo);
@@ -286,10 +283,6 @@ glusterd_stop_bricks(glusterd_volinfo_t *volinfo);
int
glusterd_defrag_volume_node_rsp(dict_t *req_dict, dict_t *rsp_dict,
dict_t *op_ctx);
-#ifdef HAVE_BD_XLATOR
-int
-glusterd_is_valid_vg(glusterd_brickinfo_t *brick, int check_tag, char *msg);
-#endif
int32_t
glusterd_get_txn_opinfo(uuid_t *txn_id, glusterd_op_info_t *opinfo);
@@ -309,6 +302,12 @@ glusterd_set_opinfo(char *errstr, int32_t op_errno, int32_t op_ret);
int
glusterd_dict_set_volid(dict_t *dict, char *volname, char **op_errstr);
-int32_t
-glusterd_tier_op(xlator_t *this, void *data);
+int
+glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
+
+int
+glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr);
+
+int
+gd_set_commit_hash(dict_t *dict);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 7d2d28520fc..18d355cb186 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -12,7 +12,7 @@
#include "glusterd-store.h"
#include "glusterd-server-quorum.h"
#include "glusterd-messages.h"
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "glusterd-utils.h"
void
@@ -48,6 +48,7 @@ glusterd_peerinfo_destroy(struct rcu_head *head)
}
glusterd_sm_tr_log_delete(&peerinfo->sm_log);
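+ /* delete_lock is expected to be held by the cleanup path; release it
+ * before destroying, as destroying a locked mutex is undefined
+ * behaviour. */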
+ pthread_mutex_unlock(&peerinfo->delete_lock);
pthread_mutex_destroy(&peerinfo->delete_lock);
GF_FREE(peerinfo);
@@ -81,10 +82,112 @@ glusterd_peerinfo_cleanup(glusterd_peerinfo_t *peerinfo)
call_rcu(&peerinfo->rcu_head.head, glusterd_peerinfo_destroy);
if (quorum_action)
+ /* coverity[SLEEP] */
glusterd_do_quorum_action();
return 0;
}
+/* gd_peerinfo_find_from_hostname iterates over all the addresses saved for
+ * each peer and matches them against @hoststr.
+ * Returns the matched peer if found, else returns NULL.
+ */
+static glusterd_peerinfo_t *
+gd_peerinfo_find_from_hostname(const char *hoststr)
+{
+ xlator_t *this = THIS;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *peer = NULL;
+ glusterd_peerinfo_t *found = NULL;
+ glusterd_peer_hostname_t *tmphost = NULL;
+
+ GF_ASSERT(this != NULL);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, (priv != NULL), out);
+
+ GF_VALIDATE_OR_GOTO(this->name, (hoststr != NULL), out);
+
+ RCU_READ_LOCK;
+ cds_list_for_each_entry_rcu(peer, &priv->peers, uuid_list)
+ {
+ cds_list_for_each_entry_rcu(tmphost, &peer->hostnames, hostname_list)
+ {
+ if (!strncasecmp(tmphost->hostname, hoststr, 1024)) {
+ gf_msg_debug(this->name, 0, "Friend %s found.. state: %d",
+ tmphost->hostname, peer->state.state);
+ found = peer; /* Probably needs to be dereferenced */
+ goto unlock;
+ }
+ }
+ }
+unlock:
+ RCU_READ_UNLOCK;
+out:
+ return found;
+}
+
+/* gd_peerinfo_find_from_addrinfo iterates over all the addresses saved for
+ * each peer, resolves them and compares them to @addr.
+ *
+ * NOTE: As getaddrinfo is a blocking call and is performed multiple times in
+ * this function, it can leave the calling thread blocked for a significant
+ * amount of time.
+ *
+ * Returns the matched peer if found, else returns NULL.
+ */
+static glusterd_peerinfo_t *
+gd_peerinfo_find_from_addrinfo(const struct addrinfo *addr)
+{
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = NULL;
+ glusterd_peerinfo_t *peer = NULL;
+ glusterd_peerinfo_t *found = NULL;
+ glusterd_peer_hostname_t *address = NULL;
+ int ret = 0;
+ struct addrinfo *paddr = NULL;
+ struct addrinfo *tmp = NULL;
+
+ GF_ASSERT(this != NULL);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, (conf != NULL), out);
+
+ RCU_READ_LOCK;
+ cds_list_for_each_entry_rcu(peer, &conf->peers, uuid_list)
+ {
+ cds_list_for_each_entry_rcu(address, &peer->hostnames, hostname_list)
+ {
+ /* TODO: Cache the resolved addrinfos to improve
+ * performance
+ */
+ ret = getaddrinfo(address->hostname, NULL, NULL, &paddr);
+ if (ret) {
+ /* Don't fail if getaddrinfo fails; continue
+ * on to the next address
+ */
+ gf_msg_trace(this->name, 0, "getaddrinfo for %s failed (%s)",
+ address->hostname, gai_strerror(ret));
+ continue;
+ }
+
+ for (tmp = paddr; tmp != NULL; tmp = tmp->ai_next) {
+ if (gf_compare_sockaddr(addr->ai_addr, tmp->ai_addr)) {
+ found = peer; /* (de)referenced? */
+ break;
+ }
+ }
+
+ freeaddrinfo(paddr);
+ if (found)
+ goto unlock;
+ }
+ }
+unlock:
+ RCU_READ_UNLOCK;
+out:
+ return found;
+}
+
/* glusterd_peerinfo_find_by_hostname searches for a peer which matches the
* hostname @hoststr and if found returns the pointer to peerinfo object.
* Returns NULL otherwise.
@@ -99,14 +202,11 @@ glusterd_peerinfo_find_by_hostname(const char *hoststr)
int ret = -1;
struct addrinfo *addr = NULL;
struct addrinfo *p = NULL;
- xlator_t *this = NULL;
+ xlator_t *this = THIS;
glusterd_peerinfo_t *peerinfo = NULL;
- this = THIS;
GF_ASSERT(hoststr);
- peerinfo = NULL;
-
peerinfo = gd_peerinfo_find_from_hostname(hoststr);
if (peerinfo)
return peerinfo;
@@ -176,31 +276,33 @@ glusterd_peerinfo_find_by_uuid(uuid_t uuid)
glusterd_conf_t *priv = NULL;
glusterd_peerinfo_t *entry = NULL;
glusterd_peerinfo_t *found = NULL;
- xlator_t *this = NULL;
+ xlator_t *this = THIS;
+ glusterd_friend_sm_state_t state;
- this = THIS;
GF_ASSERT(this);
+ if (gf_uuid_is_null(uuid))
+ return NULL;
+
priv = this->private;
GF_ASSERT(priv);
- if (gf_uuid_is_null(uuid))
- return NULL;
-
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
if (!gf_uuid_compare(entry->uuid, uuid)) {
- gf_msg_debug(this->name, 0, "Friend found... state: %s",
- glusterd_friend_sm_state_name_get(entry->state.state));
found = entry; /* Probably should be rcu_dereferenced */
+ state = found->state.state;
break;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
- if (!found)
+ if (found)
+ gf_msg_debug(this->name, 0, "Friend found... state: %s",
+ glusterd_friend_sm_state_name_get(state));
+ else
gf_msg_debug(this->name, 0, "Friend with uuid: %s, not found",
uuid_utoa(uuid));
return found;
@@ -214,9 +316,8 @@ glusterd_peerinfo_t *
glusterd_peerinfo_find(uuid_t uuid, const char *hostname)
{
glusterd_peerinfo_t *peerinfo = NULL;
- xlator_t *this = NULL;
+ xlator_t *this = THIS;
- this = THIS;
GF_ASSERT(this);
if (uuid) {
@@ -266,8 +367,10 @@ glusterd_peerinfo_new(glusterd_friend_sm_state_t state, uuid_t *uuid,
GF_ASSERT(conf);
new_peer = GF_CALLOC(1, sizeof(*new_peer), gf_gld_mt_peerinfo_t);
- if (!new_peer)
+ if (!new_peer) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
CDS_INIT_LIST_HEAD(&new_peer->uuid_list);
@@ -323,7 +426,7 @@ glusterd_chk_peers_connected_befriended(uuid_t skip_uuid)
priv = THIS->private;
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
if (!gf_uuid_is_null(skip_uuid) &&
@@ -336,7 +439,7 @@ glusterd_chk_peers_connected_befriended(uuid_t skip_uuid)
break;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg_debug(THIS->name, 0, "Returning %s", (ret ? "TRUE" : "FALSE"));
return ret;
@@ -357,8 +460,9 @@ glusterd_uuid_to_hostname(uuid_t uuid)
if (!gf_uuid_compare(MY_UUID, uuid)) {
hostname = gf_strdup("localhost");
+ return hostname;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
if (!cds_list_empty(&priv->peers)) {
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
@@ -368,7 +472,7 @@ glusterd_uuid_to_hostname(uuid_t uuid)
}
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return hostname;
}
@@ -399,15 +503,15 @@ glusterd_are_all_peers_up()
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
if (!peerinfo->connected) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
peers_up = _gf_true;
@@ -428,7 +532,7 @@ glusterd_are_vol_all_peers_up(glusterd_volinfo_t *volinfo,
if (!gf_uuid_compare(brickinfo->uuid, MY_UUID))
continue;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, peers, uuid_list)
{
if (gf_uuid_compare(peerinfo->uuid, brickinfo->uuid))
@@ -439,13 +543,12 @@ glusterd_are_vol_all_peers_up(glusterd_volinfo_t *volinfo,
if (!(peerinfo->connected) ||
(peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)) {
*down_peerstr = gf_strdup(peerinfo->hostname);
- gf_msg_debug(THIS->name, 0, "Peer %s is down. ",
- peerinfo->hostname);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
+ gf_msg_debug(THIS->name, 0, "Peer %s is down. ", *down_peerstr);
goto out;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
ret = _gf_true;
@@ -463,12 +566,16 @@ glusterd_peer_hostname_new(const char *hostname,
GF_ASSERT(hostname);
GF_ASSERT(name);
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
peer_hostname = GF_CALLOC(1, sizeof(*peer_hostname),
gf_gld_mt_peer_hostname_t);
- if (!peer_hostname)
+ if (!peer_hostname) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
peer_hostname->hostname = gf_strdup(hostname);
CDS_INIT_LIST_HEAD(&peer_hostname->hostname_list);
@@ -500,7 +607,6 @@ glusterd_peer_hostname_free(glusterd_peer_hostname_t *name)
gf_boolean_t
gd_peer_has_address(glusterd_peerinfo_t *peerinfo, const char *address)
{
- gf_boolean_t ret = _gf_false;
glusterd_peer_hostname_t *hostname = NULL;
GF_VALIDATE_OR_GOTO("glusterd", (peerinfo != NULL), out);
@@ -509,13 +615,12 @@ gd_peer_has_address(glusterd_peerinfo_t *peerinfo, const char *address)
cds_list_for_each_entry(hostname, &peerinfo->hostnames, hostname_list)
{
if (strcmp(hostname->hostname, address) == 0) {
- ret = _gf_true;
- break;
+ return _gf_true;
}
}
out:
- return ret;
+ return _gf_false;
}
int
@@ -624,112 +729,6 @@ out:
return ret;
}
-/* gd_peerinfo_find_from_hostname iterates over all the addresses saved for each
- * peer and matches it to @hoststr.
- * Returns the matched peer if found else returns NULL
- */
-glusterd_peerinfo_t *
-gd_peerinfo_find_from_hostname(const char *hoststr)
-{
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *peer = NULL;
- glusterd_peerinfo_t *found = NULL;
- glusterd_peer_hostname_t *tmphost = NULL;
-
- this = THIS;
- GF_ASSERT(this != NULL);
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, (priv != NULL), out);
-
- GF_VALIDATE_OR_GOTO(this->name, (hoststr != NULL), out);
-
- rcu_read_lock();
- cds_list_for_each_entry_rcu(peer, &priv->peers, uuid_list)
- {
- cds_list_for_each_entry_rcu(tmphost, &peer->hostnames, hostname_list)
- {
- if (!strncasecmp(tmphost->hostname, hoststr, 1024)) {
- gf_msg_debug(this->name, 0, "Friend %s found.. state: %d",
- tmphost->hostname, peer->state.state);
- found = peer; /* Probably needs to be
- dereferenced*/
- goto unlock;
- }
- }
- }
-unlock:
- rcu_read_unlock();
-out:
- return found;
-}
-
-/* gd_peerinfo_find_from_addrinfo iterates over all the addresses saved for each
- * peer, resolves them and compares them to @addr.
- *
- *
- * NOTE: As getaddrinfo is a blocking call and is being performed multiple times
- * in this function, it could lead to the calling thread to be blocked for
- * significant amounts of time.
- *
- * Returns the matched peer if found else returns NULL
- */
-glusterd_peerinfo_t *
-gd_peerinfo_find_from_addrinfo(const struct addrinfo *addr)
-{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- glusterd_peerinfo_t *peer = NULL;
- glusterd_peerinfo_t *found = NULL;
- glusterd_peer_hostname_t *address = NULL;
- int ret = 0;
- struct addrinfo *paddr = NULL;
- struct addrinfo *tmp = NULL;
-
- this = THIS;
- GF_ASSERT(this != NULL);
- conf = this->private;
- GF_VALIDATE_OR_GOTO(this->name, (conf != NULL), out);
-
- GF_VALIDATE_OR_GOTO(this->name, (addr != NULL), out);
-
- rcu_read_lock();
- cds_list_for_each_entry_rcu(peer, &conf->peers, uuid_list)
- {
- cds_list_for_each_entry_rcu(address, &peer->hostnames, hostname_list)
- {
- /* TODO: Cache the resolved addrinfos to improve
- * performance
- */
- ret = getaddrinfo(address->hostname, NULL, NULL, &paddr);
- if (ret) {
- /* Don't fail if getaddrinfo fails, continue
- * onto the next address
- */
- gf_msg_trace(this->name, 0, "getaddrinfo for %s failed (%s)",
- address->hostname, gai_strerror(ret));
- ret = 0;
- continue;
- }
-
- for (tmp = paddr; tmp != NULL; tmp = tmp->ai_next) {
- if (gf_compare_sockaddr(addr->ai_addr, tmp->ai_addr)) {
- found = peer; /* (de)referenced? */
- break;
- }
- }
-
- freeaddrinfo(paddr);
- if (found)
- goto unlock;
- }
- }
-unlock:
- rcu_read_unlock();
-out:
- return found;
-}
-
/* gd_update_peerinfo_from_dict will update the hostnames for @peerinfo from
* peer details with @prefix in @dict.
* Returns 0 on success and -1 on failure.
@@ -830,7 +829,7 @@ gd_peerinfo_from_dict(dict_t *dict, const char *prefix)
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
glusterd_peerinfo_t *new_peer = NULL;
- char key[100] = {
+ char key[64] = {
0,
};
char *uuid_str = NULL;
@@ -875,14 +874,14 @@ out:
return new_peer;
}
-int
+static int
gd_add_peer_hostnames_to_dict(glusterd_peerinfo_t *peerinfo, dict_t *dict,
const char *prefix)
{
int ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
- char key[256] = {
+ char key[64] = {
0,
};
glusterd_peer_hostname_t *addr = NULL;
@@ -907,8 +906,11 @@ gd_add_peer_hostnames_to_dict(glusterd_peerinfo_t *peerinfo, dict_t *dict,
{
snprintf(key, sizeof(key), "%s.hostname%d", prefix, count);
ret = dict_set_dynstr_with_alloc(dict, key, addr->hostname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
count++;
}
@@ -924,47 +926,67 @@ gd_add_peer_detail_to_dict(glusterd_peerinfo_t *peerinfo, dict_t *friends,
int count)
{
int ret = -1;
- char key[64] = {
+ char key[32] = {
0,
};
int keylen;
char *peer_uuid_str = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
GF_ASSERT(peerinfo);
GF_ASSERT(friends);
peer_uuid_str = gd_peer_uuid_str(peerinfo);
keylen = snprintf(key, sizeof(key), "friend%d.uuid", count);
ret = dict_set_strn(friends, key, keylen, peer_uuid_str);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.hostname", count);
ret = dict_set_strn(friends, key, keylen, peerinfo->hostname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.port", count);
ret = dict_set_int32n(friends, key, keylen, peerinfo->port);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.stateId", count);
ret = dict_set_int32n(friends, key, keylen, peerinfo->state.state);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=%s in dict", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.state", count);
ret = dict_set_strn(
friends, key, keylen,
glusterd_friend_sm_state_name_get(peerinfo->state.state));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "key=%s",
+ key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.connected", count);
ret = dict_set_int32n(friends, key, keylen, (int32_t)peerinfo->connected);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
goto out;
+ }
snprintf(key, sizeof(key), "friend%d", count);
ret = gd_add_peer_hostnames_to_dict(peerinfo, friends, key);
@@ -983,28 +1005,30 @@ glusterd_peerinfo_find_by_generation(uint32_t generation)
glusterd_conf_t *priv = NULL;
glusterd_peerinfo_t *entry = NULL;
glusterd_peerinfo_t *found = NULL;
- xlator_t *this = NULL;
+ xlator_t *this = THIS;
+ glusterd_friend_sm_state_t state;
- this = THIS;
GF_ASSERT(this);
priv = this->private;
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
if (entry->generation == generation) {
- gf_msg_debug(this->name, 0, "Friend found... state: %s",
- glusterd_friend_sm_state_name_get(entry->state.state));
found = entry; /* Probably should be rcu_dereferenced */
+ state = found->state.state;
break;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
- if (!found)
+ if (found)
+ gf_msg_debug(this->name, 0, "Friend found... state: %s",
+ glusterd_friend_sm_state_name_get(state));
+ else
gf_msg_debug(this->name, 0,
"Friend with generation: %" PRIu32 ", not found",
generation);
@@ -1025,9 +1049,9 @@ glusterd_get_peers_count()
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peer, &conf->peers, uuid_list) count++;
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
return count;
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
index 47cbf6ee13d..fd254d57391 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
@@ -64,12 +64,6 @@ int
gd_add_friend_to_dict(glusterd_peerinfo_t *friend, dict_t *dict,
const char *prefix);
-glusterd_peerinfo_t *
-gd_peerinfo_find_from_hostname(const char *hoststr);
-
-glusterd_peerinfo_t *
-gd_peerinfo_find_from_addrinfo(const struct addrinfo *addr);
-
int
gd_update_peerinfo_from_dict(glusterd_peerinfo_t *peerinfo, dict_t *dict,
const char *prefix);
@@ -78,9 +72,6 @@ glusterd_peerinfo_t *
gd_peerinfo_from_dict(dict_t *dict, const char *prefix);
int
-gd_add_peer_hostnames_to_dict(glusterd_peerinfo_t *peerinfo, dict_t *dict,
- const char *prefix);
-int
gd_add_peer_detail_to_dict(glusterd_peerinfo_t *peerinfo, dict_t *friends,
int count);
glusterd_peerinfo_t *
diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c
index 27d664567cf..16ac628ab82 100644
--- a/xlators/mgmt/glusterd/src/glusterd-pmap.c
+++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c
@@ -8,10 +8,10 @@
cases as published by the Free Software Foundation.
*/
-#include "xlator.h"
-#include "glusterfs.h"
-#include "syscall.h"
-#include "compat-errno.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/compat-errno.h>
#include "glusterd.h"
#include "glusterd-utils.h"
@@ -433,17 +433,20 @@ __gluster_pmap_portbybrick(rpcsvc_request_t *req)
char *brick = NULL;
int port = 0;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = xdr_to_generic(req->msg[0], &args,
(xdrproc_t)xdr_pmap_port_by_brick_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto fail;
}
brick = args.brick;
- port = pmap_registry_search(THIS, brick, GF_PMAP_PORT_BRICKSERVER,
+ port = pmap_registry_search(this, brick, GF_PMAP_PORT_BRICKSERVER,
_gf_false);
if (!port)
@@ -475,11 +478,14 @@ __gluster_pmap_brickbyport(rpcsvc_request_t *req)
0,
};
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = xdr_to_generic(req->msg[0], &args,
(xdrproc_t)xdr_pmap_brick_by_port_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto fail;
}
@@ -513,10 +519,13 @@ __gluster_pmap_signin(rpcsvc_request_t *req)
};
int ret = -1;
glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_pmap_signin_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto fail;
}
@@ -570,6 +579,7 @@ __gluster_pmap_signout(rpcsvc_request_t *req)
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto fail;
}
rsp.op_ret = pmap_registry_remove(THIS, args.port, args.brick,
@@ -635,16 +645,16 @@ gluster_pmap_signout(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __gluster_pmap_signout);
}
-rpcsvc_actor_t gluster_pmap_actors[GF_PMAP_MAXVALUE] = {
- [GF_PMAP_NULL] = {"NULL", GF_PMAP_NULL, NULL, NULL, 0, DRC_NA},
- [GF_PMAP_PORTBYBRICK] = {"PORTBYBRICK", GF_PMAP_PORTBYBRICK,
- gluster_pmap_portbybrick, NULL, 0, DRC_NA},
- [GF_PMAP_BRICKBYPORT] = {"BRICKBYPORT", GF_PMAP_BRICKBYPORT,
- gluster_pmap_brickbyport, NULL, 0, DRC_NA},
- [GF_PMAP_SIGNIN] = {"SIGNIN", GF_PMAP_SIGNIN, gluster_pmap_signin, NULL, 0,
- DRC_NA},
- [GF_PMAP_SIGNOUT] = {"SIGNOUT", GF_PMAP_SIGNOUT, gluster_pmap_signout, NULL,
- 0, DRC_NA},
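+/* Initializer order below matches the current rpcsvc_actor_t field layout. */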
+static rpcsvc_actor_t gluster_pmap_actors[GF_PMAP_MAXVALUE] = {
+ [GF_PMAP_NULL] = {"NULL", NULL, NULL, GF_PMAP_NULL, DRC_NA, 0},
+ [GF_PMAP_PORTBYBRICK] = {"PORTBYBRICK", gluster_pmap_portbybrick, NULL,
+ GF_PMAP_PORTBYBRICK, DRC_NA, 0},
+ [GF_PMAP_BRICKBYPORT] = {"BRICKBYPORT", gluster_pmap_brickbyport, NULL,
+ GF_PMAP_BRICKBYPORT, DRC_NA, 0},
+ [GF_PMAP_SIGNIN] = {"SIGNIN", gluster_pmap_signin, NULL, GF_PMAP_SIGNIN,
+ DRC_NA, 0},
+ [GF_PMAP_SIGNOUT] = {"SIGNOUT", gluster_pmap_signout, NULL, GF_PMAP_SIGNOUT,
+ DRC_NA, 0},
};
struct rpcsvc_program gluster_pmap_prog = {
diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.h b/xlators/mgmt/glusterd/src/glusterd-pmap.h
index 8a3ebac48a0..51d75361431 100644
--- a/xlators/mgmt/glusterd/src/glusterd-pmap.h
+++ b/xlators/mgmt/glusterd/src/glusterd-pmap.h
@@ -11,26 +11,26 @@
#define _GLUSTERD_PMAP_H_
#include <pthread.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
-#include "glusterfs.h"
-#include "xlator.h"
-#include "logging.h"
-#include "call-stub.h"
-#include "byte-order.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/byte-order.h>
#include "rpcsvc.h"
struct pmap_port_status {
- gf_pmap_port_type_t type;
char *brickname;
void *xprt;
+ gf_pmap_port_type_t type;
};
struct pmap_registry {
+ struct pmap_port_status ports[GF_PORT_MAX + 1];
int base_port;
int max_port;
int last_alloc;
- struct pmap_port_status ports[GF_PORT_MAX + 1];
};
int
diff --git a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
index 200e3056117..a05c90d7b10 100644
--- a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
@@ -14,9 +14,9 @@
#include "glusterd.h"
#include "glusterd-utils.h"
-#include "common-utils.h"
-#include "xlator.h"
-#include "logging.h"
+#include <glusterfs/common-utils.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
#include "glusterd-messages.h"
#include "glusterd-proc-mgmt.h"
@@ -107,12 +107,14 @@ glusterd_proc_stop(glusterd_proc_t *proc, int sig, int flags)
"service, reason:%s",
proc->name, strerror(errno));
}
+ } else {
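+ /* the process is no longer running: remove the stale pidfile */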
+ (void)glusterd_unlink_file(proc->pidfile);
}
if (flags != PROC_STOP_FORCE)
goto out;
synclock_unlock(&conf->big_lock);
- sleep(1);
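+ /* synctask_sleep() yields only this synctask instead of blocking the
+ * whole thread */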
+ synctask_sleep(1);
synclock_lock(&conf->big_lock);
if (gf_is_service_running(proc->pidfile, &pid)) {
ret = kill(pid, SIGKILL);
diff --git a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h
index 36ad5ae6731..e8e9ffc5082 100644
--- a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h
@@ -22,7 +22,7 @@ enum proc_flags {
};
struct glusterd_proc_ {
- char name[PATH_MAX];
+ char name[NAME_MAX];
char pidfile[PATH_MAX];
char logdir[PATH_MAX];
char logfile[PATH_MAX];
diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c
index 6029b738590..8370c174ce3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quota.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quota.c
@@ -7,22 +7,21 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
#include "glusterd-op-sm.h"
#include "glusterd-store.h"
#include "glusterd-utils.h"
-#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
-#include "run.h"
-#include "syscall.h"
-#include "byte-order.h"
-#include "compat-errno.h"
-#include "quota-common-utils.h"
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/quota-common-utils.h>
#include "glusterd-quota.h"
#include <sys/wait.h>
@@ -40,6 +39,49 @@
/* Any negative pid to make it special client */
#define QUOTA_CRAWL_PID "-100"
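+
+/* Pidfile paths for the auxiliary quota mounts used by the 'limit-usage'
+ * and 'list' operations. */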
+#define GLUSTERFS_GET_QUOTA_LIMIT_MOUNT_PIDFILE(pidfile, volname) \
+ { \
+ snprintf(pidfile, PATH_MAX - 1, \
+ DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_limit.pid", volname); \
+ }
+
+#define GLUSTERFS_GET_QUOTA_LIST_MOUNT_PIDFILE(pidfile, volname) \
+ { \
+ snprintf(pidfile, PATH_MAX - 1, \
+ DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_list.pid", volname); \
+ }
+
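+/* Directory holding the pidfiles of quota crawl processes for @volinfo;
+ * enable and disable crawls use separate subdirectories. */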
+#define GLUSTERD_GET_QUOTA_CRAWL_PIDDIR(piddir, volinfo, type) \
+ do { \
+ char _volpath[PATH_MAX] = { \
+ 0, \
+ }; \
+ int32_t _crawl_pid_len; \
+ GLUSTERD_GET_VOLUME_DIR(_volpath, volinfo, priv); \
+ if (type == GF_QUOTA_OPTION_TYPE_ENABLE || \
+ type == GF_QUOTA_OPTION_TYPE_ENABLE_OBJECTS) \
+ _crawl_pid_len = snprintf(piddir, PATH_MAX, "%s/run/quota/enable", \
+ _volpath); \
+ else \
+ _crawl_pid_len = snprintf(piddir, PATH_MAX, \
+ "%s/run/quota/disable", _volpath); \
+ if ((_crawl_pid_len < 0) || (_crawl_pid_len >= PATH_MAX)) { \
+ piddir[0] = 0; \
+ } \
+ } while (0)
+
+#define GLUSTERD_GET_TMP_PATH(abspath, path) \
+ do { \
+ snprintf(abspath, sizeof(abspath) - 1, \
+ DEFAULT_VAR_RUN_DIRECTORY "/tmp%s", path); \
+ } while (0)
+
+#define GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(abspath, volname, path) \
+ do { \
+ snprintf(abspath, sizeof(abspath) - 1, \
+ DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_list%s", volname, path); \
+ } while (0)
+
const char *gd_quota_op_list[GF_QUOTA_OPTION_TYPE_MAX + 1] = {
[GF_QUOTA_OPTION_TYPE_NONE] = "none",
[GF_QUOTA_OPTION_TYPE_ENABLE] = "enable",
@@ -266,7 +308,7 @@ _glusterd_quota_initiate_fs_crawl(glusterd_conf_t *priv,
GF_VALIDATE_OR_GOTO("glusterd", THIS, out);
GLUSTERD_GET_TMP_PATH(mountdir, "/");
- ret = sys_mkdir(mountdir, 0777);
+ ret = sys_mkdir(mountdir, 0755);
if (ret && errno != EEXIST) {
gf_msg(THIS->name, GF_LOG_WARNING, errno, GD_MSG_MOUNT_REQ_FAIL,
"failed to create temporary "
@@ -436,8 +478,9 @@ glusterd_stop_all_quota_crawl_service(glusterd_conf_t *priv,
if (dir == NULL)
return;
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
- while (entry) {
+ while ((entry = sys_readdir(dir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
len = snprintf(pidfile, sizeof(pidfile), "%s/%s", pid_dir,
entry->d_name);
if ((len >= 0) && (len < sizeof(pidfile))) {
@@ -445,8 +488,6 @@ glusterd_stop_all_quota_crawl_service(glusterd_conf_t *priv,
_gf_true);
sys_unlink(pidfile);
}
-
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
}
sys_closedir(dir);
}
@@ -470,7 +511,7 @@ glusterd_quota_initiate_fs_crawl(glusterd_conf_t *priv,
goto out;
}
- ret = mkdir_p(DEFAULT_QUOTA_CRAWL_LOG_DIRECTORY, 0777, _gf_true);
+ ret = mkdir_p(DEFAULT_QUOTA_CRAWL_LOG_DIRECTORY, 0755, _gf_true);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_GLUSTERD_OP_FAILED,
"failed to create dir %s: %s", DEFAULT_QUOTA_CRAWL_LOG_DIRECTORY,
@@ -479,7 +520,7 @@ glusterd_quota_initiate_fs_crawl(glusterd_conf_t *priv,
}
GLUSTERD_GET_QUOTA_CRAWL_PIDDIR(pid_dir, volinfo, type);
- ret = mkdir_p(pid_dir, 0777, _gf_true);
+ ret = mkdir_p(pid_dir, 0755, _gf_true);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_GLUSTERD_OP_FAILED,
"failed to create dir %s: %s", pid_dir, strerror(errno));
@@ -540,8 +581,7 @@ glusterd_quota_get_default_soft_limit(glusterd_volinfo_t *volinfo,
else
val = gf_strdup("80%");
- ret = dict_set_dynstrn(rsp_dict, "default-soft-limit",
- SLEN("default-soft-limit"), val);
+ ret = dict_set_dynstr_sizen(rsp_dict, "default-soft-limit", val);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Failed to set default "
@@ -771,6 +811,7 @@ glusterd_set_quota_limit(char *volname, char *path, char *hard_limit,
0,
};
double soft_limit_double = 0;
+ int64_t local_hl = 0;
this = THIS;
GF_ASSERT(this);
@@ -820,11 +861,11 @@ glusterd_set_quota_limit(char *volname, char *path, char *hard_limit,
new_limit.sl = hton64(new_limit.sl);
- ret = gf_string2bytesize_int64(hard_limit, &new_limit.hl);
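+ /* parse into an aligned local first, then byte-swap it into the
+ * on-wire limit structure */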
+ ret = gf_string2bytesize_int64(hard_limit, &local_hl);
if (ret)
goto out;
- new_limit.hl = hton64(new_limit.hl);
+ new_limit.hl = hton64(local_hl);
ret = sys_lsetxattr(abspath, key, (char *)(void *)&new_limit,
sizeof(new_limit), 0);
@@ -1755,16 +1796,23 @@ glusterd_op_quota(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
volinfo->quota_xattr_version--;
ret = glusterd_store_volinfo(volinfo,
GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_SET_FAIL,
+ "Failed to store volinfo for volume %s",
+ volinfo->volname);
+ }
}
ret = -1;
goto out;
}
+#if BUILD_GNFS
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
if (priv->op_version == GD_OP_VERSION_MIN)
(void)priv->nfs_svc.manager(&(priv->nfs_svc), NULL, 0);
}
+#endif
if (rsp_dict && start_crawl == _gf_true)
glusterd_quota_initiate_fs_crawl(priv, volinfo, type);
@@ -1851,10 +1899,9 @@ glusterd_get_gfid_from_brick(dict_t *dict, glusterd_volinfo_t *volinfo,
}
ret = sys_lgetxattr(backend_path, GFID_XATTR_KEY, gfid, 16);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_SETXATTR_FAIL,
- "Failed to get "
- "extended attribute %s for directory %s. ",
- GFID_XATTR_KEY, backend_path);
+ gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_GET_XATTR_FAIL,
+ "Attribute=%s, Directory=%s", GFID_XATTR_KEY, backend_path,
+ NULL);
ret = 0;
continue;
}
@@ -1990,7 +2037,7 @@ glusterd_create_quota_auxiliary_mount(xlator_t *this, char *volname, int type)
fclose(file);
}
- ret = sys_mkdir(mountdir, 0777);
+ ret = sys_mkdir(mountdir, 0755);
if (ret && errno != EEXIST) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_MOUNT_REQ_FAIL,
"Failed to create auxiliary "
@@ -1998,8 +2045,8 @@ glusterd_create_quota_auxiliary_mount(xlator_t *this, char *volname, int type)
mountdir);
goto out;
}
- snprintf(logfile, PATH_MAX - 1, "%s/quota-mount-%s.log",
- DEFAULT_LOG_FILE_DIRECTORY, volname);
+ snprintf(logfile, PATH_MAX - 1, "%s/quota-mount-%s.log", priv->logdir,
+ volname);
snprintf(qpid, 15, "%d", GF_CLIENT_PID_QUOTA_MOUNT);
if (dict_get_strn(this->options, "transport.socket.bind-address",
@@ -2046,7 +2093,6 @@ glusterd_op_stage_quota(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
int ret = 0;
char *volname = NULL;
- gf_boolean_t exists = _gf_false;
int type = 0;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
@@ -2070,12 +2116,6 @@ glusterd_op_stage_quota(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
- if (!exists) {
- gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
- ret = -1;
- goto out;
- }
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c
index 538614bbad1..f26d832a06d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c
@@ -8,8 +8,8 @@
cases as published by the Free Software Foundation.
*/
-#include "globals.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
@@ -127,8 +127,10 @@ glusterd_quotadsvc_start(glusterd_svc_t *svc, int flags)
char *options[] = {svc->name, "--process-name", NULL};
cmdline = dict_new();
- if (!cmdline)
+ if (!cmdline) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
for (i = 0; options[i]; i++) {
ret = snprintf(key, sizeof(key), "arg%d", i);
@@ -157,7 +159,7 @@ glusterd_quotadsvc_reconfigure()
gf_boolean_t identical = _gf_false;
this = THIS;
- GF_VALIDATE_OR_GOTO(this->name, this, out);
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
priv = this->private;
GF_VALIDATE_OR_GOTO(this->name, priv, out);
diff --git a/xlators/mgmt/glusterd/src/glusterd-rcu.h b/xlators/mgmt/glusterd/src/glusterd-rcu.h
index 32ac3bbfd4e..c85f9bea8f8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rcu.h
+++ b/xlators/mgmt/glusterd/src/glusterd-rcu.h
@@ -21,7 +21,7 @@
#include "rculist-extra.h"
#endif
-#include "xlator.h"
+#include <glusterfs/xlator.h>
/* gd_rcu_head is a composite struct, composed of struct rcu_head and a this
* pointer, which is used to pass the THIS pointer to call_rcu callbacks.
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index f90d3de9843..458bf168ede 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -13,26 +13,45 @@
#include <sys/resource.h>
#include <sys/statvfs.h>
-#include "compat.h"
+#include <glusterfs/compat.h>
#include "protocol-common.h"
-#include "xlator.h"
-#include "logging.h"
-#include "timer.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/timer.h>
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
+#include "glusterd-mgmt.h"
#include "glusterd-messages.h"
#include "glusterd-store.h"
-#include "run.h"
+#include <glusterfs/run.h>
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
+#define GLUSTERD_GET_DEFRAG_SOCK_FILE(path, volinfo) \
+ do { \
+ int32_t _defrag_sockfile_len; \
+ char tmppath[PATH_MAX] = { \
+ 0, \
+ }; \
+ _defrag_sockfile_len = snprintf( \
+ tmppath, PATH_MAX, \
+ DEFAULT_VAR_RUN_DIRECTORY "/gluster-%s-%s-%s.sock", "rebalance", \
+ volinfo->volname, uuid_utoa(MY_UUID)); \
+ if ((_defrag_sockfile_len < 0) || \
+ (_defrag_sockfile_len >= PATH_MAX)) { \
+ path[0] = 0; \
+ } else { \
+ glusterd_set_socket_filepath(tmppath, path, sizeof(path)); \
+ } \
+ } while (0)
+
int32_t
glusterd_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe);
@@ -200,6 +219,9 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
char valgrind_logfile[PATH_MAX] = {
0,
};
+ char msg[1024] = {
+ 0,
+ };
char *volfileserver = NULL;
char *localtime_logging = NULL;
@@ -236,7 +258,7 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
glusterd_store_perform_node_state_store(volinfo);
GLUSTERD_GET_DEFRAG_DIR(defrag_path, volinfo, priv);
- ret = mkdir_p(defrag_path, 0777, _gf_true);
+ ret = mkdir_p(defrag_path, 0755, _gf_true);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
"Failed to create "
@@ -247,17 +269,21 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
GLUSTERD_GET_DEFRAG_SOCK_FILE(sockfile, volinfo);
GLUSTERD_GET_DEFRAG_PID_FILE(pidfile, volinfo, priv);
- snprintf(logfile, PATH_MAX, "%s/%s-%s.log", DEFAULT_LOG_FILE_DIRECTORY,
- volinfo->volname,
- (cmd == GF_DEFRAG_CMD_START_TIER ? "tier" : "rebalance"));
+ snprintf(logfile, PATH_MAX, "%s/%s-%s.log", priv->logdir, volinfo->volname,
+ "rebalance");
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s-rebalance.log",
- DEFAULT_LOG_FILE_DIRECTORY, volinfo->volname);
+ priv->logdir, volinfo->volname);
+
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
@@ -265,18 +291,7 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
if (dict_get_strn(this->options, "transport.socket.bind-address",
SLEN("transport.socket.bind-address"),
- &volfileserver) == 0) {
- /*In the case of running multiple glusterds on a single machine,
- *we should ensure that log file and unix socket file should be
- *unique in given cluster */
-
- GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD(sockfile, volinfo, priv);
- snprintf(logfile, PATH_MAX, "%s/%s-%s-%s.log",
- DEFAULT_LOG_FILE_DIRECTORY, volinfo->volname,
- (cmd == GF_DEFRAG_CMD_START_TIER ? "tier" : "rebalance"),
- uuid_utoa(MY_UUID));
-
- } else {
+ &volfileserver) != 0) {
volfileserver = "localhost";
}
@@ -287,11 +302,6 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
"*dht.assert-no-child-down=yes", "--xlator-option",
"*dht.readdir-optimize=on", "--process-name", "rebalance", NULL);
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- runner_add_arg(&runner, "--xlator-option");
- runner_argprintf(&runner, "*tier-dht.xattr-name=trusted.tier.tier-dht");
- }
-
runner_add_arg(&runner, "--xlator-option");
runner_argprintf(&runner, "*dht.rebalance-cmd=%d", cmd);
runner_add_arg(&runner, "--xlator-option");
@@ -314,6 +324,10 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
runner_add_arg(&runner, "--localtime-logging");
}
+ snprintf(msg, sizeof(msg), "Starting the rebalance service for volume %s",
+ volinfo->volname);
+ runner_log(&runner, this->name, GF_LOG_DEBUG, msg);
+
ret = runner_run_nowait(&runner);
if (ret) {
gf_msg_debug("glusterd", 0, "rebalance command failed");
@@ -377,9 +391,6 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
glusterd_defrag_info_t *defrag = volinfo->rebal.defrag;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
- struct stat buf = {
- 0,
- };
this = THIS;
GF_ASSERT(this);
@@ -390,36 +401,20 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
if (!defrag)
goto out;
- GLUSTERD_GET_DEFRAG_SOCK_FILE(sockfile, volinfo);
- /* Check if defrag sockfile exists in the new location
- * in /var/run/ , if it does not try the old location
- */
- ret = sys_stat(sockfile, &buf);
- /* TODO: Remove this once we don't need backward compatibility
- * with the older path
- */
- if (ret && (errno == ENOENT)) {
- gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
- "Rebalance sockfile "
- "%s does not exist. Trying old path.",
- sockfile);
- GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD(sockfile, volinfo, priv);
- ret = sys_stat(sockfile, &buf);
- if (ret && (ENOENT == errno)) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REBAL_NO_SOCK_FILE,
- "Rebalance "
- "sockfile %s does not exist",
- sockfile);
- goto out;
- }
+ options = dict_new();
+ if (!options) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
}
+ GLUSTERD_GET_DEFRAG_SOCK_FILE(sockfile, volinfo);
+
/* Setting frame-timeout to 10mins (600seconds).
* Unix domain sockets ensures that the connection is reliable. The
* default timeout of 30mins used for unreliable network connections is
* too long for unix domain socket connections.
*/
- ret = rpc_transport_unix_options_build(&options, sockfile, 600);
+ ret = rpc_transport_unix_options_build(options, sockfile, 600);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_UNIX_OP_BUILD_FAIL,
"Unix options build failed");
@@ -436,6 +431,8 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
}
ret = 0;
out:
+ if (options)
+ dict_unref(options);
return ret;
}
@@ -479,18 +476,6 @@ glusterd_rebalance_cmd_validate(int cmd, char *volname,
goto out;
}
- ret = glusterd_disallow_op_for_tier(*volinfo, GD_OP_REBALANCE, cmd);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REBALANCE_CMD_IN_TIER_VOL,
- "Received rebalance command "
- "on Tier volume %s",
- volname);
- snprintf(op_errstr, len,
- "Rebalance operations are not "
- "supported on a tiered volume");
- goto out;
- }
-
ret = 0;
out:
@@ -506,6 +491,7 @@ __glusterd_handle_defrag_volume(rpcsvc_request_t *req)
0,
}};
glusterd_conf_t *priv = NULL;
+ int32_t op = GD_OP_NONE;
dict_t *dict = NULL;
char *volname = NULL;
gf_cli_defrag_type cmd = 0;
@@ -525,6 +511,7 @@ __glusterd_handle_defrag_volume(rpcsvc_request_t *req)
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -564,19 +551,25 @@ __glusterd_handle_defrag_volume(rpcsvc_request_t *req)
if (ret)
goto out;
- if ((cmd == GF_DEFRAG_CMD_STATUS) || (cmd == GF_DEFRAG_CMD_STATUS_TIER) ||
- (cmd == GF_DEFRAG_CMD_STOP_DETACH_TIER) ||
- (cmd == GF_DEFRAG_CMD_STOP) || (cmd == GF_DEFRAG_CMD_DETACH_STATUS)) {
- ret = glusterd_op_begin(req, GD_OP_DEFRAG_BRICK_VOLUME, dict, msg,
- sizeof(msg));
+ if ((cmd == GF_DEFRAG_CMD_STATUS) || (cmd == GF_DEFRAG_CMD_STOP)) {
+ op = GD_OP_DEFRAG_BRICK_VOLUME;
} else
- ret = glusterd_op_begin(req, GD_OP_REBALANCE, dict, msg, sizeof(msg));
+ op = GD_OP_REBALANCE;
+ if (priv->op_version < GD_OP_VERSION_6_0) {
+ gf_msg_debug(this->name, 0,
+ "The cluster is operating at "
+ "version less than %d. Falling back "
+ "to op-sm framework.",
+ GD_OP_VERSION_6_0);
+ ret = glusterd_op_begin(req, op, dict, msg, sizeof(msg));
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ } else {
+ ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(req, op,
+ dict);
+ }
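+ /* Presumably the split here is about wire compatibility: clusters at
+ * op-version >= GD_OP_VERSION_6_0 can all take the mgmt_v3 path with its
+ * extra brick-op phase, while older (or mixed) clusters stay on the
+ * op-sm flow that every peer understands. */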
out:
-
- glusterd_friend_sm();
- glusterd_op_sm();
-
if (ret) {
if (msg[0] == '\0')
snprintf(msg, sizeof(msg), "Operation failed");
@@ -585,8 +578,8 @@ out:
}
free(cli_req.dict.dict_val); // malloced by xdr
-
- return 0;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
@@ -629,7 +622,98 @@ glusterd_brick_validation(dict_t *dict, char *key, data_t *value, void *data)
}
int
-glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr)
+glusterd_set_rebalance_id_in_rsp_dict(dict_t *req_dict, dict_t *rsp_dict)
+{
+ int ret = -1;
+ int32_t cmd = 0;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char msg[2048] = {0};
+ char *task_id_str = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(rsp_dict);
+ GF_ASSERT(req_dict);
+
+ ret = dict_get_strn(rsp_dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "volname not found");
+ goto out;
+ }
+
+ ret = dict_get_int32n(rsp_dict, "rebalance-command",
+ SLEN("rebalance-command"), &cmd);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "cmd not found");
+ goto out;
+ }
+
+ ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
+ sizeof(msg));
+ if (ret) {
+ gf_msg_debug(this->name, 0, "failed to validate");
+ goto out;
+ }
+
+ /* The rebalance id is generated in glusterd_mgmt_v3_op_stage_rebalance(),
+ * but rsp_dict is unavailable there, so copy it from req_dict into
+ * rsp_dict here so that the cli can display the rebalance id. */
+ if ((cmd == GF_DEFRAG_CMD_START) ||
+ (cmd == GF_DEFRAG_CMD_START_LAYOUT_FIX) ||
+ (cmd == GF_DEFRAG_CMD_START_FORCE)) {
+ if (is_origin_glusterd(rsp_dict)) {
+ ret = dict_get_strn(req_dict, GF_REBALANCE_TID_KEY,
+ SLEN(GF_REBALANCE_TID_KEY), &task_id_str);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Missing rebalance-id");
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_REBALANCE_ID_MISSING, "%s", msg);
+ ret = 0;
+ } else {
+ gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
+ ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id,
+ rsp_dict, GF_REBALANCE_TID_KEY,
+ SLEN(GF_REBALANCE_TID_KEY));
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "Failed to set rebalance id for volume %s",
+ volname);
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_DICT_SET_FAILED, "%s", msg);
+ }
+ }
+ }
+ }
+
+ /* Set task-id, if available, in rsp_dict for operations other than
+ * start. This is needed when we want the rebalance id in the xml output
+ */
+ if (cmd == GF_DEFRAG_CMD_STATUS || cmd == GF_DEFRAG_CMD_STOP) {
+ if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
+ if (GD_OP_REMOVE_BRICK == volinfo->rebal.op)
+ ret = glusterd_copy_uuid_to_dict(
+ volinfo->rebal.rebalance_id, rsp_dict,
+ GF_REMOVE_BRICK_TID_KEY, SLEN(GF_REMOVE_BRICK_TID_KEY));
+ else
+ ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id,
+ rsp_dict, GF_REBALANCE_TID_KEY,
+ SLEN(GF_REBALANCE_TID_KEY));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set task-id for volume %s", volname);
+ goto out;
+ }
+ }
+ }
+out:
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_op_stage_rebalance(dict_t *dict, char **op_errstr)
{
char *volname = NULL;
char *cmd_str = NULL;
@@ -638,9 +722,7 @@ glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr)
char msg[2048] = {0};
glusterd_volinfo_t *volinfo = NULL;
char *task_id_str = NULL;
- dict_t *op_ctx = NULL;
xlator_t *this = 0;
- int32_t is_force = 0;
this = THIS;
GF_ASSERT(this);
@@ -665,28 +747,316 @@ glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr)
goto out;
}
switch (cmd) {
- case GF_DEFRAG_CMD_START_TIER:
+ case GF_DEFRAG_CMD_START:
+ case GF_DEFRAG_CMD_START_LAYOUT_FIX:
+ /* Check if the connected clients are all of version
+ * glusterfs-3.6 and higher. This is needed to prevent some data
+ * loss issues that could occur when older clients are connected
+ * when rebalance is run. This check can be bypassed by using
+ * 'force'
+ */
+ ret = glusterd_check_client_op_version_support(
+ volname, GD_OP_VERSION_3_6_0, NULL);
+ if (ret) {
+ ret = gf_asprintf(op_errstr,
+ "Volume %s has one or "
+ "more connected clients of a version"
+ " lower than GlusterFS-v3.6.0. "
+ "Starting rebalance in this state "
+ "could lead to data loss.\nPlease "
+ "disconnect those clients before "
+ "attempting this command again.",
+ volname);
+ goto out;
+ }
+ /* Fall through */
+ case GF_DEFRAG_CMD_START_FORCE:
+ if (is_origin_glusterd(dict)) {
+ ret = glusterd_generate_and_set_task_id(
+ dict, GF_REBALANCE_TID_KEY, SLEN(GF_REBALANCE_TID_KEY));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TASKID_GEN_FAIL,
+ "Failed to generate task-id");
+ goto out;
+ }
+ } else {
+ ret = dict_get_strn(dict, GF_REBALANCE_TID_KEY,
+ SLEN(GF_REBALANCE_TID_KEY), &task_id_str);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Missing rebalance-id");
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_REBALANCE_ID_MISSING, "%s", msg);
+ ret = 0;
+ }
+ }
+ ret = glusterd_defrag_start_validate(volinfo, msg, sizeof(msg),
+ GD_OP_REBALANCE);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "defrag start validate "
+ "failed for volume %s.",
+ volinfo->volname);
+ goto out;
+ }
+ break;
+ case GF_DEFRAG_CMD_STATUS:
+ case GF_DEFRAG_CMD_STOP:
+
+ ret = dict_get_strn(dict, "cmd-str", SLEN("cmd-str"), &cmd_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get "
+ "command string");
+ ret = -1;
+ goto out;
+ }
+ if ((strstr(cmd_str, "rebalance") != NULL) &&
+ (volinfo->rebal.op != GD_OP_REBALANCE)) {
+ snprintf(msg, sizeof(msg),
+ "Rebalance not started "
+ "for volume %s.",
+ volinfo->volname);
+ ret = -1;
+ goto out;
+ }
+
+ if (strstr(cmd_str, "remove-brick") != NULL) {
+ if (volinfo->rebal.op != GD_OP_REMOVE_BRICK) {
+ snprintf(msg, sizeof(msg),
+ "remove-brick not "
+ "started for volume %s.",
+ volinfo->volname);
+ ret = -1;
+ goto out;
+ }
+
+ /* For remove-brick status/stop command check whether
+ * given input brick is part of volume or not.*/
+
+ ret = dict_foreach_fnmatch(dict, "brick*",
+ glusterd_brick_validation, volinfo);
+ if (ret == -1) {
+ snprintf(msg, sizeof(msg),
+ "Incorrect brick"
+ " for volume %s",
+ volinfo->volname);
+ goto out;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ if (ret && op_errstr && msg[0])
+ *op_errstr = gf_strdup(msg);
+
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+{
+ char *volname = NULL;
+ int ret = 0;
+ int32_t cmd = 0;
+ char msg[2048] = {0};
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *tmp = NULL;
+ gf_boolean_t volfile_update = _gf_false;
+ char *task_id_str = NULL;
+ xlator_t *this = NULL;
+ uint32_t commit_hash;
+ int32_t is_force = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "volname not given");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "rebalance-command", SLEN("rebalance-command"),
+ &cmd);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "command not given");
+ goto out;
+ }
+
+ ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
+ sizeof(msg));
+ if (ret) {
+ gf_msg_debug(this->name, 0, "cmd validate failed");
+ goto out;
+ }
+
+ switch (cmd) {
+ case GF_DEFRAG_CMD_START:
+ case GF_DEFRAG_CMD_START_LAYOUT_FIX:
+ case GF_DEFRAG_CMD_START_FORCE:
+
ret = dict_get_int32n(dict, "force", SLEN("force"), &is_force);
if (ret)
is_force = 0;
+ if (!is_force) {
+ /* Reset defrag status to 'NOT STARTED' whenever a
+ * remove-brick/rebalance command is issued, to clear
+ * stale information from a previous run.
+ */
+ volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- gf_asprintf(op_errstr,
- "volume %s is not a tier "
- "volume.",
- volinfo->volname);
- ret = -1;
+ ret = dict_get_strn(dict, GF_REBALANCE_TID_KEY,
+ SLEN(GF_REBALANCE_TID_KEY), &task_id_str);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Missing rebalance"
+ " id");
+ ret = 0;
+ } else {
+ gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
+ volinfo->rebal.op = GD_OP_REBALANCE;
+ }
+ if (!gd_should_i_start_rebalance(volinfo)) {
+ /* Store the rebalance-id and rebalance command
+ * even if this peer isn't starting a rebalance
+ * process; on peers that do start one,
+ * glusterd_handle_defrag_start does the storing.
+ * Storing this is needed for 'volume status' to
+ * work correctly.
+ */
+ glusterd_store_perform_node_state_store(volinfo);
+ break;
+ }
+ if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
+ volinfo->rebal.commit_hash = commit_hash;
+ }
+ ret = glusterd_handle_defrag_start(volinfo, msg, sizeof(msg),
+ cmd, NULL, GD_OP_REBALANCE);
+ break;
+ } else {
+ /* Reset defrag status to 'STARTED' so that the
+ * pid is checked and the process restarted
+ * accordingly; if the pid is not running, the
+ * "NOT_STARTED" path is taken and the process
+ * is restarted.
+ */
+ volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_STARTED;
+ volinfo->rebal.defrag_cmd = cmd;
+ volinfo->rebal.op = GD_OP_REBALANCE;
+
+ ret = dict_get_strn(dict, GF_REBALANCE_TID_KEY,
+ SLEN(GF_REBALANCE_TID_KEY), &task_id_str);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Missing rebalance"
+ " id");
+ ret = 0;
+ } else {
+ gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
+ volinfo->rebal.op = GD_OP_REBALANCE;
+ }
+ if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
+ volinfo->rebal.commit_hash = commit_hash;
+ }
+ ret = glusterd_restart_rebalance_for_volume(volinfo);
+ break;
+ }
+ case GF_DEFRAG_CMD_STOP:
+ /* Clear task-id only on explicitly stopping rebalance.
+ * Also clear the stored operation, so it doesn't cause trouble
+ * with future rebalance/remove-brick starts
+ */
+ gf_uuid_clear(volinfo->rebal.rebalance_id);
+ volinfo->rebal.op = GD_OP_NONE;
+
+ /* Fall back to the old volume file in case of decommission */
+ cds_list_for_each_entry_safe(brickinfo, tmp, &volinfo->bricks,
+ brick_list)
+ {
+ if (!brickinfo->decommissioned)
+ continue;
+ brickinfo->decommissioned = 0;
+ volfile_update = _gf_true;
+ }
+
+ if (volfile_update == _gf_false) {
+ ret = 0;
+ break;
+ }
+
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles");
goto out;
}
- if ((!is_force) && glusterd_is_tier_daemon_running(volinfo)) {
- ret = gf_asprintf(op_errstr,
- "A Tier daemon is "
- "already running on volume %s",
- volname);
- ret = -1;
+
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL,
+ "failed to store volinfo");
goto out;
}
- /* Fall through */
+
+ ret = 0;
+ break;
+
+ case GF_DEFRAG_CMD_STATUS:
+ break;
+ default:
+ break;
+ }
+
+out:
+ if (ret && op_errstr && msg[0])
+ *op_errstr = gf_strdup(msg);
+
+ return ret;
+}
+
+int
+glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr)
+{
+ char *volname = NULL;
+ char *cmd_str = NULL;
+ int ret = 0;
+ int32_t cmd = 0;
+ char msg[2048] = {0};
+ glusterd_volinfo_t *volinfo = NULL;
+ char *task_id_str = NULL;
+ dict_t *op_ctx = NULL;
+ xlator_t *this = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "volname not found");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "rebalance-command", SLEN("rebalance-command"),
+ &cmd);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "cmd not found");
+ goto out;
+ }
+
+ ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
+ sizeof(msg));
+ if (ret) {
+ gf_msg_debug(this->name, 0, "failed to validate");
+ goto out;
+ }
+ switch (cmd) {
case GF_DEFRAG_CMD_START:
case GF_DEFRAG_CMD_START_LAYOUT_FIX:
/* Check if the connected clients are all of version
@@ -747,7 +1117,6 @@ glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr)
goto out;
}
break;
- case GF_DEFRAG_CMD_STATUS_TIER:
case GF_DEFRAG_CMD_STATUS:
case GF_DEFRAG_CMD_STOP:
@@ -792,38 +1161,8 @@ glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr)
goto out;
}
}
- if (cmd == GF_DEFRAG_CMD_STATUS_TIER) {
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not "
- "a tier volume.",
- volinfo->volname);
- ret = -1;
- goto out;
- }
- }
-
break;
- case GF_DEFRAG_CMD_STOP_DETACH_TIER:
- case GF_DEFRAG_CMD_DETACH_STATUS:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not "
- "a tier volume.",
- volinfo->volname);
- ret = -1;
- goto out;
- }
-
- if (volinfo->rebal.op != GD_OP_REMOVE_BRICK) {
- snprintf(msg, sizeof(msg),
- "Detach-tier "
- "not started");
- ret = -1;
- goto out;
- }
- break;
default:
break;
}
@@ -879,8 +1218,7 @@ glusterd_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
/* Set task-id, if available, in op_ctx dict for operations other than
* start
*/
- if (cmd == GF_DEFRAG_CMD_STATUS || cmd == GF_DEFRAG_CMD_STOP ||
- cmd == GF_DEFRAG_CMD_STATUS_TIER) {
+ if (cmd == GF_DEFRAG_CMD_STATUS || cmd == GF_DEFRAG_CMD_STOP) {
if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
ctx = glusterd_op_get_ctx();
if (!ctx) {
@@ -910,7 +1248,6 @@ glusterd_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
case GF_DEFRAG_CMD_START:
case GF_DEFRAG_CMD_START_LAYOUT_FIX:
case GF_DEFRAG_CMD_START_FORCE:
- case GF_DEFRAG_CMD_START_TIER:
ret = dict_get_int32n(dict, "force", SLEN("force"), &is_force);
if (ret)
@@ -979,7 +1316,6 @@ glusterd_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
break;
}
case GF_DEFRAG_CMD_STOP:
- case GF_DEFRAG_CMD_STOP_DETACH_TIER:
/* Clear task-id only on explicitly stopping rebalance.
* Also clear the stored operation, so it doesn't cause trouble
* with future rebalance/remove-brick starts
@@ -1017,20 +1353,10 @@ glusterd_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER &&
- cmd == GF_OP_CMD_STOP_DETACH_TIER) {
- glusterd_defrag_info_set(volinfo, dict,
- GF_DEFRAG_CMD_START_TIER,
- GF_DEFRAG_CMD_START, GD_OP_REBALANCE);
- glusterd_restart_rebalance_for_volume(volinfo);
- }
-
ret = 0;
break;
- case GF_DEFRAG_CMD_START_DETACH_TIER:
case GF_DEFRAG_CMD_STATUS:
- case GF_DEFRAG_CMD_STATUS_TIER:
break;
default:
break;
@@ -1068,23 +1394,11 @@ glusterd_defrag_event_notify_handle(dict_t *dict)
volname_ptr = strchr(volname_ptr, '/');
volname = volname_ptr + 1;
} else {
- volname_ptr = strstr(volname, "tierd/");
- if (volname_ptr) {
- volname_ptr = strchr(volname_ptr, '/');
- if (!volname_ptr) {
- ret = -1;
- goto out;
- }
- volname = volname_ptr + 1;
- } else {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_REBALANCE_PFX_IN_VOLNAME,
- "volname received (%s) is not prefixed with "
- "rebalance or tierd.",
- volname);
- ret = -1;
- goto out;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_REBALANCE_PFX_IN_VOLNAME,
+ "volname received (%s) is not prefixed with rebalance.",
+ volname);
+ ret = -1;
+ goto out;
}
ret = glusterd_volinfo_find(volname, &volinfo);
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index 7a75cf81aad..43c2f4373e0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -7,10 +7,10 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
-#include "glusterfs.h"
+#include <glusterfs/glusterfs.h>
#include "glusterd.h"
#include "glusterd-op-sm.h"
#include "glusterd-geo-rep.h"
@@ -18,13 +18,12 @@
#include "glusterd-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
-#include "glusterd-nfs-svc.h"
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
#include "glusterd-server-quorum.h"
#include "glusterd-mgmt.h"
-#include "run.h"
-#include "syscall.h"
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
#include <signal.h>
@@ -228,6 +227,20 @@ glusterd_op_stage_replace_brick(dict_t *dict, char **op_errstr,
is_force = _gf_true;
}
+ if (volinfo->snap_count > 0 || !cds_list_empty(&volinfo->snap_volumes)) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s has %" PRIu64
+ " snapshots. "
+ "Changing the volume configuration will not effect snapshots."
+ "But the snapshot brick mount should be intact to "
+ "make them function.",
+ volname, volinfo->snap_count);
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s", msg);
+ msg[0] = '\0';
+ }
+
+ glusterd_add_peers_to_auth_list(volname);
+
ret = glusterd_get_dst_brick_info(&dst_brick, volname, op_errstr,
&dst_brickinfo, &host, dict,
&dup_dstbrick);
@@ -268,34 +281,37 @@ glusterd_op_stage_replace_brick(dict_t *dict, char **op_errstr,
}
if (!gf_is_local_addr(host)) {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, host);
if (peerinfo == NULL) {
+ RCU_READ_UNLOCK;
ret = -1;
snprintf(msg, sizeof(msg), "%s, is not a friend", host);
*op_errstr = gf_strdup(msg);
+ goto out;
} else if (!peerinfo->connected) {
+ RCU_READ_UNLOCK;
+ ret = -1;
snprintf(msg, sizeof(msg),
"%s, is not connected at "
"the moment",
host);
*op_errstr = gf_strdup(msg);
- ret = -1;
+ goto out;
} else if (GD_FRIEND_STATE_BEFRIENDED != peerinfo->state.state) {
+ RCU_READ_UNLOCK;
+ ret = -1;
snprintf(msg, sizeof(msg),
"%s, is not befriended "
"at the moment",
host);
*op_errstr = gf_strdup(msg);
- ret = -1;
- }
- rcu_read_unlock();
-
- if (ret)
goto out;
+ }
+ RCU_READ_UNLOCK;
} else if (priv->op_version >= GD_OP_VERSION_3_6_0) {
/* A bricks mount dir is required only by snapshots which were
@@ -348,6 +364,9 @@ glusterd_op_perform_replace_brick(glusterd_volinfo_t *volinfo, char *old_brick,
int32_t ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
+ struct statvfs brickstat = {
+ 0,
+ };
this = THIS;
GF_ASSERT(this);
@@ -366,13 +385,28 @@ glusterd_op_perform_replace_brick(glusterd_volinfo_t *volinfo, char *old_brick,
if (ret)
goto out;
+ if (!gf_uuid_compare(new_brickinfo->uuid, MY_UUID)) {
+ ret = sys_statvfs(new_brickinfo->path, &brickstat);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_STATVFS_FAILED,
+ "Failed to fetch disk utilization "
+ "from the brick (%s:%s). Please check the health of "
+ "the brick. Error code was %s",
+ new_brickinfo->hostname, new_brickinfo->path,
+ strerror(errno));
+
+ goto out;
+ }
+ new_brickinfo->statfs_fsid = brickstat.f_fsid;
+ }
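+
+ /* Recording statfs_fsid presumably lets glusterd later spot bricks that
+ * share one filesystem (e.g. for df-style size accounting); that reading
+ * is an assumption, not something this patch confirms. */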
+
ret = glusterd_volume_brickinfo_get_by_brick(old_brick, volinfo,
&old_brickinfo, _gf_false);
if (ret)
goto out;
- strncpy(new_brickinfo->brick_id, old_brickinfo->brick_id,
- sizeof(new_brickinfo->brick_id));
+ (void)snprintf(new_brickinfo->brick_id, sizeof(new_brickinfo->brick_id),
+ "%s", old_brickinfo->brick_id);
new_brickinfo->port = old_brickinfo->port;
/* A bricks mount dir is required only by snapshots which were
@@ -387,8 +421,8 @@ glusterd_op_perform_replace_brick(glusterd_volinfo_t *volinfo, char *old_brick,
"brick1.mount_dir not present");
goto out;
}
- strncpy(new_brickinfo->mount_dir, brick_mount_dir,
- sizeof(new_brickinfo->mount_dir));
+ (void)snprintf(new_brickinfo->mount_dir,
+ sizeof(new_brickinfo->mount_dir), "%s", brick_mount_dir);
}
cds_list_add(&new_brickinfo->brick_list, &old_brickinfo->brick_list);
diff --git a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
index 41adc40b5ce..e4d247a1d6c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
@@ -7,10 +7,10 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
-#include "glusterfs.h"
+#include <glusterfs/glusterfs.h>
#include "glusterd.h"
#include "glusterd-op-sm.h"
#include "glusterd-geo-rep.h"
@@ -18,12 +18,11 @@
#include "glusterd-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
-#include "glusterd-nfs-svc.h"
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
#include "glusterd-mgmt.h"
-#include "run.h"
-#include "syscall.h"
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
#include <signal.h>
@@ -153,35 +152,38 @@ glusterd_reset_brick_prevalidate(dict_t *dict, char **op_errstr,
if (ret)
goto out;
} else {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, host);
if (peerinfo == NULL) {
+ RCU_READ_UNLOCK;
ret = -1;
snprintf(msg, sizeof(msg), "%s, is not a friend.", host);
*op_errstr = gf_strdup(msg);
+ goto out;
} else if (!peerinfo->connected) {
+ RCU_READ_UNLOCK;
+ ret = -1;
snprintf(msg, sizeof(msg),
"%s,"
"is not connected at "
"the moment.",
host);
*op_errstr = gf_strdup(msg);
- ret = -1;
+ goto out;
} else if (GD_FRIEND_STATE_BEFRIENDED != peerinfo->state.state) {
+ RCU_READ_UNLOCK;
+ ret = -1;
snprintf(msg, sizeof(msg),
"%s, is not befriended "
"at the moment.",
host);
*op_errstr = gf_strdup(msg);
- ret = -1;
- }
- rcu_read_unlock();
-
- if (ret)
goto out;
+ }
+ RCU_READ_UNLOCK;
}
if (!(gf_uuid_compare(dst_brickinfo->uuid, MY_UUID))) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 40e22deff9b..88662e3bbae 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -14,19 +14,25 @@
#include "xdr-generic.h"
-#include "compat-errno.h"
+#include <glusterfs/compat-errno.h>
#include "glusterd-op-sm.h"
#include "glusterd-sm.h"
#include "glusterd.h"
#include "protocol-common.h"
#include "glusterd-utils.h"
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "glusterd-messages.h"
#include "glusterd-snapshot-utils.h"
#include <sys/uio.h>
#define SERVER_PATH_MAX (16 * 1024)
+#define GLUSTERD_STACK_DESTROY(frame) \
+ do { \
+ frame->local = NULL; \
+ STACK_DESTROY(frame->root); \
+ } while (0)
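+
+/* Clearing frame->local before STACK_DESTROY keeps the generic frame
+ * teardown from releasing a local that the callbacks below manage (and
+ * free) themselves. */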
+
extern glusterd_op_info_t opinfo;
extern uuid_t global_txn_id;
@@ -58,8 +64,6 @@ glusterd_op_send_cli_response(glusterd_op_t op, int32_t op_ret,
ctx = op_ctx;
switch (op) {
- case GD_OP_DETACH_TIER:
- case GD_OP_REMOVE_TIER_BRICK:
case GD_OP_REMOVE_BRICK: {
if (ctx)
ret = dict_get_strn(ctx, "errstr", SLEN("errstr"), &errstr);
@@ -70,9 +74,6 @@ glusterd_op_send_cli_response(glusterd_op_t op, int32_t op_ret,
errstr = "Error while resetting options";
break;
}
- case GD_OP_TIER_MIGRATE:
- case GD_OP_TIER_STATUS:
- case GD_OP_DETACH_TIER_STATUS:
case GD_OP_REBALANCE:
case GD_OP_DEFRAG_BRICK_VOLUME: {
if (ctx) {
@@ -138,9 +139,14 @@ glusterd_op_send_cli_response(glusterd_op_t op, int32_t op_ret,
case GD_OP_SCRUB_ONDEMAND:
case GD_OP_RESET_BRICK:
case GD_OP_MAX_OPVERSION:
- case GD_OP_TIER_START_STOP:
case GD_OP_DETACH_NOT_STARTED:
case GD_OP_GANESHA:
+ case GD_OP_DETACH_TIER:
+ case GD_OP_TIER_MIGRATE:
+ case GD_OP_TIER_START_STOP:
+ case GD_OP_TIER_STATUS:
+ case GD_OP_DETACH_TIER_STATUS:
+ case GD_OP_REMOVE_TIER_BRICK:
case GD_OP_ADD_TIER_BRICK:
{
@@ -177,10 +183,8 @@ glusterd_op_send_cli_response(glusterd_op_t op, int32_t op_ret,
ret = dict_allocate_and_serialize(ctx, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0)
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to "
- "serialize buffer");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
else
free_ptr = rsp.dict.dict_val;
}
@@ -266,14 +270,15 @@ __glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
if (peerinfo == NULL) {
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peerd %s(%s)", rsp.hostname,
uuid_utoa(rsp.uuid));
- goto unlock;
+ goto out;
}
/*
@@ -387,9 +392,10 @@ cont:
ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_NEW_FRIEND_SM_EVENT_GET_FAIL,
"Unable to get event");
- goto unlock;
+ goto out;
}
event->peername = gf_strdup(peerinfo->hostname);
@@ -403,7 +409,7 @@ cont:
"Received resp to probe req");
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
free(rsp.hostname); // malloced by xdr
@@ -467,16 +473,17 @@ __glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
(op_ret) ? "RJT" : "ACC", uuid_utoa(rsp.uuid), rsp.hostname,
rsp.port);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
if (peerinfo == NULL) {
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
"received friend add response from"
" unknown peer uuid: %s",
uuid_utoa(rsp.uuid));
- goto unlock;
+ goto out;
}
if (op_ret)
@@ -507,7 +514,7 @@ __glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = glusterd_friend_sm_inject_event(event);
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
ctx = ((call_frame_t *)myframe)->local;
((call_frame_t *)myframe)->local = NULL;
@@ -589,7 +596,7 @@ __glusterd_friend_remove_cbk(struct rpc_req *req, struct iovec *iov, int count,
rsp.port);
inject:
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, ctx->hostname);
if (peerinfo == NULL) {
@@ -622,7 +629,7 @@ inject:
op_ret = 0;
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
respond:
ret = glusterd_xfer_cli_deprobe_resp(ctx->req, op_ret, op_errno, NULL,
@@ -748,9 +755,9 @@ __glusterd_cluster_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -865,9 +872,9 @@ glusterd_mgmt_v3_lock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -973,9 +980,9 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
@@ -1079,9 +1086,9 @@ __glusterd_cluster_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
@@ -1203,7 +1210,7 @@ out:
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
if (peerinfo == NULL) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1230,7 +1237,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
if (ret)
@@ -1357,7 +1364,7 @@ __glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
uuid_utoa(*txn_id));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
if (peerinfo == NULL) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1408,7 +1415,7 @@ __glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
@@ -1455,6 +1462,7 @@ glusterd_rpc_probe(call_frame_t *frame, xlator_t *this, void *data)
dict_t *dict = NULL;
if (!frame || !this || !data) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
ret = -1;
goto out;
}
@@ -1464,15 +1472,24 @@ glusterd_rpc_probe(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=hostname", NULL);
goto out;
+ }
ret = dict_get_int32n(dict, "port", SLEN("port"), &port);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_DEBUG, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=port", NULL);
port = GF_DEFAULT_BASE_PORT;
+ }
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=peerinfo", NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, MY_UUID);
req.hostname = gf_strdup(hostname);
@@ -1501,6 +1518,7 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
dict_t *peer_data = NULL;
if (!frame || !this || !data) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
ret = -1;
goto out;
}
@@ -1510,11 +1528,11 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
@@ -1522,17 +1540,18 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
goto out;
}
- gf_uuid_copy(req.uuid, MY_UUID);
req.hostname = gf_strdup(peerinfo->hostname);
req.port = peerinfo->port;
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
- ret = glusterd_add_volumes_to_export_dict(&peer_data);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to add list of volumes "
- "in the peer_data dict for handshake");
+ gf_uuid_copy(req.uuid, MY_UUID);
+
+ peer_data = dict_new();
+ if (!peer_data) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
+ errno = ENOMEM;
goto out;
}
@@ -1563,10 +1582,26 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
}
}
- ret = dict_allocate_and_serialize(peer_data, &req.vols.vols_val,
- &req.vols.vols_len);
- if (ret)
+ /* Don't add any key-value pairs to the peer_data dictionary after
+ * calling this function: it hands back the serialized payload in
+ * req.vols, so later additions would be silently dropped.
+ */
+ ret = glusterd_add_volumes_to_export_dict(peer_data, &req.vols.vols_val,
+ &req.vols.vols_len);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to add list of volumes "
+ "in the peer_data dict for handshake");
goto out;
+ }
+
+ if (!req.vols.vols_len) {
+ ret = dict_allocate_and_serialize(peer_data, &req.vols.vols_val,
+ &req.vols.vols_len);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
+ goto out;
+ }
+ }
ret = glusterd_submit_request(
peerinfo->rpc, &req, frame, peerinfo->peer, GLUSTERD_FRIEND_ADD, NULL,
@@ -1604,11 +1639,11 @@ glusterd_rpc_friend_remove(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
@@ -1625,7 +1660,7 @@ glusterd_rpc_friend_remove(call_frame_t *frame, xlator_t *this, void *data)
glusterd_friend_remove_cbk,
(xdrproc_t)xdr_gd1_mgmt_friend_req);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
GF_FREE(req.hostname);
@@ -1739,8 +1774,11 @@ glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=peerinfo", NULL);
goto out;
+ }
// peerinfo should not be in payload
dict_deln(dict, "peerinfo", SLEN("peerinfo"));
@@ -1750,9 +1788,8 @@ glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
&req.dict.dict_len);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "Failed to serialize dict "
- "to request buffer");
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
@@ -1776,6 +1813,7 @@ glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data)
}
frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
if (!frame->cookie) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
ret = -1;
goto out;
}
@@ -1815,8 +1853,11 @@ glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=peerinfo", NULL);
goto out;
+ }
// peerinfo should not be in payload
dict_deln(dict, "peerinfo", SLEN("peerinfo"));
@@ -1826,9 +1867,8 @@ glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
&req.dict.dict_len);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "Failed to serialize dict "
- "to request buffer");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
@@ -1852,6 +1892,7 @@ glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data)
}
frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
if (!frame->cookie) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
ret = -1;
goto out;
}
@@ -1921,7 +1962,6 @@ glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data)
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
dict_t *dict = NULL;
- gf_boolean_t is_alloc = _gf_true;
uuid_t *txn_id = NULL;
if (!this) {
@@ -1934,8 +1974,11 @@ glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=peerinfo", NULL);
goto out;
+ }
// peerinfo should not be in payload
dict_deln(dict, "peerinfo", SLEN("peerinfo"));
@@ -1945,9 +1988,8 @@ glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "Failed to serialize dict "
- "to request buffer");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
/* Sending valid transaction ID to peers */
@@ -1969,6 +2011,7 @@ glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data)
}
frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
if (!frame->cookie) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
ret = -1;
goto out;
}
@@ -1980,7 +2023,7 @@ glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gd1_mgmt_stage_op_req);
out:
- if ((_gf_true == is_alloc) && req.buf.buf_val)
+ if (req.buf.buf_val)
GF_FREE(req.buf.buf_val);
gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
@@ -1999,7 +2042,6 @@ glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data)
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
dict_t *dict = NULL;
- gf_boolean_t is_alloc = _gf_true;
uuid_t *txn_id = NULL;
if (!this) {
@@ -2011,8 +2053,11 @@ glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=peerinfo", NULL);
goto out;
+ }
// peerinfo should not be in payload
dict_deln(dict, "peerinfo", SLEN("peerinfo"));
@@ -2022,9 +2067,8 @@ glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "Failed to serialize dict to "
- "request buffer");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
/* Sending valid transaction ID to peers */
@@ -2046,6 +2090,7 @@ glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data)
}
frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
if (!frame->cookie) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
ret = -1;
goto out;
}
@@ -2057,7 +2102,7 @@ glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gd1_mgmt_commit_op_req);
out:
- if ((_gf_true == is_alloc) && req.buf.buf_val)
+ if (req.buf.buf_val)
GF_FREE(req.buf.buf_val);
gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
@@ -2279,8 +2324,7 @@ glusterd_brick_op(call_frame_t *frame, xlator_t *this, void *data)
rpc = glusterd_pending_node_get_rpc(pending_node);
if (!rpc) {
- if (pending_node->type == GD_NODE_REBALANCE ||
- pending_node->type == GD_NODE_TIERD) {
+ if (pending_node->type == GD_NODE_REBALANCE) {
opinfo.brick_pending_count = 0;
ret = 0;
GF_FREE(req->input.input_val);
diff --git a/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c b/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
index 3ae5b0e861d..c49a0eefba5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
@@ -8,8 +8,8 @@
cases as published by the Free Software Foundation.
*/
-#include "globals.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
@@ -117,8 +117,10 @@ glusterd_scrubsvc_start(glusterd_svc_t *svc, int flags)
dict_t *cmdict = NULL;
cmdict = dict_new();
- if (!cmdict)
+ if (!cmdict) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto error_return;
+ }
ret = dict_set_str(cmdict, "cmdarg0", "--global-timer-wheel");
if (ret)
@@ -147,7 +149,7 @@ glusterd_scrubsvc_reconfigure()
gf_boolean_t identical = _gf_false;
this = THIS;
- GF_VALIDATE_OR_GOTO(this->name, this, out);
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
priv = this->private;
GF_VALIDATE_OR_GOTO(this->name, priv, out);
@@ -200,6 +202,6 @@ manager:
ret = priv->scrub_svc.manager(&(priv->scrub_svc), NULL, PROC_START_NO_WAIT);
out:
- gf_msg_debug(this->name, 0, "Returning %d", ret);
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
index cfa0cce0aba..b0b8a2e4018 100644
--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
@@ -7,7 +7,7 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-messages.h"
@@ -89,12 +89,15 @@ glusterd_validate_quorum(xlator_t *this, glusterd_op_t op, dict_t *dict,
ret = dict_get_str(dict, "volname", &volname);
if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
ret = 0;
goto out;
}
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, NULL);
ret = 0;
goto out;
}
@@ -217,7 +220,7 @@ glusterd_get_quorum_cluster_counts(xlator_t *this, int *active_count,
if (active_count)
*active_count = 1;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
if (_is_contributing_to_quorum(peerinfo->quorum_contrib))
@@ -225,7 +228,7 @@ glusterd_get_quorum_cluster_counts(xlator_t *this, int *active_count,
if (active_count && (peerinfo->quorum_contrib == QUORUM_UP))
*active_count = *active_count + 1;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = dict_get_str(conf->opts, GLUSTERD_QUORUM_RATIO_KEY, &val);
if (ret == 0) {
@@ -252,8 +255,11 @@ glusterd_is_volume_in_server_quorum(glusterd_volinfo_t *volinfo)
int ret = 0;
ret = dict_get_str(volinfo->dict, GLUSTERD_QUORUM_TYPE_KEY, &quorum_type);
- if (ret)
+ if (ret) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", GLUSTERD_QUORUM_TYPE_KEY, NULL);
goto out;
+ }
if (strcmp(quorum_type, GLUSTERD_SERVER_QUORUM) == 0)
res = _gf_true;
@@ -287,8 +293,11 @@ does_gd_meet_server_quorum(xlator_t *this)
ret = glusterd_get_quorum_cluster_counts(this, &active_count,
&quorum_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_QUORUM_CLUSTER_COUNT_GET_FAIL, NULL);
goto out;
+ }
if (!does_quorum_meet(active_count, quorum_count)) {
goto out;
@@ -372,6 +381,7 @@ glusterd_do_volume_quorum_action(xlator_t *this, glusterd_volinfo_t *volinfo,
if (!brickinfo->start_triggered) {
pthread_mutex_lock(&brickinfo->restart_mutex);
{
+ /* coverity[SLEEP] */
ret = glusterd_brick_start(volinfo, brickinfo, _gf_false,
_gf_false);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
new file mode 100644
index 00000000000..5661e391a9c
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
@@ -0,0 +1,153 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-shd-svc-helper.h"
+#include "glusterd-messages.h"
+#include "glusterd-volgen.h"
+
+void
+glusterd_svc_build_shd_socket_filepath(glusterd_volinfo_t *volinfo, char *path,
+ int path_len)
+{
+ char sockfilepath[PATH_MAX] = {
+ 0,
+ };
+ char rundir[PATH_MAX] = {
+ 0,
+ };
+ int32_t len = 0;
+ glusterd_conf_t *priv = THIS->private;
+
+ if (!priv)
+ return;
+
+ GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
+ len = snprintf(sockfilepath, sizeof(sockfilepath), "%s/run-%s", rundir,
+ uuid_utoa(MY_UUID));
+ if ((len < 0) || (len >= sizeof(sockfilepath))) {
+ sockfilepath[0] = 0;
+ }
+
+ glusterd_set_socket_filepath(sockfilepath, path, path_len);
+}
+
+void
+glusterd_svc_build_shd_pidfile(glusterd_volinfo_t *volinfo, char *path,
+ int path_len)
+{
+ char rundir[PATH_MAX] = {
+ 0,
+ };
+ glusterd_conf_t *priv = THIS->private;
+
+ if (!priv)
+ return;
+
+ GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
+
+ snprintf(path, path_len, "%s/%s-shd.pid", rundir, volinfo->volname);
+}
+
+void
+glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
+ int path_len)
+{
+ char workdir[PATH_MAX] = {
+ 0,
+ };
+ glusterd_conf_t *priv = THIS->private;
+
+ if (!priv)
+ return;
+
+ GLUSTERD_GET_VOLUME_DIR(workdir, volinfo, priv);
+
+ snprintf(path, path_len, "%s/%s-shd.vol", workdir, volinfo->volname);
+}
+
+void
+glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd)
+{
+ glusterd_svc_proc_t *svc_proc = NULL;
+ glusterd_svc_t *svc = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_boolean_t need_unref = _gf_false;
+ rpc_clnt_t *rpc = NULL;
+
+ conf = THIS->private;
+ if (!conf)
+ return;
+
+ GF_VALIDATE_OR_GOTO(THIS->name, conf, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, shd, out);
+
+ svc = &shd->svc;
+ shd->attached = _gf_false;
+
+ if (svc->conn.rpc) {
+ rpc_clnt_unref(svc->conn.rpc);
+ svc->conn.rpc = NULL;
+ }
+
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ svc_proc = svc->svc_proc;
+ svc->svc_proc = NULL;
+ svc->inited = _gf_false;
+ cds_list_del_init(&svc->mux_svc);
+ glusterd_unlink_file(svc->proc.pidfile);
+
+ if (svc_proc && cds_list_empty(&svc_proc->svcs)) {
+ cds_list_del_init(&svc_proc->svc_proc_list);
+ /* We cannot free the svc_proc list from here: pending events
+ * on the rpc may still access the corresponding svc_proc. The
+ * rpc is unreffed and the memory cleaned up from the notify
+ * function when RPC_CLNT_DESTROY is received.
+ */
+ need_unref = _gf_true;
+ rpc = svc_proc->rpc;
+ svc_proc->rpc = NULL;
+ }
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ /*rpc unref has to be performed outside the lock*/
+ if (need_unref && rpc)
+ rpc_clnt_unref(rpc);
+out:
+ return;
+}
+
+int
+glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict)
+{
+ int ret = -1;
+ glusterd_svc_t *svc = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ svc = &(volinfo->shd.svc);
+
+ ret = dict_set_dynstr_with_alloc(dict, "pidfile", svc->proc.pidfile);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set pidfile %s in dict", svc->proc.pidfile);
+ goto out;
+ }
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
new file mode 100644
index 00000000000..1f0984ba857
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
@@ -0,0 +1,42 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_SHD_SVC_HELPER_H_
+#define _GLUSTERD_SHD_SVC_HELPER_H_
+
+#include "glusterd.h"
+#include "glusterd-svc-mgmt.h"
+
+void
+glusterd_svc_build_shd_socket_filepath(glusterd_volinfo_t *volinfo, char *path,
+ int path_len);
+
+void
+glusterd_svc_build_shd_pidfile(glusterd_volinfo_t *volinfo, char *path,
+ int path_len);
+
+void
+glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
+ int path_len);
+
+void
+glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd);
+
+int
+glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
+ glusterd_svc_t *svc, int flags);
+
+int
+glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo);
+
+int
+glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index 548231a30fd..1c56384a14b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -8,14 +8,15 @@
cases as published by the Free Software Foundation.
*/
-#include "globals.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
-#include "glusterd-svc-mgmt.h"
#include "glusterd-shd-svc.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-svc-helper.h"
+#include "glusterd-store.h"
#define GD_SHD_PROCESS_NAME "--process-name"
char *shd_svc_name = "glustershd";
@@ -23,52 +24,186 @@ char *shd_svc_name = "glustershd";
void
glusterd_shdsvc_build(glusterd_svc_t *svc)
{
+ int ret = -1;
+ ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name);
+ if (ret < 0)
+ return;
+
+ CDS_INIT_LIST_HEAD(&svc->mux_svc);
svc->manager = glusterd_shdsvc_manager;
svc->start = glusterd_shdsvc_start;
- svc->stop = glusterd_svc_stop;
+ svc->stop = glusterd_shdsvc_stop;
+ svc->reconfigure = glusterd_shdsvc_reconfigure;
}
int
-glusterd_shdsvc_init(glusterd_svc_t *svc)
+glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
+ glusterd_svc_proc_t *mux_svc)
{
- return glusterd_svc_init(svc, shd_svc_name);
+ int ret = -1;
+ char rundir[PATH_MAX] = {
+ 0,
+ };
+ char sockpath[PATH_MAX] = {
+ 0,
+ };
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+ char volfile[PATH_MAX] = {
+ 0,
+ };
+ char logfile[PATH_MAX] = {
+ 0,
+ };
+ char volfileid[256] = {0};
+ glusterd_svc_t *svc = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_muxsvc_conn_notify_t notify = NULL;
+ xlator_t *this = NULL;
+ char *volfileserver = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(THIS->name, this, out);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ volinfo = data;
+ GF_VALIDATE_OR_GOTO(this->name, data, out);
+ GF_VALIDATE_OR_GOTO(this->name, mux_svc, out);
+
+ svc = &(volinfo->shd.svc);
+
+ ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name);
+ if (ret < 0)
+ goto out;
+
+ notify = glusterd_muxsvc_common_rpc_notify;
+ glusterd_store_perform_node_state_store(volinfo);
+
+ GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
+ glusterd_svc_create_rundir(rundir);
+
+ glusterd_svc_build_logfile_path(shd_svc_name, priv->logdir, logfile,
+ sizeof(logfile));
+
+ /* Initialize the connection mgmt */
+ if (mux_conn && mux_svc->rpc) {
+ /* multiplexed svc */
+ svc->conn.frame_timeout = mux_conn->frame_timeout;
+ /* This will be unrefed from glusterd_shd_svcproc_cleanup() */
+ svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc);
+ ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s",
+ mux_conn->sockpath);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = mkdir_p(priv->logdir, 0755, _gf_true);
+ if ((ret == -1) && (EEXIST != errno)) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create logdir %s", logdir);
+ goto out;
+ }
+
+ glusterd_svc_build_shd_socket_filepath(volinfo, sockpath,
+ sizeof(sockpath));
+ ret = glusterd_muxsvc_conn_init(&(svc->conn), mux_svc, sockpath, 600,
+ notify);
+ if (ret)
+ goto out;
+ /* This will be unrefed when the last svc is detached from the list */
+ if (!mux_svc->rpc)
+ mux_svc->rpc = rpc_clnt_ref(svc->conn.rpc);
+ }
+
+ /* Initialize the process mgmt */
+ glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile));
+ glusterd_svc_build_shd_volfile_path(volinfo, volfile, PATH_MAX);
+ len = snprintf(volfileid, sizeof(volfileid), "shd/%s", volinfo->volname);
+ if ((len < 0) || (len >= sizeof(volfileid))) {
+ ret = -1;
+ goto out;
+ }
+
+ if (dict_get_strn(this->options, "transport.socket.bind-address",
+ SLEN("transport.socket.bind-address"),
+ &volfileserver) != 0) {
+ volfileserver = "localhost";
+ }
+ ret = glusterd_proc_init(&(svc->proc), shd_svc_name, pidfile, logdir,
+ logfile, volfile, volfileid, volfileserver);
+ if (ret)
+ goto out;
+
+out:
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
+ return ret;
}
-static int
-glusterd_shdsvc_create_volfile()
+int
+glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo)
{
char filepath[PATH_MAX] = {
0,
};
+
int ret = -1;
- glusterd_conf_t *conf = THIS->private;
dict_t *mod_dict = NULL;
-
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+
+ glusterd_svc_build_shd_volfile_path(volinfo, filepath, PATH_MAX);
+ if (!glusterd_is_shd_compatible_volume(volinfo)) {
+ /* If the volfile exists, delete it. This happens when the
+ * volume is converted from replica/ec to distribute.
+ */
+ (void)glusterd_unlink_file(filepath);
+ ret = 0;
+ goto out;
+ }
mod_dict = dict_new();
- if (!mod_dict)
+ if (!mod_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cluster.background-self-heal-count", NULL);
goto out;
+ }
ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on");
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cluster.data-self-heal", NULL);
goto out;
+ }
ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on");
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cluster.metadata-self-heal", NULL);
goto out;
+ }
ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on");
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cluster.entry-self-heal", NULL);
goto out;
+ }
- glusterd_svc_build_volfile_path(shd_svc_name, conf->workdir, filepath,
- sizeof(filepath));
- ret = glusterd_create_global_volfile(build_shd_graph, filepath, mod_dict);
+ ret = glusterd_shdsvc_generate_volfile(volinfo, filepath, mod_dict);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
"Failed to create volfile");
goto out;
}
@@ -76,31 +211,109 @@ glusterd_shdsvc_create_volfile()
out:
if (mod_dict)
dict_unref(mod_dict);
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
+gf_boolean_t
+glusterd_svcs_shd_compatible_volumes_stopped(glusterd_svc_t *svc)
+{
+ glusterd_svc_proc_t *svc_proc = NULL;
+ glusterd_shdsvc_t *shd = NULL;
+ glusterd_svc_t *temp_svc = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_boolean_t comp = _gf_false;
+ glusterd_conf_t *conf = THIS->private;
+
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ svc_proc = svc->svc_proc;
+ if (!svc_proc)
+ goto unlock;
+ cds_list_for_each_entry(temp_svc, &svc_proc->svcs, mux_svc)
+ {
+ /* Get volinfo->shd from svc object */
+ shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
+ if (!shd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
+ "Failed to get shd object "
+ "from shd service");
+ goto unlock;
+ }
+
+ /* Get volinfo from shd */
+ volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
+ if (!volinfo) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo from "
+ "from shd");
+ goto unlock;
+ }
+ if (!glusterd_is_shd_compatible_volume(volinfo))
+ continue;
+ if (volinfo->status == GLUSTERD_STATUS_STARTED)
+ goto unlock;
+ }
+ comp = _gf_true;
+ }
+unlock:
+ pthread_mutex_unlock(&conf->attach_lock);
+out:
+ return comp;
+}
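+
+/* Both cds_list_entry() calls above are the container_of idiom: the svc is
+ * embedded in a glusterd_shdsvc_t, which is itself embedded in the owning
+ * glusterd_volinfo_t, so plain pointer arithmetic recovers the volume. */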
+
int
glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
{
- int ret = 0;
+ int ret = -1;
glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_boolean_t shd_restart = _gf_false;
- if (!svc->inited) {
- ret = glusterd_shdsvc_init(svc);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
- "Failed to init shd "
- "service");
- goto out;
- } else {
- svc->inited = _gf_true;
- gf_msg_debug(THIS->name, 0, "shd service initialized");
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ volinfo = data;
+ GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
+
+ if (volinfo->is_snap_volume) {
+ /* Healing of a snap volume is not supported yet. */
+ ret = 0;
+ goto out;
+ }
+
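+ /* Serialize shd manager runs: wait while another run holds the
+ * restart_shd token, then take it. synccond_wait atomically drops
+ * big_lock while sleeping, and the broadcast in the out: path wakes
+ * the next waiter. */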
+ while (conf->restart_shd) {
+ synccond_wait(&conf->cond_restart_shd, &conf->big_lock);
+ }
+ conf->restart_shd = _gf_true;
+ shd_restart = _gf_true;
+
+ if (volinfo)
+ glusterd_volinfo_ref(volinfo);
+
+ if (!glusterd_is_shd_compatible_volume(volinfo)) {
+ ret = 0;
+ if (svc->inited) {
+ /* The shd was running for this volume, and the volume has now
+ * been converted to a non-shd type, so just stop the shd.
+ */
+ ret = svc->stop(svc, SIGTERM);
}
+ goto out;
}
+ ret = glusterd_shdsvc_create_volfile(volinfo);
+ if (ret)
+ goto out;
- volinfo = data;
+ ret = glusterd_shd_svc_mux_init(volinfo, svc);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
+ "Failed to init shd service");
+ goto out;
+ }
/* If all the volumes are stopped or all shd compatible volumes
* are stopped then stop the service if:
@@ -110,31 +323,31 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
* - volinfo is NULL or
* - volinfo is present and volume is shd compatible
*/
- if (glusterd_are_all_volumes_stopped() ||
- glusterd_all_shd_compatible_volumes_stopped()) {
- if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) {
- ret = svc->stop(svc, SIGTERM);
- }
- } else {
- if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) {
- ret = glusterd_shdsvc_create_volfile();
- if (ret)
- goto out;
-
+ if (glusterd_svcs_shd_compatible_volumes_stopped(svc)) {
+ /* TODO
+ * Take a lock and detach all svc's to stop the process;
+ * also reset the init flag.
+ */
+ ret = svc->stop(svc, SIGTERM);
+ } else if (volinfo) {
+ if (volinfo->status != GLUSTERD_STATUS_STARTED) {
ret = svc->stop(svc, SIGTERM);
if (ret)
goto out;
-
+ }
+ if (volinfo->status == GLUSTERD_STATUS_STARTED) {
ret = svc->start(svc, flags);
if (ret)
goto out;
-
- ret = glusterd_conn_connect(&(svc->conn));
- if (ret)
- goto out;
}
}
out:
+ if (shd_restart) {
+ conf->restart_shd = _gf_false;
+ synccond_broadcast(&conf->cond_restart_shd);
+ }
+ if (volinfo)
+ glusterd_volinfo_unref(volinfo);
if (ret)
gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
@@ -143,11 +356,14 @@ out:
}
int
-glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
+glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags)
{
int ret = -1;
char glusterd_uuid_option[PATH_MAX] = {0};
+ char client_pid[32] = {0};
dict_t *cmdline = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
cmdline = dict_new();
if (!cmdline)
@@ -158,51 +374,190 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
if (ret < 0)
goto out;
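+ /* shd registers with a reserved (negative) client pid so bricks and
+ * other daemons can tell self-heal traffic apart from regular
+ * clients. */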
+ ret = snprintf(client_pid, sizeof(client_pid), "--client-pid=%d",
+ GF_CLIENT_PID_SELF_HEALD);
+ if (ret < 0)
+ goto out;
+
+ ret = dict_set_str(cmdline, "arg", client_pid);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=arg", NULL);
+ goto out;
+ }
+
/* Pass cmdline arguments as key-value pairs. The key is merely
* a carrier and is not used. Since the dictionary follows LIFO, the
* values should be put in reverse order. */
ret = dict_set_str(cmdline, "arg4", svc->name);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=arg4", NULL);
goto out;
+ }
ret = dict_set_str(cmdline, "arg3", GD_SHD_PROCESS_NAME);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=arg3", NULL);
goto out;
+ }
ret = dict_set_str(cmdline, "arg2", glusterd_uuid_option);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=arg2", NULL);
goto out;
+ }
ret = dict_set_str(cmdline, "arg1", "--xlator-option");
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=arg1", NULL);
goto out;
+ }
ret = glusterd_svc_start(svc, flags, cmdline);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_GLUSTER_SERVICE_START_FAIL, NULL);
+ goto out;
+ }
+ ret = glusterd_conn_connect(&(svc->conn));
out:
if (cmdline)
dict_unref(cmdline);
+ return ret;
+}
+
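+/* Recover from a failed attach: drop the stale mux linkage for this
+ * volume's shd, allocate a fresh svc process, re-init the svc against it
+ * and start a brand-new shd daemon. */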
+int
+glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
+ glusterd_svc_t *svc, int flags)
+{
+ int ret = -1;
+ glusterd_svc_proc_t *mux_proc = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ conf = THIS->private;
+
+ if (!conf || !volinfo || !svc)
+ return -1;
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
+ mux_proc = glusterd_svcprocess_new();
+ if (!mux_proc) {
+ return -1;
+ }
+ ret = glusterd_shdsvc_init(volinfo, NULL, mux_proc);
+ if (ret)
+ return -1;
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs);
+ svc->svc_proc = mux_proc;
+ cds_list_del_init(&svc->mux_svc);
+ cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+
+ ret = glusterd_new_shd_svc_start(svc, flags);
+ if (!ret) {
+ volinfo->shd.attached = _gf_true;
+ }
+ return ret;
+}
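+
+/* glusterd_shdsvc_start() below recovers the enclosing structs from an
+ * embedded member via cds_list_entry(), i.e. the container_of idiom. A
+ * minimal standalone sketch of that idiom (hypothetical toy types, not
+ * glusterd code):
+ *
+ *     #include <stddef.h>
+ *     struct svc { int state; };
+ *     struct volinfo { struct svc shd_svc; char name[16]; };
+ *     #define container_of(ptr, type, member) \
+ *         ((type *)((char *)(ptr) - offsetof(type, member)))
+ *     static inline struct volinfo *
+ *     volinfo_from_svc(struct svc *s)
+ *     {
+ *         return container_of(s, struct volinfo, shd_svc);
+ *     }
+ */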
+int
+glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
+{
+ int ret = -1;
+ glusterd_shdsvc_t *shd = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+
+ /* Get volinfo->shd from svc object */
+ shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
+ if (!shd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
+ "Failed to get shd object "
+ "from shd service");
+ return -1;
+ }
+
+ /* Get volinfo from shd */
+ volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
+ if (!volinfo) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo from "
+ "from shd");
+ return -1;
+ }
+
+ if (volinfo->status != GLUSTERD_STATUS_STARTED)
+ return -1;
+
+ glusterd_volinfo_ref(volinfo);
+
+ if (!svc->inited) {
+ ret = glusterd_shd_svc_mux_init(volinfo, svc);
+ if (ret)
+ goto out;
+ }
+
+ if (shd->attached) {
+ glusterd_volinfo_ref(volinfo);
+ /* Unref will happen from glusterd_svc_attach_cbk */
+ ret = glusterd_attach_svc(svc, volinfo, flags);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to attach shd svc(volume=%s) to pid=%d",
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
+ glusterd_volinfo_unref(volinfo);
+ goto out1;
+ }
+ goto out;
+ }
+ ret = glusterd_new_shd_svc_start(svc, flags);
+ if (!ret) {
+ shd->attached = _gf_true;
+ }
+out:
+ if (ret && volinfo)
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
+ if (volinfo)
+ glusterd_volinfo_unref(volinfo);
+out1:
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
}
int
-glusterd_shdsvc_reconfigure()
+glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo)
{
int ret = -1;
xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
gf_boolean_t identical = _gf_false;
+ dict_t *mod_dict = NULL;
+ glusterd_svc_t *svc = NULL;
this = THIS;
- GF_VALIDATE_OR_GOTO(this->name, this, out);
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
+ if (!volinfo) {
+ /* reconfigure will be called separately*/
+ ret = 0;
+ goto out;
+ }
- if (glusterd_all_shd_compatible_volumes_stopped())
+ glusterd_volinfo_ref(volinfo);
+ svc = &(volinfo->shd.svc);
+ if (glusterd_svcs_shd_compatible_volumes_stopped(svc))
goto manager;
/*
@@ -210,8 +565,59 @@ glusterd_shdsvc_reconfigure()
* and cksum i.e. "character-by-character". If YES, then
* NOTHING has been changed, just return.
*/
- ret = glusterd_svc_check_volfile_identical(priv->shd_svc.name,
- build_shd_graph, &identical);
+
+ if (!glusterd_is_shd_compatible_volume(volinfo)) {
+ if (svc->inited)
+ goto manager;
+
+ /* Nothing to do if not shd compatible */
+ ret = 0;
+ goto out;
+ }
+ mod_dict = dict_new();
+ if (!mod_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
+
+ ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cluster.background-self-heal-count", NULL);
+ goto out;
+ }
+
+ ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on");
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cluster.data-self-heal", NULL);
+ goto out;
+ }
+
+ ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on");
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cluster.metadata-self-heal", NULL);
+ goto out;
+ }
+
+ ret = dict_set_int32(mod_dict, "graph-check", 1);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=graph-check", NULL);
+ goto out;
+ }
+
+ ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on");
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cluster.entry-self-heal", NULL);
+ goto out;
+ }
+
+ ret = glusterd_volume_svc_check_volfile_identical(
+ "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile,
+ &identical);
if (ret)
goto out;
@@ -226,8 +632,9 @@ glusterd_shdsvc_reconfigure()
* changed, then inform the xlator to reconfigure the options.
*/
identical = _gf_false; /* RESET the FLAG */
- ret = glusterd_svc_check_topology_identical(priv->shd_svc.name,
- build_shd_graph, &identical);
+ ret = glusterd_volume_svc_check_topology_identical(
+ "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile,
+ &identical);
if (ret)
goto out;
@@ -235,7 +642,7 @@ glusterd_shdsvc_reconfigure()
* options to shd volfile, so that shd will be reconfigured.
*/
if (identical) {
- ret = glusterd_shdsvc_create_volfile();
+ ret = glusterd_shdsvc_create_volfile(volinfo);
if (ret == 0) { /* Only if above PASSES */
ret = glusterd_fetchspec_notify(THIS);
}
@@ -243,12 +650,147 @@ glusterd_shdsvc_reconfigure()
}
manager:
/*
- * shd volfile's topology has been changed. shd server needs
- * to be RESTARTED to ACT on the changed volfile.
+ * shd volfile's topology has been changed. The shd service needs
+ * to be RECONFIGURED to ACT on the changed volfile.
*/
- ret = priv->shd_svc.manager(&(priv->shd_svc), NULL, PROC_START_NO_WAIT);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
out:
- gf_msg_debug(this->name, 0, "Returning %d", ret);
+ if (volinfo)
+ glusterd_volinfo_unref(volinfo);
+ if (mod_dict)
+ dict_unref(mod_dict);
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_shdsvc_restart()
+{
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
+ int ret = -1;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = NULL;
+ glusterd_svc_t *svc = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ pthread_mutex_lock(&conf->volume_lock);
+ cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
+ {
+ glusterd_volinfo_ref(volinfo);
+ pthread_mutex_unlock(&conf->volume_lock);
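+ /* Drop volume_lock while the manager runs: it may block inside a
+ * synctask, and the ref taken above keeps volinfo alive until the
+ * unref below. */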
+ /* Start per volume shd svc */
+ if (volinfo->status == GLUSTERD_STATUS_STARTED) {
+ svc = &(volinfo->shd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHD_START_FAIL,
+ "Couldn't start shd for "
+ "vol: %s on restart",
+ volinfo->volname);
+ gf_event(EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s",
+ volinfo->volname, svc->name);
+ glusterd_volinfo_unref(volinfo);
+ goto out;
+ }
+ }
+ glusterd_volinfo_unref(volinfo);
+ pthread_mutex_lock(&conf->volume_lock);
+ }
+ pthread_mutex_unlock(&conf->volume_lock);
+out:
+ return ret;
+}
+
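+/* Stop one multiplexed shd instance: detach this volume's graph from the
+ * shared process and kill the process itself only when the last svc has
+ * left the mux list. */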
+int
+glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
+{
+ int ret = -1;
+ glusterd_svc_proc_t *svc_proc = NULL;
+ glusterd_shdsvc_t *shd = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_boolean_t empty = _gf_false;
+ glusterd_conf_t *conf = NULL;
+ int pid = -1;
+
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ svc_proc = svc->svc_proc;
+ if (!svc_proc) {
+ /*
+ * This can happen when stop was called on a volume that is not shd
+ * compatible.
+ */
+ gf_msg_debug("glusterd", 0, "svc_proc is null, ie shd already stopped");
+ ret = 0;
+ goto out;
+ }
+
+ /* Get volinfo->shd from svc object */
+ shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
+ if (!shd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
+ "Failed to get shd object "
+ "from shd service");
+ return -1;
+ }
+
+ /* Get volinfo from shd */
+ volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
+ if (!volinfo) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo from "
+ "from shd");
+ return -1;
+ }
+
+ glusterd_volinfo_ref(volinfo);
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ if (!gf_is_service_running(svc->proc.pidfile, &pid)) {
+ gf_msg_debug(THIS->name, 0, "shd isn't running");
+ }
+ cds_list_del_init(&svc->mux_svc);
+ empty = cds_list_empty(&svc_proc->svcs);
+ if (empty) {
+ svc_proc->status = GF_SVC_STOPPING;
+ cds_list_del_init(&svc_proc->svc_proc_list);
+ }
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ if (empty) {
+ /* Unref will happen when destroying the connection */
+ glusterd_volinfo_ref(volinfo);
+ svc_proc->data = volinfo;
+ ret = glusterd_svc_stop(svc, sig);
+ if (ret) {
+ glusterd_volinfo_unref(volinfo);
+ goto out;
+ }
+ }
+ if (!empty && pid != -1) {
+ ret = glusterd_detach_svc(svc, volinfo, sig);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
+ "shd service is failed to detach volume %s from pid %d",
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ else
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_SVC_STOP_SUCCESS,
+ "Shd service is detached for volume %s from pid %d",
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ }
+ svc->online = _gf_false;
+ (void)glusterd_unlink_file((char *)svc->proc.pidfile);
+ glusterd_shd_svcproc_cleanup(shd);
+ ret = 0;
+ glusterd_volinfo_unref(volinfo);
+out:
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
index 775a9d44a2c..55b409f4b69 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
@@ -12,12 +12,20 @@
#define _GLUSTERD_SHD_SVC_H_
#include "glusterd-svc-mgmt.h"
+#include "glusterd.h"
+
+typedef struct glusterd_shdsvc_ glusterd_shdsvc_t;
+struct glusterd_shdsvc_ {
+ glusterd_svc_t svc;
+ gf_boolean_t attached;
+};
void
glusterd_shdsvc_build(glusterd_svc_t *svc);
int
-glusterd_shdsvc_init(glusterd_svc_t *svc);
+glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
+ glusterd_svc_proc_t *svc_proc);
int
glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags);
@@ -27,4 +35,11 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags);
int
glusterd_shdsvc_reconfigure();
+
+int
+glusterd_shdsvc_restart();
+
+int
+glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 35bc71455d2..bf2d81b644a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -13,20 +13,20 @@
#include <sys/resource.h>
#include <libgen.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
#include "fnmatch.h"
-#include "xlator.h"
+#include <glusterfs/xlator.h>
#include "protocol-common.h"
#include "glusterd.h"
-#include "call-stub.h"
-#include "defaults.h"
-#include "list.h"
+#include <glusterfs/call-stub.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/list.h>
#include "glusterd-messages.h"
-#include "dict.h"
-#include "compat.h"
-#include "compat-errno.h"
-#include "statedump.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/statedump.h>
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
@@ -146,24 +146,35 @@ glusterd_broadcast_friend_delete(char *hostname, uuid_t uuid)
ctx.op = GD_FRIEND_UPDATE_DEL;
friends = dict_new();
- if (!friends)
+ if (!friends) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "op");
ret = dict_set_int32n(friends, key, keylen, ctx.op);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "hostname");
ret = dict_set_strn(friends, key, keylen, hostname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
ret = dict_set_int32n(friends, "count", SLEN("count"), count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
if (!peerinfo->connected || !peerinfo->peer)
@@ -175,9 +186,10 @@ glusterd_broadcast_friend_delete(char *hostname, uuid_t uuid)
*/
ret = dict_set_static_ptr(friends, "peerinfo", peerinfo);
if (ret) {
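+            /* The out: label no longer drops the read lock, so every early
+             * exit from the critical section must release it first. */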
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
- goto unlock;
+ goto out;
}
proc = &peerinfo->peer->proctable[GLUSTERD_FRIEND_UPDATE];
@@ -185,15 +197,13 @@ glusterd_broadcast_friend_delete(char *hostname, uuid_t uuid)
ret = proc->fn(NULL, this, friends);
}
}
-unlock:
- rcu_read_unlock();
-
- gf_msg_debug("glusterd", 0, "Returning with %d", ret);
+ RCU_READ_UNLOCK;
out:
if (friends)
dict_unref(friends);
+ gf_msg_debug("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -229,29 +239,32 @@ glusterd_ac_reverse_probe_begin(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(event);
GF_ASSERT(ctx);
- rcu_read_lock();
+ new_ev_ctx = GF_CALLOC(1, sizeof(*new_ev_ctx), gf_gld_mt_probe_ctx_t);
+
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
+ ret = -1;
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
- ret = -1;
goto out;
}
ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_PROBE, &new_event);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
"Unable to get new new_event");
ret = -1;
goto out;
}
- new_ev_ctx = GF_CALLOC(1, sizeof(*new_ev_ctx), gf_gld_mt_probe_ctx_t);
-
if (!new_ev_ctx) {
+ RCU_READ_UNLOCK;
ret = -1;
goto out;
}
@@ -266,6 +279,8 @@ glusterd_ac_reverse_probe_begin(glusterd_friend_sm_event_t *event, void *ctx)
ret = glusterd_friend_sm_inject_event(new_event);
+ RCU_READ_UNLOCK;
+
if (ret) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Unable to inject new_event %d, "
@@ -274,8 +289,6 @@ glusterd_ac_reverse_probe_begin(glusterd_friend_sm_event_t *event, void *ctx)
}
out:
- rcu_read_unlock();
-
if (ret) {
if (new_event)
GF_FREE(new_event->peername);
@@ -305,31 +318,34 @@ glusterd_ac_friend_add(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(conf);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
goto out;
}
- if (!peerinfo->peer)
+ if (!peerinfo->peer) {
+ RCU_READ_UNLOCK;
goto out;
+ }
proc = &peerinfo->peer->proctable[GLUSTERD_FRIEND_ADD];
if (proc->fn) {
frame = create_frame(this, this->ctx->pool);
if (!frame) {
+ RCU_READ_UNLOCK;
goto out;
}
frame->local = ctx;
ret = proc->fn(frame, this, event);
}
+ RCU_READ_UNLOCK;
out:
- rcu_read_unlock();
-
if (ret && frame)
STACK_DESTROY(frame->root);
@@ -361,34 +377,49 @@ glusterd_ac_friend_probe(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(conf);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, probe_ctx->hostname);
if (peerinfo == NULL) {
// We should not reach this state ideally
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND, NULL);
ret = -1;
- goto out;
+ goto unlock;
}
- if (!peerinfo->peer)
- goto out;
+ if (!peerinfo->peer) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADDRESS_GET_FAIL,
+ NULL);
+ goto unlock;
+ }
proc = &peerinfo->peer->proctable[GLUSTERD_PROBE_QUERY];
if (proc->fn) {
frame = create_frame(this, this->ctx->pool);
if (!frame) {
- goto out;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL,
+ NULL);
+ goto unlock;
}
frame->local = ctx;
dict = dict_new();
- if (!dict)
- goto out;
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
+ goto unlock;
+ }
ret = dict_set_strn(dict, "hostname", SLEN("hostname"),
probe_ctx->hostname);
- if (ret)
- goto out;
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=hostname", NULL);
+ goto unlock;
+ }
ret = dict_set_int32n(dict, "port", SLEN("port"), probe_ctx->port);
- if (ret)
- goto out;
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=port", NULL);
+ goto unlock;
+ }
/* The peerinfo reference being set here is going to be used
* only within this critical section, in glusterd_rpc_probe
@@ -396,6 +427,7 @@ glusterd_ac_friend_probe(glusterd_friend_sm_event_t *event, void *ctx)
*/
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
goto out;
@@ -403,11 +435,11 @@ glusterd_ac_friend_probe(glusterd_friend_sm_event_t *event, void *ctx)
ret = proc->fn(frame, this, dict);
if (ret)
- goto out;
+ goto unlock;
}
-
+unlock:
+ RCU_READ_UNLOCK;
out:
- rcu_read_unlock();
if (dict)
dict_unref(dict);
@@ -440,10 +472,12 @@ glusterd_ac_send_friend_remove_req(glusterd_friend_sm_event_t *event,
GF_ASSERT(conf);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
+ ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
@@ -471,23 +505,29 @@ glusterd_ac_send_friend_remove_req(glusterd_friend_sm_event_t *event,
glusterd_broadcast_friend_delete(ctx->hostname, NULL);
glusterd_destroy_probe_ctx(ctx);
}
- goto out;
+ goto unlock;
}
- if (!peerinfo->peer)
- goto out;
+ if (!peerinfo->peer) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADDRESS_GET_FAIL,
+ NULL);
+ goto unlock;
+ }
proc = &peerinfo->peer->proctable[GLUSTERD_FRIEND_REMOVE];
if (proc->fn) {
frame = create_frame(this, this->ctx->pool);
if (!frame) {
- goto out;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL,
+ NULL);
+ goto unlock;
}
frame->local = data;
ret = proc->fn(frame, this, event);
}
+unlock:
+ RCU_READ_UNLOCK;
out:
- rcu_read_unlock();
gf_msg_debug("glusterd", 0, "Returning with %d", ret);
@@ -501,13 +541,11 @@ static gf_boolean_t
glusterd_should_update_peer(glusterd_peerinfo_t *peerinfo,
glusterd_peerinfo_t *cur_peerinfo)
{
- gf_boolean_t is_valid = _gf_false;
-
if ((peerinfo == cur_peerinfo) ||
(peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED))
- is_valid = _gf_true;
+ return _gf_true;
- return is_valid;
+ return _gf_false;
}
static int
@@ -534,27 +572,33 @@ glusterd_ac_send_friend_update(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(priv);
- rcu_read_lock();
+ keylen = snprintf(key, sizeof(key), "op");
+ friends = dict_new();
+
+ RCU_READ_LOCK;
cur_peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!cur_peerinfo) {
+ RCU_READ_UNLOCK;
+ ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
- ret = -1;
goto out;
}
- ev_ctx.op = GD_FRIEND_UPDATE_ADD;
-
- friends = dict_new();
- if (!friends)
- goto out;
+ if (!friends) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto unlock;
+ }
- keylen = snprintf(key, sizeof(key), "op");
+ ev_ctx.op = GD_FRIEND_UPDATE_ADD;
ret = dict_set_int32n(friends, key, keylen, ev_ctx.op);
- if (ret)
- goto out;
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
+ goto unlock;
+ }
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
@@ -566,12 +610,15 @@ glusterd_ac_send_friend_update(glusterd_friend_sm_event_t *event, void *ctx)
snprintf(key, sizeof(key), "friend%d", count);
ret = gd_add_friend_to_dict(peerinfo, friends, key);
if (ret)
- goto out;
+ goto unlock;
}
ret = dict_set_int32n(friends, "count", SLEN("count"), count);
- if (ret)
- goto out;
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
+ goto unlock;
+ }
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
@@ -583,6 +630,7 @@ glusterd_ac_send_friend_update(glusterd_friend_sm_event_t *event, void *ctx)
ret = dict_set_static_ptr(friends, "peerinfo", peerinfo);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
goto out;
@@ -594,14 +642,14 @@ glusterd_ac_send_friend_update(glusterd_friend_sm_event_t *event, void *ctx)
}
}
- gf_msg_debug("glusterd", 0, "Returning with %d", ret);
-
+unlock:
+ RCU_READ_UNLOCK;
out:
- rcu_read_unlock();
if (friends)
dict_unref(friends);
+ gf_msg_debug("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -632,14 +680,18 @@ glusterd_ac_update_friend(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(priv);
- rcu_read_lock();
+ friends = dict_new();
+ keylen = snprintf(key, sizeof(key), "op");
+
+ RCU_READ_LOCK;
cur_peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!cur_peerinfo) {
+ RCU_READ_UNLOCK;
+ ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
- ret = -1;
goto out;
}
@@ -649,19 +701,21 @@ glusterd_ac_update_friend(glusterd_friend_sm_event_t *event, void *ctx)
*/
if (!cur_peerinfo->connected || !cur_peerinfo->peer) {
ret = 0;
- goto out;
+ goto unlock;
}
- ev_ctx.op = GD_FRIEND_UPDATE_ADD;
-
- friends = dict_new();
- if (!friends)
+ if (!friends) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
- keylen = snprintf(key, sizeof(key), "op");
+ ev_ctx.op = GD_FRIEND_UPDATE_ADD;
ret = dict_set_int32n(friends, key, keylen, ev_ctx.op);
- if (ret)
- goto out;
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
+ goto unlock;
+ }
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
@@ -673,15 +727,19 @@ glusterd_ac_update_friend(glusterd_friend_sm_event_t *event, void *ctx)
snprintf(key, sizeof(key), "friend%d", count);
ret = gd_add_friend_to_dict(peerinfo, friends, key);
if (ret)
- goto out;
+ goto unlock;
}
ret = dict_set_int32n(friends, "count", SLEN("count"), count);
- if (ret)
- goto out;
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
+ goto unlock;
+ }
ret = dict_set_static_ptr(friends, "peerinfo", cur_peerinfo);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
goto out;
@@ -693,8 +751,9 @@ glusterd_ac_update_friend(glusterd_friend_sm_event_t *event, void *ctx)
gf_msg_debug(this->name, 0, "Returning with %d", ret);
+unlock:
+ RCU_READ_UNLOCK;
out:
- rcu_read_unlock();
if (friends)
dict_unref(friends);
@@ -738,13 +797,13 @@ glusterd_peer_detach_cleanup(glusterd_conf_t *priv)
}
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+ svc = &(volinfo->shd.svc);
ret = svc->stop(svc, SIGTERM);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
"Failed "
- "to stop tierd daemon service");
+ "to stop shd daemon service");
}
}
@@ -775,7 +834,7 @@ glusterd_peer_detach_cleanup(glusterd_conf_t *priv)
}
/*Reconfigure all daemon services upon peer detach*/
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(NULL);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
"Failed to reconfigure all daemon services.");
@@ -801,13 +860,13 @@ glusterd_ac_handle_friend_remove_req(glusterd_friend_sm_event_t *event,
ret = glusterd_xfer_friend_remove_resp(ev_ctx->req, ev_ctx->hostname,
ev_ctx->port);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_REMOVE_FRIEND,
&new_event);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
@@ -816,13 +875,13 @@ glusterd_ac_handle_friend_remove_req(glusterd_friend_sm_event_t *event,
ret = glusterd_friend_sm_inject_event(new_event);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
new_event = NULL;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
glusterd_peer_detach_cleanup(priv);
out:
@@ -842,22 +901,22 @@ glusterd_ac_friend_remove(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(event);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
- rcu_read_unlock();
goto out;
}
ret = glusterd_friend_remove_cleanup_vols(peerinfo->uuid);
+ RCU_READ_UNLOCK;
if (ret)
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
"Volumes cleanup failed");
- rcu_read_unlock();
/* Exiting read critical section as glusterd_peerinfo_cleanup calls
* synchronize_rcu before freeing the peerinfo
*/
@@ -905,14 +964,14 @@ glusterd_ac_handle_friend_add_req(glusterd_friend_sm_event_t *event, void *ctx)
ev_ctx = ctx;
gf_uuid_copy(uuid, ev_ctx->uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
+ ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
- ret = -1;
- rcu_read_unlock();
goto out;
}
@@ -922,7 +981,7 @@ glusterd_ac_handle_friend_add_req(glusterd_friend_sm_event_t *event, void *ctx)
*/
gf_uuid_copy(peerinfo->uuid, ev_ctx->uuid);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
conf = this->private;
GF_ASSERT(conf);
@@ -1047,9 +1106,10 @@ glusterd_friend_sm_transition_state(uuid_t peerid, char *peername,
GF_ASSERT(state);
GF_ASSERT(peername);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(peerid, peername);
if (!peerinfo) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND, NULL);
goto out;
}
@@ -1061,7 +1121,7 @@ glusterd_friend_sm_transition_state(uuid_t peerid, char *peername,
ret = 0;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -1413,25 +1473,24 @@ glusterd_friend_sm()
cds_list_del_init(&event->list);
event_type = event->event;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_PEER_NOT_FOUND,
"Received"
" event %s with empty peer info",
glusterd_friend_sm_event_name_get(event_type));
GF_FREE(event);
- rcu_read_unlock();
continue;
}
+ old_state = peerinfo->state.state;
+ RCU_READ_UNLOCK;
gf_msg_debug("glusterd", 0, "Dequeued event of type: '%s'",
glusterd_friend_sm_event_name_get(event_type));
- old_state = peerinfo->state.state;
-
- rcu_read_unlock();
/* Giving up read-critical section here as we only need
* the current state to call the handler.
*
@@ -1489,10 +1548,10 @@ glusterd_friend_sm()
/* We need to obtain peerinfo reference once again as we
* had exited the read critical section above.
*/
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
/* A peer can only be deleted as an effect of
* this state machine, and two such state
* machines can never run at the same time.
@@ -1514,11 +1573,11 @@ glusterd_friend_sm()
}
ret = glusterd_store_peerinfo(peerinfo);
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
"Failed to store peerinfo");
}
- rcu_read_unlock();
glusterd_destroy_friend_event_context(event);
GF_FREE(event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.h b/xlators/mgmt/glusterd/src/glusterd-sm.h
index 051e83c675d..11cbd85b3e3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.h
@@ -11,17 +11,14 @@
#define _GLUSTERD_SM_H_
#include <pthread.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
#include "rpc-clnt.h"
-#include "glusterfs.h"
-#include "xlator.h"
-#include "logging.h"
-#include "call-stub.h"
-#include "byte-order.h"
-//#include "glusterd.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/byte-order.h>
#include "rpcsvc.h"
-#include "store.h"
+#include <glusterfs/store.h>
#include "glusterd-rcu.h"
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
index 7f5fa5f0240..d75f249b29e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
@@ -8,8 +8,8 @@
cases as published by the Free Software Foundation.
*/
-#include "globals.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
@@ -20,14 +20,15 @@
#include "glusterd-snapd-svc.h"
#include "glusterd-snapd-svc-helper.h"
#include "glusterd-snapshot-utils.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
char *snapd_svc_name = "snapd";
static void
glusterd_svc_build_snapd_logdir(char *logdir, char *volname, size_t len)
{
- snprintf(logdir, len, "%s/snaps/%s", DEFAULT_LOG_FILE_DIRECTORY, volname);
+ glusterd_conf_t *priv = THIS->private;
+ snprintf(logdir, len, "%s/snaps/%s", priv->logdir, volname);
}
static void
@@ -86,8 +87,10 @@ glusterd_snapdsvc_init(void *data)
svc = &(volinfo->snapd.svc);
ret = snprintf(svc->name, sizeof(svc->name), "%s", snapd_svc_name);
- if (ret < 0)
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
+ }
notify = glusterd_snapdsvc_rpc_notify;
@@ -114,6 +117,7 @@ glusterd_snapdsvc_init(void *data)
glusterd_svc_build_snapd_logfile(logfile, logdir, sizeof(logfile));
len = snprintf(volfileid, sizeof(volfileid), "snapd/%s", volinfo->volname);
if ((len < 0) || (len >= sizeof(volfileid))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -300,16 +304,22 @@ glusterd_snapdsvc_start(glusterd_svc_t *svc, int flags)
}
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-snapd.log",
svc->proc.logdir);
if ((len < 0) || (len >= PATH_MAX)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
+
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
@@ -366,6 +376,7 @@ int
glusterd_snapdsvc_restart()
{
glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
int ret = 0;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
@@ -376,7 +387,7 @@ glusterd_snapdsvc_restart()
conf = this->private;
GF_ASSERT(conf);
- cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
{
/* Start per volume snapd svc */
if (volinfo->status == GLUSTERD_STATUS_STARTED) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.h b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.h
index c95e4cc7661..e15dbf54315 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.h
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.h
@@ -17,8 +17,8 @@ typedef struct glusterd_snapdsvc_ glusterd_snapdsvc_t;
struct glusterd_snapdsvc_ {
glusterd_svc_t svc;
- int port;
gf_store_handle_t *handle;
+ int port;
};
void
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index ec0d4c99b11..995268b796d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -16,8 +16,8 @@
#endif
#include <dlfcn.h>
-#include "dict.h"
-#include "syscall.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/syscall.h>
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-messages.h"
@@ -81,43 +81,43 @@ glusterd_cleanup_snaps_for_volume(glusterd_volinfo_t *volinfo)
cds_list_for_each_entry_safe(snap_vol, dummy_snap_vol,
&volinfo->snap_volumes, snapvol_list)
{
- ret = glusterd_store_delete_volume(snap_vol);
+ snap = snap_vol->snapshot;
+ ret = glusterd_store_delete_snap(snap);
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_DELETE_FAIL,
"Failed to remove "
- "volume %s from store",
- snap_vol->volname);
+ "snap %s from store",
+ snap->snapname);
op_ret = ret;
continue;
}
- ret = glusterd_volinfo_delete(snap_vol);
+ ret = glusterd_snapobject_delete(snap);
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_DELETE_FAIL,
- "Failed to remove "
- "volinfo %s ",
- snap_vol->volname);
+ "Failed to delete "
+ "snap object %s",
+ snap->snapname);
op_ret = ret;
continue;
}
- snap = snap_vol->snapshot;
- ret = glusterd_store_delete_snap(snap);
+ ret = glusterd_store_delete_volume(snap_vol);
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_DELETE_FAIL,
"Failed to remove "
- "snap %s from store",
- snap->snapname);
+ "volume %s from store",
+ snap_vol->volname);
op_ret = ret;
continue;
}
- ret = glusterd_snapobject_delete(snap);
+ ret = glusterd_volinfo_delete(snap_vol);
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_DELETE_FAIL,
- "Failed to delete "
- "snap object %s",
- snap->snapname);
+ "Failed to remove "
+ "volinfo %s ",
+ snap_vol->volname);
op_ret = ret;
continue;
}
@@ -200,7 +200,7 @@ glusterd_snap_volinfo_restore(dict_t *dict, dict_t *rsp_dict,
int32_t volcount)
{
char *value = NULL;
- char key[PATH_MAX] = "";
+ char key[64] = "";
int32_t brick_count = -1;
int32_t ret = -1;
xlator_t *this = NULL;
@@ -282,12 +282,10 @@ glusterd_snap_volinfo_restore(dict_t *dict, dict_t *rsp_dict,
new_volinfo->volume_id,
sizeof(new_volinfo->volume_id), XATTR_REPLACE);
if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SETXATTR_FAIL,
- "Failed to "
- "set extended attribute %s on %s. "
- "Reason: %s, snap: %s",
- GF_XATTR_VOL_ID_KEY, new_brickinfo->path,
- strerror(errno), new_volinfo->volname);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SET_XATTR_FAIL,
+ "Attribute=%s, Path=%s, Reason=%s, Snap=%s",
+ GF_XATTR_VOL_ID_KEY, new_brickinfo->path,
+ strerror(errno), new_volinfo->volname, NULL);
goto out;
}
}
@@ -1518,6 +1516,7 @@ glusterd_import_friend_snap(dict_t *peer_data, int32_t snap_count,
int32_t volcount = -1;
int32_t i = -1;
xlator_t *this = NULL;
+ int64_t time_stamp;
this = THIS;
GF_ASSERT(this);
@@ -1562,12 +1561,13 @@ glusterd_import_friend_snap(dict_t *peer_data, int32_t snap_count,
}
snprintf(buf, sizeof(buf), "%s.time_stamp", prefix);
- ret = dict_get_int64(peer_data, buf, &snap->time_stamp);
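+    /* time_t may be narrower than int64_t on some platforms; fetch into
+     * a 64-bit temporary and cast, instead of aliasing snap->time_stamp. */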
+ ret = dict_get_int64(peer_data, buf, &time_stamp);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Unable to get time_stamp for snap %s", peer_snap_name);
goto out;
}
+ snap->time_stamp = (time_t)time_stamp;
snprintf(buf, sizeof(buf), "%s.snap_restored", prefix);
ret = dict_get_int8(peer_data, buf, (int8_t *)&snap->snap_restored);
@@ -1959,9 +1959,7 @@ glusterd_update_snaps_synctask(void *opaque)
synclock_lock(&conf->big_lock);
while (conf->restart_bricks) {
- synclock_unlock(&conf->big_lock);
- sleep(2);
- synclock_lock(&conf->big_lock);
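+        /* Park this synctask on cond_restart_bricks and atomically drop
+         * big_lock; this replaces the old unlock/sleep(2)/relock polling. */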
+ synccond_wait(&conf->cond_restart_bricks, &conf->big_lock);
}
conf->restart_bricks = _gf_true;
@@ -2039,8 +2037,9 @@ glusterd_update_snaps_synctask(void *opaque)
"Failed to remove snap %s", snap->snapname);
goto out;
}
- if (dict)
- dict_unref(dict);
+
+ dict_unref(dict);
+ dict = NULL;
}
snprintf(buf, sizeof(buf), "%s.accept_peer_data", prefix);
ret = dict_get_int32(peer_data, buf, &val);
@@ -2068,6 +2067,7 @@ out:
if (dict)
dict_unref(dict);
conf->restart_bricks = _gf_false;
+ synccond_broadcast(&conf->cond_restart_bricks);
return ret;
}
@@ -2097,6 +2097,9 @@ glusterd_compare_friend_snapshots(dict_t *peer_data, char *peername,
goto out;
}
+ if (!snap_count)
+ goto out;
+
for (i = 1; i <= snap_count; i++) {
/* Compare one snapshot from peer_data at a time */
ret = glusterd_compare_snap(peer_data, i, peername, peerid);
@@ -2144,18 +2147,27 @@ glusterd_add_snapd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
snprintf(base_key, sizeof(base_key), "brick%d", count);
snprintf(key, sizeof(key), "%s.hostname", base_key);
ret = dict_set_str(dict, key, "Snapshot Daemon");
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
goto out;
+ }
snprintf(key, sizeof(key), "%s.path", base_key);
ret = dict_set_dynstr(dict, key, gf_strdup(uuid_utoa(MY_UUID)));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
goto out;
+ }
snprintf(key, sizeof(key), "%s.port", base_key);
ret = dict_set_int32(dict, key, volinfo->snapd.port);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
goto out;
+ }
glusterd_svc_build_snapd_pidfile(volinfo, pidfile, sizeof(pidfile));
@@ -2165,8 +2177,11 @@ glusterd_add_snapd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
snprintf(key, sizeof(key), "%s.pid", base_key);
ret = dict_set_int32(dict, key, pid);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
goto out;
+ }
snprintf(key, sizeof(key), "%s.status", base_key);
ret = dict_set_int32(dict, key, brick_online);
@@ -2667,8 +2682,10 @@ glusterd_missed_snapinfo_new(glusterd_missed_snap_info **missed_snapinfo)
new_missed_snapinfo = GF_CALLOC(1, sizeof(*new_missed_snapinfo),
gf_gld_mt_missed_snapinfo_t);
- if (!new_missed_snapinfo)
+ if (!new_missed_snapinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
CDS_INIT_LIST_HEAD(&new_missed_snapinfo->missed_snaps);
CDS_INIT_LIST_HEAD(&new_missed_snapinfo->snap_ops);
@@ -2696,8 +2713,10 @@ glusterd_missed_snap_op_new(glusterd_snap_op_t **snap_op)
new_snap_op = GF_CALLOC(1, sizeof(*new_snap_op),
gf_gld_mt_missed_snapinfo_t);
- if (!new_snap_op)
+ if (!new_snap_op) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
new_snap_op->brick_num = -1;
new_snap_op->op = -1;
@@ -2849,19 +2868,21 @@ out:
return quorum_met;
}
-int32_t
+static int32_t
glusterd_volume_quorum_check(glusterd_volinfo_t *volinfo, int64_t index,
- dict_t *dict, char *key_prefix, int8_t snap_force,
- int quorum_count, char *quorum_type,
- char **op_errstr, uint32_t *op_errno)
+ dict_t *dict, const char *key_prefix,
+ int8_t snap_force, int quorum_count,
+ char *quorum_type, char **op_errstr,
+ uint32_t *op_errno)
{
int ret = 0;
xlator_t *this = NULL;
int64_t i = 0;
int64_t j = 0;
- char key[1024] = {
+ char key[128] = {
0,
- };
+ }; /* key_prefix is passed from above, but is really quite small */
+ int keylen;
int down_count = 0;
gf_boolean_t first_brick_on = _gf_true;
glusterd_conf_t *priv = NULL;
@@ -2890,9 +2911,10 @@ glusterd_volume_quorum_check(glusterd_volinfo_t *volinfo, int64_t index,
with replica count 2, quorum is not met if even
one of its subvolumes is down
*/
- snprintf(key, sizeof(key), "%s%" PRId64 ".brick%" PRId64 ".status",
- key_prefix, index, i);
- ret = dict_get_int32(dict, key, &brick_online);
+ keylen = snprintf(key, sizeof(key),
+ "%s%" PRId64 ".brick%" PRId64 ".status",
+ key_prefix, index, i);
+ ret = dict_get_int32n(dict, key, keylen, &brick_online);
if (ret || !brick_online) {
ret = 1;
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -2915,10 +2937,10 @@ glusterd_volume_quorum_check(glusterd_volinfo_t *volinfo, int64_t index,
ret = 1;
quorum_met = _gf_false;
for (i = 0; i < volinfo->dist_leaf_count; i++) {
- snprintf(key, sizeof(key),
- "%s%" PRId64 ".brick%" PRId64 ".status", key_prefix,
- index, (j * volinfo->dist_leaf_count) + i);
- ret = dict_get_int32(dict, key, &brick_online);
+ keylen = snprintf(
+ key, sizeof(key), "%s%" PRId64 ".brick%" PRId64 ".status",
+ key_prefix, index, (j * volinfo->dist_leaf_count) + i);
+ ret = dict_get_int32n(dict, key, keylen, &brick_online);
if (ret || !brick_online) {
if (i == 0)
first_brick_on = _gf_false;
@@ -2949,9 +2971,9 @@ out:
return ret;
}
-int32_t
+static int32_t
glusterd_snap_common_quorum_calculate(glusterd_volinfo_t *volinfo, dict_t *dict,
- int64_t index, char *key_prefix,
+ int64_t index, const char *key_prefix,
int8_t snap_force,
gf_boolean_t snap_volume,
char **op_errstr, uint32_t *op_errno)
@@ -3000,9 +3022,10 @@ glusterd_snap_common_quorum_calculate(glusterd_volinfo_t *volinfo, dict_t *dict,
quorum_count = volinfo->brick_count;
}
- ret = dict_get_str(volinfo->dict, "cluster.quorum-type", &quorum_type);
+ ret = dict_get_str_sizen(volinfo->dict, "cluster.quorum-type",
+ &quorum_type);
if (!ret && !strcmp(quorum_type, "fixed")) {
- ret = dict_get_int32(volinfo->dict, "cluster.quorum-count", &tmp);
+ ret = dict_get_int32_sizen(volinfo->dict, "cluster.quorum-count", &tmp);
/* if quorum-type option is not found in the
dict assume auto quorum type. i.e n/2 + 1.
The same assumption is made when quorum-count
@@ -3044,12 +3067,12 @@ out:
return ret;
}
-int32_t
+static int32_t
glusterd_snap_quorum_check_for_clone(dict_t *dict, gf_boolean_t snap_volume,
char **op_errstr, uint32_t *op_errno)
{
const char err_str[] = "glusterds are not in quorum";
- char key_prefix[PATH_MAX] = {
+ char key_prefix[16] = {
0,
};
char *snapname = NULL;
@@ -3058,9 +3081,6 @@ glusterd_snap_quorum_check_for_clone(dict_t *dict, gf_boolean_t snap_volume,
glusterd_volinfo_t *tmp_volinfo = NULL;
char *volname = NULL;
int64_t volcount = 0;
- char key[PATH_MAX] = {
- 0,
- };
int64_t i = 0;
int32_t ret = -1;
xlator_t *this = NULL;
@@ -3075,7 +3095,7 @@ glusterd_snap_quorum_check_for_clone(dict_t *dict, gf_boolean_t snap_volume,
}
if (snap_volume) {
- ret = dict_get_str(dict, "snapname", &snapname);
+ ret = dict_get_str_sizen(dict, "snapname", &snapname);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"failed to "
@@ -3117,9 +3137,7 @@ glusterd_snap_quorum_check_for_clone(dict_t *dict, gf_boolean_t snap_volume,
}
for (i = 1; i <= volcount; i++) {
- snprintf(key, sizeof(key), "%s%" PRId64,
- snap_volume ? "snap-volname" : "volname", i);
- ret = dict_get_str(dict, "clonename", &volname);
+ ret = dict_get_str_sizen(dict, "clonename", &volname);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"failed to "
@@ -3166,14 +3184,14 @@ out:
return ret;
}
-int32_t
+static int32_t
glusterd_snap_quorum_check_for_create(dict_t *dict, gf_boolean_t snap_volume,
char **op_errstr, uint32_t *op_errno)
{
int8_t snap_force = 0;
int32_t force = 0;
const char err_str[] = "glusterds are not in quorum";
- char key_prefix[PATH_MAX] = {
+ char key_prefix[16] = {
0,
};
char *snapname = NULL;
@@ -3181,7 +3199,7 @@ glusterd_snap_quorum_check_for_create(dict_t *dict, gf_boolean_t snap_volume,
glusterd_volinfo_t *volinfo = NULL;
char *volname = NULL;
int64_t volcount = 0;
- char key[PATH_MAX] = {
+ char key[32] = {
0,
};
int64_t i = 0;
@@ -3308,7 +3326,7 @@ glusterd_snap_quorum_check(dict_t *dict, gf_boolean_t snap_volume,
goto out;
}
- ret = dict_get_int32(dict, "type", &snap_command);
+ ret = dict_get_int32_sizen(dict, "type", &snap_command);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"unable to get the type of "
@@ -3362,6 +3380,25 @@ out:
return ret;
}
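+/* Scan /etc/mtab with getmntent(3) and report whether any mounted
+ * filesystem has `path` as its mount point. */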
+int
+glusterd_is_path_mounted(const char *path)
+{
+ FILE *mtab = NULL;
+ struct mntent *part = NULL;
+ int is_mounted = 0;
+
+ if ((mtab = setmntent("/etc/mtab", "r")) != NULL) {
+ while ((part = getmntent(mtab)) != NULL) {
+ if ((part->mnt_fsname != NULL) &&
+ (strcmp(part->mnt_dir, path) == 0)) {
+ is_mounted = 1;
+ break;
+ }
+ }
+ endmntent(mtab);
+ }
+ return is_mounted;
+}
/* This function will do unmount for snaps.
*/
int32_t
@@ -3386,14 +3423,11 @@ glusterd_snap_unmount(xlator_t *this, glusterd_volinfo_t *volinfo)
continue;
}
- /* Fetch the brick mount path from the brickinfo->path */
- ret = glusterd_get_brick_root(brickinfo->path, &brick_mount_path);
+ ret = glusterd_find_brick_mount_path(brickinfo->path,
+ &brick_mount_path);
if (ret) {
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BRICK_PATH_UNMOUNTED,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRK_MNTPATH_GET_FAIL,
"Failed to find brick_mount_path for %s", brickinfo->path);
- /* There is chance that brick path is already
- * unmounted. */
- ret = 0;
goto out;
}
/* unmount cannot be done when the brick process is still in
@@ -3438,6 +3472,10 @@ glusterd_umount(const char *path)
GF_ASSERT(this);
GF_ASSERT(path);
+ if (!glusterd_is_path_mounted(path)) {
+ return 0;
+ }
+
runinit(&runner);
snprintf(msg, sizeof(msg), "umount path %s", path);
runner_add_args(&runner, _PATH_UMOUNT, "-f", path, NULL);
@@ -3513,9 +3551,9 @@ glusterd_copy_file(const char *source, const char *destination)
ret = sys_write(dest_fd, buffer, read_len);
if (ret != read_len) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED,
- "Error writing in "
- "file %s",
- destination);
+ "Writing in "
+ "file %s failed with error %s",
+ destination, strerror(errno));
goto out;
}
} while (ret > 0);
@@ -3570,13 +3608,17 @@ glusterd_copy_folder(const char *source, const char *destination)
continue;
ret = snprintf(src_path, sizeof(src_path), "%s/%s", source,
entry->d_name);
- if (ret < 0)
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
+ }
ret = snprintf(dest_path, sizeof(dest_path), "%s/%s", destination,
entry->d_name);
- if (ret < 0)
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
+ }
ret = glusterd_copy_file(src_path, dest_path);
if (ret) {
@@ -3732,8 +3774,10 @@ glusterd_copy_quota_files(glusterd_volinfo_t *src_vol,
GLUSTERD_GET_VOLUME_DIR(dest_dir, dest_vol, priv);
ret = snprintf(src_path, sizeof(src_path), "%s/quota.conf", src_dir);
- if (ret < 0)
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
+ }
/* quota.conf is not present if quota is not enabled, Hence ignoring
* the absence of this file
@@ -3746,8 +3790,10 @@ glusterd_copy_quota_files(glusterd_volinfo_t *src_vol,
}
ret = snprintf(dest_path, sizeof(dest_path), "%s/quota.conf", dest_dir);
- if (ret < 0)
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
+ }
ret = glusterd_copy_file(src_path, dest_path);
if (ret) {
@@ -3771,8 +3817,10 @@ glusterd_copy_quota_files(glusterd_volinfo_t *src_vol,
}
ret = snprintf(dest_path, sizeof(dest_path), "%s/quota.cksum", dest_dir);
- if (ret < 0)
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
+ }
ret = glusterd_copy_file(src_path, dest_path);
if (ret) {
@@ -3786,6 +3834,148 @@ out:
return ret;
}
+/* *
+ * There are two possibilities here: the destination is either a snapshot
+ * or a clone. For a snapshot, the nfs_ganesha export file is copied to the
+ * snap directory. For a clone, a new export file is created for the clone
+ * in the GANESHA_EXPORT_DIRECTORY, replacing occurrences of the volname
+ * with the clonename.
+ */
+int
+glusterd_copy_nfs_ganesha_file(glusterd_volinfo_t *src_vol,
+ glusterd_volinfo_t *dest_vol)
+{
+ int32_t ret = -1;
+ char snap_dir[PATH_MAX] = {
+ 0,
+ };
+ char src_path[PATH_MAX] = {
+ 0,
+ };
+ char dest_path[PATH_MAX] = {
+ 0,
+ };
+ char buffer[BUFSIZ] = {
+ 0,
+ };
+ char *find_ptr = NULL;
+ char *buff_ptr = NULL;
+ char *tmp_ptr = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ FILE *src = NULL;
+ FILE *dest = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("snapshot", this, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, src_vol, out);
+ GF_VALIDATE_OR_GOTO(this->name, dest_vol, out);
+
+ if (glusterd_check_ganesha_export(src_vol) == _gf_false) {
+ gf_msg_debug(this->name, 0,
+ "%s is not exported via "
+ "NFS-Ganesha. Skipping copy of export conf.",
+ src_vol->volname);
+ ret = 0;
+ goto out;
+ }
+
+ if (src_vol->is_snap_volume) {
+ GLUSTERD_GET_SNAP_DIR(snap_dir, src_vol->snapshot, priv);
+ ret = snprintf(src_path, PATH_MAX, "%s/export.%s.conf", snap_dir,
+ src_vol->snapshot->snapname);
+ } else {
+ ret = snprintf(src_path, PATH_MAX, "%s/export.%s.conf",
+ GANESHA_EXPORT_DIRECTORY, src_vol->volname);
+ }
+ if (ret < 0 || ret >= PATH_MAX)
+ goto out;
+
+ ret = sys_lstat(src_path, &stbuf);
+ if (ret) {
+ /*
+ * This code path is hit only when src_vol is being exported via
+ * NFS-Ganesha; if the conf file is not available, we fail the
+ * snapshot operation.
+ */
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "Stat on %s failed with %s", src_path, strerror(errno));
+ goto out;
+ }
+
+ if (dest_vol->is_snap_volume) {
+ memset(snap_dir, 0, PATH_MAX);
+ GLUSTERD_GET_SNAP_DIR(snap_dir, dest_vol->snapshot, priv);
+ ret = snprintf(dest_path, sizeof(dest_path), "%s/export.%s.conf",
+ snap_dir, dest_vol->snapshot->snapname);
+ if (ret < 0)
+ goto out;
+
+ ret = glusterd_copy_file(src_path, dest_path);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Failed to copy %s in %s", src_path, dest_path);
+ goto out;
+ }
+
+ } else {
+ ret = snprintf(dest_path, sizeof(dest_path), "%s/export.%s.conf",
+ GANESHA_EXPORT_DIRECTORY, dest_vol->volname);
+ if (ret < 0)
+ goto out;
+
+ src = fopen(src_path, "r");
+ dest = fopen(dest_path, "w");
+
+ if (!src || !dest) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED,
+ "Failed to open %s", dest ? src_path : dest_path);
+ ret = -1;
+ goto out;
+ }
+
+ /* *
+ * If the source volume is a snapshot, the export conf file
+ * contains the original volname.
+ */
+ if (src_vol->is_snap_volume)
+ find_ptr = gf_strdup(src_vol->parent_volname);
+ else
+ find_ptr = gf_strdup(src_vol->volname);
+
+ if (!find_ptr)
+ goto out;
+
+ /* Replace volname with clonename: for each input line, copy the bytes
+ * before every match, emit the clonename, then emit the remainder. */
+ while (fgets(buffer, BUFSIZ, src)) {
+ buff_ptr = buffer;
+ while ((tmp_ptr = strstr(buff_ptr, find_ptr))) {
+ while (buff_ptr < tmp_ptr)
+ fputc((int)*buff_ptr++, dest);
+ fputs(dest_vol->volname, dest);
+ buff_ptr += strlen(find_ptr);
+ }
+ fputs(buff_ptr, dest);
+ memset(buffer, 0, BUFSIZ);
+ }
+ }
+out:
+ if (src)
+ fclose(src);
+ if (dest)
+ fclose(dest);
+ if (find_ptr)
+ GF_FREE(find_ptr);
+
+ return ret;
+}
+
int32_t
glusterd_restore_geo_rep_files(glusterd_volinfo_t *snap_vol)
{
@@ -3796,7 +3986,7 @@ glusterd_restore_geo_rep_files(glusterd_volinfo_t *snap_vol)
char *origin_volname = NULL;
glusterd_volinfo_t *origin_vol = NULL;
int i = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
char session[PATH_MAX] = "";
char slave[PATH_MAX] = "";
char snapgeo_dir[PATH_MAX] = "";
@@ -3874,6 +4064,64 @@ out:
return ret;
}
+int
+glusterd_restore_nfs_ganesha_file(glusterd_volinfo_t *src_vol,
+ glusterd_snap_t *snap)
+{
+ int32_t ret = -1;
+ char snap_dir[PATH_MAX] = "";
+ char src_path[PATH_MAX] = "";
+ char dest_path[PATH_MAX] = "";
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("snapshot", this, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, src_vol, out);
+ GF_VALIDATE_OR_GOTO(this->name, snap, out);
+
+ GLUSTERD_GET_SNAP_DIR(snap_dir, snap, priv);
+
+ ret = snprintf(src_path, sizeof(src_path), "%s/export.%s.conf", snap_dir,
+ snap->snapname);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
+ goto out;
+ }
+
+ ret = sys_lstat(src_path, &stbuf);
+ if (ret) {
+ if (errno == ENOENT) {
+ ret = 0;
+ gf_msg_debug(this->name, 0, "%s not found", src_path);
+ } else
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+ "Stat on %s failed with %s", src_path, strerror(errno));
+ goto out;
+ }
+
+ ret = snprintf(dest_path, sizeof(dest_path), "%s/export.%s.conf",
+ GANESHA_EXPORT_DIRECTORY, src_vol->volname);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
+ goto out;
+ }
+
+ ret = glusterd_copy_file(src_path, dest_path);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Failed to copy %s in %s", src_path, dest_path);
+
+out:
+ return ret;
+}
+
/* Snapd functions */
int
glusterd_is_snapd_enabled(glusterd_volinfo_t *volinfo)
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
index 19fedecee8d..5762999bba7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
@@ -10,6 +10,16 @@
#ifndef _GLUSTERD_SNAP_UTILS_H
#define _GLUSTERD_SNAP_UTILS_H
+#define GLUSTERD_GET_SNAP_DIR(path, snap, priv) \
+ do { \
+ int32_t _snap_dir_len; \
+ _snap_dir_len = snprintf(path, PATH_MAX, "%s/snaps/%s", priv->workdir, \
+ snap->snapname); \
+ if ((_snap_dir_len < 0) || (_snap_dir_len >= PATH_MAX)) { \
+ path[0] = 0; \
+ } \
+ } while (0)
+
int32_t
glusterd_snap_volinfo_find(char *volname, glusterd_snap_t *snap,
glusterd_volinfo_t **volinfo);
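The GLUSTERD_GET_SNAP_DIR macro added above signals overflow by leaving path as an empty string, so callers can check path[0] instead of a return value. A standalone sketch, with stub structs standing in for the real glusterd_conf_t and glusterd_snap_t (only the fields the macro reads are modeled):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { const char *workdir; } conf_stub_t;  /* glusterd_conf_t stand-in */
typedef struct { const char *snapname; } snap_stub_t; /* glusterd_snap_t stand-in */

#define GLUSTERD_GET_SNAP_DIR(path, snap, priv)                                \
    do {                                                                       \
        int32_t _snap_dir_len;                                                 \
        _snap_dir_len = snprintf(path, PATH_MAX, "%s/snaps/%s", priv->workdir, \
                                 snap->snapname);                              \
        if ((_snap_dir_len < 0) || (_snap_dir_len >= PATH_MAX)) {              \
            path[0] = 0;                                                       \
        }                                                                      \
    } while (0)

int main(void)
{
    conf_stub_t conf = { "/var/lib/glusterd" };
    snap_stub_t snapobj = { "snap1" };
    conf_stub_t *priv = &conf;
    snap_stub_t *snap = &snapobj;
    char snap_dir[PATH_MAX] = "";

    GLUSTERD_GET_SNAP_DIR(snap_dir, snap, priv);
    /* an empty string means the name did not fit in PATH_MAX */
    printf("%s\n", snap_dir[0] ? snap_dir : "(overflow)");
    return 0;
}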
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 943352f19c8..aeaa8d15214 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -35,18 +35,18 @@
#include <regex.h>
-#include "compat.h"
+#include <glusterfs/compat.h>
#include "protocol-common.h"
-#include "xlator.h"
-#include "logging.h"
-#include "timer.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/timer.h>
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
-#include "run.h"
+#include <glusterfs/run.h>
#include "glusterd-volgen.h"
#include "glusterd-mgmt.h"
#include "glusterd-syncop.h"
@@ -55,12 +55,27 @@
#include "glusterfs3.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
-#include "lvm-defaults.h"
-#include "events.h"
+#include <glusterfs/lvm-defaults.h>
+#include <glusterfs/events.h>
+
+#define GLUSTERD_GET_UUID_NOHYPHEN(ret_string, uuid) \
+ do { \
+ char *snap_volname_ptr = ret_string; \
+ char tmp_uuid[64]; \
+ char *snap_volid_ptr = uuid_utoa_r(uuid, tmp_uuid); \
+ while (*snap_volid_ptr) { \
+ if (*snap_volid_ptr == '-') { \
+ snap_volid_ptr++; \
+ } else { \
+ (*snap_volname_ptr++) = (*snap_volid_ptr++); \
+ } \
+ } \
+ *snap_volname_ptr = '\0'; \
+ } while (0)
char snap_mount_dir[VALID_GLUSTERD_PATHMAX];
struct snap_create_args_ {
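GLUSTERD_GET_UUID_NOHYPHEN defined above copies a canonical UUID while dropping the hyphens, producing the 32-character form used in snap volume names. A self-contained sketch of the same loop, with a fixed UUID string standing in for the uuid_utoa_r() output:

#include <stdio.h>

int main(void)
{
    char out[64];
    char *dst = out;
    const char *src = "6a9c3d2e-1f40-4b8a-9c77-0d2b5e6f1a23";

    while (*src) {
        if (*src == '-')
            src++;           /* drop the separator */
        else
            *dst++ = *src++; /* keep the hex digit */
    }
    *dst = '\0';

    printf("%s\n", out); /* 32 hex characters, no hyphens */
    return 0;
}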
@@ -186,7 +201,7 @@ glusterd_find_missed_snap(dict_t *rsp_dict, glusterd_volinfo_t *vol,
continue;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, peers, uuid_list)
{
if (gf_uuid_compare(peerinfo->uuid, brickinfo->uuid)) {
@@ -202,18 +217,18 @@ glusterd_find_missed_snap(dict_t *rsp_dict, glusterd_volinfo_t *vol,
ret = glusterd_add_missed_snaps_to_dict(
rsp_dict, vol, brickinfo, brick_count + 1, op);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0,
GD_MSG_MISSED_SNAP_CREATE_FAIL,
"Failed to add missed snapshot "
"info for %s:%s in the "
"rsp_dict",
brickinfo->hostname, brickinfo->path);
- rcu_read_unlock();
goto out;
}
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
brick_count++;
}
@@ -499,6 +514,7 @@ glusterd_copy_geo_rep_session_files(char *session, glusterd_volinfo_t *snap_vol)
ret = snprintf(georep_session_dir, sizeof(georep_session_dir), "%s/%s/%s",
priv->workdir, GEOREP, session);
if (ret < 0) { /* Negative value is an error */
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
goto out;
}
@@ -506,10 +522,11 @@ glusterd_copy_geo_rep_session_files(char *session, glusterd_volinfo_t *snap_vol)
priv->workdir, GLUSTERD_VOL_SNAP_DIR_PREFIX,
snap_vol->snapshot->snapname, GEOREP, session);
if (ret < 0) { /* Negative value is an error */
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
goto out;
}
- ret = mkdir_p(snap_session_dir, 0777, _gf_true);
+ ret = mkdir_p(snap_session_dir, 0755, _gf_true);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
"Creating directory %s failed", snap_session_dir);
@@ -553,12 +570,14 @@ glusterd_copy_geo_rep_session_files(char *session, glusterd_volinfo_t *snap_vol)
ret = snprintf(src_path, sizeof(src_path), "%s/%s", georep_session_dir,
files[i]->d_name);
if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
goto out;
}
ret = snprintf(dest_path, sizeof(dest_path), "%s/%s", snap_session_dir,
files[i]->d_name);
if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
goto out;
}
@@ -617,17 +636,19 @@ glusterd_snapshot_backup_vol(glusterd_volinfo_t *volinfo)
"%s/" GLUSTERD_TRASH "/vols-%s.deleted", priv->workdir,
volinfo->volname);
if ((len < 0) || (len >= sizeof(delete_path))) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
goto out;
}
len = snprintf(trashdir, sizeof(trashdir), "%s/" GLUSTERD_TRASH,
priv->workdir);
- if ((len < 0) || (len >= sizeof(delete_path))) {
+ if ((len < 0) || (len >= sizeof(trashdir))) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
goto out;
}
/* Create trash folder if it is not there */
- ret = sys_mkdir(trashdir, 0777);
+ ret = sys_mkdir(trashdir, 0755);
if (ret && errno != EEXIST) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
"Failed to create trash directory, reason : %s",
@@ -648,7 +669,7 @@ glusterd_snapshot_backup_vol(glusterd_volinfo_t *volinfo)
/* Re-create an empty origin volume folder so that restore can
* happen. */
- ret = sys_mkdir(pathname, 0777);
+ ret = sys_mkdir(pathname, 0755);
if (ret && errno != EEXIST) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
"Failed to create origin "
@@ -689,14 +710,14 @@ out:
return op_ret;
}
-int32_t
+static int32_t
glusterd_copy_geo_rep_files(glusterd_volinfo_t *origin_vol,
glusterd_volinfo_t *snap_vol, dict_t *rsp_dict)
{
int32_t ret = -1;
int i = 0;
xlator_t *this = NULL;
- char key[PATH_MAX] = "";
+ char key[32] = "";
char session[PATH_MAX] = "";
char slave[PATH_MAX] = "";
char snapgeo_dir[PATH_MAX] = "";
@@ -715,13 +736,14 @@ glusterd_copy_geo_rep_files(glusterd_volinfo_t *origin_vol,
* is slave volume.
*/
if (!origin_vol->gsync_slaves) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_SLAVE, NULL);
ret = 0;
goto out;
}
GLUSTERD_GET_SNAP_GEO_REP_DIR(snapgeo_dir, snap_vol->snapshot, priv);
- ret = sys_mkdir(snapgeo_dir, 0777);
+ ret = sys_mkdir(snapgeo_dir, 0755);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
"Creating directory %s failed", snapgeo_dir);
@@ -1403,6 +1425,8 @@ glusterd_handle_snapshot_config(rpcsvc_request_t *req, glusterd_op_t op,
&config_command);
if (ret) {
snprintf(err_str, len, "Failed to get config-command type");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=config-command", NULL);
goto out;
}
@@ -1905,133 +1929,6 @@ out:
}
int
-glusterd_snapshot_pause_tier(xlator_t *this, glusterd_volinfo_t *volinfo)
-{
- int ret = -1;
- dict_t *dict = NULL;
- char *op_errstr = NULL;
-
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
- GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- ret = 0;
- goto out;
- }
-
- dict = dict_new();
- if (!dict) {
- goto out;
- }
-
- ret = dict_set_int32n(dict, "rebalance-command", SLEN("rebalance-command"),
- GF_DEFRAG_CMD_PAUSE_TIER);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to set rebalance-command");
- goto out;
- }
-
- ret = dict_set_strn(dict, "volname", SLEN("volname"), volinfo->volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to set volname");
- goto out;
- }
-
- ret = gd_brick_op_phase(GD_OP_DEFRAG_BRICK_VOLUME, NULL, dict, &op_errstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_PAUSE_TIER_FAIL,
- "Failed to pause tier. Errstr=%s", op_errstr);
- goto out;
- }
-
-out:
- if (dict)
- dict_unref(dict);
-
- return ret;
-}
-
-int
-glusterd_snapshot_resume_tier(xlator_t *this, dict_t *snap_dict)
-{
- int ret = -1;
- dict_t *dict = NULL;
- int64_t volcount = 0;
- char key[64] = "";
- int keylen;
- char *volname = NULL;
- int i = 0;
- char *op_errstr = NULL;
- glusterd_volinfo_t *volinfo = NULL;
-
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
- GF_VALIDATE_OR_GOTO(this->name, snap_dict, out);
-
- ret = dict_get_int64(snap_dict, "volcount", &volcount);
- if (ret) {
- goto out;
- }
- if (volcount <= 0) {
- ret = -1;
- goto out;
- }
-
- dict = dict_new();
- if (!dict)
- goto out;
-
- for (i = 1; i <= volcount; i++) {
- keylen = snprintf(key, sizeof(key), "volname%d", i);
- ret = dict_get_strn(snap_dict, key, keylen, &volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to get key %s", volname);
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret)
- goto out;
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER)
- continue;
-
- ret = dict_set_int32n(dict, "rebalance-command",
- SLEN("rebalance-command"),
- GF_DEFRAG_CMD_RESUME_TIER);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to set rebalance-command");
-
- goto out;
- }
-
- ret = dict_set_strn(dict, "volname", SLEN("volname"), volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to set volname");
- goto out;
- }
-
- ret = gd_brick_op_phase(GD_OP_DEFRAG_BRICK_VOLUME, NULL, dict,
- &op_errstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_RESUME_TIER_FAIL,
- "Failed to resume tier");
- goto out;
- }
- }
-
-out:
- if (dict)
- dict_unref(dict);
-
- return ret;
-}
-
-int
glusterd_snap_create_clone_common_prevalidate(
dict_t *rsp_dict, int flags, char *snapname, char *err_str,
char *snap_volname, int64_t volcount, glusterd_volinfo_t *volinfo,
@@ -2039,7 +1936,7 @@ glusterd_snap_create_clone_common_prevalidate(
{
char *device = NULL;
char *orig_device = NULL;
- char key[PATH_MAX] = "";
+ char key[128] = "";
int ret = -1;
int64_t i = 1;
int64_t brick_order = 0;
@@ -2088,6 +1985,13 @@ glusterd_snap_create_clone_common_prevalidate(
"command or use [force] option in "
"snapshot create to override this "
"behavior.");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRICK_NOT_RUNNING,
+ "Please run volume status command to see brick "
+                        "status. Please start the stopped brick and then issue "
+ "snapshot create command or use 'force' option in "
+ "snapshot create to override this behavior.",
+ NULL);
} else {
snprintf(err_str, PATH_MAX,
"One or more bricks are not running. "
@@ -2096,6 +2000,12 @@ glusterd_snap_create_clone_common_prevalidate(
"Please start the stopped brick "
"and then issue snapshot clone "
"command ");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRICK_NOT_RUNNING,
+ "Please run snapshot status command to see brick "
+ "status. Please start the stopped brick and then issue "
+ "snapshot clone command.",
+ NULL);
}
*op_errno = EG_BRCKDWN;
ret = -1;
@@ -2111,6 +2021,10 @@ glusterd_snap_create_clone_common_prevalidate(
if (len < 0) {
strcpy(err_str, "<error>");
}
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRK_MNTPATH_GET_FAIL,
+ "Brick_hostname=%s, Brick_path=%s", brickinfo->hostname,
+ brickinfo->path, NULL);
ret = -1;
goto out;
}
@@ -2122,6 +2036,11 @@ glusterd_snap_create_clone_common_prevalidate(
"all bricks of %s are thinly "
"provisioned LV.",
volinfo->volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_SNAPSHOT_NOT_THIN_PROVISIONED,
+ "Ensure that all bricks of volume are thinly "
+ "provisioned LV, Volume=%s",
+ volinfo->volname, NULL);
ret = -1;
goto out;
}
@@ -2134,6 +2053,9 @@ glusterd_snap_create_clone_common_prevalidate(
"cannot copy the snapshot device "
"name (volname: %s, snapname: %s)",
volinfo->volname, snapname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_SNAP_DEVICE_NAME_GET_FAIL, "Volname=%s, Snapname=%s",
+ volinfo->volname, snapname, NULL);
*loglevel = GF_LOG_WARNING;
ret = -1;
goto out;
@@ -2238,7 +2160,6 @@ glusterd_snapshot_clone_prevalidate(dict_t *dict, char **op_errstr,
char *clonename = NULL;
char *snapname = NULL;
char device_name[64] = "";
- char key[PATH_MAX] = "";
glusterd_snap_t *snap = NULL;
char err_str[PATH_MAX] = "";
int ret = -1;
@@ -2247,6 +2168,7 @@ glusterd_snapshot_clone_prevalidate(dict_t *dict, char **op_errstr,
xlator_t *this = NULL;
uuid_t *snap_volid = NULL;
gf_loglevel_t loglevel = GF_LOG_ERROR;
+ glusterd_volinfo_t *volinfo = NULL;
this = THIS;
GF_ASSERT(op_errstr);
@@ -2267,7 +2189,8 @@ glusterd_snapshot_clone_prevalidate(dict_t *dict, char **op_errstr,
goto out;
}
- if (glusterd_check_volume_exists(clonename)) {
+ ret = glusterd_volinfo_find(clonename, &volinfo);
+ if (!ret) {
ret = -1;
snprintf(err_str, sizeof(err_str),
"Volume with name:%s "
@@ -2299,8 +2222,17 @@ glusterd_snapshot_clone_prevalidate(dict_t *dict, char **op_errstr,
goto out;
}
- snprintf(key, sizeof(key) - 1, "vol1_volid");
- ret = dict_get_bin(dict, key, (void **)&snap_volid);
+ if (!glusterd_is_volume_started(snap_vol)) {
+ snprintf(err_str, sizeof(err_str),
+ "Snapshot %s is "
+ "not activated",
+ snap->snapname);
+ loglevel = GF_LOG_WARNING;
+ *op_errno = EG_VOLSTP;
+ goto out;
+ }
+
+ ret = dict_get_bin(dict, "vol1_volid", (void **)&snap_volid);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Unable to fetch snap_volid");
@@ -2523,13 +2455,6 @@ glusterd_snapshot_create_prevalidate(dict_t *dict, char **op_errstr,
"Failed to pre validate");
goto out;
}
-
- ret = glusterd_snapshot_pause_tier(this, volinfo);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_PAUSE_TIER_FAIL,
- "Failed to pause tier in snap prevalidate.");
- goto out;
- }
}
ret = dict_set_int64(rsp_dict, "volcount", volcount);
@@ -3156,11 +3081,11 @@ out:
static int
glusterd_snapshot_get_snapvol_detail(dict_t *dict, glusterd_volinfo_t *snap_vol,
- char *keyprefix, int detail)
+ const char *keyprefix, const int detail)
{
int ret = -1;
int snap_limit = 0;
- char key[PATH_MAX] = "";
+    char key[64] = ""; /* keyprefix is quite small, up to 32 bytes */
int keylen;
char *value = NULL;
glusterd_volinfo_t *origin_vol = NULL;
@@ -3323,13 +3248,14 @@ out:
static int
glusterd_snapshot_get_snap_detail(dict_t *dict, glusterd_snap_t *snap,
- char *keyprefix, glusterd_volinfo_t *volinfo)
+ const char *keyprefix,
+ glusterd_volinfo_t *volinfo)
{
int ret = -1;
int volcount = 0;
- char key[PATH_MAX] = "";
+ char key[32] = ""; /* keyprefix is quite small, up to 16 bytes */
int keylen;
- char timestr[64] = "";
+ char timestr[GF_TIMESTR_SIZE] = "";
char *value = NULL;
glusterd_volinfo_t *snap_vol = NULL;
glusterd_volinfo_t *tmp_vol = NULL;
@@ -3491,7 +3417,7 @@ glusterd_snapshot_get_all_snap_info(dict_t *dict)
{
int ret = -1;
int snapcount = 0;
- char key[64] = "";
+ char key[16] = "";
glusterd_snap_t *snap = NULL;
glusterd_snap_t *tmp_snap = NULL;
glusterd_conf_t *priv = NULL;
@@ -3538,7 +3464,7 @@ glusterd_snapshot_get_info_by_volume(dict_t *dict, char *volname, char *err_str,
int snapcount = 0;
int snap_limit = 0;
char *value = NULL;
- char key[64] = "";
+ char key[16] = "";
glusterd_volinfo_t *volinfo = NULL;
glusterd_volinfo_t *snap_vol = NULL;
glusterd_volinfo_t *tmp_vol = NULL;
@@ -3836,7 +3762,7 @@ glusterd_snapshot_get_vol_snapnames(dict_t *dict, glusterd_volinfo_t *volinfo)
int ret = -1;
int snapcount = 0;
char *snapname = NULL;
- char key[PATH_MAX] = "";
+ char key[32] = "";
glusterd_volinfo_t *snap_vol = NULL;
glusterd_volinfo_t *tmp_vol = NULL;
xlator_t *this = NULL;
@@ -4004,7 +3930,8 @@ glusterd_handle_snapshot_create(rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
- ret = dict_set_int64(dict, "snap-time", (int64_t)time(&snap_time));
+ snap_time = gf_time();
+ ret = dict_set_int64(dict, "snap-time", (int64_t)snap_time);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Unable to set snap-time");
@@ -4569,6 +4496,7 @@ glusterd_add_missed_snaps_to_dict(dict_t *rsp_dict,
snap_uuid, snap_vol->volname, brick_number, brickinfo->path,
op, GD_MISSED_SNAP_PENDING);
if ((len < 0) || (len >= sizeof(missed_snap_entry))) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL);
goto out;
}
@@ -4576,6 +4504,8 @@ glusterd_add_missed_snaps_to_dict(dict_t *rsp_dict,
ret = dict_get_int32n(rsp_dict, "missed_snap_count",
SLEN("missed_snap_count"), &missed_snap_count);
if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=missed_snap_count", NULL);
/* Initialize the missed_snap_count for the first time */
missed_snap_count = 0;
}
@@ -4731,7 +4661,7 @@ glusterd_snap_brick_create(glusterd_volinfo_t *snap_volinfo,
goto out;
}
- ret = mkdir_p(snap_brick_mount_path, 0777, _gf_true);
+ ret = mkdir_p(snap_brick_mount_path, 0755, _gf_true);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
"creating the brick directory"
@@ -4765,7 +4695,7 @@ glusterd_snap_brick_create(glusterd_volinfo_t *snap_volinfo,
ret = sys_lsetxattr(brickinfo->path, GF_XATTR_VOL_ID_KEY,
snap_volinfo->volume_id, 16, XATTR_REPLACE);
if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_SETXATTR_FAIL,
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL,
"Failed to set "
"extended attribute %s on %s. Reason: "
"%s, snap: %s",
@@ -5393,6 +5323,48 @@ glusterd_do_snap_vol(glusterd_volinfo_t *origin_vol, glusterd_snap_t *snap,
dict_deln(snap_vol->dict, "features.barrier", SLEN("features.barrier"));
gd_update_volume_op_versions(snap_vol);
+    /* Create the export file from the node where "ganesha.enable on"
+     * was executed.
+     */
+ if (glusterd_is_ganesha_cluster() &&
+ glusterd_check_ganesha_export(snap_vol)) {
+ if (is_origin_glusterd(dict)) {
+ ret = manage_export_config(clonename, "on", NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EXPORT_FILE_CREATE_FAIL,
+                       "Failed to create "
+ "export file for NFS-Ganesha\n");
+ goto out;
+ }
+ }
+
+ ret = dict_set_dynstr_with_alloc(snap_vol->dict,
+ "features.cache-invalidation", "on");
+ ret = gd_ganesha_send_dbus(clonename, "on");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Dynamic export addition/deletion failed."
+ " Please see log file for details. Clone name = %s",
+ clonename);
+ goto out;
+ }
+ }
+ if (!glusterd_is_ganesha_cluster() &&
+ glusterd_check_ganesha_export(snap_vol)) {
+        /* This happens when a snapshot was created while Ganesha was
+         * enabled globally and Ganesha was later disabled on the
+         * cluster. In such cases the volume-level option is still set
+         * in the dict, so we disable it, as it makes no sense to keep
+         * the option.
+         */
+
+ ret = dict_set_dynstr(snap_vol->dict, "ganesha.enable", "off");
+ if (ret)
+ goto out;
+ }
+
ret = glusterd_store_volinfo(snap_vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_SET_FAIL,
@@ -5464,8 +5436,31 @@ out:
for (i = 0; unsupported_opt[i].key; i++)
GF_FREE(unsupported_opt[i].value);
- if (snap_vol)
+ if (snap_vol) {
+ if (glusterd_is_ganesha_cluster() &&
+ glusterd_check_ganesha_export(snap_vol)) {
+ if (is_origin_glusterd(dict)) {
+ ret = manage_export_config(clonename, "on", NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EXPORT_FILE_CREATE_FAIL,
+                           "Failed to create "
+ "export file for NFS-Ganesha\n");
+ }
+ }
+
+ ret = gd_ganesha_send_dbus(clonename, "off");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Dynamic export addition/deletion failed."
+ " Please see log file for details. Clone name = %s",
+ clonename);
+ }
+ }
+
glusterd_snap_volume_remove(rsp_dict, snap_vol, _gf_true, _gf_true);
+ }
snap_vol = NULL;
}
@@ -5517,6 +5512,8 @@ glusterd_snapshot_activate_deactivate_prevalidate(dict_t *dict,
"Snapshot (%s) does not "
"exist.",
snapname);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_NOT_FOUND,
+ "Snapname=%s", snapname, NULL);
*op_errno = EG_NOSNAP;
ret = -1;
goto out;
@@ -5629,12 +5626,12 @@ out:
return ret;
}
-int32_t
+static int32_t
glusterd_handle_snapshot_delete_all(dict_t *dict)
{
int32_t ret = -1;
int32_t i = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
glusterd_conf_t *priv = NULL;
glusterd_snap_t *snap = NULL;
glusterd_snap_t *tmp_snap = NULL;
@@ -7232,10 +7229,10 @@ out:
return ret;
}
-int
+static int
glusterd_get_brick_lvm_details(dict_t *rsp_dict,
glusterd_brickinfo_t *brickinfo, char *volname,
- char *device, char *key_prefix)
+ char *device, const char *key_prefix)
{
int ret = -1;
glusterd_conf_t *priv = NULL;
@@ -7247,7 +7244,7 @@ glusterd_get_brick_lvm_details(dict_t *rsp_dict,
char buf[PATH_MAX] = "";
char *ptr = NULL;
char *token = NULL;
- char key[PATH_MAX] = "";
+ char key[160] = ""; /* key_prefix is 128 bytes at most */
char *value = NULL;
GF_ASSERT(rsp_dict);
@@ -7322,11 +7319,15 @@ glusterd_get_brick_lvm_details(dict_t *rsp_dict,
if (token != NULL) {
value = gf_strdup(token);
if (!value) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "token=%s", token, NULL);
ret = -1;
goto end;
}
ret = snprintf(key, sizeof(key), "%s.data", key_prefix);
if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL,
+ NULL);
goto end;
}
@@ -7341,11 +7342,15 @@ glusterd_get_brick_lvm_details(dict_t *rsp_dict,
if (token != NULL) {
value = gf_strdup(token);
if (!value) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "token=%s", token, NULL);
ret = -1;
goto end;
}
ret = snprintf(key, sizeof(key), "%s.lvsize", key_prefix);
if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL,
+ NULL);
goto end;
}
@@ -7375,16 +7380,16 @@ out:
return ret;
}
-int
+static int
glusterd_get_single_brick_status(char **op_errstr, dict_t *rsp_dict,
- char *keyprefix, int index,
+ const char *keyprefix, int index,
glusterd_volinfo_t *snap_volinfo,
glusterd_brickinfo_t *brickinfo)
{
int ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
- char key[PATH_MAX] = "";
+ char key[128] = ""; /* keyprefix is not longer than 64 bytes */
int keylen;
char *device = NULL;
char *value = NULL;
@@ -7405,6 +7410,7 @@ glusterd_get_single_brick_status(char **op_errstr, dict_t *rsp_dict,
keylen = snprintf(key, sizeof(key), "%s.brick%d.path", keyprefix, index);
if (keylen < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -7412,11 +7418,14 @@ glusterd_get_single_brick_status(char **op_errstr, dict_t *rsp_dict,
ret = snprintf(brick_path, sizeof(brick_path), "%s:%s", brickinfo->hostname,
brickinfo->path);
if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
}
value = gf_strdup(brick_path);
if (!value) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "brick_path=%s", brick_path, NULL);
ret = -1;
goto out;
}
@@ -7492,6 +7501,8 @@ glusterd_get_single_brick_status(char **op_errstr, dict_t *rsp_dict,
index);
if (keylen < 0) {
ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL,
+ NULL);
goto out;
}
@@ -7549,13 +7560,13 @@ out:
return ret;
}
-int
+static int
glusterd_get_single_snap_status(char **op_errstr, dict_t *rsp_dict,
- char *keyprefix, glusterd_snap_t *snap)
+ const char *keyprefix, glusterd_snap_t *snap)
{
int ret = -1;
xlator_t *this = NULL;
- char key[PATH_MAX] = "";
+ char key[64] = ""; /* keyprefix is "status.snap0" */
int keylen;
char brickkey[PATH_MAX] = "";
glusterd_volinfo_t *snap_volinfo = NULL;
@@ -7577,6 +7588,7 @@ glusterd_get_single_snap_status(char **op_errstr, dict_t *rsp_dict,
{
keylen = snprintf(key, sizeof(key), "%s.vol%d", keyprefix, volcount);
if (keylen < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -7600,6 +7612,7 @@ glusterd_get_single_snap_status(char **op_errstr, dict_t *rsp_dict,
}
keylen = snprintf(brickkey, sizeof(brickkey), "%s.brickcount", key);
if (keylen < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
}
@@ -7614,6 +7627,7 @@ glusterd_get_single_snap_status(char **op_errstr, dict_t *rsp_dict,
keylen = snprintf(key, sizeof(key), "%s.volcount", keyprefix);
if (keylen < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -7630,12 +7644,13 @@ out:
return ret;
}
-int
+static int
glusterd_get_each_snap_object_status(char **op_errstr, dict_t *rsp_dict,
- glusterd_snap_t *snap, char *keyprefix)
+ glusterd_snap_t *snap,
+ const char *keyprefix)
{
int ret = -1;
- char key[PATH_MAX] = "";
+ char key[32] = ""; /* keyprefix is "status.snap0" */
int keylen;
char *temp = NULL;
xlator_t *this = NULL;
@@ -7652,6 +7667,7 @@ glusterd_get_each_snap_object_status(char **op_errstr, dict_t *rsp_dict,
*/
keylen = snprintf(key, sizeof(key), "%s.snapname", keyprefix);
if (keylen < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -7673,6 +7689,7 @@ glusterd_get_each_snap_object_status(char **op_errstr, dict_t *rsp_dict,
keylen = snprintf(key, sizeof(key), "%s.uuid", keyprefix);
if (keylen < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -7756,6 +7773,7 @@ glusterd_get_snap_status_of_volume(char **op_errstr, dict_t *rsp_dict,
{
ret = snprintf(key, sizeof(key), "status.snap%d.snapname", i);
if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
}
@@ -7807,6 +7825,7 @@ glusterd_get_all_snapshot_status(dict_t *dict, char **op_errstr,
{
ret = snprintf(key, sizeof(key), "status.snap%d.snapname", i);
if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
}
@@ -7841,7 +7860,6 @@ glusterd_snapshot_status_commit(dict_t *dict, char **op_errstr,
xlator_t *this = NULL;
int ret = -1;
glusterd_conf_t *conf = NULL;
- char *get_buffer = NULL;
int32_t cmd = -1;
char *snapname = NULL;
glusterd_snap_t *snap = NULL;
@@ -7910,8 +7928,7 @@ glusterd_snapshot_status_commit(dict_t *dict, char **op_errstr,
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_STATUS_FAIL,
"Unable to "
- "get status of snap %s",
- get_buffer);
+ "get status of snap");
goto out;
}
@@ -8116,6 +8133,12 @@ glusterd_snapshot_clone_postvalidate(dict_t *dict, int32_t op_ret,
if (snap_vol)
snap = snap_vol->snapshot;
+ else {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_NOT_FOUND,
+ "Snapshot volume is null");
+ goto out;
+ }
/* Fetch snap object from snap_vol and delete it all in case of *
* a failure, or else, just delete the snap object as it is not *
@@ -8315,12 +8338,6 @@ glusterd_snapshot_create_postvalidate(dict_t *dict, int32_t op_ret,
}
}
- ret = glusterd_snapshot_resume_tier(this, dict);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_RESUME_TIER_FAIL,
- "Failed to resume tier in snapshot postvalidate.");
- }
-
out:
return ret;
}
@@ -8852,6 +8869,7 @@ glusterd_snapshot_revert_partial_restored_vol(glusterd_volinfo_t *volinfo)
"%s/" GLUSTERD_TRASH "/vols-%s.deleted", priv->workdir,
volinfo->volname);
if ((len < 0) || (len >= sizeof(trash_path))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
@@ -8912,12 +8930,10 @@ glusterd_snapshot_revert_partial_restored_vol(glusterd_volinfo_t *volinfo)
snap_vol->volume_id,
sizeof(snap_vol->volume_id), XATTR_REPLACE);
if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SETXATTR_FAIL,
- "Failed to set extended "
- "attribute %s on %s. "
- "Reason: %s, snap: %s",
- GF_XATTR_VOL_ID_KEY, brickinfo->path,
- strerror(errno), snap_vol->volname);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SET_XATTR_FAIL,
+ "Attribute=%s, Path=%s, Reason=%s, Snap=%s",
+ GF_XATTR_VOL_ID_KEY, brickinfo->path,
+ strerror(errno), snap_vol->volname, NULL);
goto out;
}
}
@@ -9297,6 +9313,7 @@ glusterd_handle_snapshot_fn(rpcsvc_request_t *req)
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -9979,7 +9996,7 @@ glusterd_snapshot_get_volnames_uuids(dict_t *dict, char *volname,
{
int ret = -1;
int snapcount = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
glusterd_volinfo_t *snap_vol = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_volinfo_t *tmp_vol = NULL;
diff --git a/xlators/mgmt/glusterd/src/glusterd-statedump.c b/xlators/mgmt/glusterd/src/glusterd-statedump.c
index 8c2786cb3f7..225d10cc546 100644
--- a/xlators/mgmt/glusterd/src/glusterd-statedump.c
+++ b/xlators/mgmt/glusterd/src/glusterd-statedump.c
@@ -8,11 +8,10 @@
cases as published by the Free Software Foundation.
*/
-#include "statedump.h"
+#include <glusterfs/statedump.h>
#include "glusterd.h"
#include "glusterd-shd-svc.h"
#include "glusterd-quotad-svc.h"
-#include "glusterd-nfs-svc.h"
#include "glusterd-locks.h"
#include "glusterd-messages.h"
@@ -201,13 +200,10 @@ glusterd_dump_priv(xlator_t *this)
gf_proc_dump_build_key(key, "glusterd", "ping-timeout");
gf_proc_dump_write(key, "%d", priv->ping_timeout);
-
- gf_proc_dump_build_key(key, "glusterd", "shd.online");
- gf_proc_dump_write(key, "%d", priv->shd_svc.online);
-
+#ifdef BUILD_GNFS
gf_proc_dump_build_key(key, "glusterd", "nfs.online");
gf_proc_dump_write(key, "%d", priv->nfs_svc.online);
-
+#endif
gf_proc_dump_build_key(key, "glusterd", "quotad.online");
gf_proc_dump_write(key, "%d", priv->quotad_svc.online);
diff --git a/xlators/mgmt/glusterd/src/glusterd-statedump.h b/xlators/mgmt/glusterd/src/glusterd-statedump.h
index 7d7fc1a7fa3..b5ef1f48e82 100644
--- a/xlators/mgmt/glusterd/src/glusterd-statedump.h
+++ b/xlators/mgmt/glusterd/src/glusterd-statedump.h
@@ -11,7 +11,7 @@
#ifndef _GLUSTERD_STATEDUMP_H_
#define _GLUSTERD_STATEDUMP_H_
-#include "xlator.h"
+#include <glusterfs/xlator.h>
int
glusterd_dump_priv(xlator_t *this);
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index d3e4415cb45..d94dceb10b7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -11,32 +11,32 @@
#include "glusterd-op-sm.h"
#include <inttypes.h>
-#include "glusterfs.h"
-#include "compat.h"
-#include "dict.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/dict.h>
#include "protocol-common.h"
-#include "xlator.h"
-#include "logging.h"
-#include "timer.h"
-#include "syscall.h"
-#include "defaults.h"
-#include "compat.h"
-#include "compat-errno.h"
-#include "statedump.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/timer.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/statedump.h>
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-hooks.h"
-#include "store.h"
+#include <glusterfs/store.h>
#include "glusterd-store.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-messages.h"
#include "rpc-clnt.h"
-#include "common-utils.h"
-#include "quota-common-utils.h"
+#include <glusterfs/common-utils.h>
+#include <glusterfs/quota-common-utils.h>
#include <sys/resource.h>
#include <inttypes.h>
@@ -48,6 +48,23 @@
#include "mntent_compat.h"
#endif
+#define GLUSTERD_GET_BRICK_DIR(path, volinfo, priv) \
+ do { \
+ int32_t _brick_len; \
+ if (volinfo->is_snap_volume) { \
+ _brick_len = snprintf(path, PATH_MAX, "%s/snaps/%s/%s/%s", \
+ priv->workdir, volinfo->snapshot->snapname, \
+ volinfo->volname, GLUSTERD_BRICK_INFO_DIR); \
+ } else { \
+ _brick_len = snprintf(path, PATH_MAX, "%s/%s/%s/%s", \
+ priv->workdir, GLUSTERD_VOLUME_DIR_PREFIX, \
+ volinfo->volname, GLUSTERD_BRICK_INFO_DIR); \
+ } \
+ if ((_brick_len < 0) || (_brick_len >= PATH_MAX)) { \
+ path[0] = 0; \
+ } \
+ } while (0)
+
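GLUSTERD_GET_BRICK_DIR branches on is_snap_volume: snapshot volumes live under <workdir>/snaps/<snapname>/<volname>/, regular volumes under the vols prefix, with the same empty-path overflow convention as GLUSTERD_GET_SNAP_DIR. A standalone sketch; the stub struct flattens volinfo->snapshot->snapname into one field, and the literal "vols"/"bricks" components stand in for the real GLUSTERD_VOLUME_DIR_PREFIX / GLUSTERD_BRICK_INFO_DIR macros:

#include <limits.h>
#include <stdio.h>

typedef struct {
    int is_snap_volume;
    const char *volname;
    const char *snapname; /* flattened from volinfo->snapshot->snapname */
} volinfo_stub_t;

static void get_brick_dir(char *path, const volinfo_stub_t *vol,
                          const char *workdir)
{
    int len;

    if (vol->is_snap_volume)
        len = snprintf(path, PATH_MAX, "%s/snaps/%s/%s/bricks", workdir,
                       vol->snapname, vol->volname);
    else
        len = snprintf(path, PATH_MAX, "%s/vols/%s/bricks", workdir,
                       vol->volname);
    if (len < 0 || len >= PATH_MAX)
        path[0] = 0; /* same overflow convention as the macro */
}

int main(void)
{
    char path[PATH_MAX];
    volinfo_stub_t vol = { 0, "vol1", NULL };

    get_brick_dir(path, &vol, "/var/lib/glusterd");
    printf("%s\n", path); /* /var/lib/glusterd/vols/vol1/bricks */
    return 0;
}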
void
glusterd_replace_slash_with_hyphen(char *str)
{
@@ -57,7 +74,7 @@ glusterd_replace_slash_with_hyphen(char *str)
while (ptr) {
*ptr = '-';
- ptr = strchr(str, '/');
+ ptr = strchr(ptr, '/');
}
}
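The strchr() change above resumes the scan from the last match instead of restarting from the head of the string, so each slash is found once; the output is unchanged. A standalone copy of the helper (the initial strchr() call is assumed from the function's unshown opening lines):

#include <stdio.h>
#include <string.h>

static void replace_slash_with_hyphen(char *str)
{
    char *ptr = strchr(str, '/');

    while (ptr) {
        *ptr = '-';
        /* continue from the current position, not from 'str' */
        ptr = strchr(ptr, '/');
    }
}

int main(void)
{
    char brickpath[] = "/data/glusterfs/vol1/brick1";

    replace_slash_with_hyphen(brickpath);
    printf("%s\n", brickpath); /* -data-glusterfs-vol1-brick1 */
    return 0;
}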
@@ -239,9 +256,10 @@ out:
int32_t
glusterd_store_volinfo_brick_fname_write(int vol_fd,
glusterd_brickinfo_t *brickinfo,
- int32_t brick_count)
+ int32_t brick_count,
+ int is_thin_arbiter)
{
- char key[PATH_MAX] = {
+ char key[64] = {
0,
};
char brickfname[PATH_MAX] = {
@@ -249,8 +267,13 @@ glusterd_store_volinfo_brick_fname_write(int vol_fd,
};
int32_t ret = -1;
- snprintf(key, sizeof(key), "%s-%d", GLUSTERD_STORE_KEY_VOL_BRICK,
- brick_count);
+ if (!is_thin_arbiter) {
+ snprintf(key, sizeof(key), "%s-%d", GLUSTERD_STORE_KEY_VOL_BRICK,
+ brick_count);
+ } else {
+ snprintf(key, sizeof(key), "%s-%d", GLUSTERD_STORE_KEY_VOL_TA_BRICK,
+ brick_count);
+ }
glusterd_store_brickinfofname_set(brickinfo, brickfname,
sizeof(brickfname));
ret = gf_store_save_value(vol_fd, key, brickfname);
@@ -295,15 +318,14 @@ glusterd_store_create_snapd_shandle_on_absence(glusterd_volinfo_t *volinfo)
* The snapshot details will be stored only if the cluster op-version is
* greater than or equal to 4
*/
-int
+static int
gd_store_brick_snap_details_write(int fd, glusterd_brickinfo_t *brickinfo)
{
int ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
- char value[PATH_MAX] = {
- 0,
- };
+ char value[5 * PATH_MAX];
+ uint total_len = 0;
this = THIS;
GF_ASSERT(this != NULL);
@@ -318,102 +340,104 @@ gd_store_brick_snap_details_write(int fd, glusterd_brickinfo_t *brickinfo)
goto out;
}
- if (strlen(brickinfo->device_path) > 0) {
- snprintf(value, sizeof(value), "%s", brickinfo->device_path);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_DEVICE_PATH,
- value);
- if (ret)
- goto out;
+ if (brickinfo->device_path[0] != '\0') {
+ ret = snprintf(value + total_len, sizeof(value) - total_len, "%s=%s\n",
+ GLUSTERD_STORE_KEY_BRICK_DEVICE_PATH,
+ brickinfo->device_path);
+ if (ret < 0 || ret >= sizeof(value) - total_len) {
+ ret = -1;
+ goto err;
+ }
+ total_len += ret;
}
- if (strlen(brickinfo->mount_dir) > 0) {
- snprintf(value, sizeof(value), "%s", brickinfo->mount_dir);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_MOUNT_DIR,
- value);
- if (ret)
- goto out;
+ if (brickinfo->mount_dir[0] != '\0') {
+ ret = snprintf(value + total_len, sizeof(value) - total_len, "%s=%s\n",
+ GLUSTERD_STORE_KEY_BRICK_MOUNT_DIR,
+ brickinfo->mount_dir);
+ if (ret < 0 || ret >= sizeof(value) - total_len) {
+ ret = -1;
+ goto err;
+ }
+ total_len += ret;
}
- if (strlen(brickinfo->fstype) > 0) {
- snprintf(value, sizeof(value), "%s", brickinfo->fstype);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_FSTYPE, value);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FS_LABEL_UPDATE_FAIL,
- "Failed to save "
- "brick fs type of brick %s",
- brickinfo->path);
- goto out;
+ if (brickinfo->fstype[0] != '\0') {
+ ret = snprintf(value + total_len, sizeof(value) - total_len, "%s=%s\n",
+ GLUSTERD_STORE_KEY_BRICK_FSTYPE, brickinfo->fstype);
+ if (ret < 0 || ret >= sizeof(value) - total_len) {
+ ret = -1;
+ goto err;
}
+ total_len += ret;
}
- if (strlen(brickinfo->mnt_opts) > 0) {
- snprintf(value, sizeof(value), "%s", brickinfo->mnt_opts);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_MNTOPTS, value);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRK_MOUNTOPTS_FAIL,
- "Failed to save "
- "brick mnt opts of brick %s",
- brickinfo->path);
- goto out;
+ if (brickinfo->mnt_opts[0] != '\0') {
+ ret = snprintf(value + total_len, sizeof(value) - total_len, "%s=%s\n",
+ GLUSTERD_STORE_KEY_BRICK_MNTOPTS, brickinfo->mnt_opts);
+ if (ret < 0 || ret >= sizeof(value) - total_len) {
+ ret = -1;
+ goto err;
}
+ total_len += ret;
}
- snprintf(value, sizeof(value), "%d", brickinfo->snap_status);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_SNAP_STATUS, value);
- if (ret)
- goto out;
+ ret = snprintf(value + total_len, sizeof(value) - total_len, "%s=%d\n",
+ GLUSTERD_STORE_KEY_BRICK_SNAP_STATUS,
+ brickinfo->snap_status);
+ if (ret < 0 || ret >= sizeof(value) - total_len) {
+ ret = -1;
+ goto err;
+ }
+ total_len += ret;
- snprintf(value, sizeof(value), "%lu", brickinfo->statfs_fsid);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_FSID, value);
+ ret = snprintf(value + total_len, sizeof(value) - total_len,
+ "%s=%" PRIu64 "\n", GLUSTERD_STORE_KEY_BRICK_FSID,
+ brickinfo->statfs_fsid);
+ if (ret < 0 || ret >= sizeof(value) - total_len) {
+ ret = -1;
+ goto err;
+ }
+ ret = gf_store_save_items(fd, value);
+err:
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FS_LABEL_UPDATE_FAIL,
+ "Failed to save "
+               "snap details of brick %s",
+ brickinfo->path);
+ }
out:
return ret;
}
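The rewrite above replaces one gf_store_save_value() call per field with a single buffer of "key=value\n" lines written via gf_store_save_items(). A minimal sketch of the append-with-truncation-check pattern it uses; append_kv() and the buffer size are illustrative, not glusterd API:

#include <stdio.h>
#include <string.h>

/* Append "key=value\n" to buf at offset *len, failing on truncation. */
static int append_kv(char *buf, size_t bufsize, size_t *len,
                     const char *key, const char *val)
{
    int ret = snprintf(buf + *len, bufsize - *len, "%s=%s\n", key, val);

    if (ret < 0 || (size_t)ret >= bufsize - *len)
        return -1; /* would not fit: caller flushes or fails */
    *len += ret;
    return 0;
}

int main(void)
{
    char value[4096];
    size_t total_len = 0;

    if (append_kv(value, sizeof(value), &total_len, "device_path",
                  "/dev/vg0/thinlv") ||
        append_kv(value, sizeof(value), &total_len, "snap-status", "1"))
        return 1;

    fputs(value, stdout); /* stand-in for gf_store_save_items(fd, value) */
    return 0;
}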
-int32_t
+static int32_t
glusterd_store_brickinfo_write(int fd, glusterd_brickinfo_t *brickinfo)
{
- char value[256] = {
- 0,
- };
- int32_t ret = 0;
+ char value[5 * PATH_MAX];
+ int32_t ret = -1;
GF_ASSERT(brickinfo);
GF_ASSERT(fd > 0);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_UUID,
- uuid_utoa(brickinfo->uuid));
- if (ret)
- goto out;
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_HOSTNAME,
- brickinfo->hostname);
- if (ret)
- goto out;
-
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_PATH,
- brickinfo->path);
- if (ret)
- goto out;
-
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_REAL_PATH,
- brickinfo->path);
- if (ret)
- goto out;
-
- snprintf(value, sizeof(value), "%d", brickinfo->port);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_PORT, value);
-
- snprintf(value, sizeof(value), "%d", brickinfo->rdma_port);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_RDMA_PORT, value);
-
- snprintf(value, sizeof(value), "%d", brickinfo->decommissioned);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_DECOMMISSIONED,
- value);
- if (ret)
+ ret = snprintf(value, sizeof(value),
+ "%s=%s\n%s=%s\n%s=%s\n%s=%s\n%s=%d\n%s=%d\n%s=%d\n%s=%s\n",
+ GLUSTERD_STORE_KEY_BRICK_UUID, uuid_utoa(brickinfo->uuid),
+ GLUSTERD_STORE_KEY_BRICK_HOSTNAME, brickinfo->hostname,
+ GLUSTERD_STORE_KEY_BRICK_PATH, brickinfo->path,
+ GLUSTERD_STORE_KEY_BRICK_REAL_PATH, brickinfo->path,
+ GLUSTERD_STORE_KEY_BRICK_PORT, brickinfo->port,
+ GLUSTERD_STORE_KEY_BRICK_RDMA_PORT, brickinfo->rdma_port,
+ GLUSTERD_STORE_KEY_BRICK_DECOMMISSIONED,
+ brickinfo->decommissioned, GLUSTERD_STORE_KEY_BRICK_ID,
+ brickinfo->brick_id);
+
+ if (ret < 0 || ret >= sizeof(value)) {
+ ret = -1;
goto out;
+ }
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_BRICK_ID,
- brickinfo->brick_id);
+ ret = gf_store_save_items(fd, value);
if (ret)
goto out;
@@ -434,7 +458,7 @@ out:
int32_t
glusterd_store_snapd_write(int fd, glusterd_volinfo_t *volinfo)
{
- char value[256] = {
+ char value[64] = {
0,
};
int32_t ret = 0;
@@ -458,7 +482,7 @@ glusterd_store_snapd_write(int fd, glusterd_volinfo_t *volinfo)
return ret;
}
-int32_t
+static int32_t
glusterd_store_perform_brick_store(glusterd_brickinfo_t *brickinfo)
{
int fd = -1;
@@ -470,14 +494,14 @@ glusterd_store_perform_brick_store(glusterd_brickinfo_t *brickinfo)
ret = -1;
goto out;
}
-
ret = glusterd_store_brickinfo_write(fd, brickinfo);
if (ret)
goto out;
out:
- if (ret && (fd > 0))
+ if (ret && (fd > 0)) {
gf_store_unlink_tmppath(brickinfo->shandle);
+ }
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
}
@@ -522,22 +546,18 @@ out:
return ret;
}
-int32_t
+static int32_t
glusterd_store_brickinfo(glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo, int32_t brick_count,
- int vol_fd)
+ int vol_fd, int is_thin_arbiter)
{
int32_t ret = -1;
GF_ASSERT(volinfo);
GF_ASSERT(brickinfo);
- ret = glusterd_store_volinfo_brick_fname_write(vol_fd, brickinfo,
- brick_count);
- if (ret)
- goto out;
-
- ret = glusterd_store_create_brick_dir(volinfo);
+ ret = glusterd_store_volinfo_brick_fname_write(
+ vol_fd, brickinfo, brick_count, is_thin_arbiter);
if (ret)
goto out;
@@ -639,168 +659,73 @@ out:
return ret;
}
-int32_t
-glusterd_store_remove_bricks(glusterd_volinfo_t *volinfo, char *delete_path)
-{
- int32_t ret = 0;
- glusterd_brickinfo_t *tmp = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- DIR *dir = NULL;
- struct dirent *entry = NULL;
- struct dirent scratch[2] = {
- {
- 0,
- },
- };
- char path[PATH_MAX] = {
- 0,
- };
- char brickdir[PATH_MAX] = {
- 0,
- };
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT(this);
-
- GF_ASSERT(volinfo);
-
- cds_list_for_each_entry(tmp, &volinfo->bricks, brick_list)
- {
- ret = glusterd_store_delete_brick(tmp, delete_path);
- if (ret)
- goto out;
- }
-
- priv = this->private;
- GF_ASSERT(priv);
-
- len = snprintf(brickdir, sizeof(brickdir), "%s/%s", delete_path,
- GLUSTERD_BRICK_INFO_DIR);
- if ((len < 0) || (len >= sizeof(brickdir))) {
- ret = -1;
- goto out;
- }
-
- dir = sys_opendir(brickdir);
-
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
-
- while (entry) {
- len = snprintf(path, sizeof(path), "%s/%s", brickdir, entry->d_name);
- if ((len >= 0) && (len < sizeof(path))) {
- ret = sys_unlink(path);
- if (ret && errno != ENOENT) {
- gf_msg_debug(this->name, 0, "Unable to unlink %s", path);
- }
- }
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
- }
-
- sys_closedir(dir);
-
- ret = sys_rmdir(brickdir);
-
-out:
- gf_msg_debug(this->name, 0, "Returning with %d", ret);
- return ret;
-}
-
static int
-_storeslaves(dict_t *this, char *key, data_t *value, void *data)
-{
- int32_t ret = 0;
- gf_store_handle_t *shandle = NULL;
- xlator_t *xl = NULL;
-
- xl = THIS;
- GF_ASSERT(xl);
-
- shandle = (gf_store_handle_t *)data;
-
- GF_ASSERT(shandle);
- GF_ASSERT(shandle->fd > 0);
- GF_ASSERT(shandle->path);
- GF_ASSERT(key);
- GF_ASSERT(value && value->data);
-
- if ((!shandle) || (shandle->fd <= 0) || (!shandle->path))
- return -1;
-
- if (!key)
- return -1;
- if (!value || !value->data)
- return -1;
-
- gf_msg_debug(xl->name, 0, "Storing in volinfo:key= %s, val=%s", key,
- value->data);
-
- ret = gf_store_save_value(shandle->fd, key, (char *)value->data);
- if (ret) {
- gf_msg(xl->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL,
- "Unable to write into store"
- " handle for path: %s",
- shandle->path);
- return -1;
- }
- return 0;
-}
-
-int
-_storeopts(dict_t *this, char *key, data_t *value, void *data)
+_storeopts(dict_t *dict_value, char *key, data_t *value, void *data)
{
int32_t ret = 0;
int32_t exists = 0;
+ int32_t option_len = 0;
gf_store_handle_t *shandle = NULL;
- xlator_t *xl = NULL;
+ glusterd_volinfo_data_store_t *dict_data = NULL;
+ xlator_t *this = NULL;
- xl = THIS;
- GF_ASSERT(xl);
+ this = THIS;
+ GF_ASSERT(this);
- shandle = (gf_store_handle_t *)data;
+ dict_data = (glusterd_volinfo_data_store_t *)data;
+ shandle = dict_data->shandle;
GF_ASSERT(shandle);
GF_ASSERT(shandle->fd > 0);
- GF_ASSERT(shandle->path);
GF_ASSERT(key);
- GF_ASSERT(value && value->data);
-
- if ((!shandle) || (shandle->fd <= 0) || (!shandle->path))
- return -1;
+ GF_ASSERT(value);
+ GF_ASSERT(value->data);
- if (!key)
- return -1;
- if (!value || !value->data)
- return -1;
-
- if (is_key_glusterd_hooks_friendly(key)) {
- exists = 1;
+ if (dict_data->key_check == 1) {
+ if (is_key_glusterd_hooks_friendly(key)) {
+ exists = 1;
- } else {
- exists = glusterd_check_option_exists(key, NULL);
+ } else {
+ exists = glusterd_check_option_exists(key, NULL);
+ }
}
-
- if (1 == exists) {
- gf_msg_debug(xl->name, 0,
- "Storing in volinfo:key= %s, "
+ if (exists == 1 || dict_data->key_check == 0) {
+ gf_msg_debug(this->name, 0,
+ "Storing in buffer for volinfo:key= %s, "
"val=%s",
key, value->data);
-
} else {
- gf_msg_debug(xl->name, 0, "Discarding:key= %s, val=%s", key,
+ gf_msg_debug(this->name, 0, "Discarding:key= %s, val=%s", key,
value->data);
return 0;
}
- ret = gf_store_save_value(shandle->fd, key, (char *)value->data);
- if (ret) {
- gf_msg(xl->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL,
- "Unable to write into store"
- " handle for path: %s",
- shandle->path);
+    /*
+     * option_len covers the key, the value, and the added '=' and
+     * '\n'. Since value->len already counts the NUL at the end of
+     * the data, only 1 extra byte needs to be added.
+     */
+
+ if ((VOLINFO_BUFFER_SIZE - dict_data->buffer_len - 1) < option_len) {
+ ret = gf_store_save_items(shandle->fd, dict_data->buffer);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL);
+ return -1;
+ }
+ dict_data->buffer_len = 0;
+ dict_data->buffer[0] = '\0';
+ }
+ ret = snprintf(dict_data->buffer + dict_data->buffer_len, option_len + 1,
+ "%s=%s\n", key, value->data);
+ if (ret < 0 || ret > option_len + 1) {
+ gf_smsg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_COPY_FAIL, NULL);
return -1;
}
+
+ dict_data->buffer_len += ret;
+
return 0;
}
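When the pending buffer cannot hold the next option, _storeopts() writes it out and resets it before appending. A standalone sketch of that flush-when-full logic; flush() stands in for gf_store_save_items(), and the 4096 buffer size is illustrative:

#include <stdio.h>
#include <string.h>

#define VOLINFO_BUFFER_SIZE 4096 /* illustrative stand-in for the real macro */

static char buffer[VOLINFO_BUFFER_SIZE];
static size_t buffer_len;

static int flush(void)
{
    if (fputs(buffer, stdout) == EOF) /* stand-in for gf_store_save_items() */
        return -1;
    buffer_len = 0;
    buffer[0] = '\0';
    return 0;
}

static int store_option(const char *key, const char *value)
{
    /* key + '=' + value + '\n'; mirrors option_len in _storeopts() */
    size_t option_len = strlen(key) + strlen(value) + 2;
    int ret;

    /* flush first if the entry would not fit in the remaining space */
    if (VOLINFO_BUFFER_SIZE - buffer_len - 1 < option_len) {
        if (flush())
            return -1;
    }

    ret = snprintf(buffer + buffer_len, option_len + 1, "%s=%s\n", key, value);
    if (ret < 0 || (size_t)ret > option_len)
        return -1;
    buffer_len += ret;
    return 0;
}

int main(void)
{
    store_option("performance.readdir-ahead", "on");
    store_option("features.barrier", "disable");
    return flush();
}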
@@ -809,7 +734,7 @@ _storeopts(dict_t *this, char *key, data_t *value, void *data)
* The snapshot details will be stored only if the cluster op-version is
* greater than or equal to 4
*/
-int
+static int
glusterd_volume_write_snap_details(int fd, glusterd_volinfo_t *volinfo)
{
int ret = -1;
@@ -832,229 +757,163 @@ glusterd_volume_write_snap_details(int fd, glusterd_volinfo_t *volinfo)
goto out;
}
- snprintf(buf, sizeof(buf), "%s", volinfo->parent_volname);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_PARENT_VOLNAME, buf);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_FAIL,
- "Failed to store " GLUSTERD_STORE_KEY_PARENT_VOLNAME);
- goto out;
- }
-
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_RESTORED_SNAP,
- uuid_utoa(volinfo->restored_from_snap));
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL,
- "Unable to write restored_from_snap");
- goto out;
+ ret = snprintf(buf, sizeof(buf), "%s=%s\n%s=%s\n%s=%" PRIu64 "\n",
+ GLUSTERD_STORE_KEY_PARENT_VOLNAME, volinfo->parent_volname,
+ GLUSTERD_STORE_KEY_VOL_RESTORED_SNAP,
+ uuid_utoa(volinfo->restored_from_snap),
+ GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT,
+ volinfo->snap_max_hard_limit);
+ if (ret < 0 || ret >= sizeof(buf)) {
+ ret = -1;
+ goto err;
}
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->snap_max_hard_limit);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT, buf);
+ ret = gf_store_save_items(fd, buf);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HARD_LIMIT_SET_FAIL,
- "Unable to write snap-max-hard-limit");
- goto out;
+ goto err;
}
-
ret = glusterd_store_snapd_info(volinfo);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_INFO_STORE_FAIL,
- "snapd info store failed "
- "volume: %s",
- volinfo->volname);
-
-out:
+err:
if (ret)
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPINFO_WRITE_FAIL,
"Failed to write snap details"
" for volume %s",
volinfo->volname);
- return ret;
-}
-
-int32_t
-glusterd_volume_write_tier_details(int fd, glusterd_volinfo_t *volinfo)
-{
- int32_t ret = -1;
- char buf[PATH_MAX] = "";
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- ret = 0;
- goto out;
- }
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier_info.cold_brick_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_COLD_COUNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier_info.cold_replica_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_COLD_REPLICA_COUNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier_info.cold_disperse_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_COLD_DISPERSE_COUNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier_info.cold_redundancy_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_COLD_REDUNDANCY_COUNT,
- buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier_info.hot_brick_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_HOT_COUNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier_info.hot_replica_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_HOT_REPLICA_COUNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier_info.hot_type);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_HOT_TYPE, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier_info.cold_type);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_COLD_TYPE, buf);
- if (ret)
- goto out;
-
out:
return ret;
}
-int32_t
+static int32_t
glusterd_volume_exclude_options_write(int fd, glusterd_volinfo_t *volinfo)
{
char *str = NULL;
- char buf[PATH_MAX] = "";
+ char buf[PATH_MAX];
+ uint total_len = 0;
int32_t ret = -1;
- xlator_t *this = NULL;
+ xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
- this = THIS;
GF_ASSERT(this);
GF_ASSERT(fd > 0);
GF_ASSERT(volinfo);
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, (conf != NULL), out);
- snprintf(buf, sizeof(buf), "%d", volinfo->type);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_TYPE, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->brick_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_COUNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->status);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_STATUS, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->sub_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_SUB_COUNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->stripe_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_STRIPE_CNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->replica_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_REPLICA_CNT, buf);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len,
+ "%s=%d\n%s=%d\n%s=%d\n%s=%d\n%s=%d\n%s=%d\n",
+ GLUSTERD_STORE_KEY_VOL_TYPE, volinfo->type,
+ GLUSTERD_STORE_KEY_VOL_COUNT, volinfo->brick_count,
+ GLUSTERD_STORE_KEY_VOL_STATUS, volinfo->status,
+ GLUSTERD_STORE_KEY_VOL_SUB_COUNT, volinfo->sub_count,
+ GLUSTERD_STORE_KEY_VOL_STRIPE_CNT, volinfo->stripe_count,
+ GLUSTERD_STORE_KEY_VOL_REPLICA_CNT, volinfo->replica_count);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
if ((conf->op_version >= GD_OP_VERSION_3_7_6) && volinfo->arbiter_count) {
- snprintf(buf, sizeof(buf), "%d", volinfo->arbiter_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_ARBITER_CNT, buf);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%d\n",
+ GLUSTERD_STORE_KEY_VOL_ARBITER_CNT,
+ volinfo->arbiter_count);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
}
if (conf->op_version >= GD_OP_VERSION_3_6_0) {
- snprintf(buf, sizeof(buf), "%d", volinfo->disperse_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->redundancy_count);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_REDUNDANCY_CNT,
- buf);
- if (ret)
+ ret = snprintf(
+ buf + total_len, sizeof(buf) - total_len, "%s=%d\n%s=%d\n",
+ GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT, volinfo->disperse_count,
+ GLUSTERD_STORE_KEY_VOL_REDUNDANCY_CNT, volinfo->redundancy_count);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
}
- snprintf(buf, sizeof(buf), "%d", volinfo->version);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_VERSION, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->transport_type);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_TRANSPORT, buf);
- if (ret)
- goto out;
-
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_ID,
- uuid_utoa(volinfo->volume_id));
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len,
+ "%s=%d\n%s=%d\n%s=%s\n", GLUSTERD_STORE_KEY_VOL_VERSION,
+ volinfo->version, GLUSTERD_STORE_KEY_VOL_TRANSPORT,
+ volinfo->transport_type, GLUSTERD_STORE_KEY_VOL_ID,
+ uuid_utoa(volinfo->volume_id));
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
str = glusterd_auth_get_username(volinfo);
if (str) {
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_USERNAME, str);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%s\n",
+ GLUSTERD_STORE_KEY_USERNAME, str);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
}
str = glusterd_auth_get_password(volinfo);
if (str) {
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_PASSWORD, str);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%s\n",
+ GLUSTERD_STORE_KEY_PASSWORD, str);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
}
- snprintf(buf, sizeof(buf), "%d", volinfo->op_version);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_OP_VERSION, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->client_op_version);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
- buf);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%d\n%s=%d\n",
+ GLUSTERD_STORE_KEY_VOL_OP_VERSION, volinfo->op_version,
+ GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
+ volinfo->client_op_version);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
- if (volinfo->caps) {
- snprintf(buf, sizeof(buf), "%d", volinfo->caps);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_CAPS, buf);
- if (ret)
- goto out;
}
+ total_len += ret;
if (conf->op_version >= GD_OP_VERSION_3_7_6) {
- snprintf(buf, sizeof(buf), "%d", volinfo->quota_xattr_version);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_QUOTA_VERSION,
- buf);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%d\n",
+ GLUSTERD_STORE_KEY_VOL_QUOTA_VERSION,
+ volinfo->quota_xattr_version);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
}
if (conf->op_version >= GD_OP_VERSION_3_10_0) {
- snprintf(buf, sizeof(buf), "%d", volinfo->is_tier_enabled);
- ret = gf_store_save_value(fd, GF_TIER_ENABLED, buf);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=0\n",
+ GF_TIER_ENABLED);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
}
- ret = glusterd_volume_write_tier_details(fd, volinfo);
+ if ((conf->op_version >= GD_OP_VERSION_7_0) &&
+ volinfo->thin_arbiter_count) {
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%d\n",
+ GLUSTERD_STORE_KEY_VOL_THIN_ARBITER_CNT,
+ volinfo->thin_arbiter_count);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
+ goto out;
+ }
+ total_len += ret;
+ }
+
+ ret = gf_store_save_items(fd, buf);
+ if (ret)
+ goto out;
ret = glusterd_volume_write_snap_details(fd, volinfo);
@@ -1092,36 +951,26 @@ glusterd_store_piddirpath_set(glusterd_volinfo_t *volinfo, char *piddirpath)
}
static int32_t
-glusterd_store_create_volume_dir(glusterd_volinfo_t *volinfo)
-{
- int32_t ret = -1;
- char voldirpath[PATH_MAX] = {
- 0,
- };
-
- GF_ASSERT(volinfo);
-
- glusterd_store_voldirpath_set(volinfo, voldirpath);
- ret = gf_store_mkdir(voldirpath);
-
- gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
- return ret;
-}
-
-static int32_t
-glusterd_store_create_volume_run_dir(glusterd_volinfo_t *volinfo)
+glusterd_store_create_volume_dirs(glusterd_volinfo_t *volinfo)
{
int32_t ret = -1;
- char piddirpath[PATH_MAX] = {
+ char dirpath[PATH_MAX] = {
0,
};
GF_ASSERT(volinfo);
- glusterd_store_piddirpath_set(volinfo, piddirpath);
+ glusterd_store_voldirpath_set(volinfo, dirpath);
+ ret = gf_store_mkdir(dirpath);
+ if (ret)
+ goto out;
- ret = gf_store_mkdir(piddirpath);
+ glusterd_store_piddirpath_set(volinfo, dirpath);
+ ret = gf_store_mkdir(dirpath);
+ if (ret)
+ goto out;
+out:
gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
return ret;
}
@@ -1151,7 +1000,7 @@ glusterd_store_create_snap_dir(glusterd_snap_t *snap)
return ret;
}
-int32_t
+static int32_t
glusterd_store_volinfo_write(int fd, glusterd_volinfo_t *volinfo)
{
int32_t ret = -1;
@@ -1159,28 +1008,57 @@ glusterd_store_volinfo_write(int fd, glusterd_volinfo_t *volinfo)
GF_ASSERT(fd > 0);
GF_ASSERT(volinfo);
GF_ASSERT(volinfo->shandle);
+ xlator_t *this = NULL;
+ glusterd_volinfo_data_store_t *dict_data = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
shandle = volinfo->shandle;
+
+ dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t),
+ gf_gld_mt_volinfo_dict_data_t);
+ if (dict_data == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL);
+ return -1;
+ }
+
ret = glusterd_volume_exclude_options_write(fd, volinfo);
- if (ret)
+ if (ret) {
goto out;
+ }
+
+ dict_data->shandle = shandle;
+ dict_data->key_check = 1;
shandle->fd = fd;
- dict_foreach(volinfo->dict, _storeopts, shandle);
+ dict_foreach(volinfo->dict, _storeopts, (void *)dict_data);
+
+ dict_data->key_check = 0;
+ dict_foreach(volinfo->gsync_slaves, _storeopts, (void *)dict_data);
+
+ if (dict_data->buffer_len > 0) {
+ ret = gf_store_save_items(fd, dict_data->buffer);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL);
+ goto out;
+ }
+ }
- dict_foreach(volinfo->gsync_slaves, _storeslaves, shandle);
shandle->fd = 0;
out:
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ GF_FREE(dict_data);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
-int32_t
+static int32_t
glusterd_store_snapinfo_write(glusterd_snap_t *snap)
{
int32_t ret = -1;
int fd = 0;
- char buf[PATH_MAX] = "";
+ char buf[PATH_MAX];
+ uint total_len = 0;
GF_ASSERT(snap);
@@ -1188,30 +1066,34 @@ glusterd_store_snapinfo_write(glusterd_snap_t *snap)
if (fd <= 0)
goto out;
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_SNAP_ID,
- uuid_utoa(snap->snap_id));
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", snap->snap_status);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_SNAP_STATUS, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", snap->snap_restored);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_SNAP_RESTORED, buf);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len,
+ "%s=%s\n%s=%d\n%s=%d\n", GLUSTERD_STORE_KEY_SNAP_ID,
+ uuid_utoa(snap->snap_id), GLUSTERD_STORE_KEY_SNAP_STATUS,
+ snap->snap_status, GLUSTERD_STORE_KEY_SNAP_RESTORED,
+ snap->snap_restored);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
if (snap->description) {
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_SNAP_DESC,
- snap->description);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%s\n",
+ GLUSTERD_STORE_KEY_SNAP_DESC, snap->description);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
}
- snprintf(buf, sizeof(buf), "%ld", snap->time_stamp);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_SNAP_TIMESTAMP, buf);
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%ld\n",
+ GLUSTERD_STORE_KEY_SNAP_TIMESTAMP, snap->time_stamp);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
+ goto out;
+ }
+ ret = gf_store_save_items(fd, buf);
out:
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
@@ -1374,112 +1256,34 @@ glusterd_store_create_snap_shandle_on_absence(glusterd_snap_t *snap)
return ret;
}
-int32_t
+static int32_t
glusterd_store_brickinfos(glusterd_volinfo_t *volinfo, int vol_fd)
{
int32_t ret = 0;
glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *ta_brickinfo = NULL;
int32_t brick_count = 0;
+ int32_t ta_brick_count = 0;
GF_ASSERT(volinfo);
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
- ret = glusterd_store_brickinfo(volinfo, brickinfo, brick_count, vol_fd);
+ ret = glusterd_store_brickinfo(volinfo, brickinfo, brick_count, vol_fd,
+ 0);
if (ret)
goto out;
brick_count++;
}
-out:
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
-}
-
-int
-_gd_store_rebalance_dict(dict_t *dict, char *key, data_t *value, void *data)
-{
- int ret = -1;
- int fd = 0;
-
- fd = *(int *)data;
-
- ret = gf_store_save_value(fd, key, value->data);
-
- return ret;
-}
-
-int32_t
-glusterd_store_state_tier_write(int fd, glusterd_volinfo_t *volinfo)
-{
- int ret = -1;
- char buf[PATH_MAX] = {
- 0,
- };
-
- GF_VALIDATE_OR_GOTO(THIS->name, (fd > 0), out);
- GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
-
- /*tier counter values are stored here. so that after restart
- * of glusterd tier resumes at the state is was brought down
- */
-
- if (volinfo->tier.defrag_cmd == GF_DEFRAG_CMD_STATUS) {
- ret = 0;
- goto out;
+ if (volinfo->thin_arbiter_count == 1) {
+ ta_brickinfo = list_first_entry(&volinfo->ta_bricks,
+ glusterd_brickinfo_t, brick_list);
+ ret = glusterd_store_brickinfo(volinfo, ta_brickinfo, ta_brick_count,
+ vol_fd, 1);
+ if (ret)
+ goto out;
}
- snprintf(buf, sizeof(buf), "%d", volinfo->tier.defrag_status);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_TIER_STATUS, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->tier.op);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_TIER_DETACH_OP, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->tier.rebalance_files);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_MIGRATED_FILES, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->tier.rebalance_data);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_MIGRATED_SIZE, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->tier.lookedup_files);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_MIGRATIONS_SCANNED,
- buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->tier.rebalance_failures);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_MIGRATIONS_FAILURES,
- buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->tier.skipped_files);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_MIGRATIONS_SKIPPED,
- buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%f", volinfo->tier.rebalance_time);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_MIGRATION_RUN_TIME,
- buf);
- if (ret)
- goto out;
-
- gf_uuid_unparse(volinfo->tier.rebalance_id, buf);
- ret = gf_store_save_value(fd, GF_TIER_TID_KEY, buf);
- if (ret)
- goto out;
-
- if (volinfo->tier.dict) {
- dict_foreach(volinfo->tier.dict, _gd_store_rebalance_dict, &fd);
- }
out:
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
@@ -1489,9 +1293,15 @@ int32_t
glusterd_store_node_state_write(int fd, glusterd_volinfo_t *volinfo)
{
int ret = -1;
- char buf[PATH_MAX] = {
- 0,
- };
+ char buf[PATH_MAX];
+ char uuid[UUID_SIZE + 1];
+ uint total_len = 0;
+ glusterd_volinfo_data_store_t *dict_data = NULL;
+ gf_store_handle_t shandle;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
GF_ASSERT(fd > 0);
GF_ASSERT(volinfo);
@@ -1501,61 +1311,63 @@ glusterd_store_node_state_write(int fd, glusterd_volinfo_t *volinfo)
goto out;
}
- snprintf(buf, sizeof(buf), "%d", volinfo->rebal.defrag_cmd);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DEFRAG, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->rebal.defrag_status);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DEFRAG_STATUS, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%d", volinfo->rebal.op);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_DEFRAG_OP, buf);
- if (ret)
- goto out;
-
- gf_uuid_unparse(volinfo->rebal.rebalance_id, buf);
- ret = gf_store_save_value(fd, GF_REBALANCE_TID_KEY, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->rebal.rebalance_files);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DEFRAG_REB_FILES, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->rebal.rebalance_data);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DEFRAG_SIZE, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->rebal.lookedup_files);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DEFRAG_SCANNED, buf);
- if (ret)
- goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->rebal.rebalance_failures);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DEFRAG_FAILURES, buf);
- if (ret)
+ gf_uuid_unparse(volinfo->rebal.rebalance_id, uuid);
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len,
+ "%s=%d\n%s=%d\n%s=%d\n%s=%s\n",
+ GLUSTERD_STORE_KEY_VOL_DEFRAG, volinfo->rebal.defrag_cmd,
+ GLUSTERD_STORE_KEY_VOL_DEFRAG_STATUS,
+ volinfo->rebal.defrag_status, GLUSTERD_STORE_KEY_DEFRAG_OP,
+ volinfo->rebal.op, GF_REBALANCE_TID_KEY, uuid);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
-
- snprintf(buf, sizeof(buf), "%" PRIu64, volinfo->rebal.skipped_files);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DEFRAG_SKIPPED, buf);
- if (ret)
+ }
+ total_len += ret;
+
+ ret = snprintf(
+ buf + total_len, sizeof(buf) - total_len,
+ "%s=%" PRIu64 "\n%s=%" PRIu64 "\n%s=%" PRIu64 "\n%s=%" PRIu64
+ "\n%s=%" PRIu64 "\n%s=%lf\n",
+ GLUSTERD_STORE_KEY_VOL_DEFRAG_REB_FILES, volinfo->rebal.rebalance_files,
+ GLUSTERD_STORE_KEY_VOL_DEFRAG_SIZE, volinfo->rebal.rebalance_data,
+ GLUSTERD_STORE_KEY_VOL_DEFRAG_SCANNED, volinfo->rebal.lookedup_files,
+ GLUSTERD_STORE_KEY_VOL_DEFRAG_FAILURES,
+ volinfo->rebal.rebalance_failures,
+ GLUSTERD_STORE_KEY_VOL_DEFRAG_SKIPPED, volinfo->rebal.skipped_files,
+ GLUSTERD_STORE_KEY_VOL_DEFRAG_RUN_TIME, volinfo->rebal.rebalance_time);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
- snprintf(buf, sizeof(buf), "%lf", volinfo->rebal.rebalance_time);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DEFRAG_RUN_TIME, buf);
- if (ret)
+ ret = gf_store_save_items(fd, buf);
+ if (ret) {
goto out;
+ }
if (volinfo->rebal.dict) {
- dict_foreach(volinfo->rebal.dict, _gd_store_rebalance_dict, &fd);
+ dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t),
+ gf_gld_mt_volinfo_dict_data_t);
+ if (dict_data == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL);
+            ret = -1;
+            goto out;
+ }
+ dict_data->shandle = &shandle;
+ shandle.fd = fd;
+ dict_foreach(volinfo->rebal.dict, _storeopts, (void *)dict_data);
+ if (dict_data->buffer_len > 0) {
+ ret = gf_store_save_items(fd, dict_data->buffer);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED,
+ NULL);
+ goto out;
+ }
+ }
}
out:
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ GF_FREE(dict_data);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -1576,12 +1388,6 @@ glusterd_store_perform_node_state_store(glusterd_volinfo_t *volinfo)
if (ret)
goto out;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_store_state_tier_write(fd, volinfo);
- if (ret)
- goto out;
- }
-
ret = gf_store_rename_tmppath(volinfo->node_state_shandle);
if (ret)
goto out;
@@ -1593,7 +1399,7 @@ out:
return ret;
}
-int32_t
+static int32_t
glusterd_store_perform_volume_store(glusterd_volinfo_t *volinfo)
{
int fd = -1;
@@ -1610,6 +1416,10 @@ glusterd_store_perform_volume_store(glusterd_volinfo_t *volinfo)
if (ret)
goto out;
+ ret = glusterd_store_create_brick_dir(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_store_brickinfos(volinfo, fd);
if (ret)
goto out;
@@ -1671,6 +1481,7 @@ glusterd_store_brickinfos_atomic_update(glusterd_volinfo_t *volinfo)
{
int ret = -1;
glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *ta_brickinfo = NULL;
GF_ASSERT(volinfo);
@@ -1680,6 +1491,15 @@ glusterd_store_brickinfos_atomic_update(glusterd_volinfo_t *volinfo)
if (ret)
goto out;
}
+
+ if (volinfo->thin_arbiter_count == 1) {
+ ta_brickinfo = list_first_entry(&volinfo->ta_bricks,
+ glusterd_brickinfo_t, brick_list);
+ ret = gf_store_rename_tmppath(ta_brickinfo->shandle);
+ if (ret)
+ goto out;
+ }
+
out:
return ret;
}
@@ -1782,17 +1602,21 @@ glusterd_store_volinfo(glusterd_volinfo_t *volinfo,
glusterd_volinfo_ver_ac_t ac)
{
int32_t ret = -1;
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
+ ctx = this->ctx;
+ GF_ASSERT(ctx);
GF_ASSERT(volinfo);
+ pthread_mutex_lock(&ctx->cleanup_lock);
pthread_mutex_lock(&volinfo->store_volinfo_lock);
{
glusterd_perform_volinfo_version_action(volinfo, ac);
- ret = glusterd_store_create_volume_dir(volinfo);
- if (ret)
- goto unlock;
- ret = glusterd_store_create_volume_run_dir(volinfo);
+ ret = glusterd_store_create_volume_dirs(volinfo);
if (ret)
goto unlock;
@@ -1826,6 +1650,8 @@ glusterd_store_volinfo(glusterd_volinfo_t *volinfo,
}
unlock:
pthread_mutex_unlock(&volinfo->store_volinfo_lock);
+ pthread_mutex_unlock(&ctx->cleanup_lock);
+
if (ret)
glusterd_store_volume_cleanup_tmp(volinfo);
@@ -1875,7 +1701,7 @@ glusterd_store_delete_volume(glusterd_volinfo_t *volinfo)
goto out;
}
- ret = sys_mkdir(trashdir, 0777);
+ ret = sys_mkdir(trashdir, 0755);
if (ret && errno != EEXIST) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
"Failed to create trash "
@@ -1962,7 +1788,7 @@ glusterd_store_delete_snap(glusterd_snap_t *snap)
goto out;
}
- ret = sys_mkdir(trashdir, 0777);
+ ret = sys_mkdir(trashdir, 0755);
if (ret && errno != EEXIST) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
"Failed to create trash "
@@ -1987,8 +1813,9 @@ glusterd_store_delete_snap(glusterd_snap_t *snap)
goto out;
}
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
- while (entry) {
+ while ((entry = sys_readdir(dir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
len = snprintf(path, PATH_MAX, "%s/%s", delete_path, entry->d_name);
if ((len < 0) || (len >= PATH_MAX)) {
goto stat_failed;
@@ -2018,7 +1845,6 @@ glusterd_store_delete_snap(glusterd_snap_t *snap)
ret ? "Failed to remove" : "Removed", entry->d_name);
stat_failed:
memset(path, 0, sizeof(path));
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
}
ret = sys_closedir(dir);
@@ -2051,15 +1877,10 @@ glusterd_store_global_info(xlator_t *this)
{
int ret = -1;
glusterd_conf_t *conf = NULL;
- char op_version_str[15] = {
- 0,
- };
- char path[PATH_MAX] = {
- 0,
- };
+ char buf[PATH_MAX];
+ uint total_len = 0;
gf_store_handle_t *handle = NULL;
char *uuid_str = NULL;
- int32_t len = 0;
conf = this->private;
@@ -2068,12 +1889,13 @@ glusterd_store_global_info(xlator_t *this)
goto out;
if (!conf->handle) {
- len = snprintf(path, PATH_MAX, "%s/%s", conf->workdir,
+ ret = snprintf(buf, sizeof(buf), "%s/%s", conf->workdir,
GLUSTERD_INFO_FILE);
- if ((len < 0) || (len >= PATH_MAX)) {
+ if ((ret < 0) || (ret >= sizeof(buf))) {
+ ret = -1;
goto out;
}
- ret = gf_store_handle_new(path, &handle);
+ ret = gf_store_handle_new(buf, &handle);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_GET_FAIL,
"Unable to get store handle");
@@ -2093,39 +1915,38 @@ glusterd_store_global_info(xlator_t *this)
}
handle->fd = gf_store_mkstemp(handle);
- if (handle->fd <= 0) {
+ if (handle->fd < 0) {
ret = -1;
goto out;
}
- pthread_mutex_lock(&conf->mutex);
- {
- ret = gf_store_save_value(handle->fd, GLUSTERD_STORE_UUID_KEY,
- uuid_str);
+
+ ret = snprintf(buf, sizeof(buf), "%s=%s\n", GLUSTERD_STORE_UUID_KEY,
+ uuid_str);
+ if (ret < 0 || ret >= sizeof(buf)) {
+ ret = -1;
+ goto out;
}
- pthread_mutex_unlock(&conf->mutex);
- if (ret) {
- gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_UUID_SET_FAIL,
- "Storing uuid failed ret = %d", ret);
+ total_len += ret;
+
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%d\n",
+ GD_OP_VERSION_KEY, conf->op_version);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
}
- snprintf(op_version_str, 15, "%d", conf->op_version);
- ret = gf_store_save_value(handle->fd, GD_OP_VERSION_KEY, op_version_str);
+ ret = gf_store_save_items(handle->fd, buf);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
- "Storing op-version failed ret = %d", ret);
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_OP_VERS_STORE_FAIL,
+ "Storing glusterd global-info failed ret = %d", ret);
goto out;
}
ret = gf_store_rename_tmppath(handle);
out:
if (handle) {
- if (ret && (handle->fd > 0))
+ if (ret && (handle->fd >= 0))
gf_store_unlink_tmppath(handle);
-
- if (handle->fd > 0) {
- handle->fd = 0;
- }
}
if (uuid_str)
@@ -2140,7 +1961,74 @@ out:
}
int
-glusterd_retrieve_op_version(xlator_t *this, int *op_version)
+glusterd_store_max_op_version(xlator_t *this)
+{
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ char op_version_str[15] = {
+ 0,
+ };
+ char path[PATH_MAX] = {
+ 0,
+ };
+ gf_store_handle_t *handle = NULL;
+ int32_t len = 0;
+
+ conf = this->private;
+
+ len = snprintf(path, PATH_MAX, "%s/%s", conf->workdir,
+ GLUSTERD_UPGRADE_FILE);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ goto out;
+ }
+ ret = gf_store_handle_new(path, &handle);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_GET_FAIL,
+ "Unable to get store handle");
+ goto out;
+ }
+
+ /* These options need to be available for all users */
+ ret = sys_chmod(handle->path, 0644);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "chmod error for %s", GLUSTERD_UPGRADE_FILE);
+ goto out;
+ }
+
+ handle->fd = gf_store_mkstemp(handle);
+ if (handle->fd < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf(op_version_str, sizeof(op_version_str), "%d", GD_OP_VERSION_MAX);
+ ret = gf_store_save_value(handle->fd, GD_MAX_OP_VERSION_KEY,
+ op_version_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
+ "Storing op-version failed ret = %d", ret);
+ goto out;
+ }
+
+ ret = gf_store_rename_tmppath(handle);
+out:
+ if (handle) {
+ if (ret && (handle->fd >= 0))
+ gf_store_unlink_tmppath(handle);
+ }
+
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_GLUSTERD_GLOBAL_INFO_STORE_FAIL,
+ "Failed to store max op-version");
+ if (handle)
+ gf_store_handle_destroy(handle);
+ return ret;
+}
+
+int
+glusterd_retrieve_max_op_version(xlator_t *this, int *op_version)
{
char *op_version_str = NULL;
glusterd_conf_t *priv = NULL;
@@ -2155,25 +2043,21 @@ glusterd_retrieve_op_version(xlator_t *this, int *op_version)
priv = this->private;
- if (!priv->handle) {
- len = snprintf(path, PATH_MAX, "%s/%s", priv->workdir,
- GLUSTERD_INFO_FILE);
- if ((len < 0) || (len >= PATH_MAX)) {
- goto out;
- }
- ret = gf_store_handle_retrieve(path, &handle);
-
- if (ret) {
- gf_msg_debug(this->name, 0,
- "Unable to get store "
- "handle!");
- goto out;
- }
+ len = snprintf(path, PATH_MAX, "%s/%s", priv->workdir,
+ GLUSTERD_UPGRADE_FILE);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ goto out;
+ }
+ ret = gf_store_handle_retrieve(path, &handle);
- priv->handle = handle;
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Unable to get store "
+ "handle!");
+ goto out;
}
- ret = gf_store_retrieve_value(priv->handle, GD_OP_VERSION_KEY,
+ ret = gf_store_retrieve_value(handle, GD_MAX_OP_VERSION_KEY,
&op_version_str);
if (ret) {
gf_msg_debug(this->name, 0, "No previous op_version present");
@@ -2193,17 +2077,18 @@ glusterd_retrieve_op_version(xlator_t *this, int *op_version)
out:
if (op_version_str)
GF_FREE(op_version_str);
-
+ if (handle)
+ gf_store_handle_destroy(handle);
return ret;
}
int
-glusterd_retrieve_sys_snap_max_limit(xlator_t *this, uint64_t *limit, char *key)
+glusterd_retrieve_op_version(xlator_t *this, int *op_version)
{
- char *limit_str = NULL;
+ char *op_version_str = NULL;
glusterd_conf_t *priv = NULL;
int ret = -1;
- uint64_t tmp_limit = 0;
+ int tmp_version = 0;
char *tmp = NULL;
char path[PATH_MAX] = {
0,
@@ -2211,13 +2096,8 @@ glusterd_retrieve_sys_snap_max_limit(xlator_t *this, uint64_t *limit, char *key)
gf_store_handle_t *handle = NULL;
int32_t len = 0;
- GF_ASSERT(this);
priv = this->private;
- GF_ASSERT(priv);
- GF_ASSERT(limit);
- GF_ASSERT(key);
-
if (!priv->handle) {
len = snprintf(path, PATH_MAX, "%s/%s", priv->workdir,
GLUSTERD_INFO_FILE);
@@ -2236,25 +2116,26 @@ glusterd_retrieve_sys_snap_max_limit(xlator_t *this, uint64_t *limit, char *key)
priv->handle = handle;
}
- ret = gf_store_retrieve_value(priv->handle, key, &limit_str);
+ ret = gf_store_retrieve_value(priv->handle, GD_OP_VERSION_KEY,
+ &op_version_str);
if (ret) {
- gf_msg_debug(this->name, 0, "No previous %s present", key);
+ gf_msg_debug(this->name, 0, "No previous op_version present");
goto out;
}
- tmp_limit = strtoul(limit_str, &tmp, 10);
- if ((tmp_limit <= 0) || (tmp && strlen(tmp) > 1)) {
+ tmp_version = strtol(op_version_str, &tmp, 10);
+ if ((tmp_version <= 0) || (tmp && strlen(tmp) > 1)) {
gf_msg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_UNSUPPORTED_VERSION,
"invalid version number");
goto out;
}
- *limit = tmp_limit;
+ *op_version = tmp_version;
ret = 0;
out:
- if (limit_str)
- GF_FREE(limit_str);
+ if (op_version_str)
+ GF_FREE(op_version_str);
return ret;
}
@@ -2457,17 +2338,15 @@ glusterd_store_retrieve_snapd(glusterd_volinfo_t *volinfo)
if (op_errno != GD_STORE_EOF)
goto out;
- ret = gf_store_iter_destroy(iter);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
- "Failed to destroy store "
- "iter");
- goto out;
- }
-
ret = 0;
out:
+ if (gf_store_iter_destroy(&iter)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
+ "Failed to destroy store iter");
+ ret = -1;
+ }
+
return ret;
}
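gf_store_iter_destroy() now takes a gf_store_iter_t ** rather than the iterator itself, and the call sites above imply it is NULL-safe and resets the caller's pointer, which lets every function fold destruction into its out: label. A sketch of the resulting caller shape (loop body elided):

    static int
    iterate_store_sketch(gf_store_handle_t *shandle)
    {
        gf_store_iter_t *iter = NULL;
        int ret;

        ret = gf_store_iter_new(shandle, &iter);
        if (ret)
            goto out;
        /* ... gf_store_iter_get_next() loop ... */
        ret = 0;
    out:
        /* reached with iter == NULL on early failure; destroy frees
         * the iterator and resets the pointer */
        if (gf_store_iter_destroy(&iter))
            ret = -1;
        return ret;
    }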
@@ -2476,6 +2355,7 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
{
int32_t ret = 0;
glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *ta_brickinfo = NULL;
gf_store_iter_t *iter = NULL;
char *key = NULL;
char *value = NULL;
@@ -2487,7 +2367,8 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
};
glusterd_conf_t *priv = NULL;
int32_t brick_count = 0;
- char tmpkey[4096] = {
+ int32_t ta_brick_count = 0;
+ char tmpkey[32] = {
0,
};
gf_store_iter_t *tmpiter = NULL;
@@ -2496,6 +2377,10 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
struct pmap_registry *pmap = NULL;
xlator_t *this = NULL;
int brickid = 0;
+    /* ta_brick_id starts at 2 because the two data bricks of each
+     * subvolume take the first two client ids, so thin-arbiter brick
+     * ids run volname-ta-2, volname-ta-5, ... (step of 3)
+     */
+ int ta_brick_id = 2;
gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
int32_t len = 0;
@@ -2577,7 +2462,13 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
}
} else if (!strncmp(key, GLUSTERD_STORE_KEY_BRICK_PORT,
SLEN(GLUSTERD_STORE_KEY_BRICK_PORT))) {
- gf_string2int(value, &brickinfo->port);
+ ret = gf_string2int(value, &brickinfo->port);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE,
+ "Failed to convert "
+ "string to integer");
+ }
if (brickinfo->port < priv->base_port) {
/* This is required to adhere to the
@@ -2592,7 +2483,13 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
}
} else if (!strncmp(key, GLUSTERD_STORE_KEY_BRICK_RDMA_PORT,
SLEN(GLUSTERD_STORE_KEY_BRICK_RDMA_PORT))) {
- gf_string2int(value, &brickinfo->rdma_port);
+ ret = gf_string2int(value, &brickinfo->rdma_port);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE,
+ "Failed to convert "
+ "string to integer");
+ }
if (brickinfo->rdma_port < priv->base_port) {
/* This is required to adhere to the
@@ -2714,10 +2611,6 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
op_errno);
goto out;
}
- ret = gf_store_iter_destroy(iter);
-
- if (ret)
- goto out;
if (brickinfo->brick_id[0] == '\0') {
/* This is an old volume upgraded to op_version 4 */
@@ -2748,7 +2641,12 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
ret = -1;
goto out;
}
- strncpy(brickinfo->real_path, abspath, strlen(abspath));
+ if (strlen(abspath) >= sizeof(brickinfo->real_path)) {
+ ret = -1;
+ goto out;
+ }
+ (void)strncpy(brickinfo->real_path, abspath,
+ sizeof(brickinfo->real_path));
}
}
@@ -2767,19 +2665,208 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
brickinfo->path);
/* No need for treating it as an error, lets continue
with just a message */
+ } else {
+ brickinfo->statfs_fsid = brickstat.f_fsid;
}
- brickinfo->statfs_fsid = brickstat.f_fsid;
}
cds_list_add_tail(&brickinfo->brick_list, &volinfo->bricks);
brick_count++;
}
- assign_brick_groups(volinfo);
- ret = gf_store_iter_destroy(tmpiter);
+ if (gf_store_iter_destroy(&tmpiter)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
+ "Failed to destroy store iter");
+ ret = -1;
+ goto out;
+ }
+
+ ret = gf_store_iter_new(volinfo->shandle, &tmpiter);
+
if (ret)
goto out;
+
+ if (volinfo->thin_arbiter_count == 1) {
+ snprintf(tmpkey, sizeof(tmpkey), "%s-%d",
+ GLUSTERD_STORE_KEY_VOL_TA_BRICK, 0);
+ while (ta_brick_count < volinfo->subvol_count) {
+ ret = glusterd_brickinfo_new(&ta_brickinfo);
+ if (ret)
+ goto out;
+
+            ret = gf_store_iter_get_matching(tmpiter, tmpkey, &tmpvalue);
+            if (ret)
+                goto out;
+
+ len = snprintf(path, sizeof(path), "%s/%s", brickdir, tmpvalue);
+ GF_FREE(tmpvalue);
+ tmpvalue = NULL;
+ if ((len < 0) || (len >= sizeof(path))) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = gf_store_handle_retrieve(path, &ta_brickinfo->shandle);
+
+ if (ret)
+ goto out;
+
+ ret = gf_store_iter_new(ta_brickinfo->shandle, &iter);
+
+ if (ret)
+ goto out;
+
+ ret = gf_store_iter_get_next(iter, &key, &value, &op_errno);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, op_errno,
+ GD_MSG_STORE_ITER_GET_FAIL,
+ "Unable to iterate "
+ "the store for brick: %s",
+ path);
+ goto out;
+ }
+
+ while (!ret) {
+ if (!strncmp(key, GLUSTERD_STORE_KEY_BRICK_HOSTNAME,
+ SLEN(GLUSTERD_STORE_KEY_BRICK_HOSTNAME))) {
+ if (snprintf(ta_brickinfo->hostname,
+ sizeof(ta_brickinfo->hostname), "%s",
+ value) >= sizeof(ta_brickinfo->hostname)) {
+ gf_msg("glusterd", GF_LOG_ERROR, op_errno,
+ GD_MSG_PARSE_BRICKINFO_FAIL,
+ "brick hostname truncated: %s",
+ ta_brickinfo->hostname);
+ goto out;
+ }
+ } else if (!strncmp(key, GLUSTERD_STORE_KEY_BRICK_PATH,
+ SLEN(GLUSTERD_STORE_KEY_BRICK_PATH))) {
+ if (snprintf(ta_brickinfo->path, sizeof(ta_brickinfo->path),
+ "%s", value) >= sizeof(ta_brickinfo->path)) {
+ gf_msg("glusterd", GF_LOG_ERROR, op_errno,
+ GD_MSG_PARSE_BRICKINFO_FAIL,
+ "brick path truncated: %s", ta_brickinfo->path);
+ goto out;
+ }
+ } else if (!strncmp(key, GLUSTERD_STORE_KEY_BRICK_REAL_PATH,
+ SLEN(GLUSTERD_STORE_KEY_BRICK_REAL_PATH))) {
+ if (snprintf(ta_brickinfo->real_path,
+ sizeof(ta_brickinfo->real_path), "%s",
+ value) >= sizeof(ta_brickinfo->real_path)) {
+ gf_msg("glusterd", GF_LOG_ERROR, op_errno,
+ GD_MSG_PARSE_BRICKINFO_FAIL,
+ "real_path truncated: %s",
+ ta_brickinfo->real_path);
+ goto out;
+ }
+ } else if (!strncmp(key, GLUSTERD_STORE_KEY_BRICK_PORT,
+ SLEN(GLUSTERD_STORE_KEY_BRICK_PORT))) {
+ ret = gf_string2int(value, &ta_brickinfo->port);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE,
+ "Failed to convert "
+ "string to integer");
+ }
+
+ if (ta_brickinfo->port < priv->base_port) {
+ /* This is required to adhere to the
+ IANA standards */
+ ta_brickinfo->port = 0;
+ }
+ } else if (!strncmp(key, GLUSTERD_STORE_KEY_BRICK_RDMA_PORT,
+ SLEN(GLUSTERD_STORE_KEY_BRICK_RDMA_PORT))) {
+ ret = gf_string2int(value, &ta_brickinfo->rdma_port);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE,
+ "Failed to convert "
+ "string to integer");
+ }
+
+ if (ta_brickinfo->rdma_port < priv->base_port) {
+ /* This is required to adhere to the
+ IANA standards */
+ ta_brickinfo->rdma_port = 0;
+ }
+ } else if (!strncmp(
+ key, GLUSTERD_STORE_KEY_BRICK_DECOMMISSIONED,
+ SLEN(GLUSTERD_STORE_KEY_BRICK_DECOMMISSIONED))) {
+ ret = gf_string2int(value, &ta_brickinfo->decommissioned);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE,
+ "Failed to convert "
+ "string to integer");
+ }
+
+ } else if (!strcmp(key, GLUSTERD_STORE_KEY_BRICK_ID)) {
+ if (snprintf(ta_brickinfo->brick_id,
+ sizeof(ta_brickinfo->brick_id), "%s",
+ value) >= sizeof(ta_brickinfo->brick_id)) {
+ gf_msg("glusterd", GF_LOG_ERROR, op_errno,
+ GD_MSG_PARSE_BRICKINFO_FAIL,
+ "brick_id truncated: %s",
+ ta_brickinfo->brick_id);
+ goto out;
+ }
+ } else if (!strncmp(key, GLUSTERD_STORE_KEY_BRICK_FSID,
+ SLEN(GLUSTERD_STORE_KEY_BRICK_FSID))) {
+ ret = gf_string2uint64(value, &ta_brickinfo->statfs_fsid);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_INVALID_ENTRY,
+ "%s "
+ "is not a valid uint64_t value",
+ value);
+ }
+ } else if (!strcmp(key, GLUSTERD_STORE_KEY_BRICK_UUID)) {
+                    gf_uuid_parse(value, ta_brickinfo->uuid);
+ } else if (!strncmp(
+ key, GLUSTERD_STORE_KEY_BRICK_SNAP_STATUS,
+ SLEN(GLUSTERD_STORE_KEY_BRICK_SNAP_STATUS))) {
+ ret = gf_string2int(value, &ta_brickinfo->snap_status);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE,
+ "Failed to convert "
+ "string to integer");
+ }
+
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_KEY,
+ "Unknown key: %s", key);
+ }
+
+ GF_FREE(key);
+ GF_FREE(value);
+ key = NULL;
+ value = NULL;
+ ret = gf_store_iter_get_next(iter, &key, &value, &op_errno);
+            }
+
+            if (gf_store_iter_destroy(&iter)) {
+                gf_msg(this->name, GF_LOG_ERROR, 0,
+                       GD_MSG_STORE_ITER_DESTROY_FAIL,
+                       "Failed to destroy store iter");
+                ret = -1;
+                goto out;
+            }
+
+ GLUSTERD_ASSIGN_BRICKID_TO_TA_BRICKINFO(ta_brickinfo, volinfo,
+ ta_brick_id);
+ ta_brick_id += 3;
+
+ cds_list_add_tail(&ta_brickinfo->brick_list, &volinfo->ta_bricks);
+ ta_brick_count++;
+ }
+ }
+
+ assign_brick_groups(volinfo);
+ ret = 0;
+
out:
+ if (gf_store_iter_destroy(&tmpiter)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
+ "Failed to destroy store iter");
+ ret = -1;
+ }
+
+ if (gf_store_iter_destroy(&iter)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
+ "Failed to destroy store iter");
+ ret = -1;
+ }
+
gf_msg_debug(this->name, 0, "Returning with %d", ret);
return ret;
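The ta_brick_id bookkeeping above starts at 2 and steps by 3 because, in the replica-2 geometry that thin arbiter requires, each subvolume's two data bricks take two consecutive client ids and the thin-arbiter brick takes the next one (volname-ta-2, volname-ta-5, ...). A one-line restatement with an illustrative helper name:

    static int
    ta_brick_id_for_subvol(int subvol)
    {
        /* data bricks of subvolume n hold ids 3n and 3n+1 */
        return (3 * subvol) + 2; /* matches ta_brick_id = 2, += 3 */
    }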
@@ -2828,6 +2915,7 @@ glusterd_store_retrieve_node_state(glusterd_volinfo_t *volinfo)
goto out;
ret = gf_store_iter_get_next(iter, &key, &value, &op_errno);
+
if (ret)
goto out;
@@ -2846,52 +2934,22 @@ glusterd_store_retrieve_node_state(glusterd_volinfo_t *volinfo)
volinfo->rebal.op = atoi(value);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_DEFRAG_REB_FILES,
SLEN(GLUSTERD_STORE_KEY_VOL_DEFRAG_REB_FILES))) {
- volinfo->rebal.rebalance_files = atoi(value);
+ sscanf(value, "%" PRIu64, &volinfo->rebal.rebalance_files);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_DEFRAG_SIZE,
SLEN(GLUSTERD_STORE_KEY_VOL_DEFRAG_SIZE))) {
- volinfo->rebal.rebalance_data = atoi(value);
+ sscanf(value, "%" PRIu64, &volinfo->rebal.rebalance_data);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_DEFRAG_SCANNED,
SLEN(GLUSTERD_STORE_KEY_VOL_DEFRAG_SCANNED))) {
- volinfo->rebal.lookedup_files = atoi(value);
+ sscanf(value, "%" PRIu64, &volinfo->rebal.lookedup_files);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_DEFRAG_FAILURES,
SLEN(GLUSTERD_STORE_KEY_VOL_DEFRAG_FAILURES))) {
- volinfo->rebal.rebalance_failures = atoi(value);
+ sscanf(value, "%" PRIu64, &volinfo->rebal.rebalance_failures);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_DEFRAG_SKIPPED,
SLEN(GLUSTERD_STORE_KEY_VOL_DEFRAG_SKIPPED))) {
- volinfo->rebal.skipped_files = atoi(value);
+ sscanf(value, "%" PRIu64, &volinfo->rebal.skipped_files);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_DEFRAG_RUN_TIME,
SLEN(GLUSTERD_STORE_KEY_VOL_DEFRAG_RUN_TIME))) {
volinfo->rebal.rebalance_time = atoi(value);
-
- /* if none of the above keys match then its related to tier
- * so we get the values and store it on volinfo->tier
- */
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_TIER_STATUS,
- SLEN(GLUSTERD_STORE_KEY_VOL_TIER_STATUS))) {
- volinfo->tier.defrag_status = atoi(value);
- } else if (!strncmp(key, GF_TIER_TID_KEY, SLEN(GF_TIER_TID_KEY))) {
- gf_uuid_parse(value, volinfo->tier.rebalance_id);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_TIER_DETACH_OP,
- SLEN(GLUSTERD_STORE_KEY_TIER_DETACH_OP))) {
- volinfo->tier.op = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_MIGRATED_FILES,
- SLEN(GLUSTERD_STORE_KEY_VOL_MIGRATED_FILES))) {
- volinfo->tier.rebalance_files = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_MIGRATED_SIZE,
- SLEN(GLUSTERD_STORE_KEY_VOL_MIGRATED_SIZE))) {
- volinfo->tier.rebalance_data = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_MIGRATIONS_SCANNED,
- SLEN(GLUSTERD_STORE_KEY_VOL_MIGRATIONS_SCANNED))) {
- volinfo->tier.lookedup_files = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_MIGRATIONS_FAILURES,
- SLEN(GLUSTERD_STORE_KEY_VOL_MIGRATIONS_FAILURES))) {
- volinfo->tier.rebalance_failures = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_MIGRATIONS_SKIPPED,
- SLEN(GLUSTERD_STORE_KEY_VOL_MIGRATIONS_SKIPPED))) {
- volinfo->tier.skipped_files = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_MIGRATION_RUN_TIME,
- SLEN(GLUSTERD_STORE_KEY_VOL_MIGRATION_RUN_TIME))) {
- volinfo->tier.rebalance_time = atoi(value);
} else {
if (!tmp_dict) {
tmp_dict = dict_new();
@@ -2925,10 +2983,7 @@ glusterd_store_retrieve_node_state(glusterd_volinfo_t *volinfo)
ret = gf_store_iter_get_next(iter, &key, &value, &op_errno);
}
if (tmp_dict) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- volinfo->tier.dict = dict_ref(tmp_dict);
- else
- volinfo->rebal.dict = dict_ref(tmp_dict);
+ volinfo->rebal.dict = dict_ref(tmp_dict);
}
if (op_errno != GD_STORE_EOF) {
@@ -2936,19 +2991,20 @@ glusterd_store_retrieve_node_state(glusterd_volinfo_t *volinfo)
goto out;
}
- ret = gf_store_iter_destroy(iter);
-
- if (ret)
- goto out;
+ ret = 0;
out:
+ if (gf_store_iter_destroy(&iter)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
+ "Failed to destroy store iter");
+ ret = -1;
+ }
+
if (dup_value)
GF_FREE(dup_value);
if (ret) {
if (volinfo->rebal.dict)
dict_unref(volinfo->rebal.dict);
- else if (volinfo->tier.dict)
- dict_unref(volinfo->tier.dict);
}
if (tmp_dict)
dict_unref(tmp_dict);
@@ -3041,6 +3097,8 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
volinfo->replica_count = atoi(value);
} else if (!strcmp(key, GLUSTERD_STORE_KEY_VOL_ARBITER_CNT)) {
volinfo->arbiter_count = atoi(value);
+ } else if (!strcmp(key, GLUSTERD_STORE_KEY_VOL_THIN_ARBITER_CNT)) {
+ volinfo->thin_arbiter_count = atoi(value);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT,
SLEN(GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT))) {
volinfo->disperse_count = atoi(value);
@@ -3085,9 +3143,6 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
SLEN(GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION))) {
volinfo->client_op_version = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_CAPS,
- SLEN(GLUSTERD_STORE_KEY_VOL_CAPS))) {
- volinfo->caps = atoi(value);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT,
SLEN(GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT))) {
volinfo->snap_max_hard_limit = (uint64_t)atoll(value);
@@ -3107,28 +3162,6 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
"parent_volname truncated: %s", volinfo->parent_volname);
goto out;
}
- } else if (!strncmp(key, GF_TIER_ENABLED, SLEN(GF_TIER_ENABLED))) {
- volinfo->is_tier_enabled = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_COLD_COUNT, strlen(key))) {
- volinfo->tier_info.cold_brick_count = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_COLD_REPLICA_COUNT,
- strlen(key))) {
- volinfo->tier_info.cold_replica_count = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_COLD_DISPERSE_COUNT,
- strlen(key))) {
- volinfo->tier_info.cold_disperse_count = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_COLD_REDUNDANCY_COUNT,
- strlen(key))) {
- volinfo->tier_info.cold_redundancy_count = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_HOT_COUNT, strlen(key))) {
- volinfo->tier_info.hot_brick_count = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_HOT_REPLICA_COUNT,
- strlen(key))) {
- volinfo->tier_info.hot_replica_count = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_HOT_TYPE, strlen(key))) {
- volinfo->tier_info.hot_type = atoi(value);
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_COLD_TYPE, strlen(key))) {
- volinfo->tier_info.cold_type = atoi(value);
} else if (!strncmp(key, GLUSTERD_STORE_KEY_VOL_QUOTA_VERSION,
SLEN(GLUSTERD_STORE_KEY_VOL_QUOTA_VERSION))) {
volinfo->quota_xattr_version = atoi(value);
@@ -3147,8 +3180,11 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
case 0:
/*Ignore GLUSTERD_STORE_KEY_VOL_BRICK since
- glusterd_store_retrieve_bricks gets it later*/
- if (!strstr(key, GLUSTERD_STORE_KEY_VOL_BRICK))
+ glusterd_store_retrieve_bricks gets it later.
+            Also ignore the tier-enabled key, as the tier
+            xlator has been deprecated. */
+            if (!strstr(key, GLUSTERD_STORE_KEY_VOL_BRICK) &&
+                !strstr(key, GF_TIER_ENABLED))
gf_msg(this->name, GF_LOG_WARNING, 0,
GD_MSG_UNKNOWN_KEY, "Unknown key: %s", key);
break;
@@ -3194,36 +3230,23 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
volinfo->replica_count = 1;
break;
- case GF_CLUSTER_TYPE_STRIPE:
- volinfo->stripe_count = volinfo->sub_count;
- volinfo->replica_count = 1;
- break;
-
case GF_CLUSTER_TYPE_REPLICATE:
volinfo->stripe_count = 1;
volinfo->replica_count = volinfo->sub_count;
break;
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
- /* Introduced in 3.3 */
- GF_ASSERT(volinfo->stripe_count > 0);
- GF_ASSERT(volinfo->replica_count > 0);
- break;
-
case GF_CLUSTER_TYPE_DISPERSE:
GF_ASSERT(volinfo->disperse_count > 0);
GF_ASSERT(volinfo->redundancy_count > 0);
break;
- case GF_CLUSTER_TYPE_TIER:
- if (volinfo->tier_info.cold_type == GF_CLUSTER_TYPE_DISPERSE)
- volinfo->tier_info
- .cold_dist_leaf_count = volinfo->disperse_count;
- else
- volinfo->tier_info
- .cold_dist_leaf_count = glusterd_calc_dist_leaf_count(
- volinfo->tier_info.cold_replica_count, 1);
-
+ case GF_CLUSTER_TYPE_STRIPE:
+ case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
+ gf_msg(this->name, GF_LOG_CRITICAL, ENOTSUP,
+ GD_MSG_VOLINFO_STORE_FAIL,
+ "The volume type is no more supported. Please refer to "
+ "glusterfs-6.0 release-notes for how to migrate from "
+ "this volume type");
break;
default:
@@ -3244,16 +3267,15 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
if (op_errno != GD_STORE_EOF)
goto out;
- ret = gf_store_iter_destroy(iter);
- if (ret) {
+ ret = 0;
+
+out:
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
- "Failed to destroy store "
- "iter");
- goto out;
+ "Failed to destroy store iter");
+ ret = -1;
}
- ret = 0;
-out:
return ret;
}
@@ -3353,15 +3375,6 @@ glusterd_store_set_options_path(glusterd_conf_t *conf, char *path, size_t len)
snprintf(path, len, "%s/options", conf->workdir);
}
-int
-_store_global_opts(dict_t *this, char *key, data_t *value, void *data)
-{
- gf_store_handle_t *shandle = data;
-
- gf_store_save_value(shandle->fd, key, (char *)value->data);
- return 0;
-}
-
int32_t
glusterd_store_options(xlator_t *this, dict_t *opts)
{
@@ -3370,13 +3383,15 @@ glusterd_store_options(xlator_t *this, dict_t *opts)
char path[PATH_MAX] = {0};
int fd = -1;
int32_t ret = -1;
+ glusterd_volinfo_data_store_t *dict_data = NULL;
conf = this->private;
glusterd_store_set_options_path(conf, path, sizeof(path));
ret = gf_store_handle_new(path, &shandle);
- if (ret)
+ if (ret) {
goto out;
+ }
fd = gf_store_mkstemp(shandle);
if (fd <= 0) {
@@ -3384,15 +3399,30 @@ glusterd_store_options(xlator_t *this, dict_t *opts)
goto out;
}
+ dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t),
+ gf_gld_mt_volinfo_dict_data_t);
+ if (dict_data == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL);
+        ret = -1;
+        goto out;
+ }
+ dict_data->shandle = shandle;
shandle->fd = fd;
- dict_foreach(opts, _store_global_opts, shandle);
- shandle->fd = 0;
+ dict_foreach(opts, _storeopts, (void *)dict_data);
+ if (dict_data->buffer_len > 0) {
+ ret = gf_store_save_items(fd, dict_data->buffer);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL);
+ goto out;
+ }
+ }
+
ret = gf_store_rename_tmppath(shandle);
- if (ret)
- goto out;
out:
- if ((ret < 0) && (fd > 0))
+    if (shandle)
+        shandle->fd = 0;
+ GF_FREE(dict_data);
+ if ((ret < 0) && (fd > 0)) {
gf_store_unlink_tmppath(shandle);
+ }
gf_store_handle_destroy(shandle);
return ret;
}
@@ -3438,7 +3468,7 @@ glusterd_store_retrieve_options(xlator_t *this)
goto out;
ret = 0;
out:
- (void)gf_store_iter_destroy(iter);
+ (void)gf_store_iter_destroy(&iter);
gf_store_handle_destroy(shandle);
return ret;
}
@@ -3490,28 +3520,28 @@ glusterd_store_retrieve_volumes(xlator_t *this, glusterd_snap_t *snap)
goto out;
}
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
-
- while (entry) {
+ while ((entry = sys_readdir(dir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
if (snap && ((!strcmp(entry->d_name, "geo-replication")) ||
(!strcmp(entry->d_name, "info"))))
- goto next;
+ continue;
len = snprintf(entry_path, PATH_MAX, "%s/%s", path, entry->d_name);
- if ((len < 0) || (len >= PATH_MAX)) {
- goto next;
- }
+ if ((len < 0) || (len >= PATH_MAX))
+ continue;
+
ret = sys_lstat(entry_path, &st);
if (ret == -1) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
"Failed to stat entry %s : %s", path, strerror(errno));
- goto next;
+ continue;
}
if (!S_ISDIR(st.st_mode)) {
gf_msg_debug(this->name, 0, "%s is not a valid volume",
entry->d_name);
- goto next;
+ continue;
}
volinfo = glusterd_store_retrieve_volume(entry->d_name, snap);
@@ -3534,8 +3564,6 @@ glusterd_store_retrieve_volumes(xlator_t *this, glusterd_snap_t *snap)
glusterd_store_create_nodestate_sh_on_absence(volinfo);
glusterd_store_perform_node_state_store(volinfo);
}
- next:
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
}
ret = 0;
@@ -3679,7 +3707,7 @@ glusterd_recreate_vol_brick_mounts(xlator_t *this, glusterd_volinfo_t *volinfo)
struct stat st_buf = {
0,
};
- char abspath[VALID_GLUSTERD_PATHMAX] = {0};
+ char abspath[PATH_MAX] = {0};
GF_ASSERT(this);
GF_ASSERT(volinfo);
@@ -3709,7 +3737,7 @@ glusterd_recreate_vol_brick_mounts(xlator_t *this, glusterd_volinfo_t *volinfo)
ret = sys_lstat(brickinfo->path, &st_buf);
if (ret) {
if (errno == ENOENT) {
- ret = mkdir_p(brick_mount_path, 0777, _gf_true);
+ ret = mkdir_p(brick_mount_path, 0755, _gf_true);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno,
GD_MSG_CREATE_DIR_FAILED, "Failed to create %s. ",
@@ -3742,7 +3770,12 @@ glusterd_recreate_vol_brick_mounts(xlator_t *this, glusterd_volinfo_t *volinfo)
ret = -1;
goto out;
}
- strncpy(brickinfo->real_path, abspath, strlen(abspath));
+ if (strlen(abspath) >= sizeof(brickinfo->real_path)) {
+ ret = -1;
+ goto out;
+ }
+ (void)strncpy(brickinfo->real_path, abspath,
+ sizeof(brickinfo->real_path));
}
}
@@ -3882,14 +3915,15 @@ glusterd_store_update_snap(glusterd_snap_t *snap)
if (op_errno != GD_STORE_EOF)
goto out;
- ret = gf_store_iter_destroy(iter);
- if (ret) {
+ ret = 0;
+
+out:
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
- "Failed to destroy store "
- "iter");
+ "Failed to destroy store iter");
+ ret = -1;
}
-out:
return ret;
}
@@ -3950,7 +3984,6 @@ out:
int32_t
glusterd_store_retrieve_missed_snaps_list(xlator_t *this)
{
- char buf[PATH_MAX] = "";
char path[PATH_MAX] = "";
char *snap_vol_id = NULL;
char *missed_node_info = NULL;
@@ -3987,8 +4020,8 @@ glusterd_store_retrieve_missed_snaps_list(xlator_t *this)
}
do {
- ret = gf_store_read_and_tokenize(
- fp, buf, sizeof(buf), &missed_node_info, &value, &store_errno);
+ ret = gf_store_read_and_tokenize(fp, &missed_node_info, &value,
+ &store_errno);
if (ret) {
if (store_errno == GD_STORE_EOF) {
gf_msg_debug(this->name, 0, "EOF for missed_snap_list");
@@ -4032,6 +4065,9 @@ glusterd_store_retrieve_missed_snaps_list(xlator_t *this)
ret = 0;
out:
+ if (fp)
+ fclose(fp);
+
gf_msg_trace(this->name, 0, "Returning with %d", ret);
return ret;
}
@@ -4077,9 +4113,9 @@ glusterd_store_retrieve_snaps(xlator_t *this)
goto out;
}
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
-
- while (entry) {
+ while ((entry = sys_readdir(dir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
if (strcmp(entry->d_name, GLUSTERD_MISSED_SNAPS_LIST_FILE)) {
ret = glusterd_store_retrieve_snap(entry->d_name);
if (ret) {
@@ -4088,7 +4124,6 @@ glusterd_store_retrieve_snaps(xlator_t *this)
goto out;
}
}
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
}
/* Retrieve missed_snaps_list */
@@ -4110,8 +4145,8 @@ out:
int32_t
glusterd_store_write_missed_snapinfo(int32_t fd)
{
- char key[PATH_MAX] = "";
- char value[PATH_MAX] = "";
+ char key[(UUID_SIZE * 2) + 2];
+ char value[PATH_MAX];
int32_t ret = -1;
glusterd_conf_t *priv = NULL;
glusterd_missed_snap_info *missed_snapinfo = NULL;
@@ -4400,41 +4435,39 @@ glusterd_store_create_peer_shandle(glusterd_peerinfo_t *peerinfo)
return ret;
}
-int32_t
+static int32_t
glusterd_store_peer_write(int fd, glusterd_peerinfo_t *peerinfo)
{
- char buf[50] = {0};
+ char buf[PATH_MAX];
+ uint total_len = 0;
int32_t ret = 0;
int32_t i = 1;
glusterd_peer_hostname_t *hostname = NULL;
- char *key = NULL;
-
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_PEER_UUID,
- uuid_utoa(peerinfo->uuid));
- if (ret)
- goto out;
- snprintf(buf, sizeof(buf), "%d", peerinfo->state.state);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_PEER_STATE, buf);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%s\n%s=%d\n",
+ GLUSTERD_STORE_KEY_PEER_UUID, uuid_utoa(peerinfo->uuid),
+ GLUSTERD_STORE_KEY_PEER_STATE, peerinfo->state.state);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
+ }
+ total_len += ret;
cds_list_for_each_entry(hostname, &peerinfo->hostnames, hostname_list)
{
- ret = gf_asprintf(&key, GLUSTERD_STORE_KEY_PEER_HOSTNAME "%d", i);
- if (ret < 0)
- goto out;
- ret = gf_store_save_value(fd, key, hostname->hostname);
- if (ret)
+ ret = snprintf(buf + total_len, sizeof(buf) - total_len,
+ GLUSTERD_STORE_KEY_PEER_HOSTNAME "%d=%s\n", i,
+ hostname->hostname);
+ if (ret < 0 || ret >= sizeof(buf) - total_len) {
+ ret = -1;
goto out;
- GF_FREE(key);
- key = NULL;
+ }
+ total_len += ret;
i++;
}
+ ret = gf_store_save_items(fd, buf);
out:
- if (key)
- GF_FREE(key);
gf_msg_debug("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -4537,11 +4570,9 @@ glusterd_store_retrieve_peers(xlator_t *this)
goto out;
}
- for (;;) {
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
- if (!entry) {
- break;
- }
+ while ((entry = sys_readdir(dir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
if (gf_uuid_parse(entry->d_name, tmp_uuid) != 0) {
gf_log(this->name, GF_LOG_WARNING, "skipping non-peer file %s",
entry->d_name);
@@ -4561,8 +4592,9 @@ glusterd_store_retrieve_peers(xlator_t *this)
goto next;
ret = gf_store_iter_get_next(iter, &key, &value, &op_errno);
- if (ret)
+ if (ret) {
goto next;
+ }
/* Create an empty peerinfo object before reading in the
* details
@@ -4606,8 +4638,6 @@ glusterd_store_retrieve_peers(xlator_t *this)
goto next;
}
- (void)gf_store_iter_destroy(iter);
-
if (gf_uuid_is_null(peerinfo->uuid)) {
gf_log("", GF_LOG_ERROR,
"Null UUID while attempting to read peer from '%s'",
@@ -4620,10 +4650,6 @@ glusterd_store_retrieve_peers(xlator_t *this)
*/
address = cds_list_entry(peerinfo->hostnames.next,
glusterd_peer_hostname_t, hostname_list);
- if (!address) {
- ret = -1;
- goto next;
- }
peerinfo->hostname = gf_strdup(address->hostname);
ret = glusterd_friend_add_from_peerinfo(peerinfo, 1, NULL);
@@ -4634,6 +4660,8 @@ glusterd_store_retrieve_peers(xlator_t *this)
is_ok = _gf_true;
next:
+ (void)gf_store_iter_destroy(&iter);
+
if (!is_ok) {
gf_log(this->name, GF_LOG_WARNING,
"skipping malformed peer file %s", entry->d_name);
@@ -4646,17 +4674,18 @@ glusterd_store_retrieve_peers(xlator_t *this)
args.mode = GD_MODE_ON;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
ret = glusterd_friend_rpc_create(this, peerinfo, &args);
if (ret)
break;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
peerinfo = NULL;
out:
+
if (dir)
sys_closedir(dir);
gf_msg_debug(this->name, 0, "Returning with %d", ret);
@@ -4814,7 +4843,9 @@ glusterd_resolve_all_bricks(xlator_t *this)
"peer=%s;volume=%s;brick=%s", brickinfo->hostname,
volinfo->volname, brickinfo->path);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_RESOLVE_BRICK_FAIL,
- "resolve brick failed in restore");
+ "Failed to resolve brick %s with host %s of volume %s"
+ " in restore",
+ brickinfo->path, brickinfo->hostname, volinfo->volname);
goto out;
}
}
@@ -4963,10 +4994,10 @@ glusterd_store_save_quota_version_and_cksum(glusterd_volinfo_t *volinfo)
glusterd_conf_t *conf = NULL;
xlator_t *this = NULL;
char path[PATH_MAX] = {0};
- char cksum_path[PATH_MAX] = {
+ char cksum_path[PATH_MAX + 32] = {
0,
};
- char buf[256] = {0};
+ char buf[64] = {0};
int fd = -1;
int32_t ret = -1;
int32_t len = 0;
@@ -4991,19 +5022,17 @@ glusterd_store_save_quota_version_and_cksum(glusterd_volinfo_t *volinfo)
goto out;
}
- snprintf(buf, sizeof(buf) - 1, "%u", volinfo->quota_conf_cksum);
- ret = gf_store_save_value(fd, "cksum", buf);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_STORE_FAIL,
- "Failed to store cksum");
+ ret = snprintf(buf, sizeof(buf), "cksum=%u\nversion=%u\n",
+ volinfo->quota_conf_cksum, volinfo->quota_conf_version);
+ if (ret < 0 || ret >= sizeof(buf)) {
+ ret = -1;
goto out;
}
- snprintf(buf, sizeof(buf) - 1, "%u", volinfo->quota_conf_version);
- ret = gf_store_save_value(fd, "version", buf);
+ ret = gf_store_save_items(fd, buf);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERS_STORE_FAIL,
- "Failed to store version");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_STORE_FAIL,
+ "Failed to store quota cksum and version");
goto out;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index 5db77703482..83f4df0783e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -11,14 +11,14 @@
#define _GLUSTERD_HA_H_
#include <pthread.h>
-#include "compat-uuid.h"
-
-#include "glusterfs.h"
-#include "xlator.h"
-#include "run.h"
-#include "logging.h"
-#include "call-stub.h"
-#include "byte-order.h"
+#include <glusterfs/compat-uuid.h>
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/run.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/byte-order.h>
#include "glusterd.h"
#include "rpcsvc.h"
@@ -28,6 +28,8 @@ typedef enum glusterd_store_ver_ac_ {
GLUSTERD_VOLINFO_VER_AC_DECREMENT = 2,
} glusterd_volinfo_ver_ac_t;
+#define UUID_SIZE 36
+#define VOLINFO_BUFFER_SIZE 4093
#define GLUSTERD_STORE_UUID_KEY "UUID"
#define GLUSTERD_STORE_KEY_VOL_TYPE "type"
@@ -40,7 +42,9 @@ typedef enum glusterd_store_ver_ac_ {
#define GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT "disperse_count"
#define GLUSTERD_STORE_KEY_VOL_REDUNDANCY_CNT "redundancy_count"
#define GLUSTERD_STORE_KEY_VOL_ARBITER_CNT "arbiter_count"
+#define GLUSTERD_STORE_KEY_VOL_THIN_ARBITER_CNT "thin_arbiter_count"
#define GLUSTERD_STORE_KEY_VOL_BRICK "brick"
+#define GLUSTERD_STORE_KEY_VOL_TA_BRICK "ta-brick"
#define GLUSTERD_STORE_KEY_VOL_VERSION "version"
#define GLUSTERD_STORE_KEY_VOL_TRANSPORT "transport-type"
#define GLUSTERD_STORE_KEY_VOL_ID "volume-id"
@@ -59,17 +63,6 @@ typedef enum glusterd_store_ver_ac_ {
#define GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION "client-op-version"
#define GLUSTERD_STORE_KEY_VOL_QUOTA_VERSION "quota-version"
-#define GLUSTERD_STORE_KEY_VOL_TIER_STATUS "tier_status"
-#define GLUSTERD_STORE_KEY_TIER_DETACH_OP "tier_op"
-#define GLUSTERD_STORE_KEY_COLD_TYPE "cold_type"
-#define GLUSTERD_STORE_KEY_COLD_COUNT "cold_count"
-#define GLUSTERD_STORE_KEY_COLD_REPLICA_COUNT "cold_replica_count"
-#define GLUSTERD_STORE_KEY_COLD_DISPERSE_COUNT "cold_disperse_count"
-#define GLUSTERD_STORE_KEY_COLD_REDUNDANCY_COUNT "cold_redundancy_count"
-#define GLUSTERD_STORE_KEY_HOT_TYPE "hot_type"
-#define GLUSTERD_STORE_KEY_HOT_COUNT "hot_count"
-#define GLUSTERD_STORE_KEY_HOT_REPLICA_COUNT "hot_replica_count"
-
#define GLUSTERD_STORE_KEY_SNAP_NAME "name"
#define GLUSTERD_STORE_KEY_SNAP_ID "snap-id"
#define GLUSTERD_STORE_KEY_SNAP_DESC "desc"
@@ -101,8 +94,7 @@ typedef enum glusterd_store_ver_ac_ {
#define GLUSTERD_STORE_KEY_PEER_UUID "uuid"
#define GLUSTERD_STORE_KEY_PEER_HOSTNAME "hostname"
#define GLUSTERD_STORE_KEY_PEER_STATE "state"
-
-#define GLUSTERD_STORE_KEY_VOL_CAPS "caps"
+#define GLUSTERD_STORE_KEY_VOL_CAPS "caps" /* kept only for backward compat */
#define GLUSTERD_STORE_KEY_VOL_DEFRAG_REB_FILES "rebalanced-files"
#define GLUSTERD_STORE_KEY_VOL_DEFRAG_SIZE "size"
@@ -118,6 +110,21 @@ typedef enum glusterd_store_ver_ac_ {
#define GLUSTERD_STORE_KEY_VOL_MIGRATIONS_SKIPPED "migration-skipped"
#define GLUSTERD_STORE_KEY_VOL_MIGRATION_RUN_TIME "migration-run-time"
+#define GLUSTERD_STORE_KEY_GANESHA_GLOBAL "nfs-ganesha"
+
+/*
+ * This structure buffers key=value pairs for a volume store file so
+ * that they can be flushed with a single write instead of one write
+ * per pair.
+ */
+struct glusterd_volinfo_data_store_ {
+ gf_store_handle_t *shandle; /*Contains fd and path of the file */
+ int16_t buffer_len;
+ char key_check; /* flag to check if key is to be validated before write*/
+ char buffer[VOLINFO_BUFFER_SIZE];
+};
+typedef struct glusterd_volinfo_data_store_ glusterd_volinfo_data_store_t;
+
int32_t
glusterd_store_volinfo(glusterd_volinfo_t *volinfo,
glusterd_volinfo_ver_ac_t ac);
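A sketch of how a dict_foreach() callback can fill glusterd_volinfo_data_store_t before a single gf_store_save_items() flush; the callback name and the bail-on-full policy are illustrative (the real callback in this change is _storeopts()):

    static int
    append_opt_sketch(dict_t *dict, char *key, data_t *value, void *data)
    {
        glusterd_volinfo_data_store_t *ds = data;
        int len;

        len = snprintf(ds->buffer + ds->buffer_len,
                       sizeof(ds->buffer) - ds->buffer_len, "%s=%s\n",
                       key, (char *)value->data);
        if (len < 0 || len >= sizeof(ds->buffer) - ds->buffer_len)
            return -1; /* buffer exhausted; caller must flush first */
        ds->buffer_len += len;
        return 0;
    }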
@@ -156,6 +163,12 @@ int
glusterd_retrieve_op_version(xlator_t *this, int *op_version);
int
+glusterd_retrieve_max_op_version(xlator_t *this, int *op_version);
+
+int
+glusterd_store_max_op_version(xlator_t *this);
+
+int
glusterd_store_global_info(xlator_t *this);
int32_t
@@ -171,9 +184,6 @@ void
glusterd_replace_slash_with_hyphen(char *str);
int32_t
-glusterd_store_perform_volume_store(glusterd_volinfo_t *volinfo);
-
-int32_t
glusterd_store_create_quota_conf_sh_on_absence(glusterd_volinfo_t *volinfo);
int
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index 3007d92f539..ca845903c4f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -7,25 +7,28 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
+#include <signal.h>
-#include "globals.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
#include "glusterd.h"
-#include "glusterfs.h"
+#include <glusterfs/glusterfs.h>
#include "glusterd-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-shd-svc.h"
#include "glusterd-quotad-svc.h"
+#ifdef BUILD_GNFS
#include "glusterd-nfs-svc.h"
+#endif
#include "glusterd-bitd-svc.h"
-#include "glusterd-tierd-svc.h"
-#include "glusterd-tierd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-scrub-svc.h"
#include "glusterd-svc-helper.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
+#include "glusterd-snapshot-utils.h"
int
-glusterd_svcs_reconfigure()
+glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo)
{
int ret = 0;
xlator_t *this = THIS;
@@ -37,15 +40,18 @@ glusterd_svcs_reconfigure()
conf = this->private;
GF_ASSERT(conf);
+#ifdef BUILD_GNFS
svc_name = "nfs";
ret = glusterd_nfssvc_reconfigure();
if (ret)
goto out;
-
+#endif
svc_name = "self-heald";
- ret = glusterd_shdsvc_reconfigure();
- if (ret)
- goto out;
+ if (volinfo) {
+ ret = glusterd_shdsvc_reconfigure(volinfo);
+ if (ret)
+ goto out;
+ }
if (conf->op_version == GD_OP_VERSION_MIN)
goto out;
@@ -69,7 +75,7 @@ out:
}
int
-glusterd_svcs_stop()
+glusterd_svcs_stop(glusterd_volinfo_t *volinfo)
{
int ret = 0;
xlator_t *this = NULL;
@@ -81,23 +87,27 @@ glusterd_svcs_stop()
priv = this->private;
GF_ASSERT(priv);
- ret = glusterd_svc_stop(&(priv->nfs_svc), SIGKILL);
+#ifdef BUILD_GNFS
+ ret = priv->nfs_svc.stop(&(priv->nfs_svc), SIGKILL);
if (ret)
goto out;
-
- ret = glusterd_svc_stop(&(priv->shd_svc), SIGTERM);
+#endif
+ ret = priv->quotad_svc.stop(&(priv->quotad_svc), SIGTERM);
if (ret)
goto out;
- ret = glusterd_svc_stop(&(priv->quotad_svc), SIGTERM);
- if (ret)
- goto out;
+ if (volinfo) {
+ ret = volinfo->shd.svc.stop(&(volinfo->shd.svc), SIGTERM);
+ if (ret)
+ goto out;
+ }
- ret = glusterd_svc_stop(&(priv->bitd_svc), SIGTERM);
+ ret = priv->bitd_svc.stop(&(priv->bitd_svc), SIGTERM);
if (ret)
goto out;
- ret = glusterd_svc_stop(&(priv->scrub_svc), SIGTERM);
+ ret = priv->scrub_svc.stop(&(priv->scrub_svc), SIGTERM);
+
out:
return ret;
}
@@ -117,16 +127,11 @@ glusterd_svcs_manager(glusterd_volinfo_t *volinfo)
if (volinfo && volinfo->is_snap_volume)
return 0;
+#if BUILD_GNFS
ret = conf->nfs_svc.manager(&(conf->nfs_svc), NULL, PROC_START_NO_WAIT);
if (ret)
goto out;
-
- ret = conf->shd_svc.manager(&(conf->shd_svc), volinfo, PROC_START_NO_WAIT);
- if (ret == -EINVAL)
- ret = 0;
- if (ret)
- goto out;
-
+#endif
if (conf->op_version == GD_OP_VERSION_MIN)
goto out;
@@ -143,6 +148,15 @@ glusterd_svcs_manager(glusterd_volinfo_t *volinfo)
if (ret)
goto out;
+ if (volinfo) {
+ ret = volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo,
+ PROC_START_NO_WAIT);
+ if (ret == -EINVAL)
+ ret = 0;
+ if (ret)
+ goto out;
+ }
+
ret = conf->scrub_svc.manager(&(conf->scrub_svc), NULL, PROC_START_NO_WAIT);
if (ret == -EINVAL)
ret = 0;
@@ -179,7 +193,7 @@ glusterd_svc_check_volfile_identical(char *svc_name,
goto out;
}
- /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
+ /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
tmp_fd = mkstemp(tmpvol);
if (tmp_fd < 0) {
gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
@@ -225,8 +239,10 @@ glusterd_svc_check_topology_identical(char *svc_name,
int tmpclean = 0;
int tmpfd = -1;
- if ((!identical) || (!this) || (!this->private))
+ if ((!identical) || (!this) || (!this->private)) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
@@ -241,7 +257,7 @@ glusterd_svc_check_topology_identical(char *svc_name,
goto out;
}
- /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
+ /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
tmpfd = mkstemp(tmpvol);
if (tmpfd < 0) {
gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
@@ -269,3 +285,763 @@ out:
GF_FREE(tmpvol);
return ret;
}
+
+int
+glusterd_volume_svc_check_volfile_identical(
+ char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo,
+ glusterd_vol_graph_builder_t builder, gf_boolean_t *identical)
+{
+ char orgvol[PATH_MAX] = {
+ 0,
+ };
+ char *tmpvol = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
+ int need_unlink = 0;
+ int tmp_fd = -1;
+
+ this = THIS;
+
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, identical, out);
+
+ /* This builds the volfile for volume-level daemons */
+ glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol,
+ sizeof(orgvol));
+
+ ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
+ tmp_fd = mkstemp(tmpvol);
+ if (tmp_fd < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to create temp file"
+ " %s:(%s)",
+ tmpvol, strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ need_unlink = 1;
+
+ ret = builder(volinfo, tmpvol, mode_dict);
+ if (ret)
+ goto out;
+
+ ret = glusterd_check_files_identical(orgvol, tmpvol, identical);
+out:
+ if (need_unlink)
+ sys_unlink(tmpvol);
+
+ if (tmpvol != NULL)
+ GF_FREE(tmpvol);
+
+ if (tmp_fd >= 0)
+ sys_close(tmp_fd);
+
+ return ret;
+}
+
+int
+glusterd_volume_svc_check_topology_identical(
+ char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo,
+ glusterd_vol_graph_builder_t builder, gf_boolean_t *identical)
+{
+ char orgvol[PATH_MAX] = {
+ 0,
+ };
+ char *tmpvol = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = THIS;
+ int ret = -1;
+ int tmpclean = 0;
+ int tmpfd = -1;
+
+ if ((!identical) || (!this) || (!this->private)) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
+ goto out;
+ }
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ /* This builds the volfile for volume-level daemons */
+ glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol,
+ sizeof(orgvol));
+ /* Create the temporary volfile */
+ ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
+ tmpfd = mkstemp(tmpvol);
+ if (tmpfd < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to create temp file"
+ " %s:(%s)",
+ tmpvol, strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ tmpclean = 1; /* SET the flag to unlink() tmpfile */
+
+ ret = builder(volinfo, tmpvol, mode_dict);
+ if (ret)
+ goto out;
+
+ /* Compare the topology of volfiles */
+ ret = glusterd_check_topology_identical(orgvol, tmpvol, identical);
+out:
+ if (tmpfd >= 0)
+ sys_close(tmpfd);
+ if (tmpclean)
+ sys_unlink(tmpvol);
+ if (tmpvol != NULL)
+ GF_FREE(tmpvol);
+ return ret;
+}
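Both checkers above share one pattern: regenerate the volfile into a mkstemp() temp file, compare it with the on-disk original, then unlink. A standalone sketch of that pattern follows (reviewer illustration with invented demo names, not part of the patch; the real code compares with glusterd_check_files_identical()/glusterd_check_topology_identical()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Byte-for-byte comparison of two files; returns 1, 0, or -1 on error. */
    static int files_identical(const char *a, const char *b)
    {
        FILE *fa = fopen(a, "r"), *fb = fopen(b, "r");
        int ca, cb, same = -1;

        if (!fa || !fb)
            goto out;
        do {
            ca = fgetc(fa);
            cb = fgetc(fb);
            if (ca != cb) {
                same = 0;
                goto out;
            }
        } while (ca != EOF);
        same = 1;
    out:
        if (fa)
            fclose(fa);
        if (fb)
            fclose(fb);
        return same;
    }

    int main(void)
    {
        char tmpl[] = "/tmp/gdemo-XXXXXX";
        int fd = mkstemp(tmpl); /* creates the file with mode 0600 */

        if (fd < 0)
            return 1;
        dprintf(fd, "volume demo\nend-volume\n");
        close(fd);
        printf("identical=%d\n", files_identical(tmpl, tmpl));
        unlink(tmpl);
        return 0;
    }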
+
+gf_boolean_t
+glusterd_is_svcproc_attachable(glusterd_svc_proc_t *svc_proc)
+{
+ int pid = -1;
+ glusterd_svc_t *parent_svc = NULL;
+
+ if (!svc_proc)
+ return _gf_false;
+
+ if (svc_proc->status == GF_SVC_STARTING)
+ return _gf_true;
+
+ if (svc_proc->status == GF_SVC_STARTED ||
+ svc_proc->status == GF_SVC_DISCONNECTED) {
+ parent_svc = cds_list_entry(svc_proc->svcs.next, glusterd_svc_t,
+ mux_svc);
+ if (parent_svc && gf_is_service_running(parent_svc->proc.pidfile, &pid))
+ return _gf_true;
+ }
+
+ if (svc_proc->status == GF_SVC_DIED || svc_proc->status == GF_SVC_STOPPING)
+ return _gf_false;
+
+ return _gf_false;
+}
+
+void *
+__gf_find_compatible_svc(gd_node_type daemon)
+{
+ glusterd_svc_proc_t *svc_proc = NULL;
+ struct cds_list_head *svc_procs = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+
+ switch (daemon) {
+ case GD_NODE_SHD: {
+ svc_procs = &conf->shd_procs;
+ if (!svc_procs)
+ goto out;
+ } break;
+ default:
+ /* Add support for other client daemons here */
+ goto out;
+ }
+
+ cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
+ {
+ if (glusterd_is_svcproc_attachable(svc_proc))
+ return (void *)svc_proc;
+ /*
+ * Logic to select one process goes here. Currently there is only one
+ * shd_proc, so we select the first one.
+ */
+ }
+out:
+ return NULL;
+}
+
+glusterd_svc_proc_t *
+glusterd_svcprocess_new()
+{
+ glusterd_svc_proc_t *new_svcprocess = NULL;
+
+ new_svcprocess = GF_CALLOC(1, sizeof(*new_svcprocess),
+ gf_gld_mt_glusterd_svc_proc_t);
+
+ if (!new_svcprocess)
+ return NULL;
+
+ CDS_INIT_LIST_HEAD(&new_svcprocess->svc_proc_list);
+ CDS_INIT_LIST_HEAD(&new_svcprocess->svcs);
+ new_svcprocess->notify = glusterd_muxsvc_common_rpc_notify;
+ new_svcprocess->status = GF_SVC_STARTING;
+ return new_svcprocess;
+}
+
+int
+glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
+{
+ int ret = -1;
+ glusterd_svc_proc_t *mux_proc = NULL;
+ glusterd_conn_t *mux_conn = NULL;
+ glusterd_conf_t *conf = NULL;
+ glusterd_svc_t *parent_svc = NULL;
+ int pid = -1;
+ gf_boolean_t stop_daemon = _gf_false;
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ if (svc->inited && !glusterd_proc_is_running(&(svc->proc))) {
+ /* This is the case when the shd process was abnormally killed */
+ pthread_mutex_unlock(&conf->attach_lock);
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
+ pthread_mutex_lock(&conf->attach_lock);
+ }
+
+ if (!svc->inited) {
+ glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile));
+ ret = snprintf(svc->proc.name, sizeof(svc->proc.name), "%s",
+ "glustershd");
+ if (ret < 0)
+ goto unlock;
+
+ ret = snprintf(svc->proc.pidfile, sizeof(svc->proc.pidfile), "%s",
+ pidfile);
+ if (ret < 0)
+ goto unlock;
+
+ if (gf_is_service_running(pidfile, &pid)) {
+ /* Only a connect would be required, but we don't know what happened
+ * during the disconnect, so it is safer to reattach.
+ */
+ mux_proc = __gf_find_compatible_svc_from_pid(GD_NODE_SHD, pid);
+ }
+
+ if (!mux_proc) {
+ if (pid != -1 && sys_access(pidfile, R_OK) == 0) {
+ /* Stale pid file: stop the daemon and unlink the file. This has to
+ * be done outside the attach_lock.
+ */
+ stop_daemon = _gf_true;
+ }
+ mux_proc = __gf_find_compatible_svc(GD_NODE_SHD);
+ }
+ if (mux_proc) {
+ /* Take the first entry from the process's svc list */
+ parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t,
+ mux_svc);
+ mux_conn = &parent_svc->conn;
+ if (volinfo)
+ volinfo->shd.attached = _gf_true;
+ } else {
+ mux_proc = glusterd_svcprocess_new();
+ if (!mux_proc) {
+ ret = -1;
+ goto unlock;
+ }
+ cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs);
+ }
+ svc->svc_proc = mux_proc;
+ cds_list_del_init(&svc->mux_svc);
+ cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs);
+ ret = glusterd_shdsvc_init(volinfo, mux_conn, mux_proc);
+ if (ret) {
+ pthread_mutex_unlock(&conf->attach_lock);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
+ "Failed to init shd "
+ "service");
+ goto out;
+ }
+ gf_msg_debug(THIS->name, 0, "shd service initialized");
+ svc->inited = _gf_true;
+ }
+ ret = 0;
+ }
+unlock:
+ pthread_mutex_unlock(&conf->attach_lock);
+out:
+ if (stop_daemon) {
+ glusterd_proc_stop(&svc->proc, SIGTERM, PROC_STOP_FORCE);
+ glusterd_unlink_file(pidfile);
+ }
+ return ret;
+}
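Note the locking shape of glusterd_shd_svc_mux_init(): the stale-pidfile case only records stop_daemon under attach_lock, and the actual SIGTERM/unlink happens after the mutex is released. A minimal standalone sketch of that defer-work-past-the-lock pattern (plain pthreads and invented names, for illustration only):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t attach_lock = PTHREAD_MUTEX_INITIALIZER;

    static void stop_stale_daemon(void)
    {
        puts("stopping stale daemon outside the lock");
    }

    static void demo(bool pidfile_is_stale)
    {
        bool stop_daemon = false;

        pthread_mutex_lock(&attach_lock);
        if (pidfile_is_stale)
            stop_daemon = true; /* record the decision only */
        pthread_mutex_unlock(&attach_lock);

        if (stop_daemon) /* slow path runs with the mutex dropped */
            stop_stale_daemon();
    }

    int main(void)
    {
        demo(true);
        return 0;
    }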
+
+void *
+__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid)
+{
+ glusterd_svc_proc_t *svc_proc = NULL;
+ struct cds_list_head *svc_procs = NULL;
+ glusterd_svc_t *svc = NULL;
+ pid_t mux_pid = -1;
+ glusterd_conf_t *conf = NULL;
+
+ conf = THIS->private;
+ if (!conf)
+ return NULL;
+
+ switch (daemon) {
+ case GD_NODE_SHD: {
+ svc_procs = &conf->shd_procs;
+ if (!svc_procs)
+ return NULL;
+ } break;
+ default:
+ /* Add support for other client daemons here */
+ return NULL;
+ }
+
+ cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
+ {
+ cds_list_for_each_entry(svc, &svc_proc->svcs, mux_svc)
+ {
+ if (gf_is_service_running(svc->proc.pidfile, &mux_pid)) {
+ if (mux_pid == pid &&
+ glusterd_is_svcproc_attachable(svc_proc)) {
+ /* TODO: inefficient loop, but at the moment there is only
+ * one shd.
+ */
+ return svc_proc;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+static int32_t
+my_callback(struct rpc_req *req, struct iovec *iov, int count, void *v_frame)
+{
+ call_frame_t *frame = v_frame;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", frame, out);
+ this = frame->this;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ if (GF_ATOMIC_DEC(conf->blockers) == 0) {
+ synccond_broadcast(&conf->cond_blockers);
+ }
+
+ STACK_DESTROY(frame->root);
+out:
+ return 0;
+}
+
+static int32_t
+glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *v_frame)
+{
+ call_frame_t *frame = v_frame;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_shdsvc_t *shd = NULL;
+ glusterd_svc_t *svc = frame->cookie;
+ glusterd_conf_t *conf = NULL;
+ int *flag = (int *)frame->local;
+ xlator_t *this = THIS;
+ int ret = -1;
+ gf_getspec_rsp rsp = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", frame, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (!strcmp(svc->name, "glustershd")) {
+ /* Get volinfo->shd from svc object */
+ shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
+ if (!shd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
+ "Failed to get shd object "
+ "from shd service");
+ goto out;
+ }
+
+ /* Get volinfo from shd */
+ volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
+ if (!volinfo) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo "
+ "from shd");
+ goto out;
+ }
+ }
+
+ if (!iov) {
+ gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "iov is NULL");
+ ret = -1;
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
+ if (ret < 0) {
+ gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "XDR decoding error");
+ ret = -1;
+ goto out;
+ }
+
+ if (rsp.op_ret == 0) {
+ svc->online = _gf_true;
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "svc %s of volume %s attached successfully to pid %d", svc->name,
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "svc %s of volume %s failed to attach to pid %d", svc->name,
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ if (!strcmp(svc->name, "glustershd")) {
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
+ }
+ }
+out:
+ if (flag) {
+ GF_FREE(flag);
+ }
+
+ if (volinfo)
+ glusterd_volinfo_unref(volinfo);
+
+ if (GF_ATOMIC_DEC(conf->blockers) == 0) {
+ synccond_broadcast(&conf->cond_blockers);
+ }
+ STACK_DESTROY(frame->root);
+ return 0;
+}
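The two cds_list_entry() calls in the callback above are container_of() in disguise: given a pointer to an embedded member, they step back to the enclosing structure (svc to glusterd_shdsvc_t, then shd to glusterd_volinfo_t). A standalone sketch of the idiom with stand-in types (not the real glusterd structures):

    #include <stddef.h>
    #include <stdio.h>

    /* Recover the parent struct from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct shdsvc {
        int svc; /* stand-in for the embedded glusterd_svc_t */
    };

    struct volinfo {
        struct shdsvc shd; /* stand-in for the embedded shd service */
    };

    int main(void)
    {
        struct volinfo vol;
        int *member = &vol.shd.svc;

        struct shdsvc *shd = container_of(member, struct shdsvc, svc);
        struct volinfo *v = container_of(shd, struct volinfo, shd);

        printf("recovered volinfo: %s\n", (v == &vol) ? "yes" : "no");
        return 0;
    }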
+
+extern size_t
+build_volfile_path(char *volume_id, char *path, size_t path_len,
+ char *trusted_str, dict_t *dict);
+
+int
+__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
+ struct rpc_clnt *rpc, char *volfile_id,
+ int op)
+{
+ int ret = -1;
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ struct iovec iov = {
+ 0,
+ };
+ char path[PATH_MAX] = {
+ '\0',
+ };
+ struct stat stbuf = {
+ 0,
+ };
+ int32_t spec_fd = -1;
+ size_t file_len = -1;
+ char *volfile_content = NULL;
+ ssize_t req_size = 0;
+ call_frame_t *frame = NULL;
+ gd1_mgmt_brick_op_req brick_req;
+ dict_t *dict = NULL;
+ void *req = &brick_req;
+ void *errlbl = &&err;
+ struct rpc_clnt_connection *conn;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = THIS->private;
+ extern struct rpc_clnt_program gd_brick_prog;
+ fop_cbk_fn_t cbkfn = my_callback;
+
+ if (!rpc) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_PARAM_NULL,
+ "called with null rpc");
+ return -1;
+ }
+
+ conn = &rpc->conn;
+ if (!conn->connected || conn->disconnected) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
+ "not connected yet");
+ return -1;
+ }
+
+ brick_req.op = op;
+ brick_req.name = volfile_id;
+ brick_req.input.input_val = NULL;
+ brick_req.input.input_len = 0;
+ brick_req.dict.dict_val = NULL;
+ brick_req.dict.dict_len = 0;
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL,
+ NULL);
+ goto *errlbl;
+ }
+
+ if (op == GLUSTERD_SVC_ATTACH) {
+ dict = dict_new();
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
+ ret = -ENOMEM;
+ goto *errlbl;
+ }
+
+ (void)build_volfile_path(volfile_id, path, sizeof(path), NULL, dict);
+
+ ret = sys_stat(path, &stbuf);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "Unable to stat %s (%s)", path, strerror(errno));
+ ret = -EINVAL;
+ goto *errlbl;
+ }
+
+ file_len = stbuf.st_size;
+ volfile_content = GF_MALLOC(file_len + 1, gf_common_mt_char);
+ if (!volfile_content) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
+ ret = -ENOMEM;
+ goto *errlbl;
+ }
+ spec_fd = open(path, O_RDONLY);
+ if (spec_fd < 0) {
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "failed to read volfile %s", path);
+ ret = -EIO;
+ goto *errlbl;
+ }
+ ret = sys_read(spec_fd, volfile_content, file_len);
+ if (ret == file_len) {
+ brick_req.input.input_val = volfile_content;
+ brick_req.input.input_len = file_len;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "read failed on path %s. File size=%" GF_PRI_SIZET
+ " read size=%d",
+ path, file_len, ret);
+ ret = -EIO;
+ goto *errlbl;
+ }
+ if (dict->count > 0) {
+ ret = dict_allocate_and_serialize(dict, &brick_req.dict.dict_val,
+ &brick_req.dict.dict_len);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
+ goto *errlbl;
+ }
+ }
+
+ frame->cookie = svc;
+ frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int);
+ if (!frame->local) {
+ ret = -ENOMEM;
+ goto *errlbl;
+ }
+ *((int *)frame->local) = flags;
+ cbkfn = glusterd_svc_attach_cbk;
+ }
+
+ req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
+ iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
+ if (!iobuf) {
+ goto *errlbl;
+ }
+ errlbl = &&maybe_free_iobuf;
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize(iobuf);
+
+ iobref = iobref_new();
+ if (!iobref) {
+ goto *errlbl;
+ }
+ errlbl = &&free_iobref;
+
+ iobref_add(iobref, iobuf);
+ /*
+ * Drop our reference to the iobuf. The iobref should already have
+ * one after iobref_add, so when we unref the iobref we'll free the
+ * iobuf as well.
+ */
+ iobuf_unref(iobuf);
+ /* Set the pointer to null so we don't free it on a later error. */
+ iobuf = NULL;
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic(iov, req, (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret == -1) {
+ goto *errlbl;
+ }
+ iov.iov_len = ret;
+
+ /* Send the msg */
+ GF_ATOMIC_INC(conf->blockers);
+ ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
+ iobref, frame, NULL, 0, NULL, 0, NULL);
+ if (dict)
+ dict_unref(dict);
+ GF_FREE(volfile_content);
+ if (spec_fd >= 0)
+ sys_close(spec_fd);
+ return ret;
+
+free_iobref:
+ iobref_unref(iobref);
+maybe_free_iobuf:
+ if (iobuf) {
+ iobuf_unref(iobuf);
+ }
+err:
+ if (dict)
+ dict_unref(dict);
+ if (brick_req.dict.dict_val)
+ GF_FREE(brick_req.dict.dict_val);
+
+ GF_FREE(volfile_content);
+ if (spec_fd >= 0)
+ sys_close(spec_fd);
+ if (frame)
+ STACK_DESTROY(frame->root);
+ return -1;
+}
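The errlbl dance in __glusterd_send_svc_configure_req() relies on a GCC/Clang extension: unary && yields the address of a label, and goto *p jumps through it, so the error label can be advanced as each resource is acquired and one failure path releases exactly what exists so far. A minimal standalone sketch of the idiom:

    #include <stdio.h>
    #include <stdlib.h>

    static int acquire_two(void)
    {
        void *errlbl = &&err; /* GCC/Clang label-address extension */
        char *a = NULL, *b = NULL;

        a = malloc(16);
        if (!a)
            goto *errlbl;
        errlbl = &&free_a; /* from here on, a must be released */

        b = malloc(16);
        if (!b)
            goto *errlbl;

        free(b);
        free(a);
        return 0;

    free_a:
        free(a);
    err:
        return -1;
    }

    int main(void)
    {
        printf("acquire_two() = %d\n", acquire_two());
        return 0;
    }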
+
+int
+glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int flags)
+{
+ glusterd_conf_t *conf = THIS->private;
+ int ret = -1;
+ int tries;
+ rpc_clnt_t *rpc = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_ATTACH_INFO,
+ "adding svc %s (volume=%s) to existing "
+ "process with pid %d",
+ svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+
+ rpc = rpc_clnt_ref(svc->conn.rpc);
+ for (tries = 15; tries > 0; --tries) {
+ /* The volume for which we are attempting to attach the shd svc may
+ * have become stale and be in the process of deletion. Since the
+ * volinfo object was passed in before that sequence of operations
+ * happened, we might be operating on a stale volume, so check that
+ * the volume still exists at every synctask switch.
+ */
+ if (!glusterd_volume_exists(volinfo->volname)) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "Volume %s is marked as stale; aborting further shd svc "
+ "attach attempts",
+ volinfo->volname);
+ ret = 0;
+ goto out;
+ }
+ if (rpc) {
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ ret = __glusterd_send_svc_configure_req(
+ svc, flags, rpc, svc->proc.volfileid, GLUSTERD_SVC_ATTACH);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ if (!ret) {
+ volinfo->shd.attached = _gf_true;
+ goto out;
+ }
+ }
+ /*
+ * It might not actually be safe to manipulate the lock
+ * like this, but if we don't then the connection can
+ * never actually complete and retries are useless.
+ * Unfortunately, all of the alternatives (e.g. doing
+ * all of this in a separate thread) are much more
+ * complicated and risky.
+ * TBD: see if there's a better way
+ */
+ synclock_unlock(&conf->big_lock);
+ synctask_sleep(1);
+ synclock_lock(&conf->big_lock);
+ }
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "attach failed for %s(volume=%s)", svc->name, volinfo->volname);
+out:
+ if (rpc)
+ rpc_clnt_unref(rpc);
+ return ret;
+}
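The attach retry loop above drops conf->big_lock around each one-second sleep so the RPC connection can actually make progress; the detach loop below does the same. A standalone analogy using plain pthreads instead of glusterd's synctask framework (invented names, illustration only):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static int try_attach(int attempt)
    {
        return (attempt >= 3) ? 0 : -1; /* pretend the 4th try succeeds */
    }

    int main(void)
    {
        int tries, ret = -1;

        pthread_mutex_lock(&big_lock);
        for (tries = 0; tries < 15; tries++) {
            ret = try_attach(tries);
            if (ret == 0)
                break;
            pthread_mutex_unlock(&big_lock); /* let others progress */
            sleep(1);
            pthread_mutex_lock(&big_lock);
        }
        pthread_mutex_unlock(&big_lock);
        printf("ret=%d after %d tries\n", ret, tries + 1);
        return 0;
    }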
+
+int
+glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig)
+{
+ glusterd_conf_t *conf = THIS->private;
+ int ret = -1;
+ int tries;
+ rpc_clnt_t *rpc = NULL;
+
+ GF_VALIDATE_OR_GOTO(THIS->name, conf, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, svc, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
+
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DETACH_INFO,
+ "removing svc %s (volume=%s) from existing "
+ "process with pid %d",
+ svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+
+ rpc = rpc_clnt_ref(svc->conn.rpc);
+ for (tries = 15; tries > 0; --tries) {
+ if (rpc) {
+ /* For detach there are no flags, and we are not using sig. */
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ ret = __glusterd_send_svc_configure_req(svc, 0, svc->conn.rpc,
+ svc->proc.volfileid,
+ GLUSTERD_SVC_DETACH);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ if (!ret) {
+ goto out;
+ }
+ }
+ /*
+ * It might not actually be safe to manipulate the lock
+ * like this, but if we don't then the connection can
+ * never actually complete and retries are useless.
+ * Unfortunately, all of the alternatives (e.g. doing
+ * all of this in a separate thread) are much more
+ * complicated and risky.
+ * TBD: see if there's a better way
+ */
+ synclock_unlock(&conf->big_lock);
+ synctask_sleep(1);
+ synclock_lock(&conf->big_lock);
+ }
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_DETACH_FAIL,
+ "detach failed for %s(volume=%s)", svc->name, volinfo->volname);
+out:
+ if (rpc)
+ rpc_clnt_unref(rpc);
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
index cc98e788bbe..12717dc58ac 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
@@ -16,10 +16,10 @@
#include "glusterd-volgen.h"
int
-glusterd_svcs_reconfigure();
+glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo);
int
-glusterd_svcs_stop();
+glusterd_svcs_stop(glusterd_volinfo_t *vol);
int
glusterd_svcs_manager(glusterd_volinfo_t *volinfo);
@@ -32,14 +32,41 @@ int
glusterd_svc_check_topology_identical(char *svc_name,
glusterd_graph_builder_t builder,
gf_boolean_t *identical);
+int
+glusterd_volume_svc_check_volfile_identical(char *svc_name, dict_t *mode_dict,
+ glusterd_volinfo_t *volinfo,
+ glusterd_vol_graph_builder_t,
+ gf_boolean_t *identical);
+int
+glusterd_volume_svc_check_topology_identical(char *svc_name, dict_t *mode_dict,
+ glusterd_volinfo_t *volinfo,
+ glusterd_vol_graph_builder_t,
+ gf_boolean_t *identical);
+void
+glusterd_volume_svc_build_volfile_path(char *server, glusterd_volinfo_t *vol,
+ char *volfile, size_t len);
+void *
+__gf_find_compatible_svc(gd_node_type daemon);
+
+glusterd_svc_proc_t *
+glusterd_svcprocess_new();
int
-glusterd_svc_check_tier_volfile_identical(char *svc_name,
- glusterd_volinfo_t *volinfo,
- gf_boolean_t *identical);
+glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc);
+
+void *
+__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid);
+
+int
+glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo,
+ int flags);
+
+int
+glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig);
+
int
-glusterd_svc_check_tier_topology_identical(char *svc_name,
- glusterd_volinfo_t *volinfo,
- gf_boolean_t *identical);
+__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flag,
+ struct rpc_clnt *rpc, char *volfile_id,
+ int op);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
index 9954605f6e3..18b3fb13630 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
@@ -8,23 +8,24 @@
cases as published by the Free Software Foundation.
*/
-#include "globals.h"
-#include "run.h"
+#include <glusterfs/globals.h>
+#include <glusterfs/run.h>
#include "glusterd.h"
-#include "glusterfs.h"
+#include <glusterfs/glusterfs.h>
#include "glusterd-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-proc-mgmt.h"
#include "glusterd-conn-mgmt.h"
#include "glusterd-messages.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
+#include "glusterd-shd-svc-helper.h"
int
glusterd_svc_create_rundir(char *rundir)
{
int ret = -1;
- ret = mkdir_p(rundir, 0777, _gf_true);
+ ret = mkdir_p(rundir, 0755, _gf_true);
if ((ret == -1) && (EEXIST != errno)) {
gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create rundir %s", rundir);
@@ -32,14 +33,14 @@ glusterd_svc_create_rundir(char *rundir)
return ret;
}
-static void
+void
glusterd_svc_build_logfile_path(char *server, char *logdir, char *logfile,
size_t len)
{
snprintf(logfile, len, "%s/%s.log", logdir, server);
}
-static void
+void
glusterd_svc_build_volfileid_path(char *server, char *volfileid, size_t len)
{
snprintf(volfileid, len, "gluster/%s", server);
@@ -143,7 +144,7 @@ glusterd_svc_init(glusterd_svc_t *svc, char *svc_name)
glusterd_svc_build_rundir(svc_name, priv->rundir, rundir, sizeof(rundir));
ret = glusterd_svc_init_common(svc, svc_name, priv->workdir, rundir,
- DEFAULT_LOG_FILE_DIRECTORY, NULL);
+ priv->logdir, NULL);
return ret;
}
@@ -161,74 +162,92 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline)
char *localtime_logging = NULL;
char *log_level = NULL;
char daemon_log_level[30] = {0};
+ char msg[1024] = {
+ 0,
+ };
int32_t len = 0;
this = THIS;
GF_ASSERT(this);
priv = this->private;
- GF_ASSERT(priv);
+ GF_VALIDATE_OR_GOTO("glusterd", priv, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+
+ pthread_mutex_lock(&priv->attach_lock);
+ {
+ if (glusterd_proc_is_running(&(svc->proc))) {
+ ret = 0;
+ goto unlock;
+ }
- if (glusterd_proc_is_running(&(svc->proc))) {
- ret = 0;
- goto out;
- }
+ ret = sys_access(svc->proc.volfile, F_OK);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_NOT_FOUND,
+ "Volfile %s is not present", svc->proc.volfile);
+ goto unlock;
+ }
- ret = sys_access(svc->proc.volfile, F_OK);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_NOT_FOUND,
- "Volfile %s is not present", svc->proc.volfile);
- goto out;
- }
+ runinit(&runner);
- runinit(&runner);
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
+ len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
+ svc->proc.logdir, svc->name);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto unlock;
+ }
+
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
- if (this->ctx->cmd_args.valgrind) {
- len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
- svc->proc.logfile, svc->name);
- if ((len < 0) || (len >= PATH_MAX)) {
- ret = -1;
- goto out;
+ runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
- runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
- }
+ runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s",
+ svc->proc.volfileserver, "--volfile-id",
+ svc->proc.volfileid, "-p", svc->proc.pidfile, "-l",
+ svc->proc.logfile, "-S", svc->conn.sockpath, NULL);
- runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s",
- svc->proc.volfileserver, "--volfile-id",
- svc->proc.volfileid, "-p", svc->proc.pidfile, "-l",
- svc->proc.logfile, "-S", svc->conn.sockpath, NULL);
+ if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
+ SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY),
+ &localtime_logging) == 0) {
+ if (strcmp(localtime_logging, "enable") == 0)
+ runner_add_arg(&runner, "--localtime-logging");
+ }
+ if (dict_get_strn(priv->opts, GLUSTERD_DAEMON_LOG_LEVEL_KEY,
+ SLEN(GLUSTERD_DAEMON_LOG_LEVEL_KEY),
+ &log_level) == 0) {
+ snprintf(daemon_log_level, 30, "--log-level=%s", log_level);
+ runner_add_arg(&runner, daemon_log_level);
+ }
- if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
- SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY),
- &localtime_logging) == 0) {
- if (strcmp(localtime_logging, "enable") == 0)
- runner_add_arg(&runner, "--localtime-logging");
- }
- if (dict_get_strn(priv->opts, GLUSTERD_DAEMON_LOG_LEVEL_KEY,
- SLEN(GLUSTERD_DAEMON_LOG_LEVEL_KEY), &log_level) == 0) {
- snprintf(daemon_log_level, 30, "--log-level=%s", log_level);
- runner_add_arg(&runner, daemon_log_level);
- }
+ if (this->ctx->cmd_args.global_threading) {
+ runner_add_arg(&runner, "--global-threading");
+ }
- if (cmdline)
- dict_foreach(cmdline, svc_add_args, (void *)&runner);
+ if (cmdline)
+ dict_foreach(cmdline, svc_add_args, (void *)&runner);
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS,
- "Starting %s service", svc->name);
+ snprintf(msg, sizeof(msg), "Starting %s service", svc->name);
+ runner_log(&runner, this->name, GF_LOG_DEBUG, msg);
- if (flags == PROC_START_NO_WAIT) {
- ret = runner_run_nowait(&runner);
- } else {
- synclock_unlock(&priv->big_lock);
- {
- ret = runner_run(&runner);
+ if (flags == PROC_START_NO_WAIT) {
+ ret = runner_run_nowait(&runner);
+ } else {
+ synclock_unlock(&priv->big_lock);
+ {
+ ret = runner_run(&runner);
+ }
+ synclock_lock(&priv->big_lock);
}
- synclock_lock(&priv->big_lock);
}
-
+unlock:
+ pthread_mutex_unlock(&priv->attach_lock);
out:
gf_msg_debug(this->name, 0, "Returning %d", ret);
@@ -281,7 +300,8 @@ glusterd_svc_build_volfile_path(char *server, char *workdir, char *volfile,
glusterd_svc_build_svcdir(server, workdir, dir, sizeof(dir));
- if (!strcmp(server, "quotad")) /*quotad has different volfile name*/
+ if (!strcmp(server, "quotad"))
+ /* quotad has a different volfile name */
snprintf(volfile, len, "%s/%s.vol", dir, server);
else
snprintf(volfile, len, "%s/%s-server.vol", dir, server);
@@ -366,3 +386,151 @@ glusterd_svc_common_rpc_notify(glusterd_conn_t *conn, rpc_clnt_event_t event)
return ret;
}
+
+void
+glusterd_volume_svc_build_volfile_path(char *server, glusterd_volinfo_t *vol,
+ char *volfile, size_t len)
+{
+ GF_ASSERT(len == PATH_MAX);
+
+ if (!strcmp(server, "glustershd")) {
+ glusterd_svc_build_shd_volfile_path(vol, volfile, len);
+ }
+}
+
+int
+glusterd_muxsvc_common_rpc_notify(glusterd_svc_proc_t *mux_proc,
+ rpc_clnt_event_t event)
+{
+ int ret = 0;
+ glusterd_svc_t *svc = NULL;
+ glusterd_svc_t *tmp = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t need_logging = _gf_false;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!mux_proc) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL,
+ "Failed to get the svc proc data");
+ return -1;
+ }
+
+ /* Currently this function is used only for the shd svc; if it is
+ * reused for another svc, change the glustershd references. The svc
+ * name can be taken from any of the attached svcs.
+ */
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ gf_msg_debug(this->name, 0,
+ "glustershd has connected with glusterd.");
+ gf_event(EVENT_SVC_CONNECTED, "svc_name=glustershd");
+ cds_list_for_each_entry_safe(svc, tmp, &mux_proc->svcs, mux_svc)
+ {
+ if (svc->online)
+ continue;
+ svc->online = _gf_true;
+ }
+ if (mux_proc->status != GF_SVC_STARTED)
+ mux_proc->status = GF_SVC_STARTED;
+
+ break;
+
+ case RPC_CLNT_DISCONNECT:
+ cds_list_for_each_entry_safe(svc, tmp, &mux_proc->svcs, mux_svc)
+ {
+ if (svc->online) {
+ if (!need_logging)
+ need_logging = _gf_true;
+ svc->online = _gf_false;
+ }
+ }
+ if (mux_proc->status != GF_SVC_DIED) {
+ svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t,
+ mux_svc);
+ if (svc && !glusterd_proc_is_running(&svc->proc)) {
+ mux_proc->status = GF_SVC_DIED;
+ } else {
+ mux_proc->status = GF_SVC_DISCONNECTED;
+ }
+ }
+
+ if (need_logging) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_NODE_DISCONNECTED,
+ "glustershd has disconnected from glusterd.");
+ gf_event(EVENT_SVC_DISCONNECTED, "svc_name=glustershd");
+ }
+ break;
+
+ default:
+ gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
+ break;
+ }
+
+ return ret;
+}
+
+int
+glusterd_muxsvc_conn_init(glusterd_conn_t *conn, glusterd_svc_proc_t *mux_proc,
+ char *sockpath, int frame_timeout,
+ glusterd_muxsvc_conn_notify_t notify)
+{
+ tmpclean = 1; /* Set the flag to unlink() the tmpfile */
+ dict_t *options = NULL;
+ struct rpc_clnt *rpc = NULL;
+ xlator_t *this = THIS;
+ glusterd_svc_t *svc = NULL;
+
+ options = dict_new();
+ if (!this || !options)
+ goto out;
+
+ svc = cds_list_entry(conn, glusterd_svc_t, conn);
+ if (!svc) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL,
+ "Failed to get the service");
+ goto out;
+ }
+
+ ret = rpc_transport_unix_options_build(options, sockpath, frame_timeout);
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32n(options, "transport.socket.ignore-enoent",
+ SLEN("transport.socket.ignore-enoent"), 1);
+ if (ret)
+ goto out;
+
+ /* @options is freed by rpc_transport when destroyed */
+ rpc = rpc_clnt_new(options, this, (char *)svc->name, 16);
+ if (!rpc) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = rpc_clnt_register_notify(rpc, glusterd_muxsvc_conn_common_notify,
+ mux_proc);
+ if (ret)
+ goto out;
+
+ ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath);
+ if (ret < 0)
+ goto out;
+ else
+ ret = 0;
+
+ conn->frame_timeout = frame_timeout;
+ conn->rpc = rpc;
+ mux_proc->notify = notify;
+out:
+ if (options)
+ dict_unref(options);
+ if (ret) {
+ if (rpc) {
+ rpc_clnt_unref(rpc);
+ rpc = NULL;
+ }
+ }
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
index 3bf142b58d3..5daee993833 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
@@ -13,9 +13,12 @@
#include "glusterd-proc-mgmt.h"
#include "glusterd-conn-mgmt.h"
+#include "glusterd-rcu.h"
struct glusterd_svc_;
+
typedef struct glusterd_svc_ glusterd_svc_t;
+typedef struct glusterd_svc_proc_ glusterd_svc_proc_t;
typedef void (*glusterd_svc_build_t)(glusterd_svc_t *svc);
@@ -25,16 +28,38 @@ typedef int (*glusterd_svc_start_t)(glusterd_svc_t *svc, int flags);
typedef int (*glusterd_svc_stop_t)(glusterd_svc_t *svc, int sig);
typedef int (*glusterd_svc_reconfigure_t)(void *data);
+typedef int (*glusterd_muxsvc_conn_notify_t)(glusterd_svc_proc_t *mux_proc,
+ rpc_clnt_event_t event);
+
+typedef enum gf_svc_status {
+ GF_SVC_STARTING,
+ GF_SVC_STARTED,
+ GF_SVC_STOPPING,
+ GF_SVC_DISCONNECTED,
+ GF_SVC_DIED,
+} gf_svc_status_t;
+
+struct glusterd_svc_proc_ {
+ struct cds_list_head svc_proc_list;
+ struct cds_list_head svcs;
+ glusterd_muxsvc_conn_notify_t notify;
+ rpc_clnt_t *rpc;
+ void *data;
+ gf_svc_status_t status;
+};
+
struct glusterd_svc_ {
- char name[PATH_MAX];
glusterd_conn_t conn;
- glusterd_proc_t proc;
glusterd_svc_manager_t manager;
glusterd_svc_start_t start;
glusterd_svc_stop_t stop;
+ glusterd_svc_reconfigure_t reconfigure;
+ glusterd_svc_proc_t *svc_proc;
+ struct cds_list_head mux_svc;
+ glusterd_proc_t proc;
+ char name[NAME_MAX];
gf_boolean_t online;
gf_boolean_t inited;
- glusterd_svc_reconfigure_t reconfigure;
};
int
@@ -58,6 +83,10 @@ glusterd_svc_build_volfile_path(char *server, char *workdir, char *volfile,
size_t len);
void
+glusterd_svc_build_logfile_path(char *server, char *logdir, char *logfile,
+ size_t len);
+
+void
glusterd_svc_build_svcdir(char *server, char *workdir, char *path, size_t len);
void
@@ -69,4 +98,15 @@ glusterd_svc_reconfigure(int (*create_volfile)());
int
glusterd_svc_common_rpc_notify(glusterd_conn_t *conn, rpc_clnt_event_t event);
+int
+glusterd_muxsvc_common_rpc_notify(glusterd_svc_proc_t *mux_proc,
+ rpc_clnt_event_t event);
+
+int
+glusterd_proc_get_pid(glusterd_proc_t *proc);
+
+int
+glusterd_muxsvc_conn_init(glusterd_conn_t *conn, glusterd_svc_proc_t *mux_proc,
+ char *sockpath, int frame_timeout,
+ glusterd_muxsvc_conn_notify_t notify);
#endif
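The reshuffled glusterd_svc_ and the new glusterd_svc_proc_ above implement intrusive-list multiplexing: each svc embeds a cds_list_head (mux_svc) and is linked onto exactly one process's svcs list. A standalone sketch with liburcu's list macros (demo types are stand-ins for the real structures):

    #include <stdio.h>
    #include <urcu/list.h>

    struct demo_svc {
        const char *name;
        struct cds_list_head mux_svc; /* linkage into one process's svcs */
    };

    struct demo_proc {
        struct cds_list_head svcs; /* all services muxed into this process */
    };

    int main(void)
    {
        struct demo_proc proc;
        struct demo_svc a = {.name = "vol-a"}, b = {.name = "vol-b"};
        struct demo_svc *pos;

        CDS_INIT_LIST_HEAD(&proc.svcs);
        CDS_INIT_LIST_HEAD(&a.mux_svc);
        CDS_INIT_LIST_HEAD(&b.mux_svc);

        cds_list_add_tail(&a.mux_svc, &proc.svcs);
        cds_list_add_tail(&b.mux_svc, &proc.svcs);

        cds_list_for_each_entry(pos, &proc.svcs, mux_svc)
            printf("muxed: %s\n", pos->name);
        return 0;
    }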
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index f45b1eacea6..b73d37ad08e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -52,13 +52,13 @@ gd_collate_errors(struct syncargs *args, int op_ret, int op_errno,
args->op_ret = op_ret;
args->op_errno = op_errno;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(peerid, NULL);
if (peerinfo)
peer_str = gf_strdup(peerinfo->hostname);
else
peer_str = gf_strdup(uuid_utoa(uuid));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (op_errstr && strcmp(op_errstr, "")) {
len = snprintf(err_str, sizeof(err_str) - 1, "Error: %s",
@@ -143,6 +143,8 @@ gd_brick_op_req_free(gd1_mgmt_brick_op_req *req)
if (!req)
return;
+ if (req->dict.dict_val)
+ GF_FREE(req->dict.dict_val);
GF_FREE(req->input.input_val);
GF_FREE(req);
}
@@ -228,7 +230,6 @@ glusterd_syncop_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp)
case GD_OP_CREATE_VOLUME:
case GD_OP_ADD_BRICK:
case GD_OP_START_VOLUME:
- case GD_OP_ADD_TIER_BRICK:
ret = glusterd_aggr_brick_mount_dirs(aggr, rsp);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -309,11 +310,15 @@ glusterd_syncop_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp)
ret = glusterd_max_opversion_use_rsp_dict(aggr, rsp);
break;
- case GD_OP_TIER_STATUS:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_REMOVE_TIER_BRICK:
- ret = glusterd_volume_tier_use_rsp_dict(aggr, rsp);
- /* FALLTHROUGH */
+ case GD_OP_PROFILE_VOLUME:
+ ret = glusterd_profile_volume_use_rsp_dict(aggr, rsp);
+ break;
+
+ case GD_OP_REBALANCE:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+ ret = glusterd_volume_rebalance_use_rsp_dict(aggr, rsp);
+ break;
+
default:
break;
}
@@ -401,8 +406,11 @@ gd_syncop_mgmt_v3_lock(glusterd_op_t op, dict_t *op_ctx,
ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
&req.dict.dict_len);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, my_uuid);
gf_uuid_copy(req.txn_id, txn_id);
@@ -502,8 +510,11 @@ gd_syncop_mgmt_v3_unlock(dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
&req.dict.dict_len);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
gf_uuid_copy(req.uuid, my_uuid);
gf_uuid_copy(req.txn_id, txn_id);
@@ -560,20 +571,21 @@ _gd_syncop_mgmt_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_uuid_copy(args->uuid, rsp.uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(*peerid, NULL);
if (peerinfo) {
/* Set peer as locked, so we unlock only the locked peers */
if (rsp.op_ret == 0)
peerinfo->locked = _gf_true;
+ RCU_READ_UNLOCK;
} else {
+ RCU_READ_UNLOCK;
rsp.op_ret = -1;
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_PEER_NOT_FOUND,
"Could not find peer with "
"ID %s",
uuid_utoa(*peerid));
}
- rcu_read_unlock();
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -661,18 +673,19 @@ _gd_syncop_mgmt_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_uuid_copy(args->uuid, rsp.uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(*peerid, NULL);
if (peerinfo) {
peerinfo->locked = _gf_false;
+ RCU_READ_UNLOCK;
} else {
+ RCU_READ_UNLOCK;
rsp.op_ret = -1;
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_PEER_NOT_FOUND,
"Could not find peer with "
"ID %s",
uuid_utoa(*peerid));
}
- rcu_read_unlock();
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -770,9 +783,9 @@ _gd_syncop_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
ret = -1;
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -835,16 +848,21 @@ gd_syncop_mgmt_stage_op(glusterd_peerinfo_t *peerinfo, struct syncargs *args,
uuid_t *peerid = NULL;
req = GF_CALLOC(1, sizeof(*req), gf_gld_mt_mop_stage_req_t);
- if (!req)
+ if (!req) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
gf_uuid_copy(req->uuid, my_uuid);
req->op = op;
ret = dict_allocate_and_serialize(dict_out, &req->buf.buf_val,
&req->buf.buf_len);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
if (ret)
@@ -896,6 +914,8 @@ _gd_syncop_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
if (rsp.output.output_len) {
args->dict = dict_new();
if (!args->dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
ret = -1;
args->op_errno = ENOMEM;
goto out;
@@ -903,8 +923,11 @@ _gd_syncop_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.output.output_val, rsp.output.output_len,
&args->dict);
- if (ret < 0)
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_UNSERIALIZE_FAIL, NULL);
goto out;
+ }
}
args->op_ret = rsp.op_ret;
@@ -1072,9 +1095,9 @@ _gd_syncop_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == 0);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
ret = -1;
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1145,16 +1168,21 @@ gd_syncop_mgmt_commit_op(glusterd_peerinfo_t *peerinfo, struct syncargs *args,
uuid_t *peerid = NULL;
req = GF_CALLOC(1, sizeof(*req), gf_gld_mt_mop_commit_req_t);
- if (!req)
+ if (!req) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
gf_uuid_copy(req->uuid, my_uuid);
req->op = op;
ret = dict_allocate_and_serialize(dict_out, &req->buf.buf_val,
&req->buf.buf_len);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
if (ret)
@@ -1182,10 +1210,15 @@ gd_lock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
struct syncargs args = {0};
this = THIS;
- synctask_barrier_init((&args));
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1209,7 +1242,7 @@ gd_lock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
peer_uuid, txn_id);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1266,8 +1299,10 @@ gd_stage_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
GF_ASSERT(conf);
rsp_dict = dict_new();
- if (!rsp_dict)
+ if (!rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
if ((op == GD_OP_CREATE_VOLUME) || (op == GD_OP_ADD_BRICK) ||
(op == GD_OP_START_VOLUME))
@@ -1312,10 +1347,13 @@ stage_done:
}
gd_syncargs_init(&args, aggr_dict);
- synctask_barrier_init((&args));
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1334,7 +1372,7 @@ stage_done:
req_dict, op_ctx);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1393,6 +1431,7 @@ gd_commit_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
rsp_dict = dict_new();
if (!rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -1440,14 +1479,20 @@ commit_done:
}
gd_syncargs_init(&args, op_ctx);
- synctask_barrier_init((&args));
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+
peer_cnt = 0;
origin_glusterd = is_origin_glusterd(req_dict);
if (op == GD_OP_STATUS_VOLUME) {
ret = dict_get_uint32(req_dict, "cmd", &cmd);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=cmd", NULL);
goto out;
+ }
if (origin_glusterd) {
if ((cmd & GF_CLI_STATUS_ALL)) {
@@ -1457,7 +1502,7 @@ commit_done:
}
}
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1476,7 +1521,7 @@ commit_done:
req_dict, op_ctx);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1532,11 +1577,14 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
goto out;
}
- synctask_barrier_init((&args));
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+
peer_cnt = 0;
if (cluster_lock) {
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before
@@ -1557,7 +1605,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
} else {
ret = dict_get_int32(op_ctx, "hold_global_locks", &global);
if (!ret && global)
@@ -1565,7 +1613,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
else
type = "vol";
if (volname || global) {
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were
@@ -1584,7 +1632,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
tmp_uuid, txn_id);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
}
@@ -1660,6 +1708,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
char **op_errstr)
{
glusterd_pending_node_t *pending_node = NULL;
+ glusterd_pending_node_t *tmp = NULL;
struct cds_list_head selected = {
0,
};
@@ -1669,10 +1718,12 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
rpc_clnt_t *rpc = NULL;
dict_t *rsp_dict = NULL;
int32_t cmd = GF_OP_CMD_NONE;
+ glusterd_volinfo_t *volinfo = NULL;
this = THIS;
rsp_dict = dict_new();
if (!rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -1697,40 +1748,33 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
rsp_dict = NULL;
brick_count = 0;
- cds_list_for_each_entry(pending_node, &selected, list)
+ cds_list_for_each_entry_safe(pending_node, tmp, &selected, list)
{
rpc = glusterd_pending_node_get_rpc(pending_node);
+ /* In the case of rebalance, if the rpc object is null we try to
+ * create it. If the rebalance daemon is down, this returns -1;
+ * otherwise the rpc object is created and referenced.
+ */
if (!rpc) {
- if (pending_node->type == GD_NODE_REBALANCE) {
- ret = 0;
- glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
+ if (pending_node->type == GD_NODE_REBALANCE && pending_node->node) {
+ volinfo = pending_node->node;
+ ret = glusterd_rebalance_rpc_create(volinfo);
+ if (ret) {
+ ret = 0;
+ glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
+ goto out;
+ } else {
+ rpc = glusterd_defrag_rpc_get(volinfo->rebal.defrag);
+ }
+ } else {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
+ "Brick Op failed "
+ "due to rpc failure.");
goto out;
}
-
- ret = -1;
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
- "Brick Op failed "
- "due to rpc failure.");
- goto out;
}
- /* Redirect operation to be detach tier via rebalance flow. */
- ret = dict_get_int32(req_dict, "command", &cmd);
- if (!ret) {
- if (cmd == GF_OP_CMD_DETACH_START) {
- /* this change is left to support backward
- * compatibility. */
- op = GD_OP_REBALANCE;
- ret = dict_set_int32(req_dict, "rebalance-command",
- GF_DEFRAG_CMD_START_DETACH_TIER);
- } else if (cmd == GF_DEFRAG_CMD_DETACH_START) {
- op = GD_OP_REMOVE_TIER_BRICK;
- ret = dict_set_int32(req_dict, "rebalance-command",
- GF_DEFRAG_CMD_DETACH_START);
- }
- if (ret)
- goto out;
- }
ret = gd_syncop_mgmt_brick_op(rpc, pending_node, op, req_dict, op_ctx,
op_errstr);
if (op == GD_OP_STATUS_VOLUME) {
@@ -1742,24 +1786,19 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
if (dict_get(op_ctx, "client-count"))
break;
}
- } else if (cmd == GF_OP_CMD_DETACH_START) {
- op = GD_OP_REMOVE_BRICK;
- dict_del(req_dict, "rebalance-command");
- } else if (cmd == GF_DEFRAG_CMD_DETACH_START) {
- op = GD_OP_REMOVE_TIER_BRICK;
- dict_del(req_dict, "rebalance-command");
}
if (ret)
goto out;
brick_count++;
glusterd_pending_node_put_rpc(pending_node);
+ GF_FREE(pending_node);
}
pending_node = NULL;
ret = 0;
out:
- if (pending_node)
+ if (pending_node && pending_node->node)
glusterd_pending_node_put_rpc(pending_node);
if (rsp_dict)
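The rcu_read_lock()/rcu_read_unlock() to RCU_READ_LOCK/RCU_READ_UNLOCK conversions throughout this file keep the same read-side semantics; presumably the macros (from glusterd-rcu.h) only add bookkeeping around the liburcu calls, which is an assumption here. A standalone sketch of the underlying read-side pattern with classic liburcu (link with -lurcu):

    #include <stdio.h>
    #include <urcu.h>

    int main(void)
    {
        rcu_register_thread(); /* each reader thread registers once */

        rcu_read_lock();
        /* ... traverse an RCU-protected list, e.g. the peers list ... */
        rcu_read_unlock();

        rcu_unregister_thread();
        puts("read-side critical section done");
        return 0;
    }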
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.h b/xlators/mgmt/glusterd/src/glusterd-syncop.h
index 37195ef0112..a265f2135c6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.h
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.h
@@ -10,7 +10,7 @@
#ifndef __RPC_SYNCOP_H
#define __RPC_SYNCOP_H
-#include "syncop.h"
+#include <glusterfs/syncop.h>
#include "glusterd-sm.h"
#include "glusterd.h"
@@ -32,7 +32,7 @@
ret = gd_syncop_submit_request(rpc, req, stb, cookie, prog, procnum, \
cbk, (xdrproc_t)xdrproc); \
if (!ret) \
- synctask_yield(stb->task); \
+ synctask_yield(stb->task, NULL); \
else \
gf_asprintf(&stb->errstr, \
"%s failed. Check log file" \
diff --git a/xlators/mgmt/glusterd/src/glusterd-tier.c b/xlators/mgmt/glusterd/src/glusterd-tier.c
deleted file mode 100644
index e980026d8ec..00000000000
--- a/xlators/mgmt/glusterd/src/glusterd-tier.c
+++ /dev/null
@@ -1,1382 +0,0 @@
-/*
- Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
- */
-
-#include "common-utils.h"
-#include "cli1-xdr.h"
-#include "xdr-generic.h"
-#include "glusterd.h"
-#include "glusterd-op-sm.h"
-#include "glusterd-store.h"
-#include "glusterd-geo-rep.h"
-#include "glusterd-utils.h"
-#include "glusterd-volgen.h"
-#include "run.h"
-#include "syscall.h"
-#include "byte-order.h"
-#include "glusterd-svc-helper.h"
-#include "compat-errno.h"
-#include "glusterd-tierd-svc.h"
-#include "glusterd-tierd-svc-helper.h"
-#include "glusterd-messages.h"
-#include "glusterd-mgmt.h"
-#include "glusterd-syncop.h"
-
-#include <sys/wait.h>
-#include <dlfcn.h>
-
-extern struct rpc_clnt_program gd_brick_prog;
-
-const char *gd_tier_op_list[GF_DEFRAG_CMD_TYPE_MAX] = {
- [GF_DEFRAG_CMD_START_TIER] = "start",
- [GF_DEFRAG_CMD_STOP_TIER] = "stop",
-};
-
-int
-__glusterd_handle_tier(rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf_cli_req cli_req = {{
- 0,
- }};
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_TIER_START_STOP;
- char *volname = NULL;
- int32_t cmd = 0;
- char msg[2048] = {
- 0,
- };
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char err_str[2048] = {0};
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
- GF_VALIDATE_OR_GOTO(this->name, req, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO(this->name, conf, out);
-
- ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new();
-
- ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf(msg, sizeof(msg),
- "Unable to decode the "
- "command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
- }
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- snprintf(msg, sizeof(msg), "Unable to get volume name");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name, "
- "while handling tier command");
- goto out;
- }
-
- ret = dict_get_int32n(dict, "rebalance-command", SLEN("rebalance-command"),
- &cmd);
- if (ret) {
- snprintf(msg, sizeof(msg), "Unable to get the command");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get the cmd");
- goto out;
- }
-
- if (conf->op_version < GD_OP_VERSION_3_7_0) {
- snprintf(msg, sizeof(msg),
- "Cannot execute command. The "
- "cluster is operating at version %d. Tier command "
- "%s is unavailable in this version",
- conf->op_version, gd_tier_op_list[cmd]);
- ret = -1;
- goto out;
- }
-
- if (conf->op_version < GD_OP_VERSION_3_10_0) {
- gf_msg_debug(this->name, 0,
- "The cluster is operating at "
- "version less than or equal to %d. Falling back "
- "to syncop framework.",
- GD_OP_VERSION_3_7_5);
- switch (cmd) {
- case GF_DEFRAG_CMD_DETACH_STOP:
- ret = dict_set_int32n(dict, "rebalance-command",
- SLEN("rebalance-command"),
- GF_DEFRAG_CMD_STOP_DETACH_TIER);
- break;
-
- case GF_DEFRAG_CMD_DETACH_COMMIT:
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Volume "
- "%s does not exist",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "%s", err_str);
- goto out;
- }
- ret = glusterd_set_detach_bricks(dict, volinfo);
- ret = dict_set_int32n(dict, "command", SLEN("command"),
- GF_OP_CMD_DETACH_COMMIT);
- break;
- case GF_DEFRAG_CMD_DETACH_COMMIT_FORCE:
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Volume "
- "%s does not exist",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "%s", err_str);
- goto out;
- }
- ret = glusterd_set_detach_bricks(dict, volinfo);
- ret = dict_set_int32n(dict, "command", SLEN("command"),
- GF_OP_CMD_DETACH_COMMIT_FORCE);
- break;
- case GF_DEFRAG_CMD_DETACH_START:
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Volume "
- "%s does not exist",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "%s", err_str);
- goto out;
- }
- ret = glusterd_set_detach_bricks(dict, volinfo);
- ret = dict_set_int32n(dict, "command", SLEN("command"),
- GF_OP_CMD_DETACH_START);
- break;
-
- default:
- break;
- }
- if (ret) {
- gf_log(this->name, GF_LOG_ERROR, "Failed to set dict");
- goto out;
- }
- if ((cmd == GF_DEFRAG_CMD_STATUS_TIER) ||
- (cmd == GF_DEFRAG_CMD_DETACH_STATUS) ||
- (cmd == GF_DEFRAG_CMD_START_TIER) ||
- (cmd == GF_DEFRAG_CMD_DETACH_STOP)) {
- ret = glusterd_op_begin(req, GD_OP_DEFRAG_BRICK_VOLUME, dict, msg,
- sizeof(msg));
- } else
- ret = glusterd_op_begin(req, GD_OP_REMOVE_BRICK, dict, msg,
- sizeof(msg));
-
- glusterd_friend_sm();
- glusterd_op_sm();
-
- } else {
- switch (cmd) {
- case GF_DEFRAG_CMD_STATUS_TIER:
- cli_op = GD_OP_TIER_STATUS;
- break;
-
- case GF_DEFRAG_CMD_DETACH_STATUS:
- cli_op = GD_OP_DETACH_TIER_STATUS;
- break;
-
- case GF_DEFRAG_CMD_DETACH_STOP:
- cli_op = GD_OP_REMOVE_TIER_BRICK;
- break;
-
- case GF_DEFRAG_CMD_DETACH_COMMIT:
- case GF_DEFRAG_CMD_DETACH_COMMIT_FORCE:
- case GF_DEFRAG_CMD_DETACH_START:
- cli_op = GD_OP_REMOVE_TIER_BRICK;
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Volume "
- "%s does not exist",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "%s", err_str);
- goto out;
- }
- ret = glusterd_set_detach_bricks(dict, volinfo);
- break;
-
- default:
- break;
- }
- if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "dict set failed");
- goto out;
- }
- ret = glusterd_mgmt_v3_initiate_all_phases(req, cli_op, dict);
- }
-
-out:
- if (ret) {
- if (msg[0] == '\0')
- snprintf(msg, sizeof(msg), "Tier operation failed");
- ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, msg);
- }
-
- return ret;
-}
-
-int
-glusterd_handle_tier(rpcsvc_request_t *req)
-{
- return glusterd_big_locked_handler(req, __glusterd_handle_tier);
-}
-
-int
-glusterd_op_remove_tier_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
-{
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char *brick = NULL;
- int32_t count = 0;
- int32_t i = 1;
- char key[256] = {
- 0,
- };
- int keylen;
- int32_t flag = 0;
- char err_str[4096] = {
- 0,
- };
- int need_rebalance = 0;
- int force = 0;
- int32_t cmd = 0;
- int32_t replica_count = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *tmp = NULL;
- char *task_id_str = NULL;
- dict_t *bricks_dict = NULL;
- char *brick_tmpstr = NULL;
- uint32_t commit_hash = 0;
- int detach_commit = 0;
- void *tier_info = NULL;
- char *cold_shd_key = NULL;
- char *hot_shd_key = NULL;
- int delete_key = 1;
- glusterd_svc_t *svc = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
- GF_VALIDATE_OR_GOTO(this->name, dict, out);
- GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
- "Unable to get volinfo");
- goto out;
- }
-
- ret = dict_get_int32n(dict, "rebalance-command", SLEN("rebalance-command"),
- &cmd);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "cmd not found");
- goto out;
- }
-
- if (is_origin_glusterd(dict) && (cmd != GF_DEFRAG_CMD_DETACH_START)) {
- if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
- ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id, dict,
- GF_REMOVE_BRICK_TID_KEY,
- SLEN(GF_REMOVE_BRICK_TID_KEY));
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_REMOVE_BRICK_ID_SET_FAIL,
- "Failed to set remove-brick-id");
- goto out;
- }
- }
- }
-    /* Check only if a tierd is supposed to be running:
-     * if no brick in the tiered volume is a local brick,
-     * skip it. */
- cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
- {
- if (glusterd_is_local_brick(this, volinfo, brickinfo)) {
- flag = _gf_true;
- break;
- }
- }
- if (!flag)
- goto out;
-
- ret = -1;
-
- switch (cmd) {
- case GF_DEFRAG_CMD_DETACH_STOP:
- /* Fall back to the old volume file */
- cds_list_for_each_entry_safe(brickinfo, tmp, &volinfo->bricks,
- brick_list)
- {
- if (!brickinfo->decommissioned)
- continue;
- brickinfo->decommissioned = 0;
- }
- volinfo->tier.op = GD_OP_DETACH_NOT_STARTED;
- ret = volinfo->tierd.svc.manager(&(volinfo->tierd.svc), volinfo,
- PROC_START_NO_WAIT);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_MANAGER_FUNCTION_FAILED,
- "Calling manager for tier "
- "failed on volume: %s for "
- "detach stop",
- volinfo->volname);
- goto out;
- }
-
- ret = glusterd_create_volfiles_and_notify_services(volinfo);
-
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles");
- goto out;
- }
-
- ret = glusterd_store_volinfo(volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL,
- "failed to store volinfo");
- goto out;
- }
- ret = 0;
- goto out;
-
- case GF_DEFRAG_CMD_DETACH_START:
- volinfo->tier.op = GD_OP_DETACH_TIER;
- svc = &(volinfo->tierd.svc);
- ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_MANAGER_FUNCTION_FAILED,
- "calling manager for tier "
- "failed on volume: %s for "
- "detach start",
- volname);
- goto out;
- }
- ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN(GF_REMOVE_BRICK_TID_KEY), &task_id_str);
- if (ret) {
- gf_msg_debug(this->name, errno, "Missing remove-brick-id");
- ret = 0;
- } else {
- ret = dict_set_strn(rsp_dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN(GF_REMOVE_BRICK_TID_KEY), task_id_str);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to set remove-brick-id"
- "in the dict");
- }
- gf_uuid_parse(task_id_str, volinfo->tier.rebalance_id);
- }
- force = 0;
-
- break;
-
- case GF_DEFRAG_CMD_DETACH_COMMIT:
- if (volinfo->decommission_in_progress) {
- gf_asprintf(op_errstr,
- "use 'force' option as "
- "migration is in progress");
- goto out;
- }
- if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) {
- gf_asprintf(op_errstr,
- "use 'force' option as "
- "migration has failed");
- goto out;
- }
- /* Fall through */
-
- case GF_DEFRAG_CMD_DETACH_COMMIT_FORCE:
- if (cmd == GF_DEFRAG_CMD_DETACH_COMMIT_FORCE) {
- svc = &(volinfo->tierd.svc);
- ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_MANAGER_FUNCTION_FAILED,
- "calling manager for tier "
- "failed on volume: %s for "
- "commit force",
- volname);
- goto out;
- }
- }
- glusterd_op_perform_detach_tier(volinfo);
- detach_commit = 1;
-
- /* Disabling ctr when detaching a tier, since
- * currently tier is the only consumer of ctr.
- * Revisit this code when this constraint no
- * longer exist.
- */
- dict_deln(volinfo->dict, "features.ctr-enabled",
- SLEN("features.ctr-enabled"));
- dict_deln(volinfo->dict, "cluster.tier-mode",
- SLEN("cluster.tier-mode"));
-
- hot_shd_key = gd_get_shd_key(volinfo->tier_info.hot_type);
- cold_shd_key = gd_get_shd_key(volinfo->tier_info.cold_type);
- if (hot_shd_key) {
- /*
- * Since post detach, shd graph will not
- * contain hot tier. So we need to clear
- * option set for hot tier. For a tiered
- * volume there can be different key
- * for both hot and cold. If hot tier is
- * shd compatible then we need to remove
- * the configured value when detaching a tier,
- * only if the key's are different or
- * cold key is NULL. So we will set
- * delete_key first, and if cold key is not
- * null and they are equal then we will clear
- * the flag. Otherwise we will delete the
- * key.
- */
-
- if (cold_shd_key)
- delete_key = strcmp(hot_shd_key, cold_shd_key);
- if (delete_key)
- dict_del(volinfo->dict, hot_shd_key);
- }
-
- if (volinfo->decommission_in_progress) {
- if (volinfo->tier.defrag) {
- LOCK(&volinfo->rebal.defrag->lock);
- /* Fake 'rebalance-complete' so the
- * graph change
- * happens right away */
- volinfo->tier.defrag_status = GF_DEFRAG_STATUS_COMPLETE;
-
- UNLOCK(&volinfo->tier.defrag->lock);
- }
- }
-
- volinfo->tier.op = GD_OP_DETACH_NOT_STARTED;
- ret = 0;
- force = 1;
- break;
- default:
- gf_asprintf(op_errstr,
- "tier command failed. Invalid "
- "opcode");
- ret = -1;
- goto out;
- }
-
- count = glusterd_set_detach_bricks(dict, volinfo);
-
- if (cmd == GF_DEFRAG_CMD_DETACH_START) {
- bricks_dict = dict_new();
- if (!bricks_dict) {
- ret = -1;
- goto out;
- }
- ret = dict_set_int32n(bricks_dict, "count", SLEN("count"), count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "Failed to save remove-brick count");
- goto out;
- }
- }
-
- while (i <= count) {
- keylen = snprintf(key, sizeof(key), "brick%d", i);
- ret = dict_get_strn(dict, key, keylen, &brick);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get %s", key);
- goto out;
- }
-
- if (cmd == GF_DEFRAG_CMD_DETACH_START) {
- brick_tmpstr = gf_strdup(brick);
- if (!brick_tmpstr) {
- ret = -1;
- gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "Failed to duplicate brick name");
- goto out;
- }
- ret = dict_set_dynstrn(bricks_dict, key, keylen, brick_tmpstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "Failed to add brick to dict");
- goto out;
- }
- brick_tmpstr = NULL;
- }
-
- ret = glusterd_op_perform_remove_brick(volinfo, brick, force,
- &need_rebalance);
- if (ret)
- goto out;
- i++;
- }
-
- if (detach_commit) {
- /* Clear related information from volinfo */
- tier_info = ((void *)(&volinfo->tier_info));
- memset(tier_info, 0, sizeof(volinfo->tier_info));
- }
-
- if (cmd == GF_DEFRAG_CMD_DETACH_START)
- volinfo->tier.dict = dict_ref(bricks_dict);
-
- ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
- &replica_count);
- if (!ret) {
- gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
- "changing replica count %d to %d on volume %s",
- volinfo->replica_count, replica_count, volinfo->volname);
- volinfo->replica_count = replica_count;
- volinfo->sub_count = replica_count;
- volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo);
-
- /*
- * volinfo->type and sub_count have already been set for
- * volumes undergoing a detach operation, they should not
- * be modified here.
- */
- if ((replica_count == 1) && (cmd != GF_DEFRAG_CMD_DETACH_COMMIT) &&
- (cmd != GF_DEFRAG_CMD_DETACH_COMMIT_FORCE)) {
- if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
- volinfo->type = GF_CLUSTER_TYPE_NONE;
- /* backward compatibility */
- volinfo->sub_count = 0;
- } else {
- volinfo->type = GF_CLUSTER_TYPE_STRIPE;
- /* backward compatibility */
- volinfo->sub_count = volinfo->dist_leaf_count;
- }
- }
- }
- volinfo->subvol_count = (volinfo->brick_count / volinfo->dist_leaf_count);
-
- ret = glusterd_create_volfiles_and_notify_services(volinfo);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL,
- "failed to create"
- "volfiles");
- goto out;
- }
-
- ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_STORE_FAIL,
- "failed to store volinfo");
- goto out;
- }
-
- if (cmd == GF_DEFRAG_CMD_DETACH_START &&
- volinfo->status == GLUSTERD_STATUS_STARTED) {
- ret = glusterd_svcs_reconfigure();
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
- "Unable to reconfigure NFS-Server");
- goto out;
- }
- }
- /* Need to reset the defrag/rebalance status accordingly */
- switch (volinfo->tier.defrag_status) {
- case GF_DEFRAG_STATUS_FAILED:
- case GF_DEFRAG_STATUS_COMPLETE:
- volinfo->tier.defrag_status = 0;
- /* FALLTHROUGH */
- default:
- break;
- }
- if (!force && need_rebalance) {
- if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
- volinfo->tier.commit_hash = commit_hash;
- }
- /* perform the rebalance operations */
- ret = glusterd_handle_defrag_start(
- volinfo, err_str, sizeof(err_str), GF_DEFRAG_CMD_START_DETACH_TIER,
- /*change this label to GF_DEFRAG_CMD_DETACH_START
- * while removing old code
- */
- glusterd_remove_brick_migrate_cbk, GD_OP_REMOVE_BRICK);
-
- if (!ret)
- volinfo->decommission_in_progress = 1;
-        else {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REBALANCE_START_FAIL,
- "failed to start the rebalance");
- }
- } else {
- if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_svcs_manager(volinfo);
- }
-
-out:
- if (ret && err_str[0] && op_errstr)
- *op_errstr = gf_strdup(err_str);
-
- GF_FREE(brick_tmpstr);
- if (bricks_dict)
- dict_unref(bricks_dict);
-
- return ret;
-}
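
/* The "count" + "brick%d" dict convention used by the function above, shown
 * in isolation. A minimal sketch assuming only the libglusterfs dict API
 * (dict_get_int32n/dict_get_strn) already used throughout this file: */
static int
iterate_detach_bricks_sketch(dict_t *dict)
{
    int32_t count = 0;
    int32_t i = 0;
    char key[64] = {0};
    int keylen = 0;
    char *brick = NULL;
    int ret;

    ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
    if (ret)
        return ret;

    for (i = 1; i <= count; i++) {
        keylen = snprintf(key, sizeof(key), "brick%d", i);
        ret = dict_get_strn(dict, key, keylen, &brick);
        if (ret)
            return ret;
        /* brick now points at "<host>:<path>" for the i-th brick */
    }
    return 0;
}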
-
-int
-glusterd_op_tier_start_stop(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
-{
- glusterd_volinfo_t *volinfo = NULL;
- int32_t ret = -1;
- char *volname = NULL;
- int cmd = -1;
- xlator_t *this = NULL;
- glusterd_brickinfo_t *brick = NULL;
- gf_boolean_t retval = _gf_false;
- glusterd_conf_t *priv = NULL;
- int32_t pid = -1;
- char pidfile[PATH_MAX] = {0};
- int is_force = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
- GF_VALIDATE_OR_GOTO(this->name, dict, out);
- GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
- goto out;
- }
-
- ret = dict_get_int32n(dict, "rebalance-command", SLEN("rebalance-command"),
- &cmd);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get cmd from "
- "dict");
- goto out;
- }
-
- cds_list_for_each_entry(brick, &volinfo->bricks, brick_list)
- {
- if (gf_uuid_compare(MY_UUID, brick->uuid) == 0) {
- retval = _gf_true;
- break;
- }
- }
-    /* check if this node needs tierd */
-
- if (!retval)
- goto out;
-
- if (glusterd_is_volume_started(volinfo) == 0) {
- *op_errstr = gf_strdup(
- "Volume is stopped, start "
- "volume to enable/disable tier.");
- ret = -1;
- goto out;
- }
-
- GLUSTERD_GET_TIER_PID_FILE(pidfile, volinfo, priv);
-
- switch (cmd) {
- case GF_DEFRAG_CMD_START_TIER:
-            /* we check if it's running and skip so that we don't get a
- * failure during force start
- */
- ret = dict_get_int32n(dict, "force", SLEN("force"), &is_force);
- if (ret) {
- gf_msg_debug(this->name, 0,
- "Unable to get is_force"
- " from dict");
- }
- ret = dict_set_int32n(volinfo->dict, "force", SLEN("force"),
- is_force);
- if (ret) {
- gf_msg_debug(this->name, errno,
- "Unable to set"
- " is_force to dict");
- }
-
- if (!is_force) {
- if (gf_is_service_running(pidfile, &pid)) {
- gf_asprintf(op_errstr,
- "Tier is already "
- "enabled on volume %s.",
- volinfo->volname);
- goto out;
- }
- }
-
- break;
-
- case GF_DEFRAG_CMD_STOP_TIER:
- if (!gf_is_service_running(pidfile, &pid)) {
- gf_asprintf(op_errstr,
- "Tier is alreaady disabled on "
- "volume %s.",
- volinfo->volname);
- goto out;
- }
- break;
- default:
- gf_asprintf(op_errstr,
- "tier command failed. Invalid "
- "opcode");
- ret = -1;
- goto out;
- }
-
- ret = volinfo->tierd.svc.manager(&(volinfo->tierd.svc), volinfo,
- PROC_START_NO_WAIT);
- if (ret)
- goto out;
-
- ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
- "Failed to store volinfo for tier");
- goto out;
- }
-
-out:
- return ret;
-}
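
/* GLUSTERD_GET_TIER_PID_FILE is defined in glusterd.h and is not shown in
 * this diff. By analogy with the GLUSTERD_GET_NFS_PIDFILE macro added to
 * glusterd-utils.c further below, it plausibly has this shape -- the exact
 * path components here are an assumption, not the verbatim macro: */
#define GLUSTERD_GET_TIER_PID_FILE_SKETCH(pidfile, volinfo, priv)              \
    do {                                                                       \
        int32_t _tier_pid_len;                                                 \
        _tier_pid_len = snprintf(pidfile, PATH_MAX, "%s/vols/%s/tierd.pid",    \
                                 priv->workdir, volinfo->volname);             \
        if ((_tier_pid_len < 0) || (_tier_pid_len >= PATH_MAX)) {              \
            pidfile[0] = 0;                                                    \
        }                                                                      \
    } while (0)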
-
-int
-glusterd_op_stage_tier(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
-{
- char *volname = NULL;
- int ret = -1;
- int32_t cmd = 0;
- char msg[2048] = {0};
- glusterd_volinfo_t *volinfo = NULL;
- char *task_id_str = NULL;
-    xlator_t *this = NULL;
- int32_t is_force = 0;
- char pidfile[PATH_MAX] = {0};
- int32_t tier_online = -1;
- int32_t pid = -1;
- int32_t brick_count = 0;
- gsync_status_param_t param = {
- 0,
- };
- glusterd_conf_t *priv = NULL;
- gf_boolean_t flag = _gf_false;
- glusterd_brickinfo_t *brickinfo = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
- GF_VALIDATE_OR_GOTO(this->name, dict, out);
- GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "volname not found");
- goto out;
- }
-
- ret = dict_get_int32n(dict, "rebalance-command", SLEN("rebalance-command"),
- &cmd);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "cmd not found");
- goto out;
- }
-
- ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
- sizeof(msg));
- if (ret) {
- gf_msg_debug(this->name, 0, "cmd validate failed");
- goto out;
- }
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not a tier "
- "volume",
- volinfo->volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER,
- "volume: %s is not a tier "
- "volume",
- volinfo->volname);
- ret = -1;
- goto out;
- }
- /* Check if the connected clients are all of version
- * glusterfs-3.6 and higher. This is needed to prevent some data
- * loss issues that could occur when older clients are connected
- * when rebalance is run. This check can be bypassed by using
- * 'force'
- */
- ret = glusterd_check_client_op_version_support(volname, GD_OP_VERSION_3_6_0,
- NULL);
- if (ret) {
- ret = gf_asprintf(op_errstr,
- "Volume %s has one or "
- "more connected clients of a version"
- " lower than GlusterFS-v3.6.0. "
- "Tier operations not supported in"
- " below this version",
- volname);
- goto out;
- }
-    /* Check only if a tierd is supposed to be running:
-     * if no brick in the tiered volume is a local brick,
-     * skip it. */
- cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
- {
- if (glusterd_is_local_brick(this, volinfo, brickinfo)) {
- flag = _gf_true;
- break;
- }
- }
- if (!flag)
- goto out;
-
- GLUSTERD_GET_TIER_PID_FILE(pidfile, volinfo, priv);
- tier_online = gf_is_service_running(pidfile, &pid);
-
- switch (cmd) {
- case GF_DEFRAG_CMD_START_TIER:
- ret = dict_get_int32n(dict, "force", SLEN("force"), &is_force);
- if (ret)
- is_force = 0;
-
- if (brickinfo->status != GF_BRICK_STARTED) {
- gf_asprintf(op_errstr,
- "Received"
- " tier start on volume "
- "with stopped brick %s",
- brickinfo->path);
- ret = -1;
- goto out;
- }
- if ((!is_force) && tier_online) {
- ret = gf_asprintf(op_errstr,
- "Tier daemon is "
- "already running on volume %s",
- volname);
- ret = -1;
- goto out;
- }
- ret = glusterd_defrag_start_validate(volinfo, msg, sizeof(msg),
- GD_OP_REBALANCE);
- if (ret) {
- gf_msg(this->name, 0, GF_LOG_ERROR, GD_MSG_REBALANCE_START_FAIL,
- "start validate failed");
- goto out;
- }
- if (volinfo->tier.op == GD_OP_DETACH_TIER) {
- snprintf(msg, sizeof(msg),
- "A detach tier task "
- "exists for volume %s. Either commit it"
- " or stop it before starting a new task.",
- volinfo->volname);
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_OLD_REMOVE_BRICK_EXISTS,
- "Earlier detach-tier"
- " task exists for volume %s.",
- volinfo->volname);
- ret = -1;
- goto out;
- }
- break;
-
- case GF_DEFRAG_CMD_STOP_TIER:
-
- if (!tier_online) {
- ret = gf_asprintf(op_errstr,
- "Tier daemon is "
- "not running on volume %s",
- volname);
- ret = -1;
- goto out;
- }
- break;
-
- case GF_DEFRAG_CMD_DETACH_START:
-
- ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get brick count");
- goto out;
- }
-
- if (!tier_online) {
- ret = gf_asprintf(op_errstr,
- "Tier daemon is "
- "not running on volume %s",
- volname);
- ret = -1;
- goto out;
- }
- if (volinfo->tier.op == GD_OP_DETACH_TIER) {
- snprintf(msg, sizeof(msg),
- "An earlier detach tier "
- "task exists for volume %s. Either commit it"
- " or stop it before starting a new task.",
- volinfo->volname);
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_OLD_REMOVE_BRICK_EXISTS,
- "Earlier remove-brick"
- " task exists for volume %s.",
- volinfo->volname);
- ret = -1;
- goto out;
- }
- if (glusterd_is_defrag_on(volinfo)) {
- snprintf(msg, sizeof(msg),
- "Migration is in progress."
- " Please retry after completion");
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_OIP_RETRY_LATER,
- "Migration is"
- "in progress");
- goto out;
- }
-
- ret = glusterd_remove_brick_validate_bricks(
- GF_OP_CMD_NONE, brick_count, dict, volinfo, op_errstr, cmd);
- if (ret)
- goto out;
-
- if (is_origin_glusterd(dict)) {
- ret = glusterd_generate_and_set_task_id(
- dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN(GF_REMOVE_BRICK_TID_KEY));
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TASKID_GEN_FAIL,
- "Failed to generate task-id");
- goto out;
- }
- } else {
- ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN(GF_REMOVE_BRICK_TID_KEY),
- &task_id_str);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, errno,
- GD_MSG_DICT_GET_FAILED, "Missing remove-brick-id");
- ret = 0;
- }
- }
- break;
-
- case GF_DEFRAG_CMD_DETACH_STOP:
- if (volinfo->tier.op != GD_OP_DETACH_TIER) {
- snprintf(msg, sizeof(msg),
- "Detach-tier "
- "not started");
- ret = -1;
- goto out;
- }
- ret = 0;
- break;
-
- case GF_DEFRAG_CMD_STATUS_TIER:
-
- if (!tier_online) {
- ret = gf_asprintf(op_errstr,
- "Tier daemon is "
- "not running on volume %s",
- volname);
- ret = -1;
- goto out;
- }
- break;
-
- case GF_DEFRAG_CMD_DETACH_COMMIT:
-
- if (volinfo->tier.op != GD_OP_DETACH_TIER) {
- snprintf(msg, sizeof(msg),
- "Detach-tier "
- "not started");
- ret = -1;
- goto out;
- }
- if ((volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_STARTED) &&
- (volinfo->tier.op == GD_OP_DETACH_TIER)) {
- ret = -1;
- snprintf(msg, sizeof(msg),
- "Detach is in progress. "
- "Please retry after completion");
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_OIP_RETRY_LATER,
- "Detach is in "
- "progress");
- goto out;
- }
-
- ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get brick count");
- goto out;
- }
-
- ret = glusterd_remove_brick_validate_bricks(
- GF_OP_CMD_NONE, brick_count, dict, volinfo, op_errstr, cmd);
- if (ret)
- goto out;
-
- /* If geo-rep is configured, for this volume, it should be
- * stopped.
- */
- param.volinfo = volinfo;
- ret = glusterd_check_geo_rep_running(&param, op_errstr);
- if (ret || param.is_active) {
- ret = -1;
- goto out;
- }
-
- break;
- case GF_DEFRAG_CMD_DETACH_STATUS:
- if (volinfo->tier.op != GD_OP_DETACH_TIER) {
- snprintf(msg, sizeof(msg),
- "Detach-tier "
- "not started");
- ret = -1;
- goto out;
- }
- break;
-
- case GF_DEFRAG_CMD_DETACH_COMMIT_FORCE:
- default:
- break;
- }
-
- ret = 0;
-out:
- if (ret && op_errstr && msg[0])
- *op_errstr = gf_strdup(msg);
-
- return ret;
-}
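
/* The staging convention followed above, reduced to its skeleton: validate
 * against a local msg buffer, and only copy it into *op_errstr at "out:" on
 * failure so glusterd can relay the text to the CLI. A minimal sketch: */
static int
stage_op_sketch(dict_t *dict, char **op_errstr)
{
    char msg[2048] = {0};
    char *volname = NULL;
    int ret;

    ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
    if (ret) {
        snprintf(msg, sizeof(msg), "volname not found");
        goto out;
    }
    ret = 0;
out:
    if (ret && msg[0])
        *op_errstr = gf_strdup(msg); /* caller owns and frees the copy */
    return ret;
}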
-
-int32_t
-glusterd_add_tierd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
- int32_t count)
-{
- int ret = -1;
- int32_t pid = -1;
- int32_t brick_online = -1;
- char key[64] = {0};
- int keylen;
- char pidfile[PATH_MAX] = {0};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
-
- GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
- GF_VALIDATE_OR_GOTO(this->name, dict, out);
-
- keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
- ret = dict_set_nstrn(dict, key, keylen, "Tier Daemon", SLEN("Tier Daemon"));
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "brick%d.path", count);
- ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(uuid_utoa(MY_UUID)));
- if (ret)
- goto out;
-
-    /* tierd does not have a port, but the CLI needs a port key with
-     * a zero value to parse.
-     */
-
- keylen = snprintf(key, sizeof(key), "brick%d.port", count);
- ret = dict_set_int32n(dict, key, keylen, 0);
- if (ret)
- goto out;
-
- glusterd_svc_build_tierd_pidfile(volinfo, pidfile, sizeof(pidfile));
-
- brick_online = gf_is_service_running(pidfile, &pid);
-
- keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
- ret = dict_set_int32n(dict, key, keylen, pid);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "brick%d.status", count);
- ret = dict_set_int32n(dict, key, keylen, brick_online);
-
-out:
- if (ret)
- gf_msg(this ? this->name : "glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Returning %d. adding values to dict failed", ret);
-
- return ret;
-}
-
-int32_t
-__glusterd_tier_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
- void *myframe)
-{
- gd1_mgmt_brick_op_rsp rsp = {0};
- int ret = -1;
- call_frame_t *frame = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- struct syncargs *args = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
- GF_VALIDATE_OR_GOTO(this->name, req, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
-
- frame = myframe;
- args = frame->local;
-
- if (-1 == req->rpc_status) {
- args->op_errno = ENOTCONN;
- goto out;
- }
-
- ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
- if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
- "Failed to decode brick op "
- "response received");
- goto out;
- }
-
- if (rsp.output.output_len) {
- args->dict = dict_new();
- if (!args->dict) {
- ret = -1;
- args->op_errno = ENOMEM;
- goto out;
- }
-
- ret = dict_unserialize(rsp.output.output_val, rsp.output.output_len,
- &args->dict);
- if (ret < 0)
- goto out;
- }
- args->op_ret = rsp.op_ret;
- args->op_errno = rsp.op_errno;
- args->errstr = gf_strdup(rsp.op_errstr);
-
-out:
- if ((rsp.op_errstr) && (strcmp(rsp.op_errstr, "") != 0))
- free(rsp.op_errstr);
- free(rsp.output.output_val);
- if (req && (req->rpc_status != -1) && (frame)) {
- GLUSTERD_STACK_DESTROY(frame);
- }
- if (args) {
- __wake(args);
- }
-
- return ret;
-}
-
-int32_t
-glusterd_tier_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
- void *myframe)
-{
- return glusterd_big_locked_cbk(req, iov, count, myframe,
- __glusterd_tier_status_cbk);
-}
-
-int
-glusterd_op_tier_status(dict_t *dict, char **op_errstr, dict_t *rsp_dict,
- glusterd_op_t op)
-{
- int ret = -1;
- xlator_t *this = NULL;
- struct syncargs args = {
- 0,
- };
- gd1_mgmt_brick_op_req *req = NULL;
- glusterd_conf_t *priv = NULL;
- int pending_bricks = 0;
-    glusterd_pending_node_t *pending_node = NULL;
- glusterd_req_ctx_t *req_ctx = NULL;
- struct rpc_clnt *rpc = NULL;
- uuid_t *txn_id = NULL;
- extern glusterd_op_info_t opinfo;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
- GF_VALIDATE_OR_GOTO(this->name, dict, out);
- GF_VALIDATE_OR_GOTO(this->name, rsp_dict, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
- args.op_ret = -1;
- args.op_errno = ENOTCONN;
-
- req_ctx = GF_MALLOC(sizeof(*req_ctx), gf_gld_mt_op_allack_ctx_t);
- if (!req_ctx) {
- gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "Allocation failed");
- goto out;
- }
-
- gf_uuid_copy(req_ctx->uuid, MY_UUID);
-
-    /* When the detach status is requested right after a detach start was
-     * issued, the op needs to be GD_OP_DETACH_TIER_STATUS to fetch the
-     * status; for the remaining cases the op can be passed through as is.
-     */
-
- if (op == GD_OP_REMOVE_TIER_BRICK)
- req_ctx->op = GD_OP_DETACH_TIER_STATUS;
- else
- req_ctx->op = op;
-
- req_ctx->dict = dict;
- txn_id = &priv->global_txn_id;
- CDS_INIT_LIST_HEAD(&opinfo.pending_bricks);
-
- ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
- gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
-
- ret = glusterd_op_bricks_select(req_ctx->op, req_ctx->dict, op_errstr,
- &opinfo.pending_bricks, NULL);
-
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_SELECT_FAIL,
- "Failed to select bricks");
- opinfo.op_errstr = *op_errstr;
- goto out;
- }
-
- cds_list_for_each_entry(pending_node, &opinfo.pending_bricks, list)
- {
- ret = glusterd_brick_op_build_payload(req_ctx->op, pending_node->node,
- (gd1_mgmt_brick_op_req **)&req,
- req_ctx->dict);
-
- if (ret || !req) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
- "Failed to build brick op payload during "
- "'Volume %s'",
- gd_op_list[req_ctx->op]);
- goto out;
- }
-
- rpc = glusterd_pending_node_get_rpc(pending_node);
- if (!rpc) {
- opinfo.brick_pending_count = 0;
- ret = 0;
- if (req) {
- if (req->input.input_val)
- GF_FREE(req->input.input_val);
- GF_FREE(req);
- req = NULL;
- }
- glusterd_defrag_volume_node_rsp(req_ctx->dict, NULL, rsp_dict);
-
- goto out;
- }
-
- GD_SYNCOP(rpc, (&args), NULL, glusterd_tier_status_cbk, req,
- &gd_brick_prog, req->op, xdr_gd1_mgmt_brick_op_req);
-
- if (req->input.input_val)
- GF_FREE(req->input.input_val);
- GF_FREE(req);
- req = NULL;
-
- if (!ret)
- pending_bricks++;
-
-        glusterd_handle_node_rsp(req_ctx->dict, pending_node->node,
-                                 req_ctx->op, args.dict, rsp_dict, op_errstr,
-                                 pending_node->type);
-        glusterd_pending_node_put_rpc(pending_node);
-    }
- gf_msg_trace(this->name, 0,
- "Sent commit op req for operation "
- "'Volume %s' to %d bricks",
- gd_op_list[req_ctx->op], pending_bricks);
- opinfo.brick_pending_count = pending_bricks;
-out:
- if (ret)
- opinfo.op_ret = ret;
-
- ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
- if (ret)
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set transaction's opinfo");
- if (args.dict)
- dict_unref(args.dict);
- if (args.errstr)
- GF_FREE(args.errstr);
-
- if (req_ctx)
- GF_FREE(req_ctx);
-
- gf_msg_debug(this ? this->name : "glusterd", 0,
- "Returning %d. Failed to get tier status", ret);
- return ret;
-}
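
/* How the callback and the op above meet: GD_SYNCOP sends the brick-op RPC
 * and yields the current synctask; __glusterd_tier_status_cbk fills the
 * shared struct syncargs and calls __wake() to resume it. Condensed from
 * the code above, with no new APIs assumed: */
struct syncargs args = {
    0,
};

args.op_ret = -1;
args.op_errno = ENOTCONN;
GD_SYNCOP(rpc, (&args), NULL, glusterd_tier_status_cbk, req, &gd_brick_prog,
          req->op, xdr_gd1_mgmt_brick_op_req);
/* execution resumes here once the callback has run: args.op_ret,
 * args.op_errno and args.dict carry the brick's answer, or the defaults
 * above if the brick never responded */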
diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c
index 04a6a2e4965..035795b3deb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c
@@ -12,7 +12,7 @@
#include "glusterd-utils.h"
#include "glusterd-tierd-svc-helper.h"
#include "glusterd-messages.h"
-#include "syscall.h"
+#include <glusterfs/syscall.h>
#include "glusterd-volgen.h"
void
@@ -81,7 +81,8 @@ glusterd_svc_build_tierd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
void
glusterd_svc_build_tierd_logdir(char *logdir, char *volname, size_t len)
{
- snprintf(logdir, len, "%s/tier/%s", DEFAULT_LOG_FILE_DIRECTORY, volname);
+ glusterd_conf_t *conf = THIS->private;
+ snprintf(logdir, len, "%s/tier/%s", priv->logdir, volname);
}
void
@@ -116,7 +117,7 @@ glusterd_svc_check_tier_volfile_identical(char *svc_name,
goto out;
}
- /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
+ /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
tmp_fd = mkstemp(tmpvol);
if (tmp_fd < 0) {
gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
@@ -177,7 +178,7 @@ glusterd_svc_check_tier_topology_identical(char *svc_name,
goto out;
}
- /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
+ /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
tmpfd = mkstemp(tmpvol);
if (tmpfd < 0) {
gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.h
deleted file mode 100644
index 56b794df506..00000000000
--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#ifndef _GLUSTERD_TIERD_SVC_HELPER_H_
-#define _GLUSTERD_TIERD_SVC_HELPER_H_
-
-#include "glusterd.h"
-
-void
-glusterd_svc_build_tierd_rundir(glusterd_volinfo_t *volinfo, char *path,
- int path_len);
-
-void
-glusterd_svc_build_tierd_socket_filepath(glusterd_volinfo_t *volinfo,
- char *path, int path_len);
-
-void
-glusterd_svc_build_tierd_pidfile(glusterd_volinfo_t *volinfo, char *path,
- int path_len);
-
-void
-glusterd_svc_build_tierd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
- int path_len);
-
-void
-glusterd_svc_build_tierd_logdir(char *logdir, char *volname, size_t len);
-
-void
-glusterd_svc_build_tierd_logfile(char *logfile, char *logdir, size_t len);
-#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
deleted file mode 100644
index 43438379647..00000000000
--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
+++ /dev/null
@@ -1,503 +0,0 @@
-/*
- Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#include "globals.h"
-#include "run.h"
-#include "glusterd.h"
-#include "glusterd-utils.h"
-#include "glusterd-volgen.h"
-#include "glusterd-tierd-svc.h"
-#include "glusterd-tierd-svc-helper.h"
-#include "glusterd-svc-helper.h"
-#include "syscall.h"
-#include "glusterd-store.h"
-
-char *tierd_svc_name = "tierd";
-
-void
-glusterd_tierdsvc_build(glusterd_svc_t *svc)
-{
- svc->manager = glusterd_tierdsvc_manager;
- svc->start = glusterd_tierdsvc_start;
- svc->stop = glusterd_svc_stop;
- svc->reconfigure = glusterd_tierdsvc_reconfigure;
-}
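
/* Callers never invoke the tierd routines directly; they always go through
 * the glusterd_svc_t vtable wired up above. A usage fragment matching the
 * call sites earlier in this diff (volinfo and ret in scope, as there): */
glusterd_svc_t *svc = &(volinfo->tierd.svc);

/* start/stop tierd according to the volume's current state */
ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);

/* or, after a non-topology option change, rewrite the volfile in place */
ret = svc->reconfigure(volinfo);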
-
-/* A separate service framework is used because tierd is a
- * volume-based service while the common services are for node-based
- * daemons. When a volume-based common framework is available,
- * this can be consolidated into it.
- */
-
-int
-glusterd_tierdsvc_init(void *data)
-{
- int ret = -1;
- char rundir[PATH_MAX] = {
- 0,
- };
- char sockpath[PATH_MAX] = {
- 0,
- };
- char pidfile[PATH_MAX] = {
- 0,
- };
- char volfile[PATH_MAX] = {
- 0,
- };
- char logdir[PATH_MAX] = {
- 0,
- };
- char logfile[PATH_MAX] = {
- 0,
- };
- char volfileid[256] = {0};
- glusterd_svc_t *svc = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_conn_notify_t notify = NULL;
- xlator_t *this = NULL;
- char *volfileserver = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
-
- volinfo = data;
- GF_VALIDATE_OR_GOTO(this->name, data, out);
-
- svc = &(volinfo->tierd.svc);
-
- ret = snprintf(svc->name, sizeof(svc->name), "%s", tierd_svc_name);
- if (ret < 0)
- goto out;
-
- notify = glusterd_svc_common_rpc_notify;
- glusterd_store_perform_node_state_store(volinfo);
-
- volinfo->type = GF_CLUSTER_TYPE_TIER;
-
- glusterd_svc_build_tierd_rundir(volinfo, rundir, sizeof(rundir));
- glusterd_svc_create_rundir(rundir);
-
- /* Initialize the connection mgmt */
- glusterd_svc_build_tierd_socket_filepath(volinfo, sockpath,
- sizeof(sockpath));
- ret = glusterd_conn_init(&(svc->conn), sockpath, 600, notify);
- if (ret)
- goto out;
-
- /* Initialize the process mgmt */
- glusterd_svc_build_tierd_pidfile(volinfo, pidfile, sizeof(pidfile));
- glusterd_svc_build_tierd_volfile_path(volinfo, volfile, sizeof(volfile));
- glusterd_svc_build_tierd_logdir(logdir, volinfo->volname, sizeof(logdir));
- ret = mkdir_p(logdir, 0755, _gf_true);
- if ((ret == -1) && (EEXIST != errno)) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
- "Unable to create logdir %s", logdir);
- goto out;
- }
- glusterd_svc_build_tierd_logfile(logfile, logdir, sizeof(logfile));
- len = snprintf(volfileid, sizeof(volfileid), "tierd/%s", volinfo->volname);
- if ((len < 0) || (len >= sizeof(volfileid))) {
- ret = -1;
- goto out;
- }
-
- if (dict_get_strn(this->options, "transport.socket.bind-address",
- SLEN("transport.socket.bind-address"),
- &volfileserver) != 0) {
- volfileserver = "localhost";
- }
- ret = glusterd_proc_init(&(svc->proc), tierd_svc_name, pidfile, logdir,
- logfile, volfile, volfileid, volfileserver);
- if (ret)
- goto out;
-
-out:
- gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
- return ret;
-}
-
-static int
-glusterd_tierdsvc_create_volfile(glusterd_volinfo_t *volinfo)
-{
- char filepath[PATH_MAX] = {
- 0,
- };
- int ret = -1;
- glusterd_conf_t *conf = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
- conf = this->private;
- GF_VALIDATE_OR_GOTO(this->name, conf, out);
-
- glusterd_svc_build_tierd_volfile_path(volinfo, filepath, sizeof(filepath));
- ret = build_rebalance_volfile(volinfo, filepath, NULL);
-
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
- "Failed to create volfile");
- goto out;
- }
-
-out:
- gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
-
- return ret;
-}
-
-int
-glusterd_tierdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
-{
- int ret = 0;
- xlator_t *this = THIS;
- glusterd_volinfo_t *volinfo = NULL;
- int is_force = 0;
-
- volinfo = data;
- GF_VALIDATE_OR_GOTO(this->name, data, out);
-
- if (!svc->inited) {
- ret = glusterd_tierdsvc_init(volinfo);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_INIT_FAIL,
- "Failed to initialize "
- "tierd service for volume %s",
- volinfo->volname);
- goto out;
- } else {
- svc->inited = _gf_true;
- gf_msg_debug(THIS->name, 0,
- "tierd service "
- "initialized");
- }
- }
-
- ret = dict_get_int32n(volinfo->dict, "force", SLEN("force"), &is_force);
- if (ret) {
- gf_msg_debug(this->name, errno,
- "Unable to get"
- " is_force from dict");
- }
-
- if (is_force)
- ret = 1;
- else
- ret = (glusterd_is_tierd_supposed_to_be_enabled(volinfo));
-
- if (ret) {
- if (!glusterd_is_volume_started(volinfo)) {
- if (glusterd_proc_is_running(&svc->proc)) {
- ret = svc->stop(svc, SIGTERM);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_STOP_FAIL,
- "Couldn't stop tierd for "
- "volume: %s",
- volinfo->volname);
- } else {
- /* Since tierd is not running set ret to 0 */
- ret = 0;
- }
- goto out;
- }
-
- ret = glusterd_tierdsvc_create_volfile(volinfo);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_CREATE_FAIL,
- "Couldn't create "
- "tierd volfile for volume: %s",
- volinfo->volname);
- goto out;
- }
-
- ret = svc->start(svc, flags);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_START_FAIL,
- "Couldn't start "
- "tierd for volume: %s",
- volinfo->volname);
- goto out;
- }
- volinfo->is_tier_enabled = _gf_true;
-
- glusterd_volinfo_ref(volinfo);
- ret = glusterd_conn_connect(&(svc->conn));
- if (ret) {
- glusterd_volinfo_unref(volinfo);
- goto out;
- }
- } else {
- if (glusterd_proc_is_running(&svc->proc)) {
- ret = svc->stop(svc, SIGTERM);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_STOP_FAIL,
- "Couldn't stop tierd for volume: %s", volinfo->volname);
- goto out;
- }
- volinfo->is_tier_enabled = _gf_false;
- }
- ret = 0;
- }
-
-out:
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
-
- return ret;
-}
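
/* The manager contract implemented above, restated as a sketch: compute the
 * desired state from volinfo, then converge the daemon toward it. The
 * should_run and make_volfile names below are illustrative placeholders,
 * not real glusterd symbols: */
if (should_run) { /* volume started and tier enabled (or force) */
    ret = make_volfile(volinfo);      /* regenerate the volfile first */
    if (!ret)
        ret = svc->start(svc, flags); /* no-op if already running */
} else if (glusterd_proc_is_running(&svc->proc)) {
    ret = svc->stop(svc, SIGTERM);    /* tear down when no longer wanted */
}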
-
-int32_t
-glusterd_tierdsvc_start(glusterd_svc_t *svc, int flags)
-{
- int ret = -1;
- runner_t runner = {
- 0,
- };
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- char valgrind_logfile[PATH_MAX] = {0};
- char msg[1024] = {
- 0,
- };
- char tierd_id[PATH_MAX] = {
- 0,
- };
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_tierdsvc_t *tierd = NULL;
- int cmd = GF_DEFRAG_CMD_START_TIER;
- char *localtime_logging = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
-
- if (glusterd_proc_is_running(&svc->proc)) {
- ret = 0;
- goto out;
- }
-
- /* Get volinfo->tierd from svc object */
- tierd = cds_list_entry(svc, glusterd_tierdsvc_t, svc);
- if (!tierd) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_OBJ_GET_FAIL,
- "Failed to get tierd object "
- "from tierd service");
- goto out;
- }
-
- /* Get volinfo from tierd */
- volinfo = cds_list_entry(tierd, glusterd_volinfo_t, tierd);
- if (!volinfo) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
- "Failed to get volinfo from "
- "from tierd");
- goto out;
- }
-
- ret = sys_access(svc->proc.volfile, F_OK);
- if (ret) {
- gf_msg(this->name, GF_LOG_DEBUG, 0, GD_MSG_VOLINFO_GET_FAIL,
- "tierd Volfile %s is not present", svc->proc.volfile);
-        /* If glusterd is down on one of the nodes when tier is started
-         * for the first time, then once that glusterd comes back up it
-         * looks for the tierd volfile, does not find it, and therefore
-         * fails to start tierd. So if the volfile is not present,
-         * create a fresh volfile.
- */
- ret = glusterd_tierdsvc_create_volfile(volinfo);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
- "Couldn't create "
- "tierd volfile for volume: %s",
- volinfo->volname);
- goto out;
- }
- }
- runinit(&runner);
-
- if (this->ctx->cmd_args.valgrind) {
- len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-tierd.log",
- svc->proc.logdir);
- if ((len < 0) || (len >= PATH_MAX)) {
- ret = -1;
- goto out;
- }
-
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
- runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
- }
-
- snprintf(tierd_id, sizeof(tierd_id), "tierd-%s", volinfo->volname);
- runner_add_args(
- &runner, SBIN_DIR "/glusterfs", "-s", svc->proc.volfileserver,
- "--volfile-id", svc->proc.volfileid, "-p", svc->proc.pidfile, "-l",
- svc->proc.logfile, "--brick-name", tierd_id, "-S", svc->conn.sockpath,
- "--xlator-option", "*dht.use-readdirp=yes", "--xlator-option",
- "*dht.lookup-unhashed=yes", "--xlator-option",
- "*dht.assert-no-child-down=yes", "--xlator-option",
- "*dht.readdir-optimize=on", "--xlator-option",
- "*tier-dht.xattr-name=trusted.tier.tier-dht", "--process-name",
- svc->name, NULL);
-
- runner_add_arg(&runner, "--xlator-option");
- runner_argprintf(&runner, "*dht.rebalance-cmd=%d", cmd);
- runner_add_arg(&runner, "--xlator-option");
- runner_argprintf(&runner, "*dht.node-uuid=%s", uuid_utoa(MY_UUID));
- runner_add_arg(&runner, "--xlator-option");
- runner_argprintf(&runner, "*dht.commit-hash=%u",
- volinfo->rebal.commit_hash);
- if (volinfo->memory_accounting)
- runner_add_arg(&runner, "--mem-accounting");
- if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
- SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY),
- &localtime_logging) == 0) {
- if (strcmp(localtime_logging, "enable") == 0)
- runner_add_arg(&runner, "--localtime-logging");
- }
-
- snprintf(msg, sizeof(msg), "Starting the tierd service for volume %s",
- volinfo->volname);
- runner_log(&runner, this->name, GF_LOG_DEBUG, msg);
-
- if (flags == PROC_START_NO_WAIT) {
- ret = runner_run_nowait(&runner);
- } else {
- synclock_unlock(&priv->big_lock);
- {
- ret = runner_run(&runner);
- }
- synclock_lock(&priv->big_lock);
- }
-
-out:
- return ret;
-}
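
/* A condensed sketch of the runner-based launch used above (the run.h API
 * exactly as this file uses it; the concrete argument values here are
 * illustrative only): */
runner_t runner = {
    0,
};

runinit(&runner);
runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s", "localhost",
                "--volfile-id", "tierd/myvol", NULL);
runner_add_arg(&runner, "--xlator-option");
runner_argprintf(&runner, "*dht.rebalance-cmd=%d", GF_DEFRAG_CMD_START_TIER);
ret = runner_run_nowait(&runner); /* PROC_START_NO_WAIT: don't block glusterd */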
-
-int
-glusterd_tierdsvc_restart(void)
-{
- glusterd_volinfo_t *volinfo = NULL;
- int ret = 0;
- xlator_t *this = THIS;
- glusterd_conf_t *conf = NULL;
- glusterd_svc_t *svc = NULL;
-
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO(this->name, conf, out);
-
- cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
- {
- /* Start per volume tierd svc */
- if (volinfo->status == GLUSTERD_STATUS_STARTED &&
- volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- if (volinfo->tier.op != GD_OP_DETACH_TIER) {
- ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_START_FAIL,
- "Couldn't restart tierd for "
- "vol: %s",
- volinfo->volname);
- goto out;
- }
- }
- }
- }
-out:
- return ret;
-}
-
-int
-glusterd_tierdsvc_reconfigure(void *data)
-{
- int ret = -1;
- xlator_t *this = NULL;
- gf_boolean_t identical_topology = _gf_false;
- gf_boolean_t identical_volfile = _gf_false;
- glusterd_volinfo_t *volinfo = NULL;
-
- volinfo = data;
-
-    /* The reconfigure function is not available for other volume-based
-     * services, but it has been implemented for tierd because there can
-     * be changes to the volfile that are not related to topology.
-     * During such changes it is better not to restart the tierd,
-     * so reconfigure exists to avoid calling restart in those
-     * situations.
- */
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
-
- if (!glusterd_is_tierd_enabled(volinfo))
- goto manager;
- /*
- * Check both OLD and NEW volfiles, if they are SAME by size
- * and cksum i.e. "character-by-character". If YES, then
- * NOTHING has been changed, just return.
- */
-
- ret = glusterd_svc_check_tier_volfile_identical(
- volinfo->tierd.svc.name, volinfo, &identical_volfile);
- if (ret)
- goto out;
- if (identical_volfile) {
- ret = 0;
- goto out;
- }
-
- /*
- * They are not identical. Find out if the topology is changed
- * OR just the volume options. If just the options which got
- * changed, then inform the xlator to reconfigure the options.
- */
- ret = glusterd_svc_check_tier_topology_identical(
- volinfo->tierd.svc.name, volinfo, &identical_topology);
- if (ret)
- goto out; /*not able to compare due to some corruption */
-
- /* Topology is not changed, but just the options. But write the
- * options to tierd volfile, so that tierd will be reconfigured.
- */
- if (identical_topology) {
- ret = glusterd_tierdsvc_create_volfile(volinfo);
- if (ret == 0) { /* Only if above PASSES */
- ret = glusterd_fetchspec_notify(this);
- }
- goto out;
- }
- goto out;
- /*pending add/remove brick functionality*/
-
-manager:
- /*
- * tierd volfile's topology has been changed. tierd server needs
- * to be RESTARTED to ACT on the changed volfile.
- */
- ret = volinfo->tierd.svc.manager(&(volinfo->tierd.svc), volinfo,
- PROC_START_NO_WAIT);
-
-out:
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
-}
diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.h b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.h
deleted file mode 100644
index 78d3d11b6a3..00000000000
--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#ifndef _GLUSTERD_TIERD_SVC_H_
-#define _GLUSTERD_TIERD_SVC_H_
-
-#include "glusterd-svc-mgmt.h"
-
-typedef struct glusterd_tierdsvc_ glusterd_tierdsvc_t;
-
-struct glusterd_tierdsvc_ {
- glusterd_svc_t svc;
- gf_store_handle_t *handle;
-};
-
-void
-glusterd_tierdsvc_build(glusterd_svc_t *svc);
-
-int
-glusterd_tierdsvc_init(void *data);
-
-int
-glusterd_tierdsvc_manager(glusterd_svc_t *svc, void *data, int flags);
-
-int
-glusterd_tierdsvc_start(glusterd_svc_t *svc, int flags);
-
-int
-glusterd_tierdsvc_reconfigure(void *data);
-
-int
-glusterd_tierdsvc_restart(void);
-
-#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 2a4ff542c13..90ef2cf4c9c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -20,20 +20,20 @@
#include <libxml/xmlwriter.h>
#endif
-#include "glusterfs.h"
-#include "compat.h"
-#include "dict.h"
-#include "xlator.h"
-#include "logging.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
#include "glusterd-messages.h"
-#include "timer.h"
-#include "defaults.h"
-#include "compat.h"
-#include "syncop.h"
-#include "run.h"
-#include "compat-errno.h"
-#include "statedump.h"
-#include "syscall.h"
+#include <glusterfs/timer.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/run.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/statedump.h>
+#include <glusterfs/syscall.h>
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-op-sm.h"
@@ -43,7 +43,7 @@
#include "glusterd-store.h"
#include "glusterd-volgen.h"
#include "glusterd-pmap.h"
-#include "glusterfs-acl.h"
+#include <glusterfs/glusterfs-acl.h>
#include "glusterd-syncop.h"
#include "glusterd-mgmt.h"
#include "glusterd-locks.h"
@@ -53,14 +53,14 @@
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-shd-svc.h"
-#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-snapd-svc.h"
#include "glusterd-bitd-svc.h"
#include "glusterd-gfproxyd-svc.h"
#include "glusterd-server-quorum.h"
-#include "quota-common-utils.h"
-#include "common-utils.h"
+#include <glusterfs/quota-common-utils.h>
+#include <glusterfs/common-utils.h>
+#include "glusterd-shd-svc-helper.h"
#include "xdr-generic.h"
#include <sys/resource.h>
@@ -74,14 +74,19 @@
#include <fnmatch.h>
#include <sys/statvfs.h>
#include <ifaddrs.h>
-#ifdef HAVE_BD_XLATOR
-#include <lvm2app.h>
-#endif
#ifdef GF_SOLARIS_HOST_OS
#include <sys/sockio.h>
#endif
+#ifdef __FreeBSD__
+#include <sys/sysctl.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <libprocstat.h>
+#include <libutil.h>
+#endif
+
#define NFS_PROGRAM 100003
#define NFSV3_VERSION 3
@@ -93,6 +98,28 @@
#define NLMV4_VERSION 4
#define NLMV1_VERSION 1
+#ifdef BUILD_GNFS
+#define GLUSTERD_GET_NFS_PIDFILE(pidfile, priv) \
+ do { \
+ int32_t _nfs_pid_len; \
+ _nfs_pid_len = snprintf(pidfile, PATH_MAX, "%s/nfs/nfs.pid", \
+ priv->rundir); \
+ if ((_nfs_pid_len < 0) || (_nfs_pid_len >= PATH_MAX)) { \
+ pidfile[0] = 0; \
+ } \
+ } while (0)
+#endif
+
+#define GLUSTERD_GET_QUOTAD_PIDFILE(pidfile, priv) \
+ do { \
+ int32_t _quotad_pid_len; \
+ _quotad_pid_len = snprintf(pidfile, PATH_MAX, "%s/quotad/quotad.pid", \
+ priv->rundir); \
+ if ((_quotad_pid_len < 0) || (_quotad_pid_len >= PATH_MAX)) { \
+ pidfile[0] = 0; \
+ } \
+ } while (0)
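/* Usage sketch for the pidfile macros above: build the path, then probe the
 * daemon with gf_is_service_running() as done elsewhere in glusterd (priv is
 * the usual glusterd_conf_t in scope at such call sites): */
char pidfile[PATH_MAX] = {0};
int32_t pid = -1;

GLUSTERD_GET_QUOTAD_PIDFILE(pidfile, priv);
if (pidfile[0] && gf_is_service_running(pidfile, &pid))
    gf_msg_debug("glusterd", 0, "quotad is running with pid %d", pid);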
+
gf_boolean_t
is_brick_mx_enabled(void)
{
@@ -139,15 +166,8 @@ get_mux_limit_per_process(int *mux_limit)
ret = dict_get_strn(priv->opts, GLUSTERD_BRICKMUX_LIMIT_KEY,
SLEN(GLUSTERD_BRICKMUX_LIMIT_KEY), &value);
if (ret) {
- gf_msg_debug(this->name, 0,
- "Limit for number of bricks per "
- "brick process not yet set in dict. Returning "
- "limit as 0 denoting that multiplexing can "
- "happen with no limit set.");
- ret = 0;
- goto out;
+ value = GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE;
}
-
ret = gf_string2int(value, &max_bricks_per_proc);
if (ret)
goto out;
@@ -161,6 +181,47 @@ out:
return ret;
}
+int
+get_gd_vol_thread_limit(int *thread_limit)
+{
+ char *value = NULL;
+ int ret = -1;
+ int vol_per_thread_limit = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ if (!is_brick_mx_enabled()) {
+ vol_per_thread_limit = 1;
+ ret = 0;
+ goto out;
+ }
+
+ ret = dict_get_strn(priv->opts, GLUSTERD_VOL_CNT_PER_THRD,
+ SLEN(GLUSTERD_VOL_CNT_PER_THRD), &value);
+ if (ret) {
+ value = GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE;
+ }
+ ret = gf_string2int(value, &vol_per_thread_limit);
+ if (ret)
+ goto out;
+
+out:
+ *thread_limit = vol_per_thread_limit;
+
+ gf_msg_debug("glusterd", 0,
+ "Per Thread volume limit set to %d glusterd to populate dict "
+ "data parallel",
+ *thread_limit);
+
+ return ret;
+}
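/* The lookup-with-default idiom used by get_gd_vol_thread_limit above, in
 * isolation. GLUSTERD_VOL_CNT_PER_THRD and its default value macro come from
 * the glusterd headers and are treated as given here: */
char *value = NULL;
int limit = 0;

if (dict_get_strn(priv->opts, GLUSTERD_VOL_CNT_PER_THRD,
                  SLEN(GLUSTERD_VOL_CNT_PER_THRD), &value))
    value = GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE; /* unset: use default */
if (gf_string2int(value, &limit) == 0)
    gf_msg_debug("glusterd", 0, "volumes per thread: %d", limit);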
+
extern struct volopt_map_entry glusterd_volopt_map[];
extern glusterd_all_vol_opts valid_all_vol_opts[];
@@ -219,53 +280,9 @@ glusterd_volume_brick_for_each(glusterd_volinfo_t *volinfo, void *data,
glusterd_brickinfo_t *,
dict_t *mod_dict, void *))
{
- dict_t *mod_dict = NULL;
- glusterd_volinfo_t *dup_volinfo = NULL;
- int ret = 0;
-
gd_set_shared_brick_count(volinfo);
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- ret = _brick_for_each(volinfo, NULL, data, fn);
- if (ret)
- goto out;
- } else {
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, _gf_true,
- volinfo->volname);
- if (ret)
- goto out;
-
- mod_dict = dict_new();
- if (!mod_dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_set_nstrn(mod_dict, "hot-brick", SLEN("hot-brick"), "on",
- SLEN("on"));
- if (ret)
- goto out;
-
- ret = _brick_for_each(dup_volinfo, mod_dict, data, fn);
- if (ret)
- goto out;
- GF_FREE(dup_volinfo);
- dup_volinfo = NULL;
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, _gf_false,
- volinfo->volname);
- if (ret)
- goto out;
- ret = _brick_for_each(dup_volinfo, NULL, data, fn);
- if (ret)
- goto out;
- }
-out:
- if (dup_volinfo)
- glusterd_volinfo_delete(dup_volinfo);
-
- if (mod_dict)
- dict_unref(mod_dict);
- return ret;
+ return _brick_for_each(volinfo, NULL, data, fn);
}
int32_t
@@ -434,6 +451,8 @@ glusterd_submit_request(struct rpc_clnt *rpc, void *req, call_frame_t *frame,
if (!iobref) {
iobref = iobref_new();
if (!iobref) {
+ gf_smsg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
}
@@ -578,25 +597,21 @@ out:
return ret;
}
-gf_boolean_t
-glusterd_check_volume_exists(char *volname)
-{
- glusterd_volinfo_t *volinfo = NULL;
-
- return (glusterd_volinfo_find(volname, &volinfo) == 0);
-}
-
glusterd_volinfo_t *
glusterd_volinfo_unref(glusterd_volinfo_t *volinfo)
{
int refcnt = -1;
+ glusterd_conf_t *conf = THIS->private;
- pthread_mutex_lock(&volinfo->reflock);
+ pthread_mutex_lock(&conf->volume_lock);
{
- refcnt = --volinfo->refcnt;
+ pthread_mutex_lock(&volinfo->reflock);
+ {
+ refcnt = --volinfo->refcnt;
+ }
+ pthread_mutex_unlock(&volinfo->reflock);
}
- pthread_mutex_unlock(&volinfo->reflock);
-
+ pthread_mutex_unlock(&conf->volume_lock);
if (!refcnt) {
glusterd_volinfo_delete(volinfo);
return NULL;
@@ -635,10 +650,12 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo)
CDS_INIT_LIST_HEAD(&new_volinfo->vol_list);
CDS_INIT_LIST_HEAD(&new_volinfo->snapvol_list);
CDS_INIT_LIST_HEAD(&new_volinfo->bricks);
+ CDS_INIT_LIST_HEAD(&new_volinfo->ta_bricks);
CDS_INIT_LIST_HEAD(&new_volinfo->snap_volumes);
new_volinfo->dict = dict_new();
if (!new_volinfo->dict) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
GF_FREE(new_volinfo);
goto out;
@@ -646,6 +663,7 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo)
new_volinfo->gsync_slaves = dict_new();
if (!new_volinfo->gsync_slaves) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
dict_unref(new_volinfo->dict);
GF_FREE(new_volinfo);
goto out;
@@ -653,6 +671,7 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo)
new_volinfo->gsync_active_slaves = dict_new();
if (!new_volinfo->gsync_active_slaves) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
dict_unref(new_volinfo->dict);
dict_unref(new_volinfo->gsync_slaves);
GF_FREE(new_volinfo);
@@ -666,10 +685,12 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo)
new_volinfo->xl = THIS;
glusterd_snapdsvc_build(&new_volinfo->snapd.svc);
- glusterd_tierdsvc_build(&new_volinfo->tierd.svc);
glusterd_gfproxydsvc_build(&new_volinfo->gfproxyd.svc);
+ glusterd_shdsvc_build(&new_volinfo->shd.svc);
+ pthread_mutex_init(&new_volinfo->store_volinfo_lock, NULL);
pthread_mutex_init(&new_volinfo->reflock, NULL);
+
*volinfo = glusterd_volinfo_ref(new_volinfo);
ret = 0;
@@ -722,7 +743,6 @@ glusterd_volinfo_dup(glusterd_volinfo_t *volinfo,
new_volinfo->subvol_count = volinfo->subvol_count;
new_volinfo->transport_type = volinfo->transport_type;
new_volinfo->brick_count = volinfo->brick_count;
- new_volinfo->tier_info = volinfo->tier_info;
new_volinfo->quota_conf_version = volinfo->quota_conf_version;
new_volinfo->quota_xattr_version = volinfo->quota_xattr_version;
new_volinfo->snap_max_hard_limit = volinfo->snap_max_hard_limit;
@@ -798,89 +818,6 @@ glusterd_brickinfo_dup(glusterd_brickinfo_t *brickinfo,
out:
return ret;
}
-int32_t
-glusterd_create_sub_tier_volinfo(glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t **dup_volinfo,
- gf_boolean_t is_hot_tier,
- const char *new_volname)
-{
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *brickinfo_dup = NULL;
- gd_tier_info_t *tier_info = NULL;
- int i = 0;
- int ret = -1;
-
- tier_info = &(volinfo->tier_info);
-
- ret = glusterd_volinfo_dup(volinfo, dup_volinfo, _gf_true);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_OP_FAILED,
- "Failed to create volinfo");
- return ret;
- }
-
- gf_uuid_copy((*dup_volinfo)->volume_id, volinfo->volume_id);
- (*dup_volinfo)->is_snap_volume = volinfo->is_snap_volume;
- (*dup_volinfo)->status = volinfo->status;
- (*dup_volinfo)->snapshot = volinfo->snapshot;
-
- if (snprintf((*dup_volinfo)->volname, sizeof((*dup_volinfo)->volname), "%s",
- new_volname) >= sizeof((*dup_volinfo)->volname)) {
- ret = -1;
- goto out;
- }
-
- memcpy(&(*dup_volinfo)->tier_info, &volinfo->tier_info,
- sizeof(volinfo->tier_info));
-
- cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
- {
- i++;
-
- if (is_hot_tier) {
- if (i > volinfo->tier_info.hot_brick_count)
- break;
- } else {
- if (i <= volinfo->tier_info.hot_brick_count)
- continue;
- }
-
- ret = glusterd_brickinfo_new(&brickinfo_dup);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_NEW_INFO_FAIL,
- "Failed to create "
- "new brickinfo");
- goto out;
- }
-
- glusterd_brickinfo_dup(brickinfo, brickinfo_dup);
- cds_list_add_tail(&brickinfo_dup->brick_list,
- &((*dup_volinfo)->bricks));
- }
-
- if (is_hot_tier) {
- (*dup_volinfo)->type = tier_info->hot_type;
- (*dup_volinfo)->replica_count = tier_info->hot_replica_count;
- (*dup_volinfo)->brick_count = tier_info->hot_brick_count;
- (*dup_volinfo)->dist_leaf_count = glusterd_get_dist_leaf_count(
- *dup_volinfo);
-
- } else {
- (*dup_volinfo)->type = tier_info->cold_type;
- (*dup_volinfo)->replica_count = tier_info->cold_replica_count;
- (*dup_volinfo)->disperse_count = tier_info->cold_disperse_count;
- (*dup_volinfo)->redundancy_count = tier_info->cold_redundancy_count;
- (*dup_volinfo)->dist_leaf_count = tier_info->cold_dist_leaf_count;
- (*dup_volinfo)->brick_count = tier_info->cold_brick_count;
- }
-out:
- if (ret && *dup_volinfo) {
- glusterd_volinfo_delete(*dup_volinfo);
- *dup_volinfo = NULL;
- }
-
- return ret;
-}
/*
* gd_vol_is_geo_rep_active:
@@ -1024,7 +961,6 @@ glusterd_volinfo_delete(glusterd_volinfo_t *volinfo)
/* Destroy the connection object for per volume svc daemons */
glusterd_conn_term(&volinfo->snapd.svc.conn);
- glusterd_conn_term(&volinfo->tierd.svc.conn);
glusterd_conn_term(&volinfo->gfproxyd.svc.conn);
gf_store_handle_destroy(volinfo->quota_conf_shandle);
@@ -1033,11 +969,14 @@ glusterd_volinfo_delete(glusterd_volinfo_t *volinfo)
gf_store_handle_destroy(volinfo->snapd.handle);
glusterd_auth_cleanup(volinfo);
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
+ pthread_mutex_destroy(&volinfo->store_volinfo_lock);
pthread_mutex_destroy(&volinfo->reflock);
+ LOCK_DESTROY(&volinfo->lock);
+
GF_FREE(volinfo);
ret = 0;
-
out:
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
@@ -1085,6 +1024,7 @@ glusterd_brickinfo_new(glusterd_brickinfo_t **brickinfo)
goto out;
CDS_INIT_LIST_HEAD(&new_brickinfo->brick_list);
+ CDS_INIT_LIST_HEAD(&new_brickinfo->mux_bricks);
pthread_mutex_init(&new_brickinfo->restart_mutex, NULL);
*brickinfo = new_brickinfo;
@@ -1185,7 +1125,8 @@ glusterd_get_brick_mount_dir(char *brickpath, char *hostname, char *mount_dir)
}
brick_dir = &brickpath[strlen(mnt_pt)];
- brick_dir++;
+ if (brick_dir[0] == '/')
+ brick_dir++;
snprintf(mount_dir, VALID_GLUSTERD_PATHMAX, "/%s", brick_dir);
}
@@ -1207,9 +1148,6 @@ glusterd_brickinfo_new_from_brick(char *brick, glusterd_brickinfo_t **brickinfo,
char *path = NULL;
char *tmp_host = NULL;
char *tmp_path = NULL;
-#ifdef HAVE_BD_XLATOR
- char *vg = NULL;
-#endif
int32_t ret = -1;
glusterd_brickinfo_t *new_brickinfo = NULL;
xlator_t *this = NULL;
@@ -1234,18 +1172,6 @@ glusterd_brickinfo_new_from_brick(char *brick, glusterd_brickinfo_t **brickinfo,
if (ret)
goto out;
-#ifdef HAVE_BD_XLATOR
- vg = strchr(path, '?');
- /* ? is used as a delimiter for vg */
- if (vg) {
- if (snprintf(new_brickinfo->vg, PATH_MAX, "%s", vg + 1) >= PATH_MAX) {
- ret = -1;
- goto out;
- }
- *vg = '\0';
- }
- new_brickinfo->caps = CAPS_BD;
-#endif
ret = gf_canonicalize_path(path);
if (ret)
goto out;
@@ -1294,7 +1220,12 @@ glusterd_brickinfo_new_from_brick(char *brick, glusterd_brickinfo_t **brickinfo,
goto out;
}
}
- strncpy(new_brickinfo->real_path, abspath, strlen(abspath));
+ if (strlen(abspath) >= sizeof(new_brickinfo->real_path)) {
+ ret = -1;
+ goto out;
+ }
+ (void)strncpy(new_brickinfo->real_path, abspath,
+ sizeof(new_brickinfo->real_path));
}
*brickinfo = new_brickinfo;
@@ -1366,7 +1297,7 @@ glusterd_is_brickpath_available(uuid_t uuid, char *path)
glusterd_volinfo_t *volinfo = NULL;
glusterd_conf_t *priv = NULL;
gf_boolean_t available = _gf_false;
- char tmp_path[PATH_MAX + 1] = "";
+ char tmp_path[PATH_MAX] = "";
priv = THIS->private;
@@ -1385,7 +1316,7 @@ glusterd_is_brickpath_available(uuid_t uuid, char *path)
goto out;
}
/* When realpath(3) fails, tmp_path is undefined. */
- strncpy(tmp_path, path, PATH_MAX);
+ (void)snprintf(tmp_path, sizeof(tmp_path), "%s", path);
}
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
@@ -1409,63 +1340,6 @@ out:
return available;
}
-#ifdef HAVE_BD_XLATOR
-/*
- * Sets the tag of the format "trusted.glusterfs.volume-id:<uuid>" in
- * the brick VG. It is used to avoid using same VG for another brick.
- * @volume-id - gfid, @brick - brick info, @msg - Error message returned
- * to the caller
- */
-int
-glusterd_bd_set_vg_tag(unsigned char *volume_id, glusterd_brickinfo_t *brick,
- char *msg, int msg_size)
-{
- lvm_t handle = NULL;
- vg_t vg = NULL;
- char *uuid = NULL;
- int ret = -1;
-
- gf_asprintf(&uuid, "%s:%s", GF_XATTR_VOL_ID_KEY, uuid_utoa(volume_id));
- if (!uuid) {
- snprintf(msg, sizeof(*msg),
- "Could not allocate memory "
- "for tag");
- return -1;
- }
-
- handle = lvm_init(NULL);
- if (!handle) {
- snprintf(msg, sizeof(*msg), "lvm_init failed");
- goto out;
- }
-
- vg = lvm_vg_open(handle, brick->vg, "w", 0);
- if (!vg) {
- snprintf(msg, sizeof(*msg), "Could not open VG %s", brick->vg);
- goto out;
- }
-
- if (lvm_vg_add_tag(vg, uuid) < 0) {
- snprintf(msg, sizeof(*msg),
- "Could not set tag %s for "
- "VG %s",
- uuid, brick->vg);
- goto out;
- }
- lvm_vg_write(vg);
- ret = 0;
-out:
- GF_FREE(uuid);
-
- if (vg)
- lvm_vg_close(vg);
- if (handle)
- lvm_quit(handle);
-
- return ret;
-}
-#endif
-
int
glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
uuid_t volume_id, char *volname,
@@ -1488,7 +1362,7 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
char glusterfs_dir_path[PATH_MAX] = "";
int32_t len = 0;
- ret = sys_mkdir(brickinfo->path, 0777);
+ ret = sys_mkdir(brickinfo->path, 0755);
if (ret) {
if (errno != EEXIST) {
len = snprintf(msg, sizeof(msg),
@@ -1497,6 +1371,10 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
"Reason : %s ",
brickinfo->hostname, brickinfo->path,
strerror(errno));
+ gf_smsg(
+ "glusterd", GF_LOG_ERROR, errno, GD_MSG_CREATE_BRICK_DIR_FAILED,
+ "Brick_hostname=%s, Brick_path=%s, Reason=%s",
+ brickinfo->hostname, brickinfo->path, strerror(errno), NULL);
goto out;
}
} else {
@@ -1509,6 +1387,9 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
"lstat failed on %s. "
"Reason : %s",
brickinfo->path, strerror(errno));
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_LSTAT_FAIL,
+ "Failed on Brick_path=%s, Reason=%s", brickinfo->path,
+ strerror(errno), NULL);
goto out;
}
@@ -1517,6 +1398,8 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
"The provided path %s "
"which is already present, is not a directory",
brickinfo->path);
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
+ "Brick_path=%s", brickinfo->path, NULL);
ret = -1;
goto out;
}
@@ -1533,6 +1416,8 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
"lstat failed on /. "
"Reason : %s",
strerror(errno));
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_LSTAT_FAIL,
+ "Failed on /, Reason=%s", strerror(errno), NULL);
goto out;
}
@@ -1542,6 +1427,9 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
"lstat failed on %s. "
"Reason : %s",
parentdir, strerror(errno));
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_LSTAT_FAIL,
+ "Failed on parentdir=%s, Reason=%s", parentdir, strerror(errno),
+ NULL);
goto out;
}
if (strncmp(volname, GLUSTER_SHARED_STORAGE,
@@ -1552,6 +1440,8 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
len = snprintf(msg, sizeof(msg),
"Brick isn't allowed to be "
"created inside glusterd's working directory.");
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_BRICK_CREATION_FAIL,
+ NULL);
ret = -1;
goto out;
}
@@ -1567,6 +1457,10 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
"command if you want to override this "
"behavior.",
brickinfo->hostname, brickinfo->path);
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_BRICK_CREATE_MNTPNT,
+ "Use 'force' at the end of the command if you want to "
+ "override this behavior, Brick_hostname=%s, Brick_path=%s",
+ brickinfo->hostname, brickinfo->path, NULL);
ret = -1;
goto out;
} else if (parent_st.st_dev == root_st.st_dev) {
@@ -1580,6 +1474,10 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
"command if you want to override this "
"behavior.",
brickinfo->hostname, brickinfo->path);
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_BRICK_CREATE_ROOT,
+ "Use 'force' at the end of the command if you want to "
+ "override this behavior, Brick_hostname=%s, Brick_path=%s",
+ brickinfo->hostname, brickinfo->path, NULL);
/* If --wignore-partition flag is used, ignore warnings
* related to bricks being on root partition when 'force'
@@ -1591,13 +1489,6 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
}
}
-#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0]) {
- ret = glusterd_bd_set_vg_tag(volume_id, brickinfo, msg, sizeof(msg));
- if (ret)
- goto out;
- }
-#endif
ret = glusterd_check_and_set_brick_xattr(
brickinfo->hostname, brickinfo->path, volume_id, op_errstr, is_force);
if (ret)
@@ -1618,6 +1509,10 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo,
".glusterfs directory for brick %s:%s. "
"Reason : %s ",
brickinfo->hostname, brickinfo->path, strerror(errno));
+ gf_smsg("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_CREATE_GLUSTER_DIR_FAILED,
+ "Brick_hostname=%s, Brick_path=%s, Reason=%s",
+ brickinfo->hostname, brickinfo->path, strerror(errno), NULL);
goto out;
}
@@ -1680,6 +1575,37 @@ out:
}
int32_t
+glusterd_volume_ta_brickinfo_get(uuid_t uuid, char *hostname, char *path,
+ glusterd_volinfo_t *volinfo,
+ glusterd_brickinfo_t **ta_brickinfo)
+{
+ glusterd_brickinfo_t *ta_brickiter = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ ret = -1;
+
+ cds_list_for_each_entry(ta_brickiter, &volinfo->ta_bricks, brick_list)
+ {
+ if (strcmp(ta_brickiter->path, path) == 0 &&
+ strcmp(ta_brickiter->hostname, hostname) == 0) {
+ gf_msg_debug(this->name, 0, LOGSTR_FOUND_BRICK,
+ ta_brickiter->hostname, ta_brickiter->path,
+ volinfo->volname);
+ ret = 0;
+ if (ta_brickinfo)
+ *ta_brickinfo = ta_brickiter;
+ break;
+ }
+ }
+
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
+}
+
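Editorial note: the new helper walks volinfo->ta_bricks and matches on hostname plus path; the uuid argument is currently unused by the body. A hypothetical call site (MY_UUID and use_ta_brick are illustrative names, not from this patch) would look like:

    glusterd_brickinfo_t *ta_brick = NULL;

    /* On success, ta_brick points at the volume's matching thin-arbiter
     * brick entry; no copy is made. */
    if (glusterd_volume_ta_brickinfo_get(MY_UUID, hostname, path, volinfo,
                                         &ta_brick) == 0)
        use_ta_brick(ta_brick);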
+int32_t
glusterd_volume_brickinfo_get_by_brick(char *brick, glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t **brickinfo,
gf_boolean_t construct_real_path)
@@ -1729,8 +1655,10 @@ glusterd_volinfo_find_by_volume_id(uuid_t volume_id,
glusterd_volinfo_t *voliter = NULL;
glusterd_conf_t *priv = NULL;
- if (!volume_id)
+ if (!volume_id) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
return -1;
+ }
this = THIS;
priv = this->private;
@@ -1776,6 +1704,33 @@ glusterd_volinfo_find(const char *volname, glusterd_volinfo_t **volinfo)
return ret;
}
+gf_boolean_t
+glusterd_volume_exists(const char *volname)
+{
+ glusterd_volinfo_t *tmp_volinfo = NULL;
+ gf_boolean_t volume_found = _gf_false;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT(volname);
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ cds_list_for_each_entry(tmp_volinfo, &priv->volumes, vol_list)
+ {
+ if (!strcmp(tmp_volinfo->volname, volname)) {
+ gf_msg_debug(this->name, 0, "Volume %s found", volname);
+ volume_found = _gf_true;
+ break;
+ }
+ }
+
+ return volume_found;
+}
+
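Editorial note: glusterd_volume_exists is a lighter alternative to glusterd_volinfo_find for callers that only need a yes/no answer and no volinfo reference. A minimal sketch of a hypothetical caller:

    /* Bail out early when the volume is unknown on this peer. */
    if (!glusterd_volume_exists(volname)) {
        ret = -1;
        goto out;
    }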
int32_t
glusterd_service_stop(const char *service, char *pidfile, int sig,
gf_boolean_t force_kill)
@@ -1890,7 +1845,6 @@ glusterd_service_stop_nolock(const char *service, char *pidfile, int sig,
"Unable to find pid:%d, "
"must be dead already. Ignoring.",
pid);
- ret = 0;
} else {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_PID_KILL_FAIL,
"Unable to kill pid:%d, "
@@ -1975,7 +1929,14 @@ glusterd_brick_connect(glusterd_volinfo_t *volinfo,
* The default timeout of 30mins used for unreliable network
* connections is too long for unix domain socket connections.
*/
- ret = rpc_transport_unix_options_build(&options, socketpath, 600);
+ options = dict_new();
+ if (!options) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
+ goto out;
+ }
+
+ ret = rpc_transport_unix_options_build(options, socketpath, 600);
if (ret)
goto out;
@@ -1994,7 +1955,8 @@ glusterd_brick_connect(glusterd_volinfo_t *volinfo,
brickinfo->rpc = rpc;
}
out:
-
+ if (options)
+ dict_unref(options);
gf_msg_debug("glusterd", 0, "Returning %d", ret);
return ret;
}
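Editorial note on the hunk above: the transport helper no longer allocates the options dict internally; the caller creates it with dict_new() and unconditionally drops its reference at out:, so the dict is released on both the success and the failure path. A minimal sketch of the ownership pattern, using only calls visible in this hunk:

    dict_t *options = NULL;
    int ret = -1;

    options = dict_new();               /* caller owns the dict now */
    if (!options)
        goto out;

    /* 600s frame timeout: unix domain sockets are local and reliable,
     * so the 30-minute default meant for flaky networks is excessive. */
    ret = rpc_transport_unix_options_build(options, socketpath, 600);
    if (ret)
        goto out;
    /* ... create and save the rpc client as in the hunk above ... */
out:
    if (options)
        dict_unref(options);            /* dropped on every exit path */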
@@ -2010,7 +1972,7 @@ _mk_rundir_p(glusterd_volinfo_t *volinfo)
this = THIS;
priv = this->private;
GLUSTERD_GET_VOLUME_PID_DIR(rundir, volinfo, priv);
- ret = mkdir_p(rundir, 0777, _gf_true);
+ ret = mkdir_p(rundir, 0755, _gf_true);
if (ret)
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
"Failed to create rundir");
@@ -2044,6 +2006,10 @@ glusterd_volume_start_glusterfs(glusterd_volinfo_t *volinfo,
rpc_clnt_connection_t *conn = NULL;
int pid = -1;
int32_t len = 0;
+ glusterd_brick_proc_t *brick_proc = NULL;
+ char *inet_family = NULL;
+ char *global_threading = NULL;
+ bool threading = false;
GF_ASSERT(volinfo);
GF_ASSERT(brickinfo);
@@ -2111,23 +2077,28 @@ glusterd_volume_start_glusterfs(glusterd_volinfo_t *volinfo,
retry:
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
- /* Run bricks with valgrind */
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
+ /* Run bricks with valgrind. */
if (volinfo->logdir) {
len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s-%s.log",
volinfo->logdir, volinfo->volname, exp_path);
} else {
- len = snprintf(
- valgrind_logfile, PATH_MAX, "%s/bricks/valgrind-%s-%s.log",
- DEFAULT_LOG_FILE_DIRECTORY, volinfo->volname, exp_path);
+ len = snprintf(valgrind_logfile, PATH_MAX,
+ "%s/bricks/valgrind-%s-%s.log", priv->logdir,
+ volinfo->volname, exp_path);
}
if ((len < 0) || (len >= PATH_MAX)) {
ret = -1;
goto out;
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
+
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
@@ -2149,8 +2120,8 @@ retry:
len = snprintf(logfile, PATH_MAX, "%s/%s.log", volinfo->logdir,
exp_path);
} else {
- len = snprintf(logfile, PATH_MAX, "%s/bricks/%s.log",
- DEFAULT_LOG_FILE_DIRECTORY, exp_path);
+ len = snprintf(logfile, PATH_MAX, "%s/bricks/%s.log", priv->logdir,
+ exp_path);
}
if ((len < 0) || (len >= PATH_MAX)) {
ret = -1;
@@ -2202,6 +2173,15 @@ retry:
volinfo->volname, rdma_port);
}
+ if (dict_get_strn(volinfo->dict, VKEY_CONFIG_GLOBAL_THREADING,
+ SLEN(VKEY_CONFIG_GLOBAL_THREADING),
+ &global_threading) == 0) {
+ if ((gf_string2boolean(global_threading, &threading) == 0) &&
+ threading) {
+ runner_add_arg(&runner, "--global-threading");
+ }
+ }
+
runner_add_arg(&runner, "--xlator-option");
runner_argprintf(&runner, "%s-server.listen-port=%d", volinfo->volname,
port);
@@ -2219,10 +2199,19 @@ retry:
else if (volinfo->transport_type == GF_TRANSPORT_BOTH_TCP_RDMA)
runner_argprintf(&runner, "--volfile-server-transport=socket,rdma");
+ ret = dict_get_str(this->options, "transport.address-family", &inet_family);
+ if (!ret) {
+ runner_add_arg(&runner, "--xlator-option");
+ runner_argprintf(&runner, "transport.address-family=%s", inet_family);
+ }
+
if (volinfo->memory_accounting)
runner_add_arg(&runner, "--mem-accounting");
- runner_log(&runner, "", 0, "Starting GlusterFS");
+ if (is_brick_mx_enabled())
+ runner_add_arg(&runner, "--brick-mux");
+
+ runner_log(&runner, "", GF_LOG_DEBUG, "Starting GlusterFS");
brickinfo->port = port;
brickinfo->rdma_port = rdma_port;
@@ -2231,7 +2220,10 @@ retry:
if (wait) {
synclock_unlock(&priv->big_lock);
+ errno = 0;
ret = runner_run(&runner);
+ if (errno != 0)
+ ret = errno;
synclock_lock(&priv->big_lock);
if (ret == EADDRINUSE) {
@@ -2262,15 +2254,21 @@ retry:
goto out;
}
- ret = glusterd_brick_process_add_brick(brickinfo);
+ ret = glusterd_brickprocess_new(&brick_proc);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICKPROC_ADD_BRICK_FAILED,
- "Adding brick %s:%s "
- "to brick process failed.",
- brickinfo->hostname, brickinfo->path);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICKPROC_NEW_FAILED,
+ "Failed to create "
+ "new brick process instance");
goto out;
}
+ brick_proc->port = brickinfo->port;
+ cds_list_add_tail(&brick_proc->brick_proc_list, &priv->brick_procs);
+ cds_list_add_tail(&brickinfo->mux_bricks, &brick_proc->bricks);
+ brickinfo->brick_proc = brick_proc;
+ brick_proc->brick_count++;
+
connect:
ret = glusterd_brick_connect(volinfo, brickinfo, socketpath);
if (ret) {
@@ -2354,6 +2352,13 @@ unsafe_option(dict_t *this, char *key, data_t *value, void *arg)
return _gf_false;
}
+ if (fnmatch("*diagnostics.client-log*", key, 0) == 0) {
+ return _gf_false;
+ }
+ if (fnmatch("user.*", key, 0) == 0) {
+ return _gf_false;
+ }
+
return _gf_true;
}
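Editorial note: the two patterns added above enlarge the set of options for which unsafe_option returns _gf_false. Since fnmatch(3) is called with no flags, this is plain glob matching over the whole key string; for example:

    #include <fnmatch.h>

    /* 0 (match): the key contains "diagnostics.client-log" */
    fnmatch("*diagnostics.client-log*", "diagnostics.client-log-level", 0);

    /* 0 (match): any key in the user-defined "user." namespace */
    fnmatch("user.*", "user.cifs", 0);

    /* FNM_NOMATCH: brick-side log options remain unsafe */
    fnmatch("*diagnostics.client-log*", "diagnostics.brick-log-level", 0);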
@@ -2405,9 +2410,6 @@ glusterd_brick_process_remove_brick(glusterd_brickinfo_t *brickinfo,
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
glusterd_brick_proc_t *brick_proc = NULL;
- glusterd_brickinfo_t *brickinfoiter = NULL;
- glusterd_brick_proc_t *brick_proc_tmp = NULL;
- glusterd_brickinfo_t *tmp = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO("glusterd", this, out);
@@ -2416,52 +2418,45 @@ glusterd_brick_process_remove_brick(glusterd_brickinfo_t *brickinfo,
GF_VALIDATE_OR_GOTO(this->name, priv, out);
GF_VALIDATE_OR_GOTO(this->name, brickinfo, out);
- cds_list_for_each_entry_safe(brick_proc, brick_proc_tmp, &priv->brick_procs,
- brick_proc_list)
- {
- if (brickinfo->port != brick_proc->port) {
- continue;
+ brick_proc = brickinfo->brick_proc;
+ if (!brick_proc) {
+ if (brickinfo->status != GF_BRICK_STARTED) {
+ /* This function can be called from gluster_pmap_signout and
+ * glusterd_volume_stop_glusterfs, so brick_proc may legitimately
+ * be NULL here.
+ */
+ ret = 0;
}
+ goto out;
+ }
- GF_VALIDATE_OR_GOTO(this->name, (brick_proc->brick_count > 0), out);
-
- cds_list_for_each_entry_safe(brickinfoiter, tmp, &brick_proc->bricks,
- brick_list)
- {
- if (strcmp(brickinfoiter->path, brickinfo->path) == 0) {
- cds_list_del_init(&brickinfoiter->brick_list);
+ GF_VALIDATE_OR_GOTO(this->name, (brick_proc->brick_count > 0), out);
- GF_FREE(brickinfoiter->logfile);
- GF_FREE(brickinfoiter);
- brick_proc->brick_count--;
- break;
- }
- }
+ cds_list_del_init(&brickinfo->mux_bricks);
+ brick_proc->brick_count--;
- /* If all bricks have been removed, delete the brick process */
- if (brick_proc->brick_count == 0) {
- if (last_brick != NULL)
- *last_brick = 1;
- ret = glusterd_brickprocess_delete(brick_proc);
- if (ret)
- goto out;
- }
- break;
+ /* If all bricks have been removed, delete the brick process */
+ if (brick_proc->brick_count == 0) {
+ if (last_brick != NULL)
+ *last_brick = 1;
+ ret = glusterd_brickprocess_delete(brick_proc);
+ if (ret)
+ goto out;
}
-
+ brickinfo->brick_proc = NULL;
ret = 0;
out:
return ret;
}
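Editorial note: with this refactor every brickinfo carries its brick_proc pointer and a mux_bricks list node, so attach and detach are O(1) instead of scanning priv->brick_procs for a matching port. The two sides now mirror each other; schematically (all names are from this diff):

    /* attach (glusterd_brick_process_add_brick) */
    cds_list_add_tail(&brickinfo->mux_bricks, &brick_proc->bricks);
    brickinfo->brick_proc = brick_proc;
    brick_proc->brick_count++;

    /* detach (glusterd_brick_process_remove_brick) */
    cds_list_del_init(&brickinfo->mux_bricks);
    brick_proc->brick_count--;
    brickinfo->brick_proc = NULL;
    /* once brick_count drops to 0, the process object itself is
     * removed via glusterd_brickprocess_delete() */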
int
-glusterd_brick_process_add_brick(glusterd_brickinfo_t *brickinfo)
+glusterd_brick_process_add_brick(glusterd_brickinfo_t *brickinfo,
+ glusterd_brickinfo_t *parent_brickinfo)
{
int ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
glusterd_brick_proc_t *brick_proc = NULL;
- glusterd_brickinfo_t *brickinfo_dup = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO("glusterd", this, out);
@@ -2470,36 +2465,28 @@ glusterd_brick_process_add_brick(glusterd_brickinfo_t *brickinfo)
GF_VALIDATE_OR_GOTO(this->name, priv, out);
GF_VALIDATE_OR_GOTO(this->name, brickinfo, out);
- ret = glusterd_brickinfo_new(&brickinfo_dup);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_NEW_INFO_FAIL,
- "Failed to create new brickinfo");
- goto out;
- }
-
- ret = glusterd_brickinfo_dup(brickinfo, brickinfo_dup);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_SET_INFO_FAIL,
- "Failed to dup brickinfo");
- goto out;
- }
-
- ret = glusterd_brick_proc_for_port(brickinfo->port, &brick_proc);
- if (ret) {
- ret = glusterd_brickprocess_new(&brick_proc);
+ if (!parent_brickinfo) {
+ ret = glusterd_brick_proc_for_port(brickinfo->port, &brick_proc);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICKPROC_NEW_FAILED,
- "Failed to create "
- "new brick process instance");
- goto out;
- }
+ ret = glusterd_brickprocess_new(&brick_proc);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICKPROC_NEW_FAILED,
+ "Failed to create "
+ "new brick process instance");
+ goto out;
+ }
- brick_proc->port = brickinfo->port;
+ brick_proc->port = brickinfo->port;
- cds_list_add_tail(&brick_proc->brick_proc_list, &priv->brick_procs);
+ cds_list_add_tail(&brick_proc->brick_proc_list, &priv->brick_procs);
+ }
+ } else {
+ ret = 0;
+ brick_proc = parent_brickinfo->brick_proc;
}
- cds_list_add_tail(&brickinfo_dup->brick_list, &brick_proc->bricks);
+ cds_list_add_tail(&brickinfo->mux_bricks, &brick_proc->bricks);
+ brickinfo->brick_proc = brick_proc;
brick_proc->brick_count++;
out:
return ret;
@@ -2555,8 +2542,6 @@ glusterd_volume_stop_glusterfs(glusterd_volinfo_t *volinfo,
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- ret = 0;
-
ret = glusterd_brick_process_remove_brick(brickinfo, &last_brick);
if (ret) {
gf_msg_debug(this->name, 0,
@@ -2632,6 +2617,7 @@ glusterd_volume_stop_glusterfs(glusterd_volinfo_t *volinfo,
brickinfo->status = GF_BRICK_STOPPED;
brickinfo->start_triggered = _gf_false;
+ brickinfo->brick_proc = NULL;
if (del_brick)
glusterd_delete_brick(volinfo, brickinfo);
out:
@@ -2648,7 +2634,7 @@ free_lines(char **line, size_t n)
GF_FREE(line);
}
-char **
+static char **
glusterd_readin_file(const char *filepath, int *line_count)
{
int ret = -1;
@@ -2686,6 +2672,7 @@ glusterd_readin_file(const char *filepath, int *line_count)
/* Reduce allocation to minimal size. */
p = GF_REALLOC(lines, (counter + 1) * sizeof(char *));
if (!p) {
+ /* coverity[TAINTED_SCALAR] */
free_lines(lines, counter);
lines = NULL;
goto out;
@@ -2711,7 +2698,7 @@ glusterd_compare_lines(const void *a, const void *b)
return strcmp(*(char *const *)a, *(char *const *)b);
}
-int
+static int
glusterd_sort_and_redirect(const char *src_filepath, int dest_fd)
{
int ret = -1;
@@ -2743,7 +2730,7 @@ out:
return ret;
}
-int
+static int
glusterd_volume_compute_cksum(glusterd_volinfo_t *volinfo, char *cksum_path,
char *filepath, gf_boolean_t is_quota_conf,
uint32_t *cs)
@@ -2753,20 +2740,16 @@ glusterd_volume_compute_cksum(glusterd_volinfo_t *volinfo, char *cksum_path,
int fd = -1;
int sort_fd = 0;
char sort_filepath[PATH_MAX] = "";
- char *cksum_path_final = NULL;
- char buf[4096] = "";
+ char buf[32];
gf_boolean_t unlink_sortfile = _gf_false;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
+ glusterd_conf_t *priv = THIS->private;
+ xlator_t *this = THIS;
mode_t orig_umask = 0;
GF_ASSERT(volinfo);
- this = THIS;
- priv = THIS->private;
GF_ASSERT(priv);
fd = open(cksum_path, O_RDWR | O_APPEND | O_CREAT | O_TRUNC, 0600);
-
if (-1 == fd) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
"Unable to open %s,"
@@ -2783,7 +2766,7 @@ glusterd_volume_compute_cksum(glusterd_volinfo_t *volinfo, char *cksum_path,
orig_umask = umask(S_IRWXG | S_IRWXO);
sort_fd = mkstemp(sort_filepath);
umask(orig_umask);
- if (sort_fd < 0) {
+ if (-1 == sort_fd) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
"Could not generate "
"temp file, reason: %s for volume: %s",
@@ -2806,35 +2789,44 @@ glusterd_volume_compute_cksum(glusterd_volinfo_t *volinfo, char *cksum_path,
ret = sys_close(sort_fd);
if (ret)
goto out;
- }
- cksum_path_final = is_quota_conf ? filepath : sort_filepath;
+ ret = get_checksum_for_path(sort_filepath, &cksum, priv->op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_GET_FAIL,
+ "unable to get "
+ "checksum for path: %s",
+ sort_filepath);
+ goto out;
+ }
- ret = get_checksum_for_path(cksum_path_final, &cksum, priv->op_version);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_GET_FAIL,
- "unable to get "
- "checksum for path: %s",
- cksum_path_final);
- goto out;
- }
- if (!is_quota_conf) {
- snprintf(buf, sizeof(buf), "%s=%u\n", "info", cksum);
- ret = sys_write(fd, buf, strlen(buf));
+ ret = snprintf(buf, sizeof(buf), "info=%u\n", cksum);
+ ret = sys_write(fd, buf, ret);
if (ret <= 0) {
ret = -1;
goto out;
}
+ } else if (priv->op_version < GD_OP_VERSION_7_0) {
+ ret = get_checksum_for_path(filepath, &cksum, priv->op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_GET_FAIL,
+ "unable to get "
+ "checksum for path: %s",
+ filepath);
+ goto out;
+ }
}
ret = get_checksum_for_file(fd, &cksum, priv->op_version);
- if (ret)
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_GET_FAIL,
+ "unable to get checksum for path: %s", filepath);
goto out;
+ }
*cs = cksum;
out:
- if (fd > 0)
+ if (fd != -1)
sys_close(fd);
if (unlink_sortfile)
sys_unlink(sort_filepath);
@@ -2899,22 +2891,25 @@ out:
return ret;
}
-int
+static int
_add_dict_to_prdict(dict_t *this, char *key, data_t *value, void *data)
{
- glusterd_dict_ctx_t *ctx = NULL;
- char optkey[512] = "";
+ glusterd_dict_ctx_t *ctx = data;
+ char optkey[64]; /* option keys are usually quite small */
int ret = -1;
- ctx = data;
ret = snprintf(optkey, sizeof(optkey), "%s.%s%d", ctx->prefix,
ctx->key_name, ctx->opt_count);
+ if (ret < 0 || ret >= sizeof(optkey))
+ return -1;
ret = dict_set_strn(ctx->dict, optkey, ret, key);
if (ret)
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"option add for %s%d %s", ctx->key_name, ctx->opt_count, key);
ret = snprintf(optkey, sizeof(optkey), "%s.%s%d", ctx->prefix,
ctx->val_name, ctx->opt_count);
+ if (ret < 0 || ret >= sizeof(optkey))
+ return -1;
ret = dict_set_strn(ctx->dict, optkey, ret, value->data);
if (ret)
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
@@ -2938,13 +2933,19 @@ glusterd_add_bricks_hname_path_to_dict(dict_t *dict,
{
ret = snprintf(key, sizeof(key), "%d-hostname", index);
ret = dict_set_strn(dict, key, ret, brickinfo->hostname);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
ret = snprintf(key, sizeof(key), "%d-path", index);
ret = dict_set_strn(dict, key, ret, brickinfo->path);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
index++;
}
@@ -2961,10 +2962,11 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
int32_t count, char *prefix)
{
int32_t ret = -1;
- char pfx[512] = "";
- char key[512] = "";
+ char pfx[32] = ""; /* prefix should be quite small */
+ char key[64] = "";
int keylen;
glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *ta_brickinfo = NULL;
int32_t i = 1;
char *volume_id_str = NULL;
char *str = NULL;
@@ -2979,145 +2981,118 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
GF_ASSERT(volinfo);
GF_ASSERT(prefix);
- keylen = snprintf(key, sizeof(key), "%s%d.name", prefix, count);
+ ret = snprintf(pfx, sizeof(pfx), "%s%d", prefix, count);
+ if (ret < 0 || ret >= sizeof(pfx)) {
+ ret = -1;
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "%s.name", pfx);
ret = dict_set_strn(dict, key, keylen, volinfo->volname);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.type", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.type", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->type);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.brick_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.brick_count", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->brick_count);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.version", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.version", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->version);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.status", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.status", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->status);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.sub_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.sub_count", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->sub_count);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.stripe_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.subvol_count", pfx);
+ ret = dict_set_int32n(dict, key, keylen, volinfo->subvol_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "%s.stripe_count", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->stripe_count);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.replica_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.replica_count", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->replica_count);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.arbiter_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.arbiter_count", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->arbiter_count);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.disperse_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.thin_arbiter_count", pfx);
+ ret = dict_set_int32n(dict, key, keylen, volinfo->thin_arbiter_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "%s.disperse_count", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->disperse_count);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.redundancy_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.redundancy_count", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->redundancy_count);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.dist_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.dist_count", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->dist_leaf_count);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s%d.ckusm", prefix, count);
+ snprintf(key, sizeof(key), "%s.ckusm", pfx);
ret = dict_set_int64(dict, key, volinfo->cksum);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s%d.transport_type", prefix, count);
+ snprintf(key, sizeof(key), "%s.transport_type", pfx);
ret = dict_set_uint32(dict, key, volinfo->transport_type);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s%d.stage_deleted", prefix, count);
+ snprintf(key, sizeof(key), "%s.stage_deleted", pfx);
ret = dict_set_uint32(dict, key, (uint32_t)volinfo->stage_deleted);
if (ret)
goto out;
- /* tiering related variables */
-
- snprintf(key, sizeof(key), "%s%d.cold_brick_count", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_brick_count);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d.cold_type", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_type);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d.cold_replica_count", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_replica_count);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d.cold_disperse_count", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_disperse_count);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d.cold_redundancy_count", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_redundancy_count);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d.cold_dist_count", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_dist_leaf_count);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d.hot_brick_count", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_brick_count);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d.hot_type", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_type);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d.hot_replica_count", prefix, count);
- ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_replica_count);
- if (ret)
- goto out;
-
- snprintf(key, sizeof(key), "%s%d", prefix, count);
- ret = gd_add_vol_snap_details_to_dict(dict, key, volinfo);
- if (ret)
+ ret = gd_add_vol_snap_details_to_dict(dict, pfx, volinfo);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "vol snap details", NULL);
goto out;
+ }
volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id));
if (!volume_id_str) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "volume id=%s", volinfo->volume_id, NULL);
ret = -1;
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.volume_id", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.volume_id", pfx);
ret = dict_set_dynstrn(dict, key, keylen, volume_id_str);
if (ret)
goto out;
volume_id_str = NULL;
- keylen = snprintf(key, sizeof(key), "%s%d.username", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.username", pfx);
str = glusterd_auth_get_username(volinfo);
if (str) {
ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(str));
@@ -3125,7 +3100,7 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.password", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.password", pfx);
str = glusterd_auth_get_password(volinfo);
if (str) {
ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(str));
@@ -3133,29 +3108,30 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.rebalance", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.rebalance", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->rebal.defrag_cmd);
if (ret)
goto out;
rebalance_id_str = gf_strdup(uuid_utoa(volinfo->rebal.rebalance_id));
if (!rebalance_id_str) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "rebalance_id=%s", volinfo->rebal.rebalance_id, NULL);
ret = -1;
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.rebalance-id", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.rebalance-id", pfx);
ret = dict_set_dynstrn(dict, key, keylen, rebalance_id_str);
if (ret)
goto out;
rebalance_id_str = NULL;
- snprintf(key, sizeof(key), "%s%d.rebalance-op", prefix, count);
+ snprintf(key, sizeof(key), "%s.rebalance-op", pfx);
ret = dict_set_uint32(dict, key, volinfo->rebal.op);
if (ret)
goto out;
if (volinfo->rebal.dict) {
- snprintf(pfx, sizeof(pfx), "%s%d", prefix, count);
ctx.dict = dict;
ctx.prefix = pfx;
ctx.opt_count = 1;
@@ -3170,7 +3146,6 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
goto out;
}
- snprintf(pfx, sizeof(pfx), "%s%d", prefix, count);
ctx.dict = dict;
ctx.prefix = pfx;
ctx.opt_count = 1;
@@ -3180,7 +3155,7 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
dict_foreach(volinfo->dict, _add_dict_to_prdict, &ctx);
ctx.opt_count--;
- keylen = snprintf(key, sizeof(key), "%s%d.opt-count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.opt-count", pfx);
ret = dict_set_int32n(dict, key, keylen, ctx.opt_count);
if (ret)
goto out;
@@ -3195,43 +3170,40 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
dict_foreach(volinfo->gsync_slaves, _add_dict_to_prdict, &ctx);
ctx.opt_count--;
- keylen = snprintf(key, sizeof(key), "%s%d.gsync-count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.gsync-count", pfx);
ret = dict_set_int32n(dict, key, keylen, ctx.opt_count);
if (ret)
goto out;
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.hostname", prefix,
- count, i);
+ keylen = snprintf(key, sizeof(key), "%s.brick%d.hostname", pfx, i);
ret = dict_set_strn(dict, key, keylen, brickinfo->hostname);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.path", prefix, count,
- i);
+ keylen = snprintf(key, sizeof(key), "%s.brick%d.path", pfx, i);
ret = dict_set_strn(dict, key, keylen, brickinfo->path);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.decommissioned",
- prefix, count, i);
+ keylen = snprintf(key, sizeof(key), "%s.brick%d.decommissioned", pfx,
+ i);
ret = dict_set_int32n(dict, key, keylen, brickinfo->decommissioned);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.brick_id", prefix,
- count, i);
+ keylen = snprintf(key, sizeof(key), "%s.brick%d.brick_id", pfx, i);
ret = dict_set_strn(dict, key, keylen, brickinfo->brick_id);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s%d.brick%d.uuid", prefix, count, i);
+ snprintf(key, sizeof(key), "%s.brick%d.uuid", pfx, i);
ret = dict_set_dynstr_with_alloc(dict, key, uuid_utoa(brickinfo->uuid));
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s%d.brick%d", prefix, count, i);
+ snprintf(key, sizeof(key), "%s.brick%d", pfx, i);
ret = gd_add_brick_snap_details_to_dict(dict, key, brickinfo);
if (ret)
goto out;
@@ -3239,31 +3211,66 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
i++;
}
+ i = 1;
+ if (volinfo->thin_arbiter_count == 1) {
+ cds_list_for_each_entry(ta_brickinfo, &volinfo->ta_bricks, brick_list)
+ {
+ keylen = snprintf(key, sizeof(key), "%s.ta-brick%d.hostname", pfx,
+ i);
+ ret = dict_set_strn(dict, key, keylen, ta_brickinfo->hostname);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "%s.ta-brick%d.path", pfx, i);
+ ret = dict_set_strn(dict, key, keylen, ta_brickinfo->path);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "%s.ta-brick%d.decommissioned",
+ pfx, i);
+ ret = dict_set_int32n(dict, key, keylen,
+ ta_brickinfo->decommissioned);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "%s.ta-brick%d.brick_id", pfx,
+ i);
+ ret = dict_set_strn(dict, key, keylen, ta_brickinfo->brick_id);
+ if (ret)
+ goto out;
+
+ snprintf(key, sizeof(key), "%s.ta-brick%d.uuid", pfx, i);
+ ret = dict_set_dynstr_with_alloc(dict, key,
+ uuid_utoa(ta_brickinfo->uuid));
+ if (ret)
+ goto out;
+
+ i++;
+ }
+ }
+
/* Add volume op-versions to dict. This prevents volume inconsistencies
* in the cluster
*/
- keylen = snprintf(key, sizeof(key), "%s%d.op-version", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.op-version", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->op_version);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.client-op-version", prefix,
- count);
+ keylen = snprintf(key, sizeof(key), "%s.client-op-version", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->client_op_version);
if (ret)
goto out;
- /*Add volume Capability (BD Xlator) to dict*/
- keylen = snprintf(key, sizeof(key), "%s%d.caps", prefix, count);
- ret = dict_set_int32n(dict, key, keylen, volinfo->caps);
-
- keylen = snprintf(key, sizeof(key), "%s%d.quota-xattr-version", prefix,
- count);
+ keylen = snprintf(key, sizeof(key), "%s.quota-xattr-version", pfx);
ret = dict_set_int32n(dict, key, keylen, volinfo->quota_xattr_version);
out:
GF_FREE(volume_id_str);
GF_FREE(rebalance_id_str);
GF_FREE(rb_id_str);
+ if (key[0] != '\0' && ret != 0)
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
gf_msg_debug(this->name, 0, "Returning with %d", ret);
return ret;
}
@@ -3278,7 +3285,8 @@ glusterd_vol_add_quota_conf_to_dict(glusterd_volinfo_t *volinfo, dict_t *load,
{
int fd = -1;
unsigned char buf[16] = "";
- char key[PATH_MAX] = "";
+ char key[64];
+ char key_prefix[32];
int gfid_idx = 0;
int ret = -1;
xlator_t *this = NULL;
@@ -3303,6 +3311,11 @@ glusterd_vol_add_quota_conf_to_dict(glusterd_volinfo_t *volinfo, dict_t *load,
if (ret)
goto out;
+ ret = snprintf(key_prefix, sizeof(key_prefix), "%s%d", prefix, vol_idx);
+ if (ret < 0 || ret >= sizeof(key_prefix)) {
+ ret = -1;
+ goto out;
+ }
for (gfid_idx = 0;; gfid_idx++) {
ret = quota_conf_read_gfid(fd, buf, &type, version);
if (ret == 0) {
@@ -3314,33 +3327,46 @@ glusterd_vol_add_quota_conf_to_dict(glusterd_volinfo_t *volinfo, dict_t *load,
goto out;
}
- snprintf(key, sizeof(key) - 1, "%s%d.gfid%d", prefix, vol_idx,
- gfid_idx);
+ snprintf(key, sizeof(key) - 1, "%s.gfid%d", key_prefix, gfid_idx);
ret = dict_set_dynstr_with_alloc(load, key, uuid_utoa(buf));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
- snprintf(key, sizeof(key) - 1, "%s%d.gfid-type%d", prefix, vol_idx,
- gfid_idx);
+ snprintf(key, sizeof(key) - 1, "%s.gfid-type%d", key_prefix, gfid_idx);
ret = dict_set_int8(load, key, type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
}
- ret = snprintf(key, sizeof(key), "%s%d.gfid-count", prefix, vol_idx);
+ ret = snprintf(key, sizeof(key), "%s.gfid-count", key_prefix);
ret = dict_set_int32n(load, key, ret, gfid_idx);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
- snprintf(key, sizeof(key), "%s%d.quota-cksum", prefix, vol_idx);
+ snprintf(key, sizeof(key), "%s.quota-cksum", key_prefix);
ret = dict_set_uint32(load, key, volinfo->quota_conf_cksum);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
- snprintf(key, sizeof(key), "%s%d.quota-version", prefix, vol_idx);
+ snprintf(key, sizeof(key), "%s.quota-version", key_prefix);
ret = dict_set_uint32(load, key, volinfo->quota_conf_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
ret = 0;
out:
@@ -3349,33 +3375,50 @@ out:
return ret;
}
-int32_t
-glusterd_add_volumes_to_export_dict(dict_t **peer_data)
+void *
+glusterd_add_bulk_volumes_create_thread(void *data)
{
int32_t ret = -1;
- dict_t *dict = NULL;
glusterd_conf_t *priv = NULL;
glusterd_volinfo_t *volinfo = NULL;
int32_t count = 0;
- glusterd_dict_ctx_t ctx = {0};
xlator_t *this = NULL;
+ glusterd_add_dict_args_t *arg = NULL;
+ dict_t *dict = NULL;
+ int start = 0;
+ int end = 0;
- this = THIS;
- GF_ASSERT(this);
+ GF_ASSERT(data);
+
+ arg = data;
+ dict = arg->voldict;
+ start = arg->start;
+ end = arg->end;
+ this = arg->this;
+ THIS = arg->this;
priv = this->private;
GF_ASSERT(priv);
- dict = dict_new();
- if (!dict)
- goto out;
-
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
{
count++;
+
+ /* Skip volumes whose index falls below this thread's start index. */
+ if (count < start)
+ continue;
+
+ /* Stop once the index passes this thread's end index. */
+ if (count > end)
+ break;
+
ret = glusterd_add_volume_to_dict(volinfo, dict, count, "volume");
if (ret)
goto out;
- if (!glusterd_is_volume_quota_enabled(volinfo))
+ if (!dict_get_sizen(volinfo->dict, VKEY_FEATURES_QUOTA))
continue;
ret = glusterd_vol_add_quota_conf_to_dict(volinfo, dict, count,
"volume");
@@ -3383,37 +3426,260 @@ glusterd_add_volumes_to_export_dict(dict_t **peer_data)
goto out;
}
- ret = dict_set_int32n(dict, "count", SLEN("count"), count);
+out:
+ GF_ATOMIC_DEC(priv->thread_count);
+ free(arg);
+ return NULL;
+}
+
+int
+glusterd_dict_searialize(dict_t *dict_arr[], int count, int totcount, char *buf)
+{
+ int i = 0;
+ int32_t keylen = 0;
+ int64_t netword = 0;
+ data_pair_t *pair = NULL;
+ int dict_count = 0;
+ int ret = 0;
+
+ netword = hton32(totcount);
+ memcpy(buf, &netword, sizeof(netword));
+ buf += DICT_HDR_LEN;
+
+ for (i = 0; i < count; i++) {
+ if (dict_arr[i]) {
+ dict_count = dict_arr[i]->count;
+ pair = dict_arr[i]->members_list;
+ while (dict_count) {
+ if (!pair) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0,
+ LG_MSG_PAIRS_LESS_THAN_COUNT,
+ "less than count data pairs found!");
+ ret = -1;
+ goto out;
+ }
+
+ if (!pair->key) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, LG_MSG_NULL_PTR,
+ "pair->key is null!");
+ ret = -1;
+ goto out;
+ }
+
+ keylen = strlen(pair->key);
+ netword = hton32(keylen);
+ memcpy(buf, &netword, sizeof(netword));
+ buf += DICT_DATA_HDR_KEY_LEN;
+ if (!pair->value) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, LG_MSG_NULL_PTR,
+ "pair->value is null!");
+ ret = -1;
+ goto out;
+ }
+
+ netword = hton32(pair->value->len);
+ memcpy(buf, &netword, sizeof(netword));
+ buf += DICT_DATA_HDR_VAL_LEN;
+
+ memcpy(buf, pair->key, keylen);
+ buf += keylen;
+ *buf++ = '\0';
+
+ if (pair->value->data) {
+ memcpy(buf, pair->value->data, pair->value->len);
+ buf += pair->value->len;
+ }
+
+ pair = pair->next;
+ dict_count--;
+ }
+ }
+ }
+
+out:
+ for (i = 0; i < count; i++) {
+ if (dict_arr[i])
+ dict_unref(dict_arr[i]);
+ }
+ return ret;
+}
+
+int
+glusterd_dict_arr_serialize(dict_t *dict_arr[], int count, char **buf,
+ u_int *length)
+{
+ ssize_t len = 0;
+ int i = 0;
+ int totcount = 0;
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ if (dict_arr[i]) {
+ len += dict_serialized_length_lk(dict_arr[i]);
+ totcount += dict_arr[i]->count;
+ }
+ }
+
+ /* Each dict_serialized_length_lk() result includes one DICT_HDR_LEN;
+ keep a single header for the merged buffer and subtract the rest. */
+ len = len - ((count - 1) * DICT_HDR_LEN);
+
+ *buf = GF_MALLOC(len, gf_common_mt_char);
+ if (*buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (length != NULL) {
+ *length = len;
+ }
+
+ ret = glusterd_dict_searialize(dict_arr, count, totcount, *buf);
+
+out:
+ return ret;
+}
+
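Editorial note: glusterd_dict_searialize emits the usual glusterfs dict wire format, but with the pair counts of all input dicts folded under a single header. A sketch of the buffer layout, with the header and field sizes used above:

    /*
     * [ totcount : 4 bytes, network order ]      (DICT_HDR_LEN)
     * then, for each pair of each dict in array order:
     *   [ keylen : 4 bytes, network order ]      (DICT_DATA_HDR_KEY_LEN)
     *   [ vallen : 4 bytes, network order ]      (DICT_DATA_HDR_VAL_LEN)
     *   [ key bytes, keylen of them ] [ '\0' ]
     *   [ value bytes, vallen of them ]
     */

This is also why glusterd_dict_arr_serialize subtracts (count - 1) * DICT_HDR_LEN: each per-dict serialized length already includes one header, and the merged buffer keeps exactly one.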
+int32_t
+glusterd_add_volumes_to_export_dict(dict_t *peer_data, char **buf,
+ u_int *length)
+{
+ int32_t ret = -1;
+ dict_t *dict_arr[128] = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int32_t count = 0;
+ glusterd_dict_ctx_t ctx = {0};
+ xlator_t *this = NULL;
+ int totthread = 0;
+ int volcnt = 0;
+ int start = 1;
+ int endindex = 0;
+ int vol_per_thread_limit = 0;
+ glusterd_add_dict_args_t *arg = NULL;
+ pthread_t th_id = {
+ 0,
+ };
+ int th_ret = 0;
+ int i = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ /* Count the total number of volumes */
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list) volcnt++;
+
+ get_gd_vol_thread_limit(&vol_per_thread_limit);
+
+ if ((vol_per_thread_limit == 1) || (vol_per_thread_limit == 0) ||
+ (vol_per_thread_limit > 100)) {
+ totthread = 0;
+ } else {
+ totthread = volcnt / vol_per_thread_limit;
+ if (totthread) {
+ endindex = volcnt % vol_per_thread_limit;
+ if (endindex)
+ totthread++;
+ }
+ }
+
+ if (totthread == 0) {
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ count++;
+ ret = glusterd_add_volume_to_dict(volinfo, peer_data, count,
+ "volume");
+ if (ret)
+ goto out;
+
+ if (!dict_get_sizen(volinfo->dict, VKEY_FEATURES_QUOTA))
+ continue;
+
+ ret = glusterd_vol_add_quota_conf_to_dict(volinfo, peer_data, count,
+ "volume");
+ if (ret)
+ goto out;
+ }
+ } else {
+ for (i = 0; i < totthread; i++) {
+ arg = calloc(1, sizeof(*arg));
+ dict_arr[i] = dict_new();
+ arg->this = this;
+ arg->voldict = dict_arr[i];
+ arg->start = start;
+ if ((i + 1) != totthread) {
+ arg->end = ((i + 1) * vol_per_thread_limit);
+ } else {
+ arg->end = (((i + 1) * vol_per_thread_limit) + endindex);
+ }
+ th_ret = gf_thread_create_detached(
+ &th_id, glusterd_add_bulk_volumes_create_thread, arg,
+ "bulkvoldict");
+ if (th_ret) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "glusterd_add_bulk_volume %s"
+ " thread creation failed",
+ "bulkvoldict");
+ free(arg);
+ goto out;
+ }
+
+ start = start + vol_per_thread_limit;
+ GF_ATOMIC_INC(priv->thread_count);
+ gf_log(this->name, GF_LOG_INFO,
+ "Create thread %d to populate dict data for volume"
+ " start index is %d end index is %d",
+ (i + 1), arg->start, arg->end);
+ }
+ while (GF_ATOMIC_GET(priv->thread_count)) {
+ sleep(1);
+ }
+
+ gf_log(this->name, GF_LOG_INFO,
+ "Finished dictionary population in all threads");
+ }
+
+ ret = dict_set_int32n(peer_data, "count", SLEN("count"), volcnt);
if (ret)
goto out;
- ctx.dict = dict;
+ ctx.dict = peer_data;
ctx.prefix = "global";
ctx.opt_count = 1;
ctx.key_name = "key";
ctx.val_name = "val";
dict_foreach(priv->opts, _add_dict_to_prdict, &ctx);
ctx.opt_count--;
- ret = dict_set_int32n(dict, "global-opt-count", SLEN("global-opt-count"),
- ctx.opt_count);
+ ret = dict_set_int32n(peer_data, "global-opt-count",
+ SLEN("global-opt-count"), ctx.opt_count);
if (ret)
goto out;
- *peer_data = dict;
+ if (totthread) {
+ gf_log(this->name, GF_LOG_INFO,
+ "Merged multiple dictionaries into a single one");
+ dict_arr[totthread++] = dict_ref(peer_data);
+ ret = glusterd_dict_arr_serialize(dict_arr, totthread, buf, length);
+ gf_log(this->name, GF_LOG_INFO, "Serialize dictionary data returned %d",
+ ret);
+ }
+
out:
- if (ret)
- dict_unref(dict);
gf_msg_trace(this->name, 0, "Returning %d", ret);
return ret;
}
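Editorial note, a worked example of the partitioning above: assume volcnt = 250 and vol_per_thread_limit = 100. Then totthread = 250 / 100 = 2, endindex = 250 % 100 = 50, and the non-zero remainder bumps totthread to 3. The per-thread index ranges come out as:

    /* thread 1: start =   1, end = 100            */
    /* thread 2: start = 101, end = 200            */
    /* thread 3: start = 201, end = 300 + 50 = 350 */

The oversized end of the last thread is harmless: glusterd_add_bulk_volumes_create_thread stops as soon as the volume list is exhausted, so it effectively processes indices 201..250.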
-int32_t
+static int32_t
glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
int32_t *status, char *hostname)
{
int32_t ret = -1;
char key[64] = "";
+ char key_prefix[32];
int keylen;
glusterd_volinfo_t *volinfo = NULL;
char *volname = NULL;
@@ -3430,37 +3696,43 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
this = THIS;
GF_ASSERT(this);
- keylen = snprintf(key, sizeof(key), "volume%d.name", count);
+ snprintf(key_prefix, sizeof(key_prefix), "volume%d", count);
+ keylen = snprintf(key, sizeof(key), "%s.name", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- snprintf(key, sizeof(key), "volume%d.stage_deleted", count);
+ snprintf(key, sizeof(key), "%s.stage_deleted", key_prefix);
ret = dict_get_uint32(peer_data, key, &stage_deleted);
/* stage_deleted = 1 means the volume is still in the process of
* deleting a volume, so we shouldn't be trying to create a
* fresh volume here which would lead to a stale entry
*/
- if (stage_deleted == 0)
+ if (!ret && stage_deleted == 0)
*status = GLUSTERD_VOL_COMP_UPDATE_REQ;
- ret = 0;
goto out;
}
- keylen = snprintf(key, sizeof(key), "volume%d.version", count);
+ keylen = snprintf(key, sizeof(key), "%s.version", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
if (version > volinfo->version) {
// Mismatch detected
- ret = 0;
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_VERS_MISMATCH,
"Version of volume %s differ. local version = %d, "
"remote version = %d on peer %s",
volinfo->volname, volinfo->version, version, hostname);
+ GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);
*status = GLUSTERD_VOL_COMP_UPDATE_REQ;
goto out;
} else if (version < volinfo->version) {
@@ -3470,13 +3742,15 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
// Now, versions are same, compare cksums.
//
- snprintf(key, sizeof(key), "volume%d.ckusm", count);
+ snprintf(key, sizeof(key), "%s.ckusm", key_prefix);
ret = dict_get_uint32(peer_data, key, &cksum);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
if (cksum != volinfo->cksum) {
- ret = 0;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_VERS_MISMATCH,
"Version of Cksums %s differ. local cksum = %u, remote "
"cksum = %u on peer %s",
@@ -3485,18 +3759,19 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
goto out;
}
- snprintf(key, sizeof(key), "volume%d.quota-version", count);
+ if (!dict_get_sizen(volinfo->dict, VKEY_FEATURES_QUOTA))
+ goto skip_quota;
+
+ snprintf(key, sizeof(key), "%s.quota-version", key_prefix);
ret = dict_get_uint32(peer_data, key, &quota_version);
if (ret) {
gf_msg_debug(this->name, 0,
"quota-version key absent for"
" volume %s in peer %s's response",
volinfo->volname, hostname);
- ret = 0;
} else {
if (quota_version > volinfo->quota_conf_version) {
// Mismatch detected
- ret = 0;
gf_msg(this->name, GF_LOG_INFO, 0,
GD_MSG_QUOTA_CONFIG_VERS_MISMATCH,
"Quota configuration versions of volume %s "
@@ -3514,17 +3789,15 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
// Now, versions are same, compare cksums.
//
- snprintf(key, sizeof(key), "volume%d.quota-cksum", count);
+ snprintf(key, sizeof(key), "%s.quota-cksum", key_prefix);
ret = dict_get_uint32(peer_data, key, &quota_cksum);
if (ret) {
gf_msg_debug(this->name, 0,
"quota checksum absent for "
"volume %s in peer %s's response",
volinfo->volname, hostname);
- ret = 0;
} else {
if (quota_cksum != volinfo->quota_conf_cksum) {
- ret = 0;
gf_msg(this->name, GF_LOG_ERROR, 0,
GD_MSG_QUOTA_CONFIG_CKSUM_MISMATCH,
"Cksums of "
@@ -3536,10 +3809,12 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
goto out;
}
}
+
+skip_quota:
*status = GLUSTERD_VOL_COMP_SCS;
out:
- keylen = snprintf(key, sizeof(key), "volume%d.update", count);
+ keylen = snprintf(key, sizeof(key), "%s.update", key_prefix);
if (*status == GLUSTERD_VOL_COMP_UPDATE_REQ) {
ret = dict_set_int32n(peer_data, key, keylen, 1);
@@ -3616,31 +3891,33 @@ glusterd_spawn_daemons(void *opaque)
glusterd_conf_t *conf = THIS->private;
int ret = -1;
- synclock_lock(&conf->big_lock);
- glusterd_restart_bricks();
+ /* glusterd_restart_brick() will take the sync_lock. */
+ glusterd_restart_bricks(NULL);
glusterd_restart_gsyncds(conf);
glusterd_restart_rebalance(conf);
ret = glusterd_snapdsvc_restart();
- ret = glusterd_tierdsvc_restart();
ret = glusterd_gfproxydsvc_restart();
+ ret = glusterd_shdsvc_restart();
return ret;
}
-int32_t
+static int32_t
glusterd_import_friend_volume_opts(dict_t *peer_data, int count,
glusterd_volinfo_t *volinfo, char *prefix)
{
- char key[512] = "";
+ char key[64];
int keylen;
int32_t ret = -1;
int opt_count = 0;
char msg[2048] = "";
- char volume_prefix[1024] = "";
+ char volume_prefix[32];
GF_ASSERT(peer_data);
GF_ASSERT(volinfo);
- keylen = snprintf(key, sizeof(key), "%s%d.opt-count", prefix, count);
+ snprintf(volume_prefix, sizeof(volume_prefix), "%s%d", prefix, count);
+
+ keylen = snprintf(key, sizeof(key), "%s.opt-count", volume_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &opt_count);
if (ret) {
snprintf(msg, sizeof(msg),
@@ -3650,7 +3927,6 @@ glusterd_import_friend_volume_opts(dict_t *peer_data, int count,
goto out;
}
- snprintf(volume_prefix, sizeof(volume_prefix), "%s%d", prefix, count);
ret = import_prdict_dict(peer_data, volinfo->dict, "key", "value",
opt_count, volume_prefix);
if (ret) {
@@ -3661,7 +3937,7 @@ glusterd_import_friend_volume_opts(dict_t *peer_data, int count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.gsync-count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.gsync-count", volume_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &opt_count);
if (ret) {
snprintf(msg, sizeof(msg),
@@ -3689,18 +3965,111 @@ out:
return ret;
}
+static int32_t
+glusterd_import_new_ta_brick(dict_t *peer_data, int32_t vol_count,
+ int32_t brick_count,
+ glusterd_brickinfo_t **ta_brickinfo, char *prefix)
+{
+ char key[128];
+ char key_prefix[64];
+ int keylen;
+ int ret = -1;
+ char *hostname = NULL;
+ char *path = NULL;
+ char *brick_id = NULL;
+ int decommissioned = 0;
+ glusterd_brickinfo_t *new_ta_brickinfo = NULL;
+ char msg[256] = "";
+ char *brick_uuid_str = NULL;
+
+ GF_ASSERT(peer_data);
+ GF_ASSERT(vol_count >= 0);
+ GF_ASSERT(ta_brickinfo);
+ GF_ASSERT(prefix);
+
+ ret = snprintf(key_prefix, sizeof(key_prefix), "%s%d.ta-brick%d", prefix,
+ vol_count, brick_count);
+
+ if (ret < 0 || ret >= sizeof(key_prefix)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg), "key_prefix too long");
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "%s.hostname", key_prefix);
+ ret = dict_get_strn(peer_data, key, keylen, &hostname);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "%s missing in payload", key);
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "%s.path", key_prefix);
+ ret = dict_get_strn(peer_data, key, keylen, &path);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "%s missing in payload", key);
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "%s.brick_id", key_prefix);
+ ret = dict_get_strn(peer_data, key, keylen, &brick_id);
+
+ keylen = snprintf(key, sizeof(key), "%s.decommissioned", key_prefix);
+ ret = dict_get_int32n(peer_data, key, keylen, &decommissioned);
+ if (ret) {
+ /* For backward compatibility */
+ ret = 0;
+ }
+
+ ret = glusterd_brickinfo_new(&new_ta_brickinfo);
+ if (ret)
+ goto out;
+
+ ret = snprintf(new_ta_brickinfo->path, sizeof(new_ta_brickinfo->path), "%s",
+ path);
+ if (ret < 0 || ret >= sizeof(new_ta_brickinfo->path)) {
+ ret = -1;
+ goto out;
+ }
+ ret = snprintf(new_ta_brickinfo->hostname,
+ sizeof(new_ta_brickinfo->hostname), "%s", hostname);
+ if (ret < 0 || ret >= sizeof(new_ta_brickinfo->hostname)) {
+ ret = -1;
+ goto out;
+ }
+ new_ta_brickinfo->decommissioned = decommissioned;
+ if (brick_id)
+ (void)snprintf(new_ta_brickinfo->brick_id,
+ sizeof(new_ta_brickinfo->brick_id), "%s", brick_id);
+ keylen = snprintf(key, sizeof(key), "%s.uuid", key_prefix);
+ ret = dict_get_strn(peer_data, key, keylen, &brick_uuid_str);
+ if (ret)
+ goto out;
+ gf_uuid_parse(brick_uuid_str, new_ta_brickinfo->uuid);
+
+ *ta_brickinfo = new_ta_brickinfo;
+
+out:
+ if (msg[0]) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_IMPORT_FAIL, "%s",
+ msg);
+ gf_event(EVENT_IMPORT_BRICK_FAILED, "peer=%s;ta-brick=%s",
+ new_ta_brickinfo->hostname, new_ta_brickinfo->path);
+ }
+ gf_msg_debug("glusterd", 0, "Returning with %d", ret);
+ return ret;
+}
+
/* The prefix represents the type of volume to be added.
* It will be "volume" for normal volumes, and snap# like
* snap1, snap2, for snapshot volumes
*/
-int32_t
+static int32_t
glusterd_import_new_brick(dict_t *peer_data, int32_t vol_count,
int32_t brick_count, glusterd_brickinfo_t **brickinfo,
char *prefix)
{
- char key[512] = {
- 0,
- };
+ char key[128];
+ char key_prefix[64];
int keylen;
int ret = -1;
char *hostname = NULL;
@@ -3708,7 +4077,7 @@ glusterd_import_new_brick(dict_t *peer_data, int32_t vol_count,
char *brick_id = NULL;
int decommissioned = 0;
glusterd_brickinfo_t *new_brickinfo = NULL;
- char msg[2048] = "";
+ char msg[256] = "";
char *brick_uuid_str = NULL;
GF_ASSERT(peer_data);
@@ -3716,28 +4085,31 @@ glusterd_import_new_brick(dict_t *peer_data, int32_t vol_count,
GF_ASSERT(brickinfo);
GF_ASSERT(prefix);
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.hostname", prefix,
- vol_count, brick_count);
+ ret = snprintf(key_prefix, sizeof(key_prefix), "%s%d.brick%d", prefix,
+ vol_count, brick_count);
+ if (ret < 0 || ret >= sizeof(key_prefix)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg), "key_prefix too long");
+ goto out;
+ }
+ keylen = snprintf(key, sizeof(key), "%s.hostname", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &hostname);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload", key);
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.path", prefix, vol_count,
- brick_count);
+ keylen = snprintf(key, sizeof(key), "%s.path", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &path);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload", key);
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.brick_id", prefix,
- vol_count, brick_count);
+ keylen = snprintf(key, sizeof(key), "%s.brick_id", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &brick_id);
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.decommissioned", prefix,
- vol_count, brick_count);
+ keylen = snprintf(key, sizeof(key), "%s.decommissioned", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &decommissioned);
if (ret) {
/* For backward compatibility */
@@ -3762,15 +4134,15 @@ glusterd_import_new_brick(dict_t *peer_data, int32_t vol_count,
}
new_brickinfo->decommissioned = decommissioned;
if (brick_id)
- strcpy(new_brickinfo->brick_id, brick_id);
+ (void)snprintf(new_brickinfo->brick_id, sizeof(new_brickinfo->brick_id),
+ "%s", brick_id);
- snprintf(key, sizeof(key), "%s%d.brick%d", prefix, vol_count, brick_count);
- ret = gd_import_new_brick_snap_details(peer_data, key, new_brickinfo);
+ ret = gd_import_new_brick_snap_details(peer_data, key_prefix,
+ new_brickinfo);
if (ret)
goto out;
- keylen = snprintf(key, sizeof(key), "%s%d.brick%d.uuid", prefix, vol_count,
- brick_count);
+ keylen = snprintf(key, sizeof(key), "%s.uuid", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &brick_uuid_str);
if (ret)
goto out;
@@ -3781,8 +4153,9 @@ out:
if (msg[0]) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_IMPORT_FAIL, "%s",
msg);
- gf_event(EVENT_IMPORT_BRICK_FAILED, "peer=%s;brick=%s",
- new_brickinfo->hostname, new_brickinfo->path);
+ if (new_brickinfo)
+ gf_event(EVENT_IMPORT_BRICK_FAILED, "peer=%s;brick=%s",
+ new_brickinfo->hostname, new_brickinfo->path);
}
gf_msg_debug("glusterd", 0, "Returning with %d", ret);
return ret;
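Note how the shared out: label above only dereferences new_brickinfo after a NULL check, since the same label also handles failures that occur before the structure is allocated. The same shared-cleanup shape in isolation (a simplified sketch, not the actual importer):

    #include <stdio.h>
    #include <stdlib.h>

    struct brick {
        char hostname[64];
        char path[256];
    };

    static int
    import_brick(const char *hostname, const char *path, struct brick **out)
    {
        int ret = -1;
        char msg[256] = "";
        struct brick *b = NULL;

        if (!hostname || !path) {
            snprintf(msg, sizeof(msg), "field missing in payload");
            goto out; /* reached before b is allocated */
        }
        b = calloc(1, sizeof(*b));
        if (!b)
            goto out;
        snprintf(b->hostname, sizeof(b->hostname), "%s", hostname);
        snprintf(b->path, sizeof(b->path), "%s", path);
        *out = b;
        ret = 0;
    out:
        if (msg[0]) {
            fprintf(stderr, "import failed: %s\n", msg);
            if (b) /* b may still be NULL when the failure happened first */
                fprintf(stderr, "brick=%s:%s\n", b->hostname, b->path);
        }
        return ret;
    }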
@@ -3798,8 +4171,10 @@ glusterd_import_bricks(dict_t *peer_data, int32_t vol_count,
{
int ret = -1;
int brick_count = 1;
+ int ta_brick_count = 1;
int brickid = 0;
glusterd_brickinfo_t *new_brickinfo = NULL;
+ glusterd_brickinfo_t *new_ta_brickinfo = NULL;
GF_ASSERT(peer_data);
GF_ASSERT(vol_count >= 0);
@@ -3818,6 +4193,19 @@ glusterd_import_bricks(dict_t *peer_data, int32_t vol_count,
cds_list_add_tail(&new_brickinfo->brick_list, &new_volinfo->bricks);
brick_count++;
}
+
+ if (new_volinfo->thin_arbiter_count == 1) {
+ while (ta_brick_count <= new_volinfo->subvol_count) {
+ ret = glusterd_import_new_ta_brick(peer_data, vol_count,
+ ta_brick_count,
+ &new_ta_brickinfo, prefix);
+ if (ret)
+ goto out;
+ cds_list_add_tail(&new_ta_brickinfo->brick_list,
+ &new_volinfo->ta_bricks);
+ ta_brick_count++;
+ }
+ }
ret = 0;
out:
gf_msg_debug("glusterd", 0, "Returning with %d", ret);
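For thin-arbiter volumes the new loop above imports one ta brick per subvolume and chains it onto the volume's separate ta_bricks list. A simplified sketch of that per-subvolume loop, with the cds_list API replaced by a plain singly linked list:

    #include <stdlib.h>

    struct brick {
        struct brick *next;
        int subvol;
    };

    struct volume {
        int subvol_count;
        int thin_arbiter_count;
        struct brick *ta_bricks; /* singly linked; order not preserved here */
    };

    static int
    import_ta_bricks(struct volume *vol)
    {
        int i;

        if (vol->thin_arbiter_count != 1)
            return 0; /* nothing to do for regular volumes */

        for (i = 1; i <= vol->subvol_count; i++) {
            struct brick *b = calloc(1, sizeof(*b));
            if (!b)
                return -1;
            b->subvol = i;
            b->next = vol->ta_bricks;
            vol->ta_bricks = b;
        }
        return 0;
    }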
@@ -3836,7 +4224,8 @@ glusterd_import_quota_conf(dict_t *peer_data, int vol_idx,
int gfid_count = 0;
int ret = -1;
int fd = -1;
- char key[PATH_MAX] = "";
+ char key[128];
+ char key_prefix[64];
int keylen;
char *gfid_str = NULL;
uuid_t gfid = {
@@ -3865,37 +4254,47 @@ glusterd_import_quota_conf(dict_t *peer_data, int vol_idx,
goto out;
}
- snprintf(key, sizeof(key), "%s%d.quota-cksum", prefix, vol_idx);
+ ret = snprintf(key_prefix, sizeof(key_prefix), "%s%d", prefix, vol_idx);
+ if (ret < 0 || ret >= sizeof(key_prefix)) {
+ ret = -1;
+ gf_msg_debug(this->name, 0, "Failed to set key_prefix for quota conf");
+ goto out;
+ }
+ snprintf(key, sizeof(key), "%s.quota-cksum", key_prefix);
ret = dict_get_uint32(peer_data, key, &new_volinfo->quota_conf_cksum);
if (ret)
gf_msg_debug(this->name, 0, "Failed to get quota cksum");
- snprintf(key, sizeof(key), "%s%d.quota-version", prefix, vol_idx);
+ snprintf(key, sizeof(key), "%s.quota-version", key_prefix);
ret = dict_get_uint32(peer_data, key, &new_volinfo->quota_conf_version);
if (ret)
gf_msg_debug(this->name, 0,
"Failed to get quota "
"version");
- keylen = snprintf(key, sizeof(key), "%s%d.gfid-count", prefix, vol_idx);
+ keylen = snprintf(key, sizeof(key), "%s.gfid-count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &gfid_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
ret = glusterd_quota_conf_write_header(fd);
if (ret)
goto out;
- gfid_idx = 0;
for (gfid_idx = 0; gfid_idx < gfid_count; gfid_idx++) {
- keylen = snprintf(key, sizeof(key) - 1, "%s%d.gfid%d", prefix, vol_idx,
+ keylen = snprintf(key, sizeof(key) - 1, "%s.gfid%d", key_prefix,
gfid_idx);
ret = dict_get_strn(peer_data, key, keylen, &gfid_str);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
- snprintf(key, sizeof(key) - 1, "%s%d.gfid-type%d", prefix, vol_idx,
- gfid_idx);
+ snprintf(key, sizeof(key) - 1, "%s.gfid-type%d", key_prefix, gfid_idx);
ret = dict_get_int8(peer_data, key, &gfid_type);
if (ret)
gfid_type = GF_QUOTA_CONF_TYPE_USAGE;
@@ -3950,32 +4349,37 @@ gd_import_friend_volume_rebal_dict(dict_t *dict, int count,
int ret = -1;
char key[64] = "";
int dict_count = 0;
- char prefix[64] = "";
+ char key_prefix[32];
GF_ASSERT(dict);
GF_ASSERT(volinfo);
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- ret = snprintf(key, sizeof(key), "volume%d.rebal-dict-count", count);
+ snprintf(key_prefix, sizeof(key_prefix), "volume%d", count);
+ ret = snprintf(key, sizeof(key), "%s.rebal-dict-count", key_prefix);
ret = dict_get_int32n(dict, key, ret, &dict_count);
if (ret) {
/* Older peers will not have this dict */
+ gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
ret = 0;
goto out;
}
volinfo->rebal.dict = dict_new();
if (!volinfo->rebal.dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
- snprintf(prefix, sizeof(prefix), "volume%d", count);
ret = import_prdict_dict(dict, volinfo->rebal.dict, "rebal-dict-key",
- "rebal-dict-value", dict_count, prefix);
+ "rebal-dict-value", dict_count, key_prefix);
out:
if (ret && volinfo->rebal.dict)
dict_unref(volinfo->rebal.dict);
- gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
return ret;
}
@@ -3988,7 +4392,8 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
glusterd_volinfo_t **volinfo, char *prefix)
{
int ret = -1;
- char key[256] = "";
+ char key[64] = "";
+ char key_prefix[32];
int keylen;
char *parent_volname = NULL;
char *volname = NULL;
@@ -4005,21 +4410,27 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
GF_ASSERT(volinfo);
GF_ASSERT(prefix);
- keylen = snprintf(key, sizeof(key), "%s%d.name", prefix, count);
+ ret = snprintf(key_prefix, sizeof(key_prefix), "%s%d", prefix, count);
+ if (ret < 0 || ret >= sizeof(key_prefix)) {
+ ret = -1;
+        snprintf(msg, sizeof(msg), "key_prefix too long");
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "%s.name", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &volname);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload", key);
goto out;
}
- snprintf(key, sizeof(key), "%s%d.stage_deleted", prefix, count);
+ snprintf(key, sizeof(key), "%s.stage_deleted", key_prefix);
ret = dict_get_uint32(peer_data, key, &stage_deleted);
/* stage_deleted = 1 means the volume is still in the process of
* deleting a volume, so we shouldn't be trying to create a
* fresh volume here which would lead to a stale entry
*/
if (stage_deleted) {
- ret = 0;
goto out;
}
@@ -4032,7 +4443,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
ret = -1;
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.type", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.type", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->type);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
@@ -4040,7 +4451,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.parent_volname", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.parent_volname", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &parent_volname);
if (!ret) {
ret = snprintf(new_volinfo->parent_volname,
@@ -4051,7 +4462,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
}
- keylen = snprintf(key, sizeof(key), "%s%d.brick_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.brick_count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->brick_count);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
@@ -4059,7 +4470,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.version", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.version", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->version);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
@@ -4067,7 +4478,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.status", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.status", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen,
(int32_t *)&new_volinfo->status);
if (ret) {
@@ -4076,7 +4487,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.sub_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.sub_count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->sub_count);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
@@ -4084,9 +4495,17 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
+ keylen = snprintf(key, sizeof(key), "%s.subvol_count", key_prefix);
+ ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->subvol_count);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
+ volname);
+ goto out;
+ }
+
    /* not having a 'stripe_count' key is not an error
       (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.stripe_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.stripe_count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->stripe_count);
if (ret)
gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
@@ -4094,7 +4513,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
    /* not having a 'replica_count' key is not an error
       (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.replica_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.replica_count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->replica_count);
if (ret)
gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
@@ -4102,15 +4521,24 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
    /* not having an 'arbiter_count' key is not an error
       (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.arbiter_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.arbiter_count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->arbiter_count);
if (ret)
gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
"peer is possibly old version");
+    /* not having a 'thin_arbiter_count' key is not an error
+       (as peer may be of old version) */
+ keylen = snprintf(key, sizeof(key), "%s.thin_arbiter_count", key_prefix);
+ ret = dict_get_int32n(peer_data, key, keylen,
+ &new_volinfo->thin_arbiter_count);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "peer is possibly old version");
+
    /* not having a 'disperse_count' key is not an error
       (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.disperse_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.disperse_count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->disperse_count);
if (ret)
gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
@@ -4118,7 +4546,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
    /* not having a 'redundancy_count' key is not an error
       (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.redundancy_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.redundancy_count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen,
&new_volinfo->redundancy_count);
if (ret)
@@ -4127,92 +4555,16 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
    /* not having a 'dist_count' key is not an error
       (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.dist_count", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.dist_count", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen,
&new_volinfo->dist_leaf_count);
if (ret)
gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
"peer is possibly old version");
- /* not having a 'hot_brick_count' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.hot_brick_count", prefix, count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.hot_brick_count);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
- /* not having a 'hot_type' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.hot_type", prefix, count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.hot_type);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
- /* not having a 'hot_replica_count' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.hot_replica_count", prefix,
- count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.hot_replica_count);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
- /* not having a 'cold_brick_count' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.cold_brick_count", prefix, count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.cold_brick_count);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
- /* not having a 'cold_type' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.cold_type", prefix, count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.cold_type);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
- /* not having a 'cold_replica_count' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.cold_replica_count", prefix,
- count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.cold_replica_count);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
- /* not having a 'cold_disperse_count' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.cold_disperse_count", prefix,
- count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.cold_disperse_count);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
- /* not having a 'cold_redundancy_count' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.cold_redundancy_count", prefix,
- count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.cold_redundancy_count);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
- /* not having a 'cold_dist_count' key is not a error
- (as peer may be of old version) */
- keylen = snprintf(key, sizeof(key), "%s%d.cold_dist_count", prefix, count);
- ret = dict_get_int32n(peer_data, key, keylen,
- &new_volinfo->tier_info.cold_dist_leaf_count);
- if (ret)
- gf_msg_debug(THIS->name, 0, "peer is possibly old version");
-
new_volinfo->subvol_count = new_volinfo->brick_count /
glusterd_get_dist_leaf_count(new_volinfo);
- snprintf(key, sizeof(key), "%s%d.ckusm", prefix, count);
+ snprintf(key, sizeof(key), "%s.ckusm", key_prefix);
ret = dict_get_uint32(peer_data, key, &new_volinfo->cksum);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
@@ -4220,7 +4572,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.volume_id", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.volume_id", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &volume_id_str);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
@@ -4230,7 +4582,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
gf_uuid_parse(volume_id_str, new_volinfo->volume_id);
- keylen = snprintf(key, sizeof(key), "%s%d.username", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.username", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &str);
if (!ret) {
ret = glusterd_auth_set_username(new_volinfo, str);
@@ -4238,7 +4590,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.password", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.password", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &str);
if (!ret) {
ret = glusterd_auth_set_password(new_volinfo, str);
@@ -4246,7 +4598,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- snprintf(key, sizeof(key), "%s%d.transport_type", prefix, count);
+ snprintf(key, sizeof(key), "%s.transport_type", key_prefix);
ret = dict_get_uint32(peer_data, key, &new_volinfo->transport_type);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
@@ -4254,7 +4606,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- snprintf(key, sizeof(key), "%s%d.rebalance", prefix, count);
+ snprintf(key, sizeof(key), "%s.rebalance", key_prefix);
ret = dict_get_uint32(peer_data, key, &new_volinfo->rebal.defrag_cmd);
if (ret) {
snprintf(msg, sizeof(msg), "%s missing in payload for %s", key,
@@ -4262,7 +4614,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "%s%d.rebalance-id", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.rebalance-id", key_prefix);
ret = dict_get_strn(peer_data, key, keylen, &rebalance_id_str);
if (ret) {
/* This is not present in older glusterfs versions,
@@ -4273,14 +4625,12 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
gf_uuid_parse(rebalance_id_str, new_volinfo->rebal.rebalance_id);
}
- snprintf(key, sizeof(key), "%s%d.rebalance-op", prefix, count);
+ snprintf(key, sizeof(key), "%s.rebalance-op", key_prefix);
+ /* This is not present in older glusterfs versions,
+ * so don't error out
+ */
ret = dict_get_uint32(peer_data, key, (uint32_t *)&new_volinfo->rebal.op);
- if (ret) {
- /* This is not present in older glusterfs versions,
- * so don't error out
- */
- ret = 0;
- }
+
ret = gd_import_friend_volume_rebal_dict(peer_data, count, new_volinfo);
if (ret) {
snprintf(msg, sizeof(msg),
@@ -4289,8 +4639,8 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
goto out;
}
- snprintf(key, sizeof(key), "%s%d", prefix, count);
- ret = gd_import_volume_snap_details(peer_data, new_volinfo, key, volname);
+ ret = gd_import_volume_snap_details(peer_data, new_volinfo, key_prefix,
+ volname);
if (ret) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SNAP_DETAILS_IMPORT_FAIL,
"Failed to import snapshot "
@@ -4312,12 +4662,11 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
* Either both the volume op-versions should be absent or both should be
* present. Only one being present is a failure
*/
- keylen = snprintf(key, sizeof(key), "%s%d.op-version", prefix, count);
+ keylen = snprintf(key, sizeof(key), "%s.op-version", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &op_version);
if (ret)
ret = 0;
- keylen = snprintf(key, sizeof(key), "%s%d.client-op-version", prefix,
- count);
+ keylen = snprintf(key, sizeof(key), "%s.client-op-version", key_prefix);
ret = dict_get_int32n(peer_data, key, keylen, &client_op_version);
if (ret)
ret = 0;
@@ -4336,12 +4685,7 @@ glusterd_import_volinfo(dict_t *peer_data, int count,
new_volinfo->client_op_version = 1;
}
- keylen = snprintf(key, sizeof(key), "%s%d.caps", prefix, count);
- /*This is not present in older glusterfs versions, so ignore ret value*/
- ret = dict_get_int32n(peer_data, key, keylen, &new_volinfo->caps);
-
- keylen = snprintf(key, sizeof(key), "%s%d.quota-xattr-version", prefix,
- count);
+ keylen = snprintf(key, sizeof(key), "%s.quota-xattr-version", key_prefix);
/*This is not present in older glusterfs versions, so ignore ret value*/
ret = dict_get_int32n(peer_data, key, keylen,
&new_volinfo->quota_xattr_version);
@@ -4405,6 +4749,8 @@ glusterd_volinfo_copy_brickinfo(glusterd_volinfo_t *old_volinfo,
{
glusterd_brickinfo_t *new_brickinfo = NULL;
glusterd_brickinfo_t *old_brickinfo = NULL;
+ glusterd_brickinfo_t *new_ta_brickinfo = NULL;
+ glusterd_brickinfo_t *old_ta_brickinfo = NULL;
glusterd_conf_t *priv = NULL;
int ret = 0;
xlator_t *this = NULL;
@@ -4440,10 +4786,56 @@ glusterd_volinfo_copy_brickinfo(glusterd_volinfo_t *old_volinfo,
ret = -1;
goto out;
}
- strncpy(new_brickinfo->real_path, abspath, strlen(abspath));
+ if (strlen(abspath) >= sizeof(new_brickinfo->real_path)) {
+ ret = -1;
+ goto out;
+ }
+ (void)strncpy(new_brickinfo->real_path, abspath,
+ sizeof(new_brickinfo->real_path));
} else {
- strncpy(new_brickinfo->real_path, old_brickinfo->real_path,
- strlen(old_brickinfo->real_path));
+ (void)strncpy(new_brickinfo->real_path,
+ old_brickinfo->real_path,
+ sizeof(new_brickinfo->real_path));
+ }
+ }
+ }
+ if (new_volinfo->thin_arbiter_count == 1) {
+ cds_list_for_each_entry(new_ta_brickinfo, &new_volinfo->ta_bricks,
+ brick_list)
+ {
+ ret = glusterd_volume_ta_brickinfo_get(
+ new_ta_brickinfo->uuid, new_ta_brickinfo->hostname,
+ new_ta_brickinfo->path, old_volinfo, &old_ta_brickinfo);
+ if (ret == 0) {
+ new_ta_brickinfo->port = old_ta_brickinfo->port;
+
+ if (old_ta_brickinfo->real_path[0] == '\0') {
+ if (!realpath(new_ta_brickinfo->path, abspath)) {
+ /* Here an ENOENT should also be a
+ * failure as the brick is expected to
+ * be in existence
+ */
+ gf_msg(this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_BRICKINFO_CREATE_FAIL,
+ "realpath () failed for brick "
+ "%s. The underlying filesystem "
+ "may be in bad state",
+                       new_ta_brickinfo->path);
+ ret = -1;
+ goto out;
+ }
+ if (strlen(abspath) >=
+ sizeof(new_ta_brickinfo->real_path)) {
+ ret = -1;
+ goto out;
+ }
+ (void)strncpy(new_ta_brickinfo->real_path, abspath,
+ sizeof(new_ta_brickinfo->real_path));
+ } else {
+ (void)strncpy(new_ta_brickinfo->real_path,
+ old_ta_brickinfo->real_path,
+ sizeof(new_ta_brickinfo->real_path));
+ }
}
}
}
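The copy logic above replaces unbounded strncpy(dst, src, strlen(src)) calls with an explicit destination-size check, failing rather than truncating. The resolve-and-copy step in isolation (a sketch; error handling trimmed to the essentials):

    #include <limits.h>
    #include <stdlib.h>
    #include <string.h>

    /* Resolve `path` and copy the result into `real`, failing instead of
     * truncating, as the brickinfo copy above does. */
    static int
    resolve_brick_path(const char *path, char *real, size_t realsz)
    {
        char abspath[PATH_MAX];

        if (!realpath(path, abspath))
            return -1; /* ENOENT is a failure: the brick must exist */
        if (strlen(abspath) >= realsz)
            return -1; /* would truncate */
        memcpy(real, abspath, strlen(abspath) + 1);
        return 0;
    }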
@@ -4471,10 +4863,11 @@ glusterd_volinfo_stop_stale_bricks(glusterd_volinfo_t *new_volinfo,
old_brickinfo->uuid, old_brickinfo->hostname, old_brickinfo->path,
new_volinfo, &new_brickinfo);
     /* If the brick is stale, i.e. it's not a part of the new volume
-     * or if it's part of the new volume and is pending a snap,
-     * then stop the brick process
+     * or if it's part of the new volume and is pending a snap, or if
+     * brick multiplexing is enabled, then stop the brick process
*/
- if (ret || (new_brickinfo->snap_status == -1)) {
+ if (ret || (new_brickinfo->snap_status == -1) ||
+ GF_ATOMIC_GET(old_volinfo->volpeerupdate)) {
/*TODO: may need to switch to 'atomic' flavour of
* brick_stop, once we make peer rpc program also
* synctask enabled*/
@@ -4566,6 +4959,9 @@ glusterd_delete_stale_volume(glusterd_volinfo_t *stale_volinfo,
svc = &(stale_volinfo->snapd.svc);
(void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT);
}
+ svc = &(stale_volinfo->shd.svc);
+ (void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT);
+
(void)glusterd_volinfo_remove(stale_volinfo);
return 0;
@@ -4612,14 +5008,14 @@ gd_check_and_update_rebalance_info(glusterd_volinfo_t *old_volinfo,
new->rebalance_time = old->rebalance_time;
/* glusterd_rebalance_t.{op, id, defrag_cmd} are copied during volume
- * import
- * a new defrag object should come to life with rebalance being restarted
+     * import; a new defrag object should come to life with rebalance being
+     * restarted
*/
out:
return ret;
}
-int32_t
+static int32_t
glusterd_import_friend_volume(dict_t *peer_data, int count)
{
int32_t ret = -1;
@@ -4640,8 +5036,15 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
ret = snprintf(key, sizeof(key), "volume%d.update", count);
ret = dict_get_int32n(peer_data, key, ret, &update);
- if (ret || !update) {
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
+ goto out;
+ }
+
+ if (!update) {
/* if update is 0 that means the volume is not imported */
+ gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_VOLUME_NOT_IMPORTED, NULL);
goto out;
}
@@ -4680,16 +5083,6 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
glusterd_volinfo_unref(old_volinfo);
}
- if (glusterd_is_volume_started(new_volinfo)) {
- (void)glusterd_start_bricks(new_volinfo);
- if (glusterd_is_snapd_enabled(new_volinfo)) {
- svc = &(new_volinfo->snapd.svc);
- if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) {
- gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
- }
- }
- }
-
ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
@@ -4699,19 +5092,35 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
goto out;
}
- ret = glusterd_create_volfiles_and_notify_services(new_volinfo);
+ ret = glusterd_create_volfiles(new_volinfo);
if (ret)
goto out;
+ glusterd_list_add_order(&new_volinfo->vol_list, &priv->volumes,
+ glusterd_compare_volume_name);
+
+ if (glusterd_is_volume_started(new_volinfo)) {
+ (void)glusterd_start_bricks(new_volinfo);
+ if (glusterd_is_snapd_enabled(new_volinfo)) {
+ svc = &(new_volinfo->snapd.svc);
+ if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) {
+ gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+ }
+ }
+ svc = &(new_volinfo->shd.svc);
+ if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) {
+ gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+ }
+ }
+
ret = glusterd_import_quota_conf(peer_data, count, new_volinfo, "volume");
if (ret) {
gf_event(EVENT_IMPORT_QUOTA_CONF_FAILED, "volume=%s",
new_volinfo->volname);
goto out;
}
- glusterd_list_add_order(&new_volinfo->vol_list, &priv->volumes,
- glusterd_compare_volume_name);
+ ret = glusterd_fetchspec_notify(this);
out:
gf_msg_debug("glusterd", 0, "Returning with ret: %d", ret);
return ret;
@@ -4726,6 +5135,7 @@ glusterd_import_friend_volumes_synctask(void *opaque)
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
dict_t *peer_data = NULL;
+ glusterd_friend_synctask_args_t *arg = NULL;
this = THIS;
GF_ASSERT(this);
@@ -4733,12 +5143,30 @@ glusterd_import_friend_volumes_synctask(void *opaque)
conf = this->private;
GF_ASSERT(conf);
- peer_data = (dict_t *)opaque;
- GF_ASSERT(peer_data);
+ arg = opaque;
+ if (!arg)
+ goto out;
+
+ peer_data = dict_new();
+ if (!peer_data) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
+
+ ret = dict_unserialize(arg->dict_buf, arg->dictlen, &peer_data);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
+ errno = ENOMEM;
+ goto out;
+ }
ret = dict_get_int32n(peer_data, "count", SLEN("count"), &count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
synclock_lock(&conf->big_lock);
@@ -4747,25 +5175,30 @@ glusterd_import_friend_volumes_synctask(void *opaque)
* restarted (refer glusterd_restart_bricks ())
*/
while (conf->restart_bricks) {
- synclock_unlock(&conf->big_lock);
- sleep(2);
- synclock_lock(&conf->big_lock);
+ synccond_wait(&conf->cond_restart_bricks, &conf->big_lock);
}
conf->restart_bricks = _gf_true;
while (i <= count) {
ret = glusterd_import_friend_volume(peer_data, i);
if (ret) {
- conf->restart_bricks = _gf_false;
- goto out;
+ break;
}
i++;
}
- glusterd_svcs_manager(NULL);
+ if (i > count) {
+ glusterd_svcs_manager(NULL);
+ }
conf->restart_bricks = _gf_false;
+ synccond_broadcast(&conf->cond_restart_bricks);
out:
if (peer_data)
dict_unref(peer_data);
+ if (arg) {
+ if (arg->dict_buf)
+ GF_FREE(arg->dict_buf);
+ GF_FREE(arg);
+ }
gf_msg_debug("glusterd", 0, "Returning with %d", ret);
return ret;
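Both this synctask and glusterd_restart_bricks now serialize on conf->restart_bricks with synccond_wait/synccond_broadcast instead of a drop-lock/sleep/retake polling loop. The same gate expressed with plain pthreads (illustrative; the synctask primitives themselves are glusterd-internal):

    #include <pthread.h>
    #include <stdbool.h>

    struct conf {
        pthread_mutex_t big_lock;
        pthread_cond_t cond_restart_bricks;
        bool restart_bricks;
    };

    static void
    enter_restart_section(struct conf *conf)
    {
        pthread_mutex_lock(&conf->big_lock);
        while (conf->restart_bricks) /* wait instead of sleep-polling */
            pthread_cond_wait(&conf->cond_restart_bricks, &conf->big_lock);
        conf->restart_bricks = true;
        pthread_mutex_unlock(&conf->big_lock);
    }

    static void
    leave_restart_section(struct conf *conf)
    {
        pthread_mutex_lock(&conf->big_lock);
        conf->restart_bricks = false;
        pthread_cond_broadcast(&conf->cond_restart_bricks);
        pthread_mutex_unlock(&conf->big_lock);
    }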
@@ -4781,8 +5214,11 @@ glusterd_import_friend_volumes(dict_t *peer_data)
GF_ASSERT(peer_data);
ret = dict_get_int32n(peer_data, "count", SLEN("count"), &count);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
while (i <= count) {
ret = glusterd_import_friend_volume(peer_data, i);
@@ -4801,11 +5237,16 @@ glusterd_get_global_server_quorum_ratio(dict_t *opts, double *quorum)
{
int ret = -1;
char *quorum_str = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = dict_get_strn(opts, GLUSTERD_QUORUM_RATIO_KEY,
SLEN(GLUSTERD_QUORUM_RATIO_KEY), &quorum_str);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", GLUSTERD_QUORUM_RATIO_KEY, NULL);
goto out;
+ }
ret = gf_string2percent(quorum_str, quorum);
if (ret)
@@ -4820,11 +5261,16 @@ glusterd_get_global_opt_version(dict_t *opts, uint32_t *version)
{
int ret = -1;
char *version_str = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = dict_get_strn(opts, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), &version_str);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
+ }
ret = gf_string2uint(version_str, version);
if (ret)
@@ -4873,13 +5319,17 @@ glusterd_import_global_opts(dict_t *friend_data)
SLEN("global-opt-count"), &count);
if (ret) {
// old version peer
+ gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=global-opt-count", NULL);
ret = 0;
goto out;
}
import_options = dict_new();
- if (!import_options)
+ if (!import_options) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = import_prdict_dict(friend_data, import_options, "key", "val", count,
"global");
if (ret) {
@@ -4932,7 +5382,7 @@ glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
gf_boolean_t update = _gf_false;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
- dict_t *peer_data_copy = NULL;
+ glusterd_friend_synctask_args_t *arg = NULL;
this = THIS;
GF_ASSERT(this);
@@ -4950,8 +5400,11 @@ glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
}
ret = dict_get_int32n(peer_data, "count", SLEN("count"), &count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
while (i <= count) {
ret = glusterd_compare_friend_volume(peer_data, i, status, hostname);
@@ -4974,12 +5427,23 @@ glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
* first brick to come up before attaching the subsequent bricks
* in case brick multiplexing is enabled
*/
- peer_data_copy = dict_copy_with_ref(peer_data, NULL);
- glusterd_launch_synctask(glusterd_import_friend_volumes_synctask,
- peer_data_copy);
+        arg = GF_CALLOC(1, sizeof(*arg), gf_common_mt_char);
+        if (!arg) {
+            ret = -1;
+            goto out;
+        }
+        ret = dict_allocate_and_serialize(peer_data, &arg->dict_buf,
+                                          &arg->dictlen);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+               "dict_serialize failed while handling import friend "
+               "volume request");
+ goto out;
+ }
+
+ glusterd_launch_synctask(glusterd_import_friend_volumes_synctask, arg);
}
out:
+ if (ret && arg) {
+ GF_FREE(arg);
+ }
gf_msg_debug(this->name, 0, "Returning with ret: %d, status: %d", ret,
*status);
return ret;
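Rather than handing the synctask a reference to the live peer_data dict, the code above serializes it into a heap buffer that the worker unserializes and frees. The same ownership-transfer shape with pthreads and a raw byte buffer (names are illustrative):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct task_arg {
        char *buf;  /* serialized payload, owned by the worker */
        size_t len;
    };

    static void *
    worker(void *opaque)
    {
        struct task_arg *arg = opaque;

        /* ... unserialize arg->buf / arg->len into a private copy ... */
        free(arg->buf); /* the worker owns and releases the handoff buffer */
        free(arg);
        return NULL;
    }

    static int
    launch(const char *payload, size_t len)
    {
        pthread_t t;
        struct task_arg *arg = calloc(1, sizeof(*arg));

        if (!arg)
            return -1;
        arg->buf = malloc(len);
        if (!arg->buf) {
            free(arg);
            return -1;
        }
        memcpy(arg->buf, payload, len);
        arg->len = len;
        if (pthread_create(&t, NULL, worker, arg) != 0) {
            free(arg->buf);
            free(arg);
            return -1;
        }
        pthread_detach(t);
        return 0;
    }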
@@ -5046,9 +5510,6 @@ glusterd_pending_node_get_rpc(glusterd_pending_node_t *pending_node)
} else if (pending_node->type == GD_NODE_SNAPD) {
volinfo = pending_node->node;
rpc = volinfo->snapd.svc.conn.rpc;
- } else if (pending_node->type == GD_NODE_TIERD) {
- volinfo = pending_node->node;
- rpc = volinfo->tierd.svc.conn.rpc;
} else {
GF_ASSERT(0);
}
@@ -5068,10 +5529,6 @@ glusterd_pending_node_put_rpc(glusterd_pending_node_t *pending_node)
glusterd_defrag_rpc_put(volinfo->rebal.defrag);
break;
- case GD_NODE_TIERD:
- volinfo = pending_node->node;
- glusterd_defrag_rpc_put(volinfo->tier.defrag);
- break;
default:
break;
}
@@ -5163,15 +5620,20 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
priv = this->private;
GF_ASSERT(priv);
+ if (!strcmp(server, "")) {
+ ret = 0;
+ goto out;
+ }
+
glusterd_svc_build_pidfile_path(server, priv->rundir, pidfile,
sizeof(pidfile));
- if (strcmp(server, priv->shd_svc.name) == 0)
- svc = &(priv->shd_svc);
+ if (strcmp(server, priv->quotad_svc.name) == 0)
+ svc = &(priv->quotad_svc);
+#ifdef BUILD_GNFS
else if (strcmp(server, priv->nfs_svc.name) == 0)
svc = &(priv->nfs_svc);
- else if (strcmp(server, priv->quotad_svc.name) == 0)
- svc = &(priv->quotad_svc);
+#endif
else if (strcmp(server, priv->bitd_svc.name) == 0)
svc = &(priv->bitd_svc);
else if (strcmp(server, priv->scrub_svc.name) == 0)
@@ -5196,29 +5658,35 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
*/
keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
- if (!strcmp(server, priv->nfs_svc.name))
- ret = dict_set_nstrn(dict, key, keylen, "NFS Server",
- SLEN("NFS Server"));
- else if (!strcmp(server, priv->shd_svc.name))
- ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
- SLEN("Self-heal Daemon"));
- else if (!strcmp(server, priv->quotad_svc.name))
+ if (!strcmp(server, priv->quotad_svc.name))
ret = dict_set_nstrn(dict, key, keylen, "Quota Daemon",
SLEN("Quota Daemon"));
+#ifdef BUILD_GNFS
+ else if (!strcmp(server, priv->nfs_svc.name))
+ ret = dict_set_nstrn(dict, key, keylen, "NFS Server",
+ SLEN("NFS Server"));
+#endif
else if (!strcmp(server, priv->bitd_svc.name))
ret = dict_set_nstrn(dict, key, keylen, "Bitrot Daemon",
SLEN("Bitrot Daemon"));
else if (!strcmp(server, priv->scrub_svc.name))
ret = dict_set_nstrn(dict, key, keylen, "Scrubber Daemon",
SLEN("Scrubber Daemon"));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "brick%d.path", count);
ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(uuid_utoa(MY_UUID)));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
+#ifdef BUILD_GNFS
/* Port is available only for the NFS server.
* Self-heal daemon doesn't provide any port for access
* by entities other than gluster.
@@ -5227,25 +5695,38 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
if (dict_getn(vol_opts, "nfs.port", SLEN("nfs.port"))) {
ret = dict_get_int32n(vol_opts, "nfs.port", SLEN("nfs.port"),
&port);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=nfs.port", NULL);
goto out;
+ }
} else
port = GF_NFS3_PORT;
}
+#endif
keylen = snprintf(key, sizeof(key), "brick%d.port", count);
ret = dict_set_int32n(dict, key, keylen, port);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
ret = dict_set_int32n(dict, key, keylen, pid);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "brick%d.status", count);
ret = dict_set_int32n(dict, key, keylen, running);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
out:
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
@@ -5282,7 +5763,7 @@ glusterd_remote_hostname_get(rpcsvc_request_t *req, char *remote_host, int len)
tmp_host = hostname = canon;
}
- strncpy(remote_host, hostname, strlen(hostname));
+ (void)snprintf(remote_host, len, "%s", hostname);
out:
GF_FREE(tmp_host);
@@ -5427,9 +5908,9 @@ my_callback(struct rpc_req *req, struct iovec *iov, int count, void *v_frame)
call_frame_t *frame = v_frame;
glusterd_conf_t *conf = frame->this->private;
- synclock_lock(&conf->big_lock);
- --(conf->blockers);
- synclock_unlock(&conf->big_lock);
+ if (GF_ATOMIC_DEC(conf->blockers) == 0) {
+ synccond_broadcast(&conf->cond_blockers);
+ }
STACK_DESTROY(frame->root);
return 0;
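conf->blockers becomes a GF_ATOMIC counter, so RPC callbacks can decrement it without taking big_lock; whoever drops it to zero wakes the waiters. A C11 sketch of that count-to-zero wakeup (note GF_ATOMIC_DEC returns the new value, while atomic_fetch_sub returns the old one):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int blockers = 0;
    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond_blockers = PTHREAD_COND_INITIALIZER;

    static void
    blocker_done(void)
    {
        /* fetch_sub returns the previous value; previous == 1 means we
         * were the last in-flight blocker. */
        if (atomic_fetch_sub(&blockers, 1) == 1) {
            pthread_mutex_lock(&lk);
            pthread_cond_broadcast(&cond_blockers);
            pthread_mutex_unlock(&lk);
        }
    }

    static void
    wait_for_blockers(void)
    {
        pthread_mutex_lock(&lk);
        while (atomic_load(&blockers) > 0)
            pthread_cond_wait(&cond_blockers, &lk);
        pthread_mutex_unlock(&lk);
    }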
@@ -5496,7 +5977,13 @@ attach_brick_callback(struct rpc_req *req, struct iovec *iov, int count,
/* PID file is copied once brick has attached
successfully
*/
- glusterd_copy_file(pidfile1, pidfile2);
+ ret = glusterd_copy_file(pidfile1, pidfile2);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Could not copy file %s to %s", pidfile1, pidfile2);
+ goto out;
+ }
+
brickinfo->status = GF_BRICK_STARTED;
brickinfo->rpc = rpc_clnt_ref(other_brick->rpc);
gf_log(THIS->name, GF_LOG_INFO, "brick %s is attached successfully",
@@ -5525,9 +6012,9 @@ attach_brick_callback(struct rpc_req *req, struct iovec *iov, int count,
}
}
out:
- synclock_lock(&conf->big_lock);
- --(conf->blockers);
- synclock_unlock(&conf->big_lock);
+ if (GF_ATOMIC_DEC(conf->blockers) == 0) {
+ synccond_broadcast(&conf->cond_blockers);
+ }
STACK_DESTROY(frame->root);
return 0;
}
@@ -5568,6 +6055,8 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path,
brick_req.name = path;
brick_req.input.input_val = NULL;
brick_req.input.input_len = 0;
+ brick_req.dict.dict_val = NULL;
+ brick_req.dict.dict_len = 0;
req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
@@ -5581,12 +6070,15 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path,
iobref = iobref_new();
if (!iobref) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto *errlbl;
}
errlbl = &&free_iobref;
frame = create_frame(this, this->ctx->pool);
if (!frame) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL,
+ NULL);
goto *errlbl;
}
@@ -5614,10 +6106,9 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path,
cbkfn = attach_brick_callback;
}
/* Send the msg */
- ++(conf->blockers);
+ GF_ATOMIC_INC(conf->blockers);
ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
iobref, frame, NULL, 0, NULL, 0, NULL);
- return ret;
free_iobref:
iobref_unref(iobref);
@@ -5626,12 +6117,12 @@ maybe_free_iobuf:
iobuf_unref(iobuf);
}
err:
- return -1;
+ return ret;
}
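send_attach_req unwinds with GCC's labels-as-values: errlbl starts at the bare-return label and is advanced past each successfully acquired resource, so a single goto *errlbl always releases exactly what has been taken so far. The change above also drops the early return after rpc_clnt_submit, letting the success path fall through the same cleanup. A compact sketch of the idiom (GCC/Clang extension; the resources here are illustrative):

    #include <stdlib.h>

    static int
    acquire_and_use(void)
    {
        void *errlbl = &&err; /* GCC/Clang extension: labels as values */
        char *a = NULL, *b = NULL;
        int ret = -1;

        a = malloc(16);
        if (!a)
            goto *errlbl;
        errlbl = &&free_a; /* any later failure must now release a */

        b = malloc(16);
        if (!b)
            goto *errlbl;

        ret = 0; /* success falls through the same cleanup, like the hunk
                  * above after rpc_clnt_submit */
        free(b);
    free_a:
        free(a);
    err:
        return ret;
    }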
extern size_t
build_volfile_path(char *volume_id, char *path, size_t path_len,
- char *trusted_str);
+ char *trusted_str, dict_t *dict);
static int
attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
@@ -5639,8 +6130,6 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
glusterd_volinfo_t *other_vol)
{
glusterd_conf_t *conf = this->private;
- char pidfile1[PATH_MAX] = "";
- char pidfile2[PATH_MAX] = "";
char unslashed[PATH_MAX] = {
'\0',
};
@@ -5660,9 +6149,6 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
GLUSTERD_REMOVE_SLASH_FROM_PATH(brickinfo->path, unslashed);
- GLUSTERD_GET_BRICK_PIDFILE(pidfile1, other_vol, other_brick, conf);
- GLUSTERD_GET_BRICK_PIDFILE(pidfile2, volinfo, brickinfo, conf);
-
if (volinfo->is_snap_volume) {
len = snprintf(full_id, sizeof(full_id), "/%s/%s/%s/%s.%s.%s",
GLUSTERD_VOL_SNAP_DIR_PREFIX,
@@ -5676,7 +6162,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
goto out;
}
- (void)build_volfile_path(full_id, path, sizeof(path), NULL);
+ (void)build_volfile_path(full_id, path, sizeof(path), NULL, NULL);
for (tries = 15; tries > 0; --tries) {
rpc = rpc_clnt_ref(other_brick->rpc);
@@ -5693,7 +6179,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
goto out;
}
brickinfo->port = other_brick->port;
- ret = glusterd_brick_process_add_brick(brickinfo);
+ ret = glusterd_brick_process_add_brick(brickinfo, other_brick);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0,
GD_MSG_BRICKPROC_ADD_BRICK_FAILED,
@@ -5715,7 +6201,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
* TBD: see if there's a better way
*/
synclock_unlock(&conf->big_lock);
- sleep(1);
+ synctask_sleep(1);
synclock_lock(&conf->big_lock);
}
@@ -5855,7 +6341,7 @@ find_compat_brick_in_vol(glusterd_conf_t *conf,
"brick %s is still"
" starting, waiting for 2 seconds ",
other_brick->path);
- sleep(2);
+ synctask_sleep(2);
synclock_lock(&conf->big_lock);
retries--;
}
@@ -5959,7 +6445,6 @@ find_compatible_brick(glusterd_conf_t *conf, glusterd_volinfo_t *volinfo,
int
glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
{
- char fname[128] = "";
char buf[1024] = "";
char cmdline[2048] = "";
xlator_t *this = NULL;
@@ -5974,6 +6459,22 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
this = THIS;
GF_ASSERT(this);
+#ifdef __FreeBSD__
+ blen = sizeof(buf);
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_ARGS;
+ mib[3] = pid;
+
+    if (sys_sysctl(mib, 4, buf, &blen, NULL, 0) != 0) {
+ gf_log(this->name, GF_LOG_ERROR, "brick process %d is not running",
+ pid);
+ return ret;
+ }
+#else
+ char fname[128] = "";
snprintf(fname, sizeof(fname), "/proc/%d/cmdline", pid);
if (sys_access(fname, R_OK) != 0) {
@@ -5990,6 +6491,7 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
strerror(errno), fname);
return ret;
}
+#endif
/* convert cmdline to single string */
for (i = 0, j = 0; i < blen; i++) {
@@ -6038,6 +6540,43 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
char *
search_brick_path_from_proc(pid_t brick_pid, char *brickpath)
{
+ char *brick_path = NULL;
+#ifdef __FreeBSD__
+    struct filestat *fst = NULL;
+    struct procstat *ps = NULL;
+    struct kinfo_proc *kp = NULL;
+    struct filestat_list *head = NULL;
+
+ ps = procstat_open_sysctl();
+ if (ps == NULL)
+ goto out;
+
+ kp = kinfo_getproc(brick_pid);
+ if (kp == NULL)
+ goto out;
+
+ head = procstat_getfiles(ps, (void *)kp, 0);
+ if (head == NULL)
+ goto out;
+
+ STAILQ_FOREACH(fst, head, next)
+ {
+ if (fst->fs_fd < 0)
+ continue;
+
+ if (!strcmp(fst->fs_path, brickpath)) {
+ brick_path = gf_strdup(fst->fs_path);
+ break;
+ }
+ }
+
+out:
+ if (head != NULL)
+ procstat_freefiles(ps, head);
+ if (kp != NULL)
+ free(kp);
+    if (ps != NULL)
+        procstat_close(ps);
+#else
struct dirent *dp = NULL;
DIR *dirp = NULL;
size_t len = 0;
@@ -6048,7 +6587,6 @@ search_brick_path_from_proc(pid_t brick_pid, char *brickpath)
0,
},
};
- char *brick_path = NULL;
if (!brickpath)
goto out;
@@ -6084,7 +6622,9 @@ search_brick_path_from_proc(pid_t brick_pid, char *brickpath)
}
}
out:
- sys_closedir(dirp);
+ if (dirp)
+ sys_closedir(dirp);
+#endif
return brick_path;
}
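On Linux this lookup walks /proc/<pid>/fd, resolving each entry with readlink to see whether the brick path is among the process's open files; the new FreeBSD branch gets the same answer via procstat. A standalone sketch of that kind of walk (simplified; not the exact glusterd routine):

    #include <dirent.h>
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Return 1 if `target` is among the files pid has open, else 0. */
    static int
    pid_has_open(pid_t pid, const char *target)
    {
        char fddir[64], lnk[PATH_MAX], path[PATH_MAX];
        struct dirent *dp;
        DIR *dirp;
        ssize_t n;
        int found = 0;

        snprintf(fddir, sizeof(fddir), "/proc/%d/fd", (int)pid);
        dirp = opendir(fddir);
        if (!dirp)
            return 0;

        while ((dp = readdir(dirp)) != NULL) {
            if (dp->d_name[0] == '.')
                continue;
            snprintf(lnk, sizeof(lnk), "%s/%s", fddir, dp->d_name);
            n = readlink(lnk, path, sizeof(path) - 1);
            if (n < 0)
                continue;
            path[n] = '\0'; /* readlink does not NUL-terminate */
            if (strcmp(path, target) == 0) {
                found = 1;
                break;
            }
        }
        closedir(dirp);
        return found;
    }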
@@ -6112,8 +6652,10 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo,
GF_ASSERT(this);
conf = this->private;
- if ((!brickinfo) || (!volinfo))
+ if ((!brickinfo) || (!volinfo)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
if (gf_uuid_is_null(brickinfo->uuid)) {
ret = glusterd_resolve_brick(brickinfo);
@@ -6138,7 +6680,8 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo,
* three different triggers for an attempt to start the brick process
* due to the quorum handling code in glusterd_friend_sm.
*/
- if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered) {
+ if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered ||
+ GF_ATOMIC_GET(volinfo->volpeerupdate)) {
gf_msg_debug(this->name, 0,
"brick %s is already in starting "
"phase",
@@ -6233,7 +6776,7 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo,
(void)glusterd_brick_connect(volinfo, brickinfo, socketpath);
- ret = glusterd_brick_process_add_brick(brickinfo);
+ ret = glusterd_brick_process_add_brick(brickinfo, NULL);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0,
GD_MSG_BRICKPROC_ADD_BRICK_FAILED,
@@ -6337,18 +6880,18 @@ glusterd_restart_bricks(void *opaque)
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, return_block);
+ synclock_lock(&conf->big_lock);
+
/* We need to ensure that restarting the bricks during glusterd restart
* shouldn't race with the import volume thread (refer
* glusterd_compare_friend_data ())
*/
while (conf->restart_bricks) {
- synclock_unlock(&conf->big_lock);
- sleep(2);
- synclock_lock(&conf->big_lock);
+ synccond_wait(&conf->cond_restart_bricks, &conf->big_lock);
}
conf->restart_bricks = _gf_true;
- ++(conf->blockers);
+ GF_ATOMIC_INC(conf->blockers);
ret = glusterd_get_quorum_cluster_counts(this, &active_count,
&quorum_count);
if (ret)
@@ -6434,6 +6977,7 @@ glusterd_restart_bricks(void *opaque)
if (!brickinfo->start_triggered) {
pthread_mutex_lock(&brickinfo->restart_mutex);
{
+ /* coverity[SLEEP] */
glusterd_brick_start(volinfo, brickinfo, _gf_false,
_gf_false);
}
@@ -6457,9 +7001,12 @@ glusterd_restart_bricks(void *opaque)
ret = 0;
out:
- --(conf->blockers);
conf->restart_done = _gf_true;
conf->restart_bricks = _gf_false;
+ if (GF_ATOMIC_DEC(conf->blockers) == 0) {
+ synccond_broadcast(&conf->cond_blockers);
+ }
+ synccond_broadcast(&conf->cond_restart_bricks);
return_block:
return ret;
@@ -6475,7 +7022,6 @@ _local_gsyncd_start(dict_t *this, char *key, data_t *value, void *data)
char *slave_host = NULL;
char *statefile = NULL;
char buf[1024] = "faulty";
- int uuid_len = 0;
int ret = 0;
int op_ret = 0;
int ret_status = 0;
@@ -6501,9 +7047,8 @@ _local_gsyncd_start(dict_t *this, char *key, data_t *value, void *data)
slave++;
else
return 0;
- uuid_len = (slave - value->data - 1);
- strncpy(uuid_str, (char *)value->data, uuid_len);
+    (void)snprintf(uuid_str, sizeof(uuid_str), "%.*s",
+                   (int)(slave - value->data - 1), (char *)value->data);
/* Getting Local Brickpaths */
ret = glusterd_get_local_brickpaths(volinfo, &path_list);
@@ -6634,7 +7179,8 @@ out:
ret = op_ret;
}
}
-
+ if (slave_vol)
+ GF_FREE(slave_vol);
GF_FREE(path_list);
GF_FREE(op_errstr);
@@ -6798,22 +7344,26 @@ glusterd_get_brick_root(char *path, char **mount_point)
char *mnt_pt = NULL;
struct stat brickstat = {0};
struct stat buf = {0};
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!path)
+ if (!path) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto err;
+ }
mnt_pt = gf_strdup(path);
- if (!mnt_pt)
+ if (!mnt_pt) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto err;
+ }
if (sys_stat(mnt_pt, &brickstat))
goto err;
while ((ptr = strrchr(mnt_pt, '/')) && ptr != mnt_pt) {
*ptr = '\0';
if (sys_stat(mnt_pt, &buf)) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
- "error in "
- "stat: %s",
- strerror(errno));
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "Error in stat=%s", strerror(errno), NULL);
goto err;
}
@@ -6825,10 +7375,8 @@ glusterd_get_brick_root(char *path, char **mount_point)
if (ptr == mnt_pt) {
if (sys_stat("/", &buf)) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
- "error in "
- "stat: %s",
- strerror(errno));
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "Error in stat=%s", strerror(errno), NULL);
goto err;
}
if (brickstat.st_dev == buf.st_dev)
@@ -6882,7 +7430,7 @@ static int
glusterd_add_inode_size_to_dict(dict_t *dict, int count)
{
int ret = -1;
- char key[1024] = "";
+ char key[64];
char buffer[4096] = "";
char *device = NULL;
char *fs_name = NULL;
@@ -6893,11 +7441,16 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count)
};
struct fs_info *fs = NULL;
static dict_t *cached_fs = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = snprintf(key, sizeof(key), "brick%d.device", count);
ret = dict_get_strn(dict, key, ret, &device);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
if (cached_fs) {
if (dict_get_str(cached_fs, device, &cur_word) == 0) {
@@ -6909,8 +7462,11 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count)
ret = snprintf(key, sizeof(key), "brick%d.fs_name", count);
ret = dict_get_strn(dict, key, ret, &fs_name);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
runinit(&runner);
runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
@@ -6919,11 +7475,9 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count)
if (strcmp(fs_name, fs->fs_type_name) == 0) {
if (!fs->fs_tool_name) {
/* dynamic inodes */
- gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_INODE_SIZE_GET_FAIL,
- "the "
- "brick on %s (%s) uses dynamic inode "
- "sizes",
- device, fs_name);
+ gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_INODE_SIZE_GET_FAIL,
+ "The brick on device uses dynamic inode sizes",
+ "Device=%s (%s)", device, fs_name, NULL);
cur_word = "N/A";
goto cached;
}
@@ -6937,19 +7491,17 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count)
runner_add_arg(&runner, fs->fs_tool_arg);
runner_add_arg(&runner, device);
} else {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL,
- "could not find %s to get"
- "inode size for %s (%s): %s package missing?",
- fs->fs_tool_name, device, fs_name, fs->fs_tool_pkg);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL,
+ "Could not find tool to get inode size for device", "Tool=%s",
+ fs->fs_tool_name, "Device=%s (%s)", device, fs_name,
+ "Missing package=%s ?", fs->fs_tool_pkg, NULL);
goto out;
}
ret = runner_start(&runner);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_CMD_EXEC_FAIL,
- "failed to execute "
- "\"%s\"",
- fs->fs_tool_name);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_CMD_EXEC_FAIL,
+ "Failed to execute \"%s\"", fs->fs_tool_name, NULL);
/*
* Runner_start might return an error after the child has
* been forked, e.g. if the program isn't there. In that
@@ -6977,21 +7529,22 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count)
ret = runner_end(&runner);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_CMD_EXEC_FAIL,
- "%s exited with non-zero exit status", fs->fs_tool_name);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_CMD_EXEC_FAIL,
+ "Tool exited with non-zero exit status", "Tool=%s",
+ fs->fs_tool_name, NULL);
goto out;
}
if (!cur_word) {
ret = -1;
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL,
- "Unable to retrieve inode size using %s", fs->fs_tool_name);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL,
+ "Using Tool=%s", fs->fs_tool_name, NULL);
goto out;
}
if (dict_set_dynstr_with_alloc(cached_fs, device, cur_word)) {
/* not fatal if not entered into the cache */
- gf_msg_debug(THIS->name, 0, "failed to cache fs inode size for %s",
+ gf_msg_debug(this->name, 0, "failed to cache fs inode size for %s",
device);
}
@@ -7002,8 +7555,7 @@ cached:
out:
if (ret)
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL,
- "failed to get inode size");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL, NULL);
return ret;
}
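The inode-size probe launches the filesystem's tune tool through glusterd's runner API, scans its stdout for the inode-size line, and caches the answer per device; a non-zero exit status fails the probe. A simplified equivalent using popen (the runner API is glusterd-internal; the tool invocation and parsing here are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Run `cmd` and copy the first output line containing `needle`
     * into `out`; non-zero exit status is treated as failure. */
    static int
    probe_tool(const char *cmd, const char *needle, char *out, size_t outsz)
    {
        char line[512];
        FILE *fp = popen(cmd, "r");
        int ret = -1;

        if (!fp)
            return -1;
        while (fgets(line, sizeof(line), fp)) {
            if (ret != 0 && strstr(line, needle)) {
                snprintf(out, outsz, "%s", line);
                ret = 0;
            }
            /* keep draining so pclose() sees a clean exit */
        }
        if (pclose(fp) != 0)
            ret = -1; /* tool exited with non-zero status */
        return ret;
    }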
@@ -7053,22 +7605,29 @@ glusterd_add_brick_mount_details(glusterd_brickinfo_t *brickinfo, dict_t *dict,
int count)
{
int ret = -1;
- char key[1024] = "";
+ char key[64] = "";
char buff[PATH_MAX] = "";
char base_key[32] = "";
struct mntent save_entry = {0};
char *mnt_pt = NULL;
struct mntent *entry = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
snprintf(base_key, sizeof(base_key), "brick%d", count);
ret = glusterd_get_brick_root(brickinfo->path, &mnt_pt);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICKPATH_ROOT_GET_FAIL,
+ NULL);
goto out;
+ }
entry = glusterd_get_mnt_entry_info(mnt_pt, buff, sizeof(buff),
&save_entry);
if (!entry) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GET_MNT_ENTRY_INFO_FAIL,
+ NULL);
ret = -1;
goto out;
}
@@ -7077,15 +7636,21 @@ glusterd_add_brick_mount_details(glusterd_brickinfo_t *brickinfo, dict_t *dict,
snprintf(key, sizeof(key), "%s.device", base_key);
ret = dict_set_dynstr_with_alloc(dict, key, entry->mnt_fsname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
/* fs type */
snprintf(key, sizeof(key), "%s.fs_name", base_key);
ret = dict_set_dynstr_with_alloc(dict, key, entry->mnt_type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
/* mount options */
snprintf(key, sizeof(key), "%s.mnt_options", base_key);
@@ -7156,8 +7721,8 @@ glusterd_add_brick_detail_to_dict(glusterd_volinfo_t *volinfo,
uint64_t inodes_total = 0;
uint64_t inodes_free = 0;
uint64_t block_size = 0;
- char key[1024 + 16] = "";
- char base_key[1024] = "";
+ char key[64];
+ char base_key[32];
struct statvfs brickstat = {0};
xlator_t *this = NULL;
@@ -7179,43 +7744,61 @@ glusterd_add_brick_detail_to_dict(glusterd_volinfo_t *volinfo,
block_size = brickstat.f_bsize;
snprintf(key, sizeof(key), "%s.block_size", base_key);
ret = dict_set_uint64(dict, key, block_size);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
/* free space in brick */
memfree = brickstat.f_bfree * brickstat.f_bsize;
snprintf(key, sizeof(key), "%s.free", base_key);
ret = dict_set_uint64(dict, key, memfree);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
/* total space of brick */
memtotal = brickstat.f_blocks * brickstat.f_bsize;
snprintf(key, sizeof(key), "%s.total", base_key);
ret = dict_set_uint64(dict, key, memtotal);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
/* inodes: total and free counts only for ext2/3/4 and xfs */
inodes_total = brickstat.f_files;
if (inodes_total) {
snprintf(key, sizeof(key), "%s.total_inodes", base_key);
ret = dict_set_uint64(dict, key, inodes_total);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
}
inodes_free = brickstat.f_ffree;
if (inodes_free) {
snprintf(key, sizeof(key), "%s.free_inodes", base_key);
ret = dict_set_uint64(dict, key, inodes_free);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
}
ret = glusterd_add_brick_mount_details(brickinfo, dict, count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_ADD_BRICK_MNT_INFO_FAIL,
+ NULL);
goto out;
+ }
ret = glusterd_add_inode_size_to_dict(dict, count);
out:
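The detail hunk above computes block size, free space, and total capacity from a statvfs of the brick path and stores each under a brick<N>.* key. The arithmetic in isolation (glusterd multiplies by f_bsize; POSIX defines these counts in f_frsize units, which equals f_bsize on common Linux filesystems):

    #include <stdint.h>
    #include <sys/statvfs.h>

    static int
    brick_capacity(const char *path, uint64_t *free_bytes,
                   uint64_t *total_bytes)
    {
        struct statvfs st;

        if (statvfs(path, &st) != 0)
            return -1;
        /* f_bfree/f_blocks are counts of filesystem blocks */
        *free_bytes = (uint64_t)st.f_bfree * st.f_bsize;
        *total_bytes = (uint64_t)st.f_blocks * st.f_bsize;
        return 0;
    }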
@@ -7234,9 +7817,9 @@ glusterd_add_brick_to_dict(glusterd_volinfo_t *volinfo,
{
int ret = -1;
int32_t pid = -1;
- char key[2048] = "";
+ char key[64];
int keylen;
- char base_key[1024] = "";
+ char base_key[32];
char pidfile[PATH_MAX] = "";
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
@@ -7321,8 +7904,11 @@ glusterd_add_brick_to_dict(glusterd_volinfo_t *volinfo,
ret = dict_set_int32n(dict, key, keylen, brick_online);
out:
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
gf_msg_debug(this->name, 0, "Returning %d", ret);
+ }
return ret;
}
@@ -7403,8 +7989,10 @@ glusterd_brick_stop(glusterd_volinfo_t *volinfo,
conf = this->private;
GF_ASSERT(conf);
- if ((!brickinfo) || (!volinfo))
+ if ((!brickinfo) || (!volinfo)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
if (gf_uuid_is_null(brickinfo->uuid)) {
ret = glusterd_resolve_brick(brickinfo);
@@ -7439,32 +8027,9 @@ out:
return ret;
}
-gf_boolean_t
-glusterd_is_tier_daemon_running(glusterd_volinfo_t *volinfo)
-{
- if (volinfo->type != GF_CLUSTER_TYPE_TIER)
- return _gf_false;
-
- if (volinfo->rebal.defrag &&
- volinfo->rebal.defrag_cmd == GF_DEFRAG_CMD_START_TIER) {
- return _gf_true;
- }
-
- return _gf_false;
-}
-
int
glusterd_is_defrag_on(glusterd_volinfo_t *volinfo)
{
- /*
- * Do not need to consider tier daemon as a rebalance
- * daemon and with current design rebalance is not supported
- * on a tiered volume.
- */
-
- if (glusterd_is_tier_daemon_running(volinfo))
- return 0;
-
return (volinfo->rebal.defrag != NULL);
}
@@ -7570,8 +8135,10 @@ glusterd_rb_check_bricks(glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *src,
rb = &volinfo->rep_brick;
- if (!rb->src_brick || !rb->dst_brick)
+ if (!rb->src_brick || !rb->dst_brick) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
return -1;
+ }
if (strcmp(rb->src_brick->hostname, src->hostname) ||
strcmp(rb->src_brick->path, src->path)) {
@@ -7717,6 +8284,8 @@ glusterd_check_and_set_brick_xattr(char *host, char *path, uuid_t uuid,
char msg[2048] = "";
gf_boolean_t in_use = _gf_false;
int flags = 0;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
/* Check for xattr support in backend fs */
ret = sys_lsetxattr(path, "trusted.glusterfs.test", "working", 8, 0);
@@ -7727,6 +8296,8 @@ glusterd_check_and_set_brick_xattr(char *host, char *path, uuid_t uuid,
" extended attributes failed, reason:"
" %s.",
host, path, strerror(errno));
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_BRICK_FAIL,
+ "Host=%s, Path=%s", host, path, NULL);
goto out;
} else {
@@ -7736,6 +8307,8 @@ glusterd_check_and_set_brick_xattr(char *host, char *path, uuid_t uuid,
"Removing test extended"
" attribute failed, reason: %s",
strerror(errno));
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_REMOVE_XATTR_FAIL,
+ NULL);
goto out;
}
}
@@ -7758,6 +8331,8 @@ glusterd_check_and_set_brick_xattr(char *host, char *path, uuid_t uuid,
"Failed to set extended "
"attributes %s, reason: %s",
GF_XATTR_VOL_ID_KEY, strerror(errno));
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL,
+ "Attriutes=%s", GF_XATTR_VOL_ID_KEY, NULL);
goto out;
}
@@ -7769,7 +8344,7 @@ out:
return ret;
}
-int
+static int
glusterd_sm_tr_log_transition_add_to_dict(dict_t *dict,
glusterd_sm_tr_log_t *log, int i,
int count)
@@ -7777,7 +8352,7 @@ glusterd_sm_tr_log_transition_add_to_dict(dict_t *dict,
int ret = -1;
char key[64] = "";
int keylen;
- char timestr[64] = "";
+ char timestr[GF_TIMESTR_SIZE] = "";
char *str = NULL;
GF_ASSERT(dict);
@@ -7809,6 +8384,9 @@ glusterd_sm_tr_log_transition_add_to_dict(dict_t *dict,
goto out;
out:
+ if (key[0] != '\0' && ret != 0)
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
gf_msg_debug("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -7917,7 +8495,8 @@ glusterd_sm_tr_log_transition_add(glusterd_sm_tr_log_t *log, int old_state,
transitions[next].old_state = old_state;
transitions[next].new_state = new_state;
transitions[next].event = event;
- time(&transitions[next].time);
+ transitions[next].time = gf_time();
+
log->current = next;
if (log->count < log->size)
log->count++;
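
/*
 * Illustrative sketch (not part of the patch): the transition log above is a
 * fixed-size ring buffer; `next` is presumably (current + 1) % size.  A
 * minimal standalone model of the same indexing, with hypothetical names:
 */
#if 0 /* example only */
typedef struct {
    int *slots;  /* preallocated array of `size` entries */
    int size;    /* capacity */
    int current; /* index of the most recent entry */
    int count;   /* number of valid entries, saturates at `size` */
} ring_log_t;

static void
ring_log_add(ring_log_t *log, int value)
{
    int next = (log->current + 1) % log->size; /* wrap around */

    log->slots[next] = value;
    log->current = next;
    if (log->count < log->size)
        log->count++; /* stops growing once the buffer is full */
}
#endif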
@@ -8033,8 +8612,10 @@ glusterd_get_local_brickpaths(glusterd_volinfo_t *volinfo, char **pathlist)
int i = 0;
glusterd_brickinfo_t *brickinfo = NULL;
- if ((!volinfo) || (!pathlist))
+ if ((!volinfo) || (!pathlist)) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
path_tokens = GF_CALLOC(sizeof(char *), volinfo->brick_count,
gf_gld_mt_charptr);
@@ -8269,20 +8850,13 @@ out:
static inline int
glusterd_is_replica_volume(int type)
{
- if (type == GF_CLUSTER_TYPE_REPLICATE ||
- type == GF_CLUSTER_TYPE_STRIPE_REPLICATE)
+ if (type == GF_CLUSTER_TYPE_REPLICATE)
return 1;
return 0;
}
gf_boolean_t
glusterd_is_volume_replicate(glusterd_volinfo_t *volinfo)
{
- gf_boolean_t replicates = _gf_false;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- replicates = glusterd_is_replica_volume(volinfo->tier_info.cold_type) |
- glusterd_is_replica_volume(volinfo->tier_info.hot_type);
- return replicates;
- }
return glusterd_is_replica_volume((volinfo->type));
}
@@ -8291,7 +8865,6 @@ glusterd_is_shd_compatible_type(int type)
{
switch (type) {
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
case GF_CLUSTER_TYPE_DISPERSE:
return _gf_true;
}
@@ -8301,13 +8874,6 @@ glusterd_is_shd_compatible_type(int type)
gf_boolean_t
glusterd_is_shd_compatible_volume(glusterd_volinfo_t *volinfo)
{
- int ret = 0;
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_is_shd_compatible_type(volinfo->tier_info.cold_type) |
- glusterd_is_shd_compatible_type(volinfo->tier_info.hot_type);
- return ret;
- }
return glusterd_is_shd_compatible_type(volinfo->type);
}
@@ -8340,6 +8906,10 @@ glusterd_set_dump_options(char *dumpoptions_path, char *options, int option_cnt)
goto out;
}
dup_options = gf_strdup(options);
+
+ if (!dup_options) {
+ goto out;
+ }
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_STATEDUMP_OPTS_RCVD,
"Received following statedump options: %s", dup_options);
option = strtok_r(dup_options, " ", &tmpptr);
@@ -8442,9 +9012,9 @@ glusterd_brick_signal(glusterd_volinfo_t *volinfo,
kill(pid, sig);
sleep(1);
+ sys_unlink(dumpoptions_path);
ret = 0;
out:
- sys_unlink(dumpoptions_path);
if (pidfile)
fclose(pidfile);
return ret;
@@ -8468,6 +9038,7 @@ glusterd_brick_terminate(glusterd_volinfo_t *volinfo,
op_errstr, SIGTERM);
}
+#ifdef BUILD_GNFS
int
glusterd_nfs_statedump(char *options, int option_cnt, char **op_errstr)
{
@@ -8475,7 +9046,6 @@ glusterd_nfs_statedump(char *options, int option_cnt, char **op_errstr)
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
char pidfile_path[PATH_MAX] = "";
- char path[PATH_MAX] = "";
FILE *pidfile = NULL;
pid_t pid = -1;
char dumpoptions_path[PATH_MAX] = "";
@@ -8490,18 +9060,23 @@ glusterd_nfs_statedump(char *options, int option_cnt, char **op_errstr)
GF_ASSERT(conf);
dup_options = gf_strdup(options);
+
+ if (!dup_options) {
+ goto out;
+ }
option = strtok_r(dup_options, " ", &tmpptr);
if (strcmp(option, conf->nfs_svc.name)) {
snprintf(msg, sizeof(msg),
"for nfs statedump, options should"
" be after the key nfs");
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY,
+ "Options misplaced", NULL);
*op_errstr = gf_strdup(msg);
ret = -1;
goto out;
}
- GLUSTERD_GET_NFS_DIR(path, conf);
- GLUSTERD_GET_NFS_PIDFILE(pidfile_path, path, conf);
+ GLUSTERD_GET_NFS_PIDFILE(pidfile_path, conf);
pidfile = fopen(pidfile_path, "r");
if (!pidfile) {
@@ -8538,15 +9113,16 @@ glusterd_nfs_statedump(char *options, int option_cnt, char **op_errstr)
kill(pid, SIGUSR1);
sleep(1);
-
+ /* coverity[TAINTED_STRING] */
+ sys_unlink(dumpoptions_path);
ret = 0;
out:
if (pidfile)
fclose(pidfile);
- sys_unlink(dumpoptions_path);
GF_FREE(dup_options);
return ret;
}
+#endif
int
glusterd_client_statedump(char *volname, char *options, int option_cnt,
@@ -8561,11 +9137,18 @@ glusterd_client_statedump(char *volname, char *options, int option_cnt,
char *pid = NULL;
dup_options = gf_strdup(options);
+ if (!dup_options) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "options=%s", options, NULL);
+ goto out;
+ }
option = strtok_r(dup_options, " ", &tmpptr);
if (strcmp(option, "client")) {
snprintf(msg, sizeof(msg),
"for gluster client statedump, options "
"should be after the key 'client'");
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY,
+ "Options misplaced", NULL);
*op_errstr = gf_strdup(msg);
ret = -1;
goto out;
@@ -8573,6 +9156,8 @@ glusterd_client_statedump(char *volname, char *options, int option_cnt,
target_ip = strtok_r(NULL, " ", &tmpptr);
if (target_ip == NULL) {
snprintf(msg, sizeof(msg), "ip address not specified");
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, msg,
+ NULL);
*op_errstr = gf_strdup(msg);
ret = -1;
goto out;
@@ -8581,6 +9166,8 @@ glusterd_client_statedump(char *volname, char *options, int option_cnt,
pid = strtok_r(NULL, " ", &tmpptr);
if (pid == NULL) {
snprintf(msg, sizeof(msg), "pid not specified");
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, msg,
+ NULL);
*op_errstr = gf_strdup(msg);
ret = -1;
goto out;
@@ -8599,7 +9186,6 @@ glusterd_quotad_statedump(char *options, int option_cnt, char **op_errstr)
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
char pidfile_path[PATH_MAX] = "";
- char path[PATH_MAX] = "";
FILE *pidfile = NULL;
pid_t pid = -1;
char dumpoptions_path[PATH_MAX] = "";
@@ -8614,18 +9200,22 @@ glusterd_quotad_statedump(char *options, int option_cnt, char **op_errstr)
GF_ASSERT(conf);
dup_options = gf_strdup(options);
+ if (!dup_options) {
+ goto out;
+ }
option = strtok_r(dup_options, " ", &tmpptr);
if (strcmp(option, conf->quotad_svc.name)) {
snprintf(msg, sizeof(msg),
"for quotad statedump, options "
"should be after the key 'quotad'");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY,
+ "Options misplaced", NULL);
*op_errstr = gf_strdup(msg);
ret = -1;
goto out;
}
- GLUSTERD_GET_QUOTAD_DIR(path, conf);
- GLUSTERD_GET_QUOTAD_PIDFILE(pidfile_path, path, conf);
+ GLUSTERD_GET_QUOTAD_PIDFILE(pidfile_path, conf);
pidfile = fopen(pidfile_path, "r");
if (!pidfile) {
@@ -8664,11 +9254,12 @@ glusterd_quotad_statedump(char *options, int option_cnt, char **op_errstr)
sleep(1);
+ /* coverity[TAINTED_STRING] */
+ sys_unlink(dumpoptions_path);
ret = 0;
out:
if (pidfile)
fclose(pidfile);
- sys_unlink(dumpoptions_path);
GF_FREE(dup_options);
return ret;
}
@@ -8772,13 +9363,18 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid)
"to stop snapd daemon service");
}
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
+
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+ /*
+ * Sending stop request for all volumes. So it is fine
+ * to send stop for mux shd
+ */
+ svc = &(volinfo->shd.svc);
ret = svc->stop(svc, SIGTERM);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
"Failed "
- "to stop tierd daemon service");
+ "to stop shd daemon service");
}
}
}
@@ -8797,7 +9393,7 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid)
}
/* Reconfigure all daemon services upon peer detach */
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(NULL);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
"Failed to reconfigure all daemon services.");
@@ -8898,32 +9494,6 @@ glusterd_get_trusted_client_filepath(char *filepath,
return ret;
}
-void
-glusterd_update_tier_status(glusterd_volinfo_t *volinfo)
-{
- glusterd_rebalance_t *rebal = NULL;
-
- rebal = &volinfo->rebal;
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER)
- return;
-
- /*
- * If tier process status is stopped or failed, then
- * manually changing the status.
- */
-
- switch (rebal->defrag_status) {
- case GF_DEFRAG_STATUS_FAILED:
- case GF_DEFRAG_STATUS_STOPPED:
- rebal->defrag_status = GF_DEFRAG_STATUS_STARTED;
- break;
- default:
- break;
- }
- return;
-}
-
int
glusterd_get_dummy_client_filepath(char *filepath, glusterd_volinfo_t *volinfo,
gf_transport_type type)
@@ -8972,12 +9542,6 @@ glusterd_volume_defrag_restart(glusterd_volinfo_t *volinfo, char *op_errstr,
* start the rebalance process
*/
- /*
- * Changing the status of tier process to start the daemon
- * forcefully.
- */
- glusterd_update_tier_status(volinfo);
-
switch (volinfo->rebal.defrag_status) {
case GF_DEFRAG_STATUS_COMPLETE:
case GF_DEFRAG_STATUS_STOPPED:
@@ -9083,9 +9647,6 @@ glusterd_restart_rebalance_for_volume(glusterd_volinfo_t *volinfo)
* work correctly.
*/
volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- glusterd_store_perform_node_state_store(volinfo);
-
return 0;
}
if (!volinfo->rebal.defrag_cmd) {
@@ -9259,76 +9820,25 @@ glusterd_defrag_volume_status_update(glusterd_volinfo_t *volinfo,
if (ret2)
gf_msg_trace(this->name, 0, "failed to get time left");
- if (cmd == GF_DEFRAG_CMD_STATUS_TIER) {
- if (files)
- volinfo->tier.rebalance_files = files;
- if (size)
- volinfo->tier.rebalance_data = size;
- if (lookup)
- volinfo->tier.lookedup_files = lookup;
- if (status)
- volinfo->tier.defrag_status = status;
- if (failures)
- volinfo->tier.rebalance_failures = failures;
- if (skipped)
- volinfo->tier.skipped_files = skipped;
- if (run_time)
- volinfo->tier.rebalance_time = run_time;
- } else {
- if (files)
- volinfo->rebal.rebalance_files = files;
- if (size)
- volinfo->rebal.rebalance_data = size;
- if (lookup)
- volinfo->rebal.lookedup_files = lookup;
- if (status)
- volinfo->rebal.defrag_status = status;
- if (failures)
- volinfo->rebal.rebalance_failures = failures;
- if (skipped)
- volinfo->rebal.skipped_files = skipped;
- if (run_time)
- volinfo->rebal.rebalance_time = run_time;
- if (!ret2)
- volinfo->rebal.time_left = time_left;
- }
-
- if (promoted)
- volinfo->tier_info.promoted = promoted;
- if (demoted)
- volinfo->tier_info.demoted = demoted;
+ if (files)
+ volinfo->rebal.rebalance_files = files;
+ if (size)
+ volinfo->rebal.rebalance_data = size;
+ if (lookup)
+ volinfo->rebal.lookedup_files = lookup;
+ if (status)
+ volinfo->rebal.defrag_status = status;
+ if (failures)
+ volinfo->rebal.rebalance_failures = failures;
+ if (skipped)
+ volinfo->rebal.skipped_files = skipped;
+ if (run_time)
+ volinfo->rebal.rebalance_time = run_time;
+ if (!ret2)
+ volinfo->rebal.time_left = time_left;
return ret;
}
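
/*
 * Sketch (example only): the per-field guards above treat a zero value in
 * the rebalance response as "no update" rather than "reset", i.e. each
 * counter is merged like this:
 */
#if 0
static void
merge_counter(uint64_t *stored, uint64_t incoming)
{
    if (incoming) /* zero leaves the previously stored value untouched */
        *stored = incoming;
}
#endif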
-/*
- The function is required to take dict ref for every xlator at graph.
- At the time of compare graph topology create a graph and populate
- key values in the dictionary, after finished graph comparison we do destroy
- the new graph.At the time of construct graph we don't take any reference
- so to avoid leak due to ref counter underflow we need to call dict_ref here.
-
-*/
-
-void
-glusterd_graph_take_reference(xlator_t *tree)
-{
- xlator_t *trav = tree;
- xlator_t *prev = tree;
-
- if (!tree) {
- gf_msg("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND,
- "Translator tree not found");
- return;
- }
-
- while (prev) {
- trav = prev->next;
- if (prev->options)
- dict_ref(prev->options);
- prev = trav;
- }
- return;
-}
int
glusterd_check_topology_identical(const char *filename1, const char *filename2,
@@ -9375,15 +9885,11 @@ glusterd_check_topology_identical(const char *filename1, const char *filename2,
if (grph1 == NULL)
goto out;
- glusterd_graph_take_reference(grph1->first);
-
/* create the graph for filename2 */
grph2 = glusterfs_graph_construct(fp2);
if (grph2 == NULL)
goto out;
- glusterd_graph_take_reference(grph2->first);
-
/* compare the graph topology */
*identical = is_graph_topology_equal(grph1, grph2);
ret = 0; /* SUCCESS */
@@ -9566,6 +10072,8 @@ glusterd_append_gsync_status(dict_t *dst, dict_t *src)
ret = dict_get_strn(src, "gsync-status", SLEN("gsync-status"), &stop_msg);
if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=gsync-status", NULL);
ret = 0;
goto out;
}
@@ -9820,8 +10328,11 @@ glusterd_sync_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
int ret = 0;
GF_ASSERT(rsp_dict);
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
if (!rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
}
@@ -9869,6 +10380,8 @@ glusterd_profile_volume_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
ret = dict_get_int32n(rsp_dict, "count", SLEN("count"), &brick_count);
if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=count", NULL);
ret = 0; // no bricks in the rsp
goto out;
}
@@ -9928,7 +10441,7 @@ glusterd_volume_status_add_peer_rsp(dict_t *this, char *key, data_t *value,
if (len < 0 || len >= sizeof(new_key))
goto out;
- ret = dict_set(rsp_ctx->dict, new_key, new_value);
+ ret = dict_setn(rsp_ctx->dict, new_key, len, new_value);
out:
if (ret) {
data_unref(new_value);
@@ -10148,6 +10661,8 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict)
glusterd_volinfo_t *volinfo = NULL;
GF_ASSERT(rsp_dict);
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
if (aggr) {
ctx_dict = aggr;
@@ -10157,8 +10672,11 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict)
}
ret = dict_get_int32n(ctx_dict, "cmd", SLEN("cmd"), &cmd);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=cmd",
+ NULL);
goto out;
+ }
if (cmd & GF_CLI_STATUS_ALL && is_origin_glusterd(ctx_dict)) {
ret = dict_get_int32n(rsp_dict, "vol_count", SLEN("vol_count"),
@@ -10166,18 +10684,27 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict)
if (ret == 0) {
ret = dict_set_int32n(ctx_dict, "vol_count", SLEN("vol_count"),
vol_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=vol_count", NULL);
goto out;
+ }
for (i = 0; i < vol_count; i++) {
keylen = snprintf(key, sizeof(key), "vol%d", i);
ret = dict_get_strn(rsp_dict, key, keylen, &volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
ret = dict_set_strn(ctx_dict, key, keylen, volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
}
} else {
/* Ignore the error as still the aggregation applies in
@@ -10191,6 +10718,8 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict)
ret = dict_get_int32n(rsp_dict, "count", SLEN("count"), &rsp_node_count);
if (ret) {
+ gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED, "Key=count",
+ NULL);
ret = 0; // no bricks in the rsp
goto out;
}
@@ -10198,8 +10727,8 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict)
ret = dict_get_int32n(rsp_dict, "other-count", SLEN("other-count"),
&rsp_other_count);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get other count from rsp_dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=other-count", NULL);
goto out;
}
@@ -10209,18 +10738,27 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict)
if (!dict_getn(ctx_dict, "brick-index-max", SLEN("brick-index-max"))) {
ret = dict_get_int32n(rsp_dict, "brick-index-max",
SLEN("brick-index-max"), &brick_index_max);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=brick-index-max", NULL);
goto out;
+ }
ret = dict_set_int32n(ctx_dict, "brick-index-max",
SLEN("brick-index-max"), brick_index_max);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=brick-index-max", NULL);
goto out;
+ }
} else {
ret = dict_get_int32n(ctx_dict, "brick-index-max",
SLEN("brick-index-max"), &brick_index_max);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=brick-index-max", NULL);
goto out;
+ }
}
rsp_ctx.count = node_count;
@@ -10233,62 +10771,45 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict)
ret = dict_set_int32n(ctx_dict, "count", SLEN("count"),
node_count + rsp_node_count);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to update node count");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
}
ret = dict_set_int32n(ctx_dict, "other-count", SLEN("other-count"),
(other_count + rsp_other_count));
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to update other-count");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=other-count", NULL);
goto out;
}
ret = dict_get_strn(ctx_dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to get volname");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=volname", NULL);
goto out;
}
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
- "Failed to get volinfo for volume: %s", volname);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Volume=%s", volname, NULL);
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = dict_get_int32n(rsp_dict, "hot_brick_count",
- SLEN("hot_brick_count"), &hot_brick_count);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Failed to get hot brick count from rsp_dict");
- goto out;
- }
-
- ret = dict_get_int32n(rsp_dict, "type", SLEN("type"), &type);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Failed to get type from rsp_dict");
- goto out;
- }
- }
-
ret = dict_set_int32n(ctx_dict, "hot_brick_count", SLEN("hot_brick_count"),
hot_brick_count);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "Failed to update hot_brick_count");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=hot_brick_count", NULL);
goto out;
}
ret = dict_set_int32n(ctx_dict, "type", SLEN("type"), type);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "Failed to update type");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=type", NULL);
goto out;
}
@@ -10817,7 +11338,7 @@ glusterd_volume_rebalance_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
int ret = 0;
int32_t index = 0;
int32_t count = 0;
- int32_t current_index = 2;
+ int32_t current_index = 1;
int32_t value32 = 0;
uint64_t value = 0;
char *peer_uuid_str = NULL;
@@ -10828,6 +11349,8 @@ glusterd_volume_rebalance_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
GF_ASSERT(this);
conf = this->private;
+ if (conf->op_version < GD_OP_VERSION_6_0)
+ current_index = 2;
if (aggr) {
ctx_dict = aggr;
@@ -10852,7 +11375,7 @@ glusterd_volume_rebalance_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
ret = dict_get_int32n(rsp_dict, "count", SLEN("count"), &index);
if (ret)
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "failed to get index");
+ "failed to get index from rsp dict");
keylen = snprintf(key, sizeof(key), "node-uuid-%d", index);
ret = dict_get_strn(rsp_dict, key, keylen, &node_uuid);
@@ -10860,7 +11383,7 @@ glusterd_volume_rebalance_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
node_uuid_str = gf_strdup(node_uuid);
/* Finding the index of the node-uuid in the peer-list */
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
peer_uuid_str = gd_peer_uuid_str(peerinfo);
@@ -10869,7 +11392,7 @@ glusterd_volume_rebalance_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
current_index++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
/* Setting the largest index value as the total count. */
ret = dict_get_int32n(ctx_dict, "count", SLEN("count"), &count);
@@ -10993,189 +11516,6 @@ out:
}
int
-glusterd_volume_tier_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
-{
- char key[64] = "";
- int keylen;
- char *node_uuid = NULL;
- char *node_uuid_str = NULL;
- char *volname = NULL;
- dict_t *ctx_dict = NULL;
- double elapsed_time = 0;
- glusterd_volinfo_t *volinfo = NULL;
- int ret = 0;
- int32_t index = 0;
- int32_t count = 0;
- int32_t value32 = 0;
- uint64_t value = 0;
- xlator_t *this = NULL;
- char *task_id_str = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(this->name, this, out);
- GF_VALIDATE_OR_GOTO(this->name, rsp_dict, out);
-
- if (aggr) {
- ctx_dict = aggr;
-
- } else {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_GET_FAIL,
- "Operation Context is not present");
- goto out;
- }
-
- ret = dict_get_strn(ctx_dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
-
- if (ret)
- goto out;
-
- ret = dict_get_int32n(rsp_dict, "count", SLEN("count"), &index);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "failed to get index");
-
- keylen = snprintf(key, sizeof(key), "node-uuid-%d", index);
- ret = dict_get_strn(rsp_dict, key, keylen, &node_uuid);
- if (!ret) {
- node_uuid_str = gf_strdup(node_uuid);
- }
- ret = dict_get_int32n(ctx_dict, "count", SLEN("count"), &count);
- count++;
- ret = dict_set_int32n(ctx_dict, "count", SLEN("count"), count);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to set count");
-
- keylen = snprintf(key, sizeof(key), "node-uuid-%d", count);
- ret = dict_set_dynstrn(ctx_dict, key, keylen, node_uuid_str);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set node-uuid");
- }
-
- snprintf(key, sizeof(key), "files-%d", index);
- ret = dict_get_uint64(rsp_dict, key, &value);
- if (!ret) {
- snprintf(key, sizeof(key), "files-%d", count);
- ret = dict_set_uint64(ctx_dict, key, value);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set the file count");
- }
- }
-
- snprintf(key, sizeof(key), "size-%d", index);
- ret = dict_get_uint64(rsp_dict, key, &value);
- if (!ret) {
- snprintf(key, sizeof(key), "size-%d", count);
- ret = dict_set_uint64(ctx_dict, key, value);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set the size of migration");
- }
- }
-
- snprintf(key, sizeof(key), "lookups-%d", index);
- ret = dict_get_uint64(rsp_dict, key, &value);
- if (!ret) {
- snprintf(key, sizeof(key), "lookups-%d", count);
- ret = dict_set_uint64(ctx_dict, key, value);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set looked up file count");
- }
- }
-
- keylen = snprintf(key, sizeof(key), "status-%d", index);
- ret = dict_get_int32n(rsp_dict, key, keylen, &value32);
- if (!ret) {
- keylen = snprintf(key, sizeof(key), "status-%d", count);
- ret = dict_set_int32n(ctx_dict, key, keylen, value32);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set status");
- }
- }
-
- snprintf(key, sizeof(key), "failures-%d", index);
- ret = dict_get_uint64(rsp_dict, key, &value);
- if (!ret) {
- snprintf(key, sizeof(key), "failures-%d", count);
- ret = dict_set_uint64(ctx_dict, key, value);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set failure count");
- }
- }
-
- snprintf(key, sizeof(key), "skipped-%d", index);
- ret = dict_get_uint64(rsp_dict, key, &value);
- if (!ret) {
- snprintf(key, sizeof(key), "skipped-%d", count);
- ret = dict_set_uint64(ctx_dict, key, value);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set skipped count");
- }
- }
- snprintf(key, sizeof(key), "run-time-%d", index);
- ret = dict_get_double(rsp_dict, key, &elapsed_time);
- if (!ret) {
- snprintf(key, sizeof(key), "run-time-%d", count);
- ret = dict_set_double(ctx_dict, key, elapsed_time);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set run-time");
- }
- }
-
- snprintf(key, sizeof(key), "demoted-%d", index);
- ret = dict_get_uint64(rsp_dict, key, &value);
- if (!ret) {
- snprintf(key, sizeof(key), "demoted-%d", count);
- ret = dict_set_uint64(ctx_dict, key, value);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set demoted count");
- }
- }
- snprintf(key, sizeof(key), "promoted-%d", index);
- ret = dict_get_uint64(rsp_dict, key, &value);
- if (!ret) {
- snprintf(key, sizeof(key), "promoted-%d", count);
- ret = dict_set_uint64(ctx_dict, key, value);
- if (ret) {
- gf_msg_debug(this->name, 0, "failed to set promoted count");
- }
- }
-
- snprintf(key, sizeof(key), "time-left-%d", index);
- ret = dict_get_uint64(rsp_dict, key, &value);
- if (!ret) {
- snprintf(key, sizeof(key), "time-left-%d", count);
- ret = dict_set_uint64(ctx_dict, key, value);
- if (ret) {
- gf_msg_debug(THIS->name, 0, "failed to set time-left");
- }
- }
-
- ret = dict_get_strn(rsp_dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN(GF_REMOVE_BRICK_TID_KEY), &task_id_str);
- if (ret) {
- gf_msg_debug(this->name, errno, "Missing remove-brick-id");
- } else {
- ret = dict_set_strn(ctx_dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN(GF_REMOVE_BRICK_TID_KEY), task_id_str);
- if (ret)
- gf_msg_debug(this->name, errno,
- "Failed to set remove brick task ID");
- }
-
- ret = 0;
-
-out:
- return ret;
-}
-
-int
glusterd_sys_exec_output_rsp_dict(dict_t *dst, dict_t *src)
{
char output_name[64] = "";
@@ -11489,7 +11829,7 @@ _heal_volume_add_shd_rsp(dict_t *this, char *key, data_t *value, void *data)
rxl_end_len = strlen(rxl_end);
int_len = strlen(key) - rxl_end_len;
- strncpy(int_str, key, int_len);
+ (void)memcpy(int_str, key, int_len);
int_str[int_len] = '\0';
ret = gf_string2int(int_str, &rxl_id);
@@ -11497,7 +11837,7 @@ _heal_volume_add_shd_rsp(dict_t *this, char *key, data_t *value, void *data)
goto out;
int_len = rxl_end_len - strlen(rxl_child_end) - 1;
- strncpy(int_str, rxl_end + 1, int_len);
+ (void)memcpy(int_str, rxl_end + 1, int_len);
int_str[int_len] = '\0';
ret = gf_string2int(int_str, &rxl_child_id);
@@ -11561,19 +11901,19 @@ _heal_volume_add_shd_rsp_of_statistics(dict_t *this, char *key, data_t *value,
key_begin_strlen = strlen(key_begin_str);
int_len = strlen(key) - key_begin_strlen;
- strncpy(key_begin_string, key, int_len);
+ (void)memcpy(key_begin_string, key, int_len);
key_begin_string[int_len] = '\0';
rxl_end_len = strlen(rxl_end);
int_len = key_begin_strlen - rxl_end_len - 1;
- strncpy(int_str, key_begin_str + 1, int_len);
+ (void)memcpy(int_str, key_begin_str + 1, int_len);
int_str[int_len] = '\0';
ret = gf_string2int(int_str, &rxl_id);
if (ret)
goto out;
int_len = rxl_end_len - strlen(rxl_child_end) - 1;
- strncpy(int_str, rxl_end + 1, int_len);
+ (void)memcpy(int_str, rxl_end + 1, int_len);
int_str[int_len] = '\0';
ret = gf_string2int(int_str, &rxl_child_id);
if (ret)
@@ -11705,7 +12045,6 @@ glusterd_status_volume_client_list(dict_t *rsp_dict, dict_t *op_ctx,
int32_t count = 0;
int32_t fuse_count = 0;
int32_t gfapi_count = 0;
- int32_t tierd_count = 0;
int32_t rebalance_count = 0;
int32_t glustershd_count = 0;
int32_t quotad_count = 0;
@@ -11764,15 +12103,6 @@ glusterd_status_volume_client_list(dict_t *rsp_dict, dict_t *op_ctx,
gfapi_count++;
continue;
- } else if (!strcmp(process, "tierd")) {
- ret = dict_get_int32n(op_ctx, "tierd-count", SLEN("tierd-count"),
- &count);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
- "Couldn't get tierd-count");
- }
- tierd_count++;
- continue;
} else if (!strcmp(process, "rebalance")) {
ret = dict_get_int32n(op_ctx, "rebalance-count",
SLEN("rebalance-count"), &count);
@@ -11829,15 +12159,6 @@ glusterd_status_volume_client_list(dict_t *rsp_dict, dict_t *op_ctx,
goto out;
}
}
- if (tierd_count) {
- ret = dict_set_int32n(op_ctx, "tierd-count", SLEN("tierd-count"),
- tierd_count);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Couldn't set tierd-count");
- goto out;
- }
- }
if (rebalance_count) {
ret = dict_set_int32n(op_ctx, "rebalance-count",
SLEN("rebalance-count"), rebalance_count);
@@ -11880,8 +12201,7 @@ out:
}
int
-glusterd_tier_or_rebalance_rsp(dict_t *op_ctx, glusterd_rebalance_t *index,
- int32_t i)
+glusterd_rebalance_rsp(dict_t *op_ctx, glusterd_rebalance_t *index, int32_t i)
{
int ret = 0;
char key[64] = "";
@@ -11942,7 +12262,7 @@ glusterd_defrag_volume_node_rsp(dict_t *req_dict, dict_t *rsp_dict,
char key[64] = "";
int keylen;
int32_t i = 0;
- char buf[1024] = "";
+ char buf[64] = "";
char *node_str = NULL;
int32_t cmd = 0;
@@ -11991,10 +12311,7 @@ glusterd_defrag_volume_node_rsp(dict_t *req_dict, dict_t *rsp_dict,
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set node-uuid");
- if (cmd == GF_DEFRAG_CMD_STATUS_TIER)
- glusterd_tier_or_rebalance_rsp(op_ctx, &volinfo->tier, i);
- else
- glusterd_tier_or_rebalance_rsp(op_ctx, &volinfo->rebal, i);
+ glusterd_rebalance_rsp(op_ctx, &volinfo->rebal, i);
snprintf(key, sizeof(key), "time-left-%d", i);
ret = dict_set_uint64(op_ctx, key, volinfo->rebal.time_left);
@@ -12002,18 +12319,6 @@ glusterd_defrag_volume_node_rsp(dict_t *req_dict, dict_t *rsp_dict,
gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
"failed to set time left");
- snprintf(key, sizeof(key), "promoted-%d", i);
- ret = dict_set_uint64(op_ctx, key, volinfo->tier_info.promoted);
- if (ret)
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "failed to set lookedup file count");
-
- snprintf(key, sizeof(key), "demoted-%d", i);
- ret = dict_set_uint64(op_ctx, key, volinfo->tier_info.demoted);
- if (ret)
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "failed to set lookedup file count");
-
out:
return ret;
}
@@ -12041,8 +12346,6 @@ glusterd_handle_node_rsp(dict_t *req_dict, void *pending_entry,
ret = glusterd_status_volume_brick_rsp(rsp_dict, op_ctx,
op_errstr);
break;
- case GD_OP_TIER_STATUS:
- case GD_OP_DETACH_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
glusterd_defrag_volume_node_rsp(req_dict, rsp_dict, op_ctx);
break;
@@ -12188,20 +12491,22 @@ glusterd_copy_uuid_to_dict(uuid_t uuid, dict_t *dict, char *key,
return 0;
}
-int
+static int
_update_volume_op_versions(dict_t *this, char *key, data_t *value, void *data)
{
int op_version = 0;
glusterd_volinfo_t *ctx = NULL;
gf_boolean_t enabled = _gf_true;
int ret = -1;
+ struct volopt_map_entry *vmep = NULL;
GF_ASSERT(data);
ctx = data;
- op_version = glusterd_get_op_version_for_key(key);
+ vmep = gd_get_vmep(key);
+ op_version = glusterd_get_op_version_from_vmep(vmep);
- if (gd_is_xlator_option(key) || gd_is_boolean_option(key)) {
+ if (gd_is_xlator_option(vmep) || gd_is_boolean_option(vmep)) {
ret = gf_string2boolean(value->data, &enabled);
if (ret)
return 0;
@@ -12213,7 +12518,7 @@ _update_volume_op_versions(dict_t *this, char *key, data_t *value, void *data)
if (op_version > ctx->op_version)
ctx->op_version = op_version;
- if (gd_is_client_option(key) && (op_version > ctx->client_op_version))
+ if (gd_is_client_option(vmep) && (op_version > ctx->client_op_version))
ctx->client_op_version = op_version;
return 0;
@@ -12402,22 +12707,6 @@ glusterd_is_volume_inode_quota_enabled(glusterd_volinfo_t *volinfo)
}
int
-glusterd_is_tierd_supposed_to_be_enabled(glusterd_volinfo_t *volinfo)
-{
- if ((volinfo->type != GF_CLUSTER_TYPE_TIER) ||
- (volinfo->tier.op == GD_OP_DETACH_TIER))
- return _gf_false;
- else
- return _gf_true;
-}
-
-int
-glusterd_is_tierd_enabled(glusterd_volinfo_t *volinfo)
-{
- return volinfo->is_tier_enabled;
-}
-
-int
glusterd_is_bitrot_enabled(glusterd_volinfo_t *volinfo)
{
return glusterd_volinfo_get_boolean(volinfo, VKEY_FEATURES_BITROT);
@@ -12723,6 +13012,11 @@ glusterd_enable_default_options(glusterd_volinfo_t *volinfo, char *option)
int ret = 0;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
+#ifdef IPV6_DEFAULT
+ char *addr_family = "inet6";
+#else
+ char *addr_family = "inet";
+#endif
this = THIS;
GF_ASSERT(this);
@@ -12783,23 +13077,36 @@ glusterd_enable_default_options(glusterd_volinfo_t *volinfo, char *option)
}
}
}
+ }
- if (!option || !strcmp("features.ctr-enabled", option)) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = dict_set_dynstr_with_alloc(volinfo->dict,
- "features.ctr-enabled", "on");
+ if (conf->op_version >= GD_OP_VERSION_3_9_0) {
+ if (!option || !strcmp("transport.address-family", option)) {
+ if (volinfo->transport_type == GF_TRANSPORT_TCP) {
+ ret = dict_set_dynstr_with_alloc(
+ volinfo->dict, "transport.address-family", addr_family);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_SET_FAILED,
- "Failed to set option "
- "'features.ctr-enabled' "
- "on volume %s",
+ "failed to set transport."
+ "address-family on %s",
volinfo->volname);
goto out;
}
}
}
}
+
+ if (conf->op_version >= GD_OP_VERSION_7_0) {
+ ret = dict_set_dynstr_with_alloc(volinfo->dict,
+ "storage.fips-mode-rchecksum", "on");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set option 'storage.fips-mode-rchecksum' "
+ "on volume %s",
+ volinfo->volname);
+ goto out;
+ }
+ }
out:
return ret;
}
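
/*
 * Worked example (sketch, not from the patch): with the cluster op-version
 * at or above GD_OP_VERSION_7_0, a freshly created tcp volume picks up both
 * defaults set above:
 *
 *   transport.address-family    = inet  (inet6 when built with IPV6_DEFAULT)
 *   storage.fips-mode-rchecksum = on
 */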
@@ -12815,6 +13122,7 @@ glusterd_get_gfproxy_client_volfile(glusterd_volinfo_t *volinfo, char *path,
switch (volinfo->transport_type) {
case GF_TRANSPORT_TCP:
+ case GF_TRANSPORT_BOTH_TCP_RDMA:
snprintf(path, path_len, "%s/trusted-%s.tcp-gfproxy-fuse.vol",
workdir, volinfo->volname);
break;
@@ -12891,7 +13199,11 @@ glusterd_update_mntopts(char *brick_path, glusterd_brickinfo_t *brickinfo)
ret = -1;
goto out;
}
-    strcpy(brickinfo->mnt_opts, entry->mnt_opts);
+    gf_strncpy(brickinfo->mnt_opts, entry->mnt_opts,
+               sizeof(brickinfo->mnt_opts));
ret = 0;
out:
@@ -12939,7 +13251,7 @@ glusterd_get_value_for_vme_entry(struct volopt_map_entry *vme, char **def_val)
ret = xlator_option_info_list(&vol_opt_handle, key, &local_def_val, &descr);
if (ret) {
/*Swallow Error if option not found*/
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_KEY_FAILED,
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GET_KEY_FAILED,
"Failed to get option for %s "
"key",
key);
@@ -13068,7 +13380,7 @@ glusterd_get_global_options_for_all_vols(rpcsvc_request_t *req, dict_t *ctx,
if (key_fixed)
key = key_fixed;
}
-
+ /* coverity[CONSTANT_EXPRESSION_RESULT] */
ALL_VOLUME_OPTION_CHECK("all", _gf_true, key, ret, op_errstr, out);
for (i = 0; valid_all_vol_opts[i].option; i++) {
@@ -13096,7 +13408,9 @@ glusterd_get_global_options_for_all_vols(rpcsvc_request_t *req, dict_t *ctx,
gf_asprintf(&def_val, "%d", priv->op_version);
need_free = _gf_true;
} else {
- def_val = valid_all_vol_opts[i].dflt_val;
+ gf_asprintf(&def_val, "%s (DEFAULT)",
+ valid_all_vol_opts[i].dflt_val);
+ need_free = _gf_true;
}
}
@@ -13146,7 +13460,7 @@ out:
if (ret && need_free) {
GF_FREE(def_val);
}
-
+ GF_FREE(key_fixed);
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
@@ -13182,9 +13496,11 @@ glusterd_get_default_val_for_volopt(dict_t *ctx, gf_boolean_t all_opts,
int count = 0;
xlator_t *this = NULL;
char *def_val = NULL;
+ char *def_val_str = NULL;
char dict_key[50] = "";
int keylen;
gf_boolean_t key_found = _gf_false;
+ gf_boolean_t get_value_vme = _gf_false;
glusterd_conf_t *priv = NULL;
dict_t *vol_dict = NULL;
@@ -13207,6 +13523,7 @@ glusterd_get_default_val_for_volopt(dict_t *ctx, gf_boolean_t all_opts,
if (!all_opts && strcmp(vme->key, input_key))
continue;
key_found = _gf_true;
+ get_value_vme = _gf_false;
/* First look for the key in the priv->opts for global option
* and then into vol_dict, if its not present then look for
* translator default value */
@@ -13221,6 +13538,7 @@ glusterd_get_default_val_for_volopt(dict_t *ctx, gf_boolean_t all_opts,
def_val = vme->value;
} else {
ret = glusterd_get_value_for_vme_entry(vme, &def_val);
+ get_value_vme = _gf_true;
if (!all_opts && ret)
goto out;
else if (ret == -2)
@@ -13239,7 +13557,13 @@ glusterd_get_default_val_for_volopt(dict_t *ctx, gf_boolean_t all_opts,
goto out;
}
sprintf(dict_key, "value%d", count);
- ret = dict_set_dynstr_with_alloc(ctx, dict_key, def_val);
+ if (get_value_vme) { // the value was never changed - DEFAULT is used
+ gf_asprintf(&def_val_str, "%s (DEFAULT)", def_val);
+ ret = dict_set_dynstr_with_alloc(ctx, dict_key, def_val_str);
+ GF_FREE(def_val_str);
+ def_val_str = NULL;
+ } else
+ ret = dict_set_dynstr_with_alloc(ctx, dict_key, def_val);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Failed to "
@@ -13247,6 +13571,9 @@ glusterd_get_default_val_for_volopt(dict_t *ctx, gf_boolean_t all_opts,
def_val, vme->key);
goto out;
}
+ if (get_value_vme)
+ GF_FREE(def_val);
+
def_val = NULL;
if (!all_opts)
break;
@@ -13544,6 +13871,34 @@ out:
return ret;
}
+void
+glusterd_brick_get_status_str(glusterd_brickinfo_t *brickinfo, char *status_str)
+{
+ GF_VALIDATE_OR_GOTO(THIS->name, brickinfo, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, status_str, out);
+
+ switch (brickinfo->status) {
+ case GF_BRICK_STOPPED:
+ sprintf(status_str, "%s", "Stopped");
+ break;
+ case GF_BRICK_STARTED:
+ sprintf(status_str, "%s", "Started");
+ break;
+ case GF_BRICK_STARTING:
+ sprintf(status_str, "%s", "Starting");
+ break;
+ case GF_BRICK_STOPPING:
+ sprintf(status_str, "%s", "Stopping");
+ break;
+ default:
+ sprintf(status_str, "%s", "None");
+ break;
+ }
+
+out:
+ return;
+}
+
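+/*
+ * Example usage (sketch, not part of the patch): callers pass a buffer
+ * large enough for the longest literal written above ("Stopping" plus the
+ * terminating NUL):
+ *
+ *   char status_str[16] = "";
+ *   glusterd_brick_get_status_str(brickinfo, status_str);
+ */
+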
int
glusterd_volume_get_transport_type_str(glusterd_volinfo_t *volinfo,
char *transport_type_str)
@@ -13643,52 +13998,6 @@ out:
return ret;
}
-int
-glusterd_volume_get_hot_tier_type_str(glusterd_volinfo_t *volinfo,
- char **hot_tier_type_str)
-{
- int ret = -1;
- int hot_tier_type = 0;
- int hot_dist_count = 0;
-
- GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
- GF_VALIDATE_OR_GOTO(THIS->name, hot_tier_type_str, out);
-
- hot_dist_count = volinfo->tier_info.hot_replica_count
- ? volinfo->tier_info.hot_replica_count
- : 1;
-
- hot_tier_type = get_vol_type(volinfo->tier_info.hot_type, hot_dist_count,
- volinfo->tier_info.hot_brick_count);
-
- *hot_tier_type_str = vol_type_str[hot_tier_type];
-
- ret = 0;
-out:
- return ret;
-}
-
-int
-glusterd_volume_get_cold_tier_type_str(glusterd_volinfo_t *volinfo,
- char **cold_tier_type_str)
-{
- int ret = -1;
- int cold_tier_type = 0;
-
- GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
- GF_VALIDATE_OR_GOTO(THIS->name, cold_tier_type_str, out);
-
- cold_tier_type = get_vol_type(volinfo->tier_info.cold_type,
- volinfo->tier_info.cold_dist_leaf_count,
- volinfo->tier_info.cold_brick_count);
-
- *cold_tier_type_str = vol_type_str[cold_tier_type];
-
- ret = 0;
-out:
- return ret;
-}
-
/* This function inserts the element into the list in order.
   The order is based on the compare function provided as input.
   If the element is to be inserted in ascending order, compare should return:
@@ -13711,78 +14020,6 @@ glusterd_list_add_order(struct cds_list_head *new, struct cds_list_head *head,
cds_list_add_rcu(new, rcu_dereference(pos->prev));
}
-int
-glusterd_disallow_op_for_tier(glusterd_volinfo_t *volinfo, glusterd_op_t op,
- int cmd)
-{
- xlator_t *this = NULL;
- int ret = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER)
- goto out;
-
- switch (op) {
- case GD_OP_ADD_BRICK:
- case GD_OP_REPLACE_BRICK:
- case GD_OP_RESET_BRICK:
- ret = -1;
- gf_msg_debug(this->name, 0,
- "Operation not "
- "permitted on tiered volume %s",
- volinfo->volname);
- break;
- case GD_OP_REBALANCE:
- switch (cmd) {
- case GF_DEFRAG_CMD_START_TIER:
- case GF_DEFRAG_CMD_STATUS_TIER:
- case GF_DEFRAG_CMD_START_DETACH_TIER:
- case GF_DEFRAG_CMD_STOP_DETACH_TIER:
- case GF_DEFRAG_CMD_STATUS:
- case GF_DEFRAG_CMD_DETACH_STATUS:
- case GF_DEFRAG_CMD_STOP_TIER:
- case GF_DEFRAG_CMD_DETACH_START:
- case GF_DEFRAG_CMD_DETACH_COMMIT:
- case GF_DEFRAG_CMD_DETACH_COMMIT_FORCE:
- case GF_DEFRAG_CMD_DETACH_STOP:
- ret = 0;
- break;
- default:
- gf_msg_debug(this->name, 0,
- "Rebalance Operation not permitted"
- " on tiered volume %s",
- volinfo->volname);
- ret = -1;
- break;
- }
- break;
- case GD_OP_REMOVE_BRICK:
- switch (cmd) {
- case GF_DEFRAG_CMD_DETACH_START:
- case GF_OP_CMD_DETACH_COMMIT_FORCE:
- case GF_OP_CMD_DETACH_COMMIT:
- case GF_OP_CMD_DETACH_START:
- case GF_DEFRAG_CMD_STOP_DETACH_TIER:
- ret = 0;
- break;
- default:
- gf_msg_debug(this->name, 0,
- "Remove brick operation not "
- "permitted on tiered volume %s",
- volinfo->volname);
- ret = -1;
- break;
- }
- break;
- default:
- break;
- }
-out:
- return ret;
-}
-
int32_t
glusterd_count_connected_peers(int32_t *count)
{
@@ -13799,7 +14036,7 @@ glusterd_count_connected_peers(int32_t *count)
*count = 1;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Find peer who is connected and is a friend */
@@ -13808,7 +14045,7 @@ glusterd_count_connected_peers(int32_t *count)
(*count)++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = 0;
out:
@@ -13822,7 +14059,6 @@ gd_get_shd_key(int type)
switch (type) {
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
key = "cluster.self-heal-daemon";
break;
case GF_CLUSTER_TYPE_DISPERSE:
@@ -13852,41 +14088,41 @@ glusterd_handle_replicate_brick_ops(glusterd_volinfo_t *volinfo,
char vpath[PATH_MAX] = "";
char *volfileserver = NULL;
- priv = THIS->private;
- GF_VALIDATE_OR_GOTO(THIS->name, priv, out);
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
dirty[2] = hton32(1);
ret = sys_lsetxattr(brickinfo->path, GF_AFR_DIRTY, dirty, sizeof(dirty), 0);
if (ret == -1) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_SETXATTR_FAIL,
- "Failed to set extended"
- " attribute %s : %s.",
- GF_AFR_DIRTY, strerror(errno));
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL,
+ "Attribute=%s", GF_AFR_DIRTY, "Reason=%s", strerror(errno),
+ NULL);
goto out;
}
if (mkdtemp(tmpmount) == NULL) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
- "failed to create a temporary mount directory.");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
+ NULL);
ret = -1;
goto out;
}
- ret = gf_asprintf(&pid, "%d", GF_CLIENT_PID_SELF_HEALD);
+ ret = gf_asprintf(&pid, "%d", GF_CLIENT_PID_ADD_REPLICA_MOUNT);
if (ret < 0)
goto out;
switch (op) {
case GD_OP_REPLACE_BRICK:
- if (dict_get_strn(THIS->options, "transport.socket.bind-address",
+ if (dict_get_strn(this->options, "transport.socket.bind-address",
SLEN("transport.socket.bind-address"),
&volfileserver) != 0)
volfileserver = "localhost";
- snprintf(logfile, sizeof(logfile),
- DEFAULT_LOG_FILE_DIRECTORY "/%s-replace-brick-mount.log",
- volinfo->volname);
+ snprintf(logfile, sizeof(logfile), "%s/%s-replace-brick-mount.log",
+ priv->logdir, volinfo->volname);
if (!*logfile) {
ret = -1;
goto out;
@@ -13898,9 +14134,8 @@ glusterd_handle_replicate_brick_ops(glusterd_volinfo_t *volinfo,
break;
case GD_OP_ADD_BRICK:
- snprintf(logfile, sizeof(logfile),
- DEFAULT_LOG_FILE_DIRECTORY "/%s-add-brick-mount.log",
- volinfo->volname);
+ snprintf(logfile, sizeof(logfile), "%s/%s-add-brick-mount.log",
+ priv->logdir, volinfo->volname);
if (!*logfile) {
ret = -1;
goto out;
@@ -13924,7 +14159,7 @@ glusterd_handle_replicate_brick_ops(glusterd_volinfo_t *volinfo,
ret = runner_run(&runner);
if (ret) {
- gf_log(THIS->name, GF_LOG_ERROR,
+ gf_log(this->name, GF_LOG_ERROR,
"mount command"
" failed.");
goto lock;
@@ -13934,19 +14169,18 @@ glusterd_handle_replicate_brick_ops(glusterd_volinfo_t *volinfo,
(op == GD_OP_REPLACE_BRICK) ? GF_AFR_REPLACE_BRICK : GF_AFR_ADD_BRICK,
brickinfo->brick_id, sizeof(brickinfo->brick_id), 0);
if (ret == -1)
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_SETXATTR_FAIL,
- "Failed to set extended"
- " attribute %s : %s",
- (op == GD_OP_REPLACE_BRICK) ? GF_AFR_REPLACE_BRICK
- : GF_AFR_ADD_BRICK,
- strerror(errno));
- gf_umount_lazy(THIS->name, tmpmount, 1);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL,
+ "Attribute=%s, Reason=%s",
+ (op == GD_OP_REPLACE_BRICK) ? GF_AFR_REPLACE_BRICK
+ : GF_AFR_ADD_BRICK,
+ strerror(errno), NULL);
+ gf_umount_lazy(this->name, tmpmount, 1);
lock:
synclock_lock(&priv->big_lock);
out:
if (pid)
GF_FREE(pid);
- gf_msg_debug("glusterd", 0, "Returning with ret");
+    gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -14100,17 +14334,6 @@ glusterd_brick_op_prerequisites(dict_t *dict, char **op, glusterd_op_t *gd_op,
goto out;
}
- ret = glusterd_disallow_op_for_tier(*volinfo, *gd_op, -1);
- if (ret) {
- snprintf(msg, sizeof(msg),
- "%sbrick commands are not "
- "supported on tiered volume %s",
- (*gd_op == GD_OP_REPLACE_BRICK) ? "replace-" : "reset-",
- *volname);
- *op_errstr = gf_strdup(msg);
- goto out;
- }
-
/* If geo-rep is configured, for this volume, it should be stopped. */
param.volinfo = *volinfo;
ret = glusterd_check_geo_rep_running(&param, op_errstr);
@@ -14166,6 +14389,8 @@ glusterd_brick_op_prerequisites(dict_t *dict, char **op, glusterd_op_t *gd_op,
"brick: %s does not exist in "
"volume: %s",
*src_brick, *volname);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
+ "Brick=%s, Volume=%s", *src_brick, *volname, NULL);
*op_errstr = gf_strdup(msg);
goto out;
}
@@ -14367,3 +14592,455 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo)
return _gf_true;
return _gf_false;
}
+
+int32_t
+glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
+ int32_t count)
+{
+ int ret = -1;
+ int32_t pid = -1;
+ int32_t brick_online = -1;
+ char key[64] = {0};
+ int keylen;
+ char *pidfile = NULL;
+ xlator_t *this = NULL;
+ char *uuid_str = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(THIS->name, this, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
+ ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
+ SLEN("Self-heal Daemon"));
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "brick%d.path", count);
+ uuid_str = gf_strdup(uuid_utoa(MY_UUID));
+ if (!uuid_str) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
+ goto out;
+ }
+ uuid_str = NULL;
+
+    /* shd doesn't have a port, but the CLI needs a port key with
+     * a zero value to parse.
+     */
+
+ keylen = snprintf(key, sizeof(key), "brick%d.port", count);
+ ret = dict_set_int32n(dict, key, keylen, 0);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
+ goto out;
+ }
+
+ pidfile = volinfo->shd.svc.proc.pidfile;
+
+ brick_online = gf_is_service_running(pidfile, &pid);
+
+ /* If shd is not running, then don't print the pid */
+ if (!brick_online)
+ pid = -1;
+ keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
+ ret = dict_set_int32n(dict, key, keylen, pid);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
+ key, NULL);
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "brick%d.status", count);
+ ret = dict_set_int32n(dict, key, keylen, brick_online);
+
+out:
+ if (uuid_str)
+ GF_FREE(uuid_str);
+ if (ret)
+ gf_msg(this ? this->name : "glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED,
+ "Returning %d. adding values to dict failed", ret);
+
+ return ret;
+}
+
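+/*
+ * Example of the keys produced above for count == 0 (values illustrative):
+ *
+ *   brick0.hostname = "Self-heal Daemon"
+ *   brick0.path     = <node uuid>
+ *   brick0.port     = 0
+ *   brick0.pid      = <shd pid, or -1 when not running>
+ *   brick0.status   = 1 (online) / 0 (offline)
+ */
+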
+static gf_ai_compare_t
+glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next)
+{
+ int ret = -1;
+ struct addrinfo *tmp1 = NULL;
+ struct addrinfo *tmp2 = NULL;
+    char firstip[NI_MAXHOST] = {
+        0,
+    };
+    char nextip[NI_MAXHOST] = {
+        0,
+    };
+
+ for (tmp1 = first; tmp1 != NULL; tmp1 = tmp1->ai_next) {
+ ret = getnameinfo(tmp1->ai_addr, tmp1->ai_addrlen, firstip, NI_MAXHOST,
+ NULL, 0, NI_NUMERICHOST);
+ if (ret)
+ return GF_AI_COMPARE_ERROR;
+ for (tmp2 = next; tmp2 != NULL; tmp2 = tmp2->ai_next) {
+ ret = getnameinfo(tmp2->ai_addr, tmp2->ai_addrlen, nextip,
+ NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
+ if (ret)
+ return GF_AI_COMPARE_ERROR;
+ if (!strcmp(firstip, nextip)) {
+ return GF_AI_COMPARE_MATCH;
+ }
+ }
+ }
+ return GF_AI_COMPARE_NO_MATCH;
+}
+
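+/*
+ * Usage sketch (hypothetical hostnames, error handling trimmed; example
+ * only, not part of the patch):
+ */
+#if 0
+struct addrinfo *a = NULL, *b = NULL;
+
+if (getaddrinfo("server1.example.com", NULL, NULL, &a) == 0) {
+    if (getaddrinfo("server2.example.com", NULL, NULL, &b) == 0) {
+        if (glusterd_compare_addrinfo(a, b) == GF_AI_COMPARE_MATCH)
+            gf_msg_debug("glusterd", 0, "names share an address");
+        freeaddrinfo(b);
+    }
+    freeaddrinfo(a);
+}
+#endif
+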
+/* Check for non optimal brick order for Replicate/Disperse :
+ * Checks if bricks belonging to a replicate or disperse
+ * volume are present on the same server
+ */
+int32_t
+glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
+ char **volname, char **brick_list,
+ int32_t *brick_count, int32_t sub_count)
+{
+ int ret = -1;
+ int i = 0;
+ int j = 0;
+ int k = 0;
+ xlator_t *this = NULL;
+ addrinfo_list_t *ai_list = NULL;
+ addrinfo_list_t *ai_list_tmp1 = NULL;
+ addrinfo_list_t *ai_list_tmp2 = NULL;
+ char *brick = NULL;
+ char *brick_list_dup = NULL;
+ char *brick_list_ptr = NULL;
+ char *tmpptr = NULL;
+ struct addrinfo *ai_info = NULL;
+ char brick_addr[128] = {
+ 0,
+ };
+ int addrlen = 0;
+
+ const char failed_string[2048] =
+ "Failed to perform brick order "
+ "check. Use 'force' at the end of the command"
+ " if you want to override this behavior. ";
+ const char found_string[2048] =
+ "Multiple bricks of a %s "
+ "volume are present on the same server. This "
+ "setup is not optimal. Bricks should be on "
+ "different nodes to have best fault tolerant "
+ "configuration. Use 'force' at the end of the "
+ "command if you want to override this "
+ "behavior. ";
+
+ this = THIS;
+
+ GF_ASSERT(this);
+
+    ai_list = MALLOC(sizeof(addrinfo_list_t));
+    if (ai_list == NULL) {
+        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+               "failed to allocate memory");
+        return -1;
+    }
+    ai_list->info = NULL;
+    CDS_INIT_LIST_HEAD(&ai_list->list);
+
+ if (!(*volname)) {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &(*volname));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+ }
+
+ if (!(*brick_list)) {
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &(*brick_list));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Bricks check : Could not "
+ "retrieve bricks list");
+ goto out;
+ }
+ }
+
+ if (!(*brick_count)) {
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &(*brick_count));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Bricks check : Could not "
+ "retrieve brick count");
+ goto out;
+ }
+ }
+
+ brick_list_dup = brick_list_ptr = gf_strdup(*brick_list);
+ /* Resolve hostnames and get addrinfo */
+ while (i < *brick_count) {
+ ++i;
+ brick = strtok_r(brick_list_dup, " \n", &tmpptr);
+ brick_list_dup = tmpptr;
+ if (brick == NULL)
+ goto check_failed;
+ tmpptr = strrchr(brick, ':');
+ if (tmpptr == NULL)
+ goto check_failed;
+        addrlen = strlen(brick) - strlen(tmpptr);
+        if (addrlen >= (int)sizeof(brick_addr))
+            goto check_failed; /* host part longer than the local buffer */
+        strncpy(brick_addr, brick, addrlen);
+        brick_addr[addrlen] = '\0';
+ ret = getaddrinfo(brick_addr, NULL, NULL, &ai_info);
+ if (ret != 0) {
+ ret = 0;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
+ "unable to resolve host name for addr %s", brick_addr);
+ goto out;
+ }
+ ai_list_tmp1 = MALLOC(sizeof(addrinfo_list_t));
+ if (ai_list_tmp1 == NULL) {
+ ret = 0;
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "failed to allocate "
+ "memory");
+ freeaddrinfo(ai_info);
+ goto out;
+ }
+ ai_list_tmp1->info = ai_info;
+ cds_list_add_tail(&ai_list_tmp1->list, &ai_list->list);
+ ai_list_tmp1 = NULL;
+ }
+
+ i = 0;
+ ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list);
+
+ if (*brick_count < sub_count) {
+ sub_count = *brick_count;
+ }
+
+ /* Check for bad brick order */
+ while (i < *brick_count) {
+ ++i;
+ ai_info = ai_list_tmp1->info;
+ ai_list_tmp1 = cds_list_entry(ai_list_tmp1->list.next, addrinfo_list_t,
+ list);
+ if (0 == i % sub_count) {
+ j = 0;
+ continue;
+ }
+ ai_list_tmp2 = ai_list_tmp1;
+ k = j;
+ while (k < sub_count - 1) {
+ ++k;
+ ret = glusterd_compare_addrinfo(ai_info, ai_list_tmp2->info);
+ if (GF_AI_COMPARE_ERROR == ret)
+ goto check_failed;
+ if (GF_AI_COMPARE_MATCH == ret)
+ goto found_bad_brick_order;
+ ai_list_tmp2 = cds_list_entry(ai_list_tmp2->list.next,
+ addrinfo_list_t, list);
+ }
+ ++j;
+ }
+ gf_msg_debug(this->name, 0, "Brick order okay");
+ ret = 0;
+ goto out;
+
+check_failed:
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER_CHECK_FAIL,
+ "Failed bad brick order check");
+    snprintf(err_str, sizeof(failed_string), "%s", failed_string);
+ ret = -1;
+ goto out;
+
+found_bad_brick_order:
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BAD_BRKORDER,
+ "Bad brick order found");
+ if (type == GF_CLUSTER_TYPE_DISPERSE) {
+ snprintf(err_str, sizeof(found_string), found_string, "disperse");
+ } else {
+ snprintf(err_str, sizeof(found_string), found_string, "replicate");
+ }
+
+ ret = -1;
+out:
+ ai_list_tmp2 = NULL;
+ GF_FREE(brick_list_ptr);
+ cds_list_for_each_entry(ai_list_tmp1, &ai_list->list, list)
+ {
+ if (ai_list_tmp1->info)
+ freeaddrinfo(ai_list_tmp1->info);
+ free(ai_list_tmp2);
+ ai_list_tmp2 = ai_list_tmp1;
+ }
+ free(ai_list);
+ free(ai_list_tmp2);
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
+}
+
+static gf_boolean_t
+search_peer_in_auth_list(char *peer_hostname, char *auth_allow_list)
+{
+ if (strstr(auth_allow_list, peer_hostname)) {
+ return _gf_true;
+ }
+
+ return _gf_false;
+}
+
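+/*
+ * Note (sketch): strstr() above is a plain substring match, so "node1" also
+ * matches an entry like "node10".  A stricter comma-delimited token match
+ * could look like this (hypothetical helper, example only):
+ */
+#if 0
+static gf_boolean_t
+match_auth_token(const char *host, const char *list)
+{
+    size_t len = strlen(host);
+    const char *p = list;
+
+    while ((p = strstr(p, host)) != NULL) {
+        gf_boolean_t at_start = (p == list || p[-1] == ',');
+        gf_boolean_t at_end = (p[len] == '\0' || p[len] == ',');
+
+        if (at_start && at_end)
+            return _gf_true; /* exact comma-delimited token */
+        p += len;
+    }
+    return _gf_false;
+}
+#endif
+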
+/* glusterd_add_peers_to_auth_list() adds peers to the auth.allow list,
+ * provided the list is not empty. It is called for add-brick and
+ * replace-brick operations so that the temporary mount does not fail. New
+ * volfiles are generated and clients are notified about them.
+ */
+void
+glusterd_add_peers_to_auth_list(char *volname)
+{
+ int ret = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int32_t len = 0;
+ char *auth_allow_list = NULL;
+ char *new_auth_allow_list = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_VALIDATE_OR_GOTO(this->name, volname, out);
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Unable to find volume: %s", volname);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(volinfo->dict, "auth.allow", &auth_allow_list);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
+ "auth allow list is not set");
+ goto out;
+ }
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+        len += strlen(peerinfo->hostname) + 1; /* +1 for the ',' separator */
+ }
+ len += strlen(auth_allow_list) + 1;
+
+    new_auth_allow_list = GF_CALLOC(1, len, gf_common_mt_char);
+    if (!new_auth_allow_list)
+        goto out;
+
+    new_auth_allow_list = strncat(new_auth_allow_list, auth_allow_list,
+                                  strlen(auth_allow_list));
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ ret = search_peer_in_auth_list(peerinfo->hostname, new_auth_allow_list);
+ if (!ret) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "peer %s not found in auth.allow list", peerinfo->hostname);
+ new_auth_allow_list = strcat(new_auth_allow_list, ",");
+ new_auth_allow_list = strncat(new_auth_allow_list,
+ peerinfo->hostname,
+ strlen(peerinfo->hostname));
+ }
+ }
+ if (strcmp(new_auth_allow_list, auth_allow_list) != 0) {
+ /* In case, new_auth_allow_list is not same as auth_allow_list,
+ * we need to update the volinfo->dict with new_auth_allow_list.
+ * we delete the auth_allow_list and replace it with
+ * new_auth_allow_list. for reverting the changes in post commit, we
+ * keep the copy of auth_allow_list as old_auth_allow_list in
+ * volinfo->dict.
+ */
+ dict_del_sizen(volinfo->dict, "auth.allow");
+ ret = dict_set_strn(volinfo->dict, "auth.allow", SLEN("auth.allow"),
+ new_auth_allow_list);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Unable to set new auth.allow list");
+ goto out;
+ }
+ ret = dict_set_strn(volinfo->dict, "old.auth.allow",
+ SLEN("old.auth.allow"), auth_allow_list);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Unable to set old auth.allow list");
+ goto out;
+ }
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "failed to create volfiles");
+ goto out;
+ }
+ }
+out:
+ GF_FREE(new_auth_allow_list);
+ return;
+}
+
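+/* glusterd_replace_old_auth_allow_list() restores auth.allow from the
+ * old.auth.allow copy saved by glusterd_add_peers_to_auth_list(); it is
+ * used in post commit to revert the temporary peer additions.
+ */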
+int
+glusterd_replace_old_auth_allow_list(char *volname)
+{
+ int ret = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ char *old_auth_allow_list = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_VALIDATE_OR_GOTO(this->name, volname, out);
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Unable to find volume: %s", volname);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(volinfo->dict, "old.auth.allow",
+ &old_auth_allow_list);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
+ "old auth allow list is not set, no need to replace the list");
+ ret = 0;
+ goto out;
+ }
+
+ dict_del_sizen(volinfo->dict, "auth.allow");
+ ret = dict_set_strn(volinfo->dict, "auth.allow", SLEN("auth.allow"),
+ old_auth_allow_list);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Unable to replace auth.allow list");
+ goto out;
+ }
+
+ dict_del_sizen(volinfo->dict, "old.auth.allow");
+
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "failed to create volfiles");
+ goto out;
+ }
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_STORE_FAIL,
+ "failed to store volinfo");
+ goto out;
+ }
+out:
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index cffbebda70c..bf6ac295e26 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -11,13 +11,13 @@
#define _GLUSTERD_UTILS_H
#include <pthread.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
-#include "glusterfs.h"
-#include "xlator.h"
-#include "logging.h"
-#include "call-stub.h"
-#include "byte-order.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/byte-order.h>
#include "glusterd.h"
#include "rpc-clnt.h"
#include "protocol-common.h"
@@ -32,38 +32,43 @@
brickid); \
} while (0)
+#define GLUSTERD_ASSIGN_BRICKID_TO_TA_BRICKINFO(ta_brickinfo, volinfo, \
+ brickid) \
+ do { \
+ sprintf(ta_brickinfo->brick_id, "%s-ta-%d", volinfo->volname, \
+ brickid); \
+ } while (0)
+
#define ALL_VOLUME_OPTION_CHECK(volname, get_opt, key, ret, op_errstr, label) \
do { \
gf_boolean_t _all = !strcmp("all", volname); \
+ gf_boolean_t _key_all = !strcmp(key, "all"); \
gf_boolean_t _is_valid_opt = _gf_false; \
int32_t i = 0; \
\
- if (!get_opt && (!strcmp(key, "all") || \
- !strcmp(key, GLUSTERD_MAX_OP_VERSION_KEY))) { \
+ if (!get_opt && \
+ (_key_all || !strcmp(key, GLUSTERD_MAX_OP_VERSION_KEY))) { \
ret = -1; \
*op_errstr = gf_strdup("Not a valid option to set"); \
goto out; \
} \
- \
- for (i = 0; valid_all_vol_opts[i].option; i++) { \
- if (!strcmp(key, "all") || \
- !strcmp(key, valid_all_vol_opts[i].option)) { \
- _is_valid_opt = _gf_true; \
- break; \
+ if (_key_all) { \
+ _is_valid_opt = _gf_true; \
+ } else { \
+ for (i = 0; valid_all_vol_opts[i].option; i++) { \
+ if (!strcmp(key, valid_all_vol_opts[i].option)) { \
+ _is_valid_opt = _gf_true; \
+ break; \
+ } \
} \
} \
- \
if (_all && !_is_valid_opt) { \
ret = -1; \
- *op_errstr = gf_strdup( \
- "Not a valid option for all " \
- "volumes"); \
+ *op_errstr = gf_strdup("Not a valid option for all volumes"); \
goto label; \
} else if (!_all && _is_valid_opt) { \
ret = -1; \
- *op_errstr = gf_strdup( \
- "Not a valid option for " \
- "single volume"); \
+ *op_errstr = gf_strdup("Not a valid option for single volume"); \
goto label; \
} \
} while (0)
@@ -145,9 +150,6 @@ glusterd_auth_set_password(glusterd_volinfo_t *volinfo, char *password);
void
glusterd_auth_cleanup(glusterd_volinfo_t *volinfo);
-gf_boolean_t
-glusterd_check_volume_exists(char *volname);
-
int32_t
glusterd_brickprocess_new(glusterd_brick_proc_t **brickprocess);
@@ -162,6 +164,9 @@ glusterd_brickinfo_new_from_brick(char *brick, glusterd_brickinfo_t **brickinfo,
int32_t
glusterd_volinfo_find(const char *volname, glusterd_volinfo_t **volinfo);
+gf_boolean_t
+glusterd_volume_exists(const char *volname);
+
int
glusterd_volinfo_find_by_volume_id(uuid_t volume_id,
glusterd_volinfo_t **volinfo);
@@ -181,7 +186,8 @@ int32_t
glusterd_resolve_brick(glusterd_brickinfo_t *brickinfo);
int
-glusterd_brick_process_add_brick(glusterd_brickinfo_t *brickinfo);
+glusterd_brick_process_add_brick(glusterd_brickinfo_t *brickinfo,
+ glusterd_brickinfo_t *parent_brickinfo);
int
glusterd_brick_process_remove_brick(glusterd_brickinfo_t *brickinfo,
@@ -226,7 +232,8 @@ glusterd_volume_brickinfo_get_by_brick(char *brick, glusterd_volinfo_t *volinfo,
gf_boolean_t construct_real_path);
int32_t
-glusterd_add_volumes_to_export_dict(dict_t **peer_data);
+glusterd_add_volumes_to_export_dict(dict_t *peer_data, char **buf,
+ u_int *length);
int32_t
glusterd_compare_friend_data(dict_t *peer_data, int32_t *status,
@@ -286,17 +293,6 @@ int
glusterd_brick_stop(glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo, gf_boolean_t del_brick);
-gf_boolean_t
-glusterd_is_tier_daemon_running(glusterd_volinfo_t *volinfo);
-
-int32_t
-glusterd_add_tierd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
- int32_t count);
-
-int
-glusterd_op_tier_status(dict_t *dict, char **op_errstr, dict_t *rsp_dict,
- glusterd_op_t op);
-
int
glusterd_is_defrag_on(glusterd_volinfo_t *volinfo);
@@ -407,8 +403,10 @@ glusterd_brick_terminate(glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo, char *options,
int option_cnt, char **op_errstr);
+#ifdef BUILD_GNFS
int
glusterd_nfs_statedump(char *options, int option_cnt, char **op_errstr);
+#endif
int
glusterd_client_statedump(char *volname, char *options, int option_cnt,
@@ -443,11 +441,6 @@ glusterd_get_trusted_client_filepath(char *filepath,
int
glusterd_restart_rebalance(glusterd_conf_t *conf);
-int32_t
-glusterd_create_sub_tier_volinfo(glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t **dup_volinfo,
- gf_boolean_t is_hot_tier,
- const char *new_name);
int
glusterd_restart_rebalance_for_volume(glusterd_volinfo_t *volinfo);
@@ -508,8 +501,6 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict);
int
glusterd_volume_rebalance_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict);
int
-glusterd_volume_tier_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict);
-int
glusterd_volume_heal_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict);
int
glusterd_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict);
@@ -618,12 +609,6 @@ gf_boolean_t
gd_should_i_start_rebalance(glusterd_volinfo_t *volinfo);
int
-glusterd_is_tierd_enabled(glusterd_volinfo_t *volinfo);
-
-int
-glusterd_is_tierd_supposed_to_be_enabled(glusterd_volinfo_t *volinfo);
-
-int
glusterd_is_volume_quota_enabled(glusterd_volinfo_t *volinfo);
int
@@ -779,6 +764,10 @@ glusterd_volume_get_type_str(glusterd_volinfo_t *volinfo, char **vol_type_str);
int
glusterd_volume_get_status_str(glusterd_volinfo_t *volinfo, char *status_str);
+void
+glusterd_brick_get_status_str(glusterd_brickinfo_t *brickinfo,
+ char *status_str);
+
int
glusterd_volume_get_transport_type_str(glusterd_volinfo_t *volinfo,
char *transport_type_str);
@@ -791,21 +780,10 @@ int
glusterd_volume_get_rebalance_status_str(glusterd_volinfo_t *volinfo,
char *rebal_status_str);
-int
-glusterd_volume_get_hot_tier_type_str(glusterd_volinfo_t *volinfo,
- char **hot_tier_type_str);
-
-int
-glusterd_volume_get_cold_tier_type_str(glusterd_volinfo_t *volinfo,
- char **cold_tier_type_str);
-
void
glusterd_list_add_order(struct cds_list_head *new, struct cds_list_head *head,
int (*compare)(struct cds_list_head *,
struct cds_list_head *));
-int
-glusterd_disallow_op_for_tier(glusterd_volinfo_t *volinfo, glusterd_op_t op,
- int cmd);
struct rpc_clnt *
glusterd_defrag_rpc_get(glusterd_defrag_info_t *defrag);
@@ -873,4 +851,15 @@ glusterd_get_volinfo_from_brick(char *brick, glusterd_volinfo_t **volinfo);
gf_boolean_t
glusterd_is_profile_on(glusterd_volinfo_t *volinfo);
+char *
+search_brick_path_from_proc(pid_t brick_pid, char *brickpath);
+
+int32_t
+glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
+ int32_t count);
+int32_t
+glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
+ char **volname, char **bricks, int32_t *brick_count,
+ int32_t sub_count);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 9c00f6b976a..8d6fb5e0fac 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -13,29 +13,30 @@
#include <dlfcn.h>
#include <utime.h>
-#include "xlator.h"
+#include <glusterfs/xlator.h>
#include "glusterd.h"
-#include "defaults.h"
-#include "syscall.h"
-#include "logging.h"
-#include "dict.h"
-#include "graph-utils.h"
-#include "common-utils.h"
+#include <glusterfs/defaults.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/graph-utils.h>
+#include <glusterfs/common-utils.h>
#include "glusterd-store.h"
#include "glusterd-hooks.h"
-#include "trie.h"
+#include <glusterfs/trie.h>
#include "glusterd-mem-types.h"
#include "cli1-xdr.h"
#include "glusterd-volgen.h"
#include "glusterd-geo-rep.h"
#include "glusterd-utils.h"
#include "glusterd-messages.h"
-#include "run.h"
-#include "options.h"
+#include <glusterfs/run.h>
+#include <glusterfs/options.h>
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-snapd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-gfproxyd-svc-helper.h"
struct gd_validate_reconf_opts {
@@ -49,9 +50,9 @@ extern struct volopt_map_entry glusterd_volopt_map[];
do { \
char *_value = NULL; \
\
- if (dict_get_str(set_dict, CLI_OPT, &_value) == 0) { \
- if (xlator_set_option(XL, "transport.socket." XLATOR_OPT, \
- _value) != 0) { \
+ if (dict_get_str_sizen(set_dict, CLI_OPT, &_value) == 0) { \
+ if (xlator_set_fixed_option(XL, "transport.socket." XLATOR_OPT, \
+ _value) != 0) { \
gf_msg("glusterd", GF_LOG_WARNING, errno, \
GD_MSG_XLATOR_SET_OPT_FAIL, \
"failed to set " XLATOR_OPT); \
@@ -89,6 +90,8 @@ xlator_instantiate_va(const char *type, const char *format, va_list arg)
xlator_t *xl = NULL;
char *volname = NULL;
int ret = 0;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = gf_vasprintf(&volname, format, arg);
if (ret < 0) {
@@ -98,14 +101,21 @@ xlator_instantiate_va(const char *type, const char *format, va_list arg)
}
xl = GF_CALLOC(1, sizeof(*xl), gf_common_mt_xlator_t);
- if (!xl)
+ if (!xl) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto error;
+ }
ret = xlator_set_type_virtual(xl, type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_XLATOR_SET_OPT_FAIL,
+ NULL);
goto error;
+ }
xl->options = dict_new();
- if (!xl->options)
+ if (!xl->options) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto error;
+ }
xl->name = volname;
CDS_INIT_LIST_HEAD(&xl->volume_options);
@@ -114,8 +124,8 @@ xlator_instantiate_va(const char *type, const char *format, va_list arg)
return xl;
error:
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_XLATOR_CREATE_FAIL,
- "creating xlator of type %s failed", type);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_XLATOR_CREATE_FAIL, "Type=%s",
+ type, NULL);
GF_FREE(volname);
if (xl)
xlator_destroy(xl);
@@ -123,19 +133,6 @@ error:
return NULL;
}
-static xlator_t *
-xlator_instantiate(const char *type, const char *format, ...)
-{
- va_list arg;
- xlator_t *xl;
-
- va_start(arg, format);
- xl = xlator_instantiate_va(type, format, arg);
- va_end(arg);
-
- return xl;
-}
-
static int
volgen_xlator_link(xlator_t *pxl, xlator_t *cxl)
{
@@ -225,15 +222,17 @@ volgen_graph_add(volgen_graph_t *graph, char *type, char *volname)
return volgen_graph_add_as(graph, type, "%s-%s", volname, shorttype);
}
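+/* SLEN() is sizeof-based, so this shorthand is only correct when 'key' is a
+ * string literal; computed keys must call xlator_set_option() directly. */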
+#define xlator_set_fixed_option(xl, key, value) \
+ xlator_set_option(xl, key, SLEN(key), value)
+
/* XXX Seems there is no such generic routine?
* Maybe should put to xlator.c ??
*/
static int
-xlator_set_option(xlator_t *xl, char *key, char *value)
+xlator_set_option(xlator_t *xl, char *key, const int keylen, char *value)
{
- char *dval = NULL;
+ char *dval = gf_strdup(value);
- dval = gf_strdup(value);
if (!dval) {
gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
"failed to set xlator opt: %s[%s] = %s", xl->name, key, value);
@@ -241,14 +240,17 @@ xlator_set_option(xlator_t *xl, char *key, char *value)
return -1;
}
- return dict_set_dynstr(xl->options, key, dval);
+ return dict_set_dynstrn(xl->options, key, keylen, dval);
}
+#define xlator_get_fixed_option(xl, key, value) \
+ xlator_get_option(xl, key, SLEN(key), value)
+
static int
-xlator_get_option(xlator_t *xl, char *key, char **value)
+xlator_get_option(xlator_t *xl, char *key, const int keylen, char **value)
{
GF_ASSERT(xl);
- return dict_get_str(xl->options, key, value);
+ return dict_get_strn(xl->options, key, keylen, value);
}
static xlator_t *
@@ -317,7 +319,7 @@ volopt_trie_cbk(char *word, void *param)
}
static int
-process_nodevec(struct trienodevec *nodevec, char **hint)
+process_nodevec(struct trienodevec *nodevec, char **outputhint, char *inputhint)
{
int ret = 0;
char *hint1 = NULL;
@@ -326,14 +328,14 @@ process_nodevec(struct trienodevec *nodevec, char **hint)
trienode_t **nodes = nodevec->nodes;
if (!nodes[0]) {
- *hint = NULL;
+ *outputhint = NULL;
return 0;
}
#if 0
/* Limit as in git */
if (trienode_get_dist (nodes[0]) >= 6) {
- *hint = NULL;
+ *outputhint = NULL;
return 0;
}
#endif
@@ -342,23 +344,30 @@ process_nodevec(struct trienodevec *nodevec, char **hint)
return -1;
if (nodevec->cnt < 2 || !nodes[1]) {
- *hint = hint1;
+ *outputhint = hint1;
return 0;
}
- if (trienode_get_word(nodes[1], &hint2))
+ if (trienode_get_word(nodes[1], &hint2)) {
+ GF_FREE(hint1);
return -1;
+ }
- if (*hint)
- hintinfx = *hint;
- ret = gf_asprintf(hint, "%s or %s%s", hint1, hintinfx, hint2);
+ if (inputhint)
+ hintinfx = inputhint;
+ ret = gf_asprintf(outputhint, "%s or %s%s", hint1, hintinfx, hint2);
if (ret > 0)
ret = 0;
+ if (hint1)
+ GF_FREE(hint1);
+ if (hint2)
+ GF_FREE(hint2);
return ret;
}
static int
-volopt_trie_section(int lvl, char **patt, char *word, char **hint, int hints)
+volopt_trie_section(int lvl, char **patt, char *word, char **outputhint,
+ char *inputhint, int hints)
{
trienode_t *nodes[] = {NULL, NULL};
struct trienodevec nodevec = {nodes, 2};
@@ -379,7 +388,7 @@ volopt_trie_section(int lvl, char **patt, char *word, char **hint, int hints)
nodevec.cnt = hints;
ret = trie_measure_vec(trie, word, &nodevec);
if (!ret && nodevec.nodes[0])
- ret = process_nodevec(&nodevec, hint);
+ ret = process_nodevec(&nodevec, outputhint, inputhint);
trie_destroy(trie);
@@ -391,6 +400,7 @@ volopt_trie(char *key, char **hint)
{
char *patt[] = {NULL};
char *fullhint = NULL;
+ char *inputhint = NULL;
char *dot = NULL;
char *dom = NULL;
int len = 0;
@@ -400,7 +410,7 @@ volopt_trie(char *key, char **hint)
dot = strchr(key, '.');
if (!dot)
- return volopt_trie_section(1, patt, key, hint, 2);
+ return volopt_trie_section(1, patt, key, hint, inputhint, 2);
len = dot - key;
dom = gf_strdup(key);
@@ -408,7 +418,7 @@ volopt_trie(char *key, char **hint)
return -1;
dom[len] = '\0';
- ret = volopt_trie_section(0, NULL, dom, patt, 1);
+ ret = volopt_trie_section(0, NULL, dom, patt, inputhint, 1);
GF_FREE(dom);
if (ret) {
patt[0] = NULL;
@@ -417,8 +427,8 @@ volopt_trie(char *key, char **hint)
if (!patt[0])
goto out;
- *hint = "...";
- ret = volopt_trie_section(1, patt, dot + 1, hint, 2);
+ inputhint = "...";
+ ret = volopt_trie_section(1, patt, dot + 1, hint, inputhint, 2);
if (ret)
goto out;
if (*hint) {
@@ -458,7 +468,7 @@ struct opthandler_data {
void *param;
};
-static int
+static void
process_option(char *key, data_t *value, void *param)
{
struct opthandler_data *odt = param;
@@ -467,7 +477,7 @@ process_option(char *key, data_t *value, void *param)
};
if (odt->rv)
- return 0;
+ return;
odt->found = _gf_true;
vme.key = key;
@@ -488,7 +498,7 @@ process_option(char *key, data_t *value, void *param)
vme.value = value->data;
odt->rv = odt->handler(odt->graph, &vme, odt->param);
- return 0;
+ return;
}
static int
@@ -500,6 +510,7 @@ volgen_graph_set_options_generic(volgen_graph_t *graph, dict_t *dict,
0,
};
data_t *data = NULL;
+ int keylen;
odt.graph = graph;
odt.handler = handler;
@@ -507,16 +518,17 @@ volgen_graph_set_options_generic(volgen_graph_t *graph, dict_t *dict,
(void)data;
for (vme = glusterd_volopt_map; vme->key; vme++) {
- odt.vme = vme;
- odt.found = _gf_false;
- odt.data_t_fake = _gf_false;
-
- data = dict_get(dict, vme->key);
- if (!strcmp(vme->key, "performance.client-io-threads") &&
+ keylen = strlen(vme->key);
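+        /* Compare key lengths first so the strcmp only runs for keys that
+         * can possibly match. */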
+ if (keylen == SLEN("performance.client-io-threads") &&
+ !strcmp(vme->key, "performance.client-io-threads") &&
dict_get_str_boolean(dict, "skip-CLIOT", _gf_false) == _gf_true) {
continue;
}
+ odt.vme = vme;
+ odt.found = _gf_false;
+ odt.data_t_fake = _gf_false;
+ data = dict_getn(dict, vme->key, keylen);
if (data)
process_option(vme->key, data, &odt);
if (odt.rv)
@@ -551,8 +563,15 @@ no_filter_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
for (trav = first_of(graph); trav; trav = trav->next) {
if (strcmp(trav->type, vme->voltype) != 0)
continue;
-
- ret = xlator_set_option(trav, vme->option, vme->value);
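+        /* The volopt map carries this entry as "ta-remote-port"; apply it
+         * as the real "remote-port" option, but only on the thin-arbiter
+         * client xlators (those whose name contains "-ta-"). */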
+        if (strcmp(vme->option, "ta-remote-port") == 0) {
+            if (strstr(trav->name, "-ta-") != NULL) {
+                ret = xlator_set_fixed_option(trav, "remote-port",
+                                              vme->value);
+ }
+ continue;
+ }
+ ret = xlator_set_option(trav, vme->option, strlen(vme->option),
+ vme->value);
if (ret)
break;
}
@@ -855,6 +874,8 @@ _xl_link_children(xlator_t *parent, xlator_t *children, size_t child_count)
xlator_t *trav = NULL;
size_t seek = 0;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
if (child_count == 0)
goto out;
@@ -863,9 +884,12 @@ _xl_link_children(xlator_t *parent, xlator_t *children, size_t child_count)
;
for (; child_count--; trav = trav->prev) {
ret = volgen_xlator_link(parent, trav);
- gf_msg_debug(THIS->name, 0, "%s:%s", parent->name, trav->name);
- if (ret)
+ gf_msg_debug(this->name, 0, "%s:%s", parent->name, trav->name);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_XLATOR_LINK_FAIL,
+ NULL);
goto out;
+ }
}
ret = 0;
out:
@@ -923,8 +947,10 @@ volgen_apply_filters(char *orig_volfile)
entry = sys_readdir(filterdir, scratch);
- if (!entry || errno != 0)
+ if (!entry || errno != 0) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_READ_ERROR, NULL);
break;
+ }
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
continue;
@@ -1110,6 +1136,7 @@ get_vol_transport_type(glusterd_volinfo_t *volinfo, char *tt)
transport_type_to_str(volinfo->transport_type, tt);
}
+#ifdef BUILD_GNFS
/* If no value has specified for tcp,rdma volume from cli
* use tcp as default value.Otherwise, use transport type
* mentioned in volinfo
@@ -1125,6 +1152,7 @@ get_vol_nfs_transport_type(glusterd_volinfo_t *volinfo, char *tt)
} else
transport_type_to_str(volinfo->transport_type, tt);
}
+#endif
/* gets the volinfo, dict, a character array for filling in
* the transport type and a boolean option which says whether
@@ -1138,21 +1166,19 @@ get_transport_type(glusterd_volinfo_t *volinfo, dict_t *set_dict, char *transt,
{
int ret = -1;
char *tt = NULL;
- char *key = NULL;
- typedef void (*transport_type)(glusterd_volinfo_t * volinfo, char *tt);
- transport_type get_transport;
if (is_nfs == _gf_false) {
- key = "client-transport-type";
- get_transport = get_vol_transport_type;
+ ret = dict_get_str_sizen(set_dict, "client-transport-type", &tt);
+ if (ret)
+ get_vol_transport_type(volinfo, transt);
} else {
- key = "nfs.transport-type";
- get_transport = get_vol_nfs_transport_type;
+#ifdef BUILD_GNFS
+ ret = dict_get_str_sizen(set_dict, "nfs.transport-type", &tt);
+ if (ret)
+ get_vol_nfs_transport_type(volinfo, transt);
+#endif
}
- ret = dict_get_str(set_dict, key, &tt);
- if (ret)
- get_transport(volinfo, transt);
if (!ret)
strcpy(transt, tt);
}
@@ -1175,7 +1201,7 @@ server_auth_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
/* from 'auth.allow' -> 'allow', and 'auth.reject' -> 'reject' */
key = strchr(vme->key, '.') + 1;
- ret = xlator_get_option(xl, "auth-path", &auth_path);
+ ret = xlator_get_fixed_option(xl, "auth-path", &auth_path);
if (ret) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DEFAULT_OPT_INFO,
"Failed to get auth-path from server graph");
@@ -1183,7 +1209,7 @@ server_auth_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
}
ret = gf_asprintf(&aa, "auth.addr.%s.%s", auth_path, key);
if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
+ ret = xlator_set_option(xl, aa, ret, vme->value);
GF_FREE(aa);
}
if (ret)
@@ -1213,6 +1239,26 @@ loglevel_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
}
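+/* Re-applies volopt entries whose option is "!client-threads" or
+ * "!brick-threads" as the plain "threads" option, for entries whose key
+ * matches the given role. */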
static int
+threads_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
+ void *param)
+{
+ char *role = param;
+ struct volopt_map_entry vme2 = {
+ 0,
+ };
+
+ if ((strcmp(vme->option, "!client-threads") != 0 &&
+ strcmp(vme->option, "!brick-threads") != 0) ||
+ !strstr(vme->key, role))
+ return 0;
+
+ memcpy(&vme2, vme, sizeof(vme2));
+ vme2.option = "threads";
+
+ return basic_option_handler(graph, &vme2, NULL);
+}
+
+static int
server_check_changelog_off(volgen_graph_t *graph, struct volopt_map_entry *vme,
glusterd_volinfo_t *volinfo)
{
@@ -1442,14 +1488,22 @@ volgen_graph_set_xl_options(volgen_graph_t *graph, dict_t *dict)
}; /* for posix* -> *posix* */
char *loglevel = NULL;
xlator_t *trav = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- ret = dict_get_str(dict, "xlator", &xlator);
- if (ret)
+ ret = dict_get_str_sizen(dict, "xlator", &xlator);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=xlator", NULL);
goto out;
+ }
- ret = dict_get_str(dict, "loglevel", &loglevel);
- if (ret)
+ ret = dict_get_str_sizen(dict, "loglevel", &loglevel);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=loglevel", NULL);
goto out;
+ }
snprintf(xlator_match, 1024, "*%s", xlator);
@@ -1457,7 +1511,7 @@ volgen_graph_set_xl_options(volgen_graph_t *graph, dict_t *dict)
if (fnmatch(xlator_match, trav->type, FNM_NOESCAPE) == 0) {
gf_msg_debug("glusterd", 0, "Setting log level for xlator: %s",
trav->type);
- ret = xlator_set_option(trav, "log-level", loglevel);
+ ret = xlator_set_fixed_option(trav, "log-level", loglevel);
if (ret)
break;
}
@@ -1504,6 +1558,9 @@ server_spec_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
if (!ret)
ret = log_localtime_logging_option_handler(graph, vme, "brick");
+ if (!ret)
+ ret = threads_option_handler(graph, vme, "brick");
+
return ret;
}
@@ -1539,29 +1596,37 @@ gfproxy_server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
char key[1024] = {
0,
};
+ int keylen;
/*char port_str[7] = {0, };*/
int ret = 0;
char *username = NULL;
char *password = NULL;
/*int rclusters = 0;*/
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
/* We are a trusted client */
ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED);
- if (ret != 0)
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=trusted-client", NULL);
goto out;
+ }
- ret = dict_set_int32n(set_dict, "gfproxy-server", SLEN("gfproxy-server"),
- 1);
- if (ret != 0)
+ ret = dict_set_int32_sizen(set_dict, "gfproxy-server", 1);
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=gfproxy-server", NULL);
goto out;
+ }
/* Build the client section of the graph first */
build_client_graph(graph, volinfo, set_dict);
/* Clear this setting so that future users of set_dict do not end up
* thinking they are a gfproxy server */
- dict_deln(set_dict, "gfproxy-server", SLEN("gfproxy-server"));
- dict_deln(set_dict, "trusted-client", SLEN("trusted-client"));
+ dict_del_sizen(set_dict, "gfproxy-server");
+ dict_del_sizen(set_dict, "trusted-client");
/* Then add the server to it */
get_vol_transport_type(volinfo, transt);
@@ -1569,7 +1634,7 @@ gfproxy_server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (!xl)
goto out;
- ret = xlator_set_option(xl, "transport-type", transt);
+ ret = xlator_set_fixed_option(xl, "transport-type", transt);
if (ret != 0)
goto out;
@@ -1577,22 +1642,22 @@ gfproxy_server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
username = glusterd_auth_get_username(volinfo);
password = glusterd_auth_get_password(volinfo);
if (username) {
- snprintf(key, sizeof(key), "auth.login.gfproxyd-%s.allow",
- volinfo->volname);
- ret = xlator_set_option(xl, key, username);
+ keylen = snprintf(key, sizeof(key), "auth.login.gfproxyd-%s.allow",
+ volinfo->volname);
+ ret = xlator_set_option(xl, key, keylen, username);
if (ret)
return -1;
}
if (password) {
- snprintf(key, sizeof(key), "auth.login.%s.password", username);
- ret = xlator_set_option(xl, key, password);
+ keylen = snprintf(key, sizeof(key), "auth.login.%s.password", username);
+ ret = xlator_set_option(xl, key, keylen, password);
if (ret != 0)
goto out;
}
snprintf(key, sizeof(key), "gfproxyd-%s", volinfo->volname);
- ret = xlator_set_option(xl, "auth-path", key);
+ ret = xlator_set_fixed_option(xl, "auth-path", key);
out:
return ret;
@@ -1611,9 +1676,18 @@ brick_graph_add_posix(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
gf_boolean_t pgfid_feat = _gf_false;
char *value = NULL;
xlator_t *xl = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
- if (!graph || !volinfo || !set_dict || !brickinfo)
+ if (!graph || !volinfo || !set_dict || !brickinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO("glusterd", priv, out);
ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_QUOTA, &value);
if (value) {
@@ -1642,23 +1716,30 @@ brick_graph_add_posix(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (!xl)
goto out;
- ret = xlator_set_option(xl, "directory", brickinfo->path);
+ ret = xlator_set_fixed_option(xl, "directory", brickinfo->path);
if (ret)
goto out;
- ret = xlator_set_option(xl, "volume-id", uuid_utoa(volinfo->volume_id));
+ ret = xlator_set_fixed_option(xl, "volume-id",
+ uuid_utoa(volinfo->volume_id));
if (ret)
goto out;
if (quota_enabled || pgfid_feat || trash_enabled) {
- ret = xlator_set_option(xl, "update-link-count-parent", "on");
+ ret = xlator_set_fixed_option(xl, "update-link-count-parent", "on");
if (ret) {
goto out;
}
}
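+    /* Gate fips-mode-rchecksum on the cluster op-version so that bricks
+     * running older releases are not handed an option they cannot parse. */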
+ if (priv->op_version >= GD_OP_VERSION_7_0) {
+ ret = xlator_set_fixed_option(xl, "fips-mode-rchecksum", "on");
+ if (ret) {
+ goto out;
+ }
+ }
snprintf(tmpstr, sizeof(tmpstr), "%d", brickinfo->fs_share_count);
- ret = xlator_set_option(xl, "shared-brick-count", tmpstr);
+ ret = xlator_set_fixed_option(xl, "shared-brick-count", tmpstr);
out:
return ret;
}
@@ -1669,9 +1750,13 @@ brick_graph_add_selinux(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo)
+ if (!graph || !volinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/selinux", volinfo->volname);
if (!xl)
@@ -1692,13 +1777,13 @@ brick_graph_add_trash(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
xl = volgen_graph_add(graph, "features/trash", volinfo->volname);
if (!xl)
goto out;
- ret = xlator_set_option(xl, "trash-dir", ".trashcan");
+ ret = xlator_set_fixed_option(xl, "trash-dir", ".trashcan");
if (ret)
goto out;
- ret = xlator_set_option(xl, "brick-path", brickinfo->path);
+ ret = xlator_set_fixed_option(xl, "brick-path", brickinfo->path);
if (ret)
goto out;
- ret = xlator_set_option(xl, "trash-internal-op", "off");
+ ret = xlator_set_fixed_option(xl, "trash-internal-op", "off");
if (ret)
goto out;
out:
@@ -1706,28 +1791,6 @@ out:
}
static int
-brick_graph_add_decompounder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
- dict_t *set_dict, glusterd_brickinfo_t *brickinfo)
-{
- xlator_t *xl = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- int ret = -1;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
- conf = this->private;
- GF_VALIDATE_OR_GOTO(this->name, conf, out);
-
- xl = volgen_graph_add_as(graph, "performance/decompounder",
- brickinfo->path);
- if (xl)
- ret = 0;
-out:
- return ret;
-}
-
-static int
brick_graph_add_arbiter(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
dict_t *set_dict, glusterd_brickinfo_t *brickinfo)
{
@@ -1752,41 +1815,6 @@ out:
}
static int
-brick_graph_add_bd(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
- dict_t *set_dict, glusterd_brickinfo_t *brickinfo)
-{
- int ret = -1;
-
- if (!graph || !volinfo || !set_dict || !brickinfo)
- goto out;
-
- ret = 0;
-
-#ifdef HAVE_BD_XLATOR
- if (*brickinfo->vg != '\0') {
- xlator_t *xl = NULL;
- /* Now add BD v2 xlator if volume is BD type */
- xl = volgen_graph_add(graph, "storage/bd", volinfo->volname);
- if (!xl) {
- ret = -1;
- goto out;
- }
-
- ret = xlator_set_option(xl, "device", "vg");
- if (ret)
- goto out;
-
- ret = xlator_set_option(xl, "export", brickinfo->vg);
- if (ret)
- goto out;
- }
-#endif
-
-out:
- return ret;
-}
-
-static int
brick_graph_add_bitrot_stub(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
dict_t *set_dict, glusterd_brickinfo_t *brickinfo)
{
@@ -1795,14 +1823,16 @@ brick_graph_add_bitrot_stub(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
char *value = NULL;
xlator_t *this = THIS;
- if (!graph || !volinfo || !set_dict || !brickinfo)
+ if (!graph || !volinfo || !set_dict || !brickinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/bitrot-stub", volinfo->volname);
if (!xl)
goto out;
- ret = xlator_set_option(xl, "export", brickinfo->path);
+ ret = xlator_set_fixed_option(xl, "export", brickinfo->path);
if (ret) {
gf_log(this->name, GF_LOG_WARNING,
"failed to set the export "
@@ -1811,7 +1841,7 @@ brick_graph_add_bitrot_stub(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
}
ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_BITROT, &value);
- ret = xlator_set_option(xl, "bitrot", value);
+ ret = xlator_set_fixed_option(xl, "bitrot", value);
if (ret)
gf_log(this->name, GF_LOG_WARNING,
"failed to set bitrot "
@@ -1831,102 +1861,48 @@ brick_graph_add_changelog(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
};
int ret = -1;
int32_t len = 0;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict || !brickinfo)
+ if (!graph || !volinfo || !set_dict || !brickinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/changelog", volinfo->volname);
if (!xl)
goto out;
- ret = xlator_set_option(xl, "changelog-brick", brickinfo->path);
+ ret = xlator_set_fixed_option(xl, "changelog-brick", brickinfo->path);
if (ret)
goto out;
len = snprintf(changelog_basepath, sizeof(changelog_basepath), "%s/%s",
brickinfo->path, ".glusterfs/changelogs");
if ((len < 0) || (len >= sizeof(changelog_basepath))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
- ret = xlator_set_option(xl, "changelog-dir", changelog_basepath);
- if (ret)
- goto out;
-out:
- return ret;
-}
-
-#if USE_GFDB /* only add changetimerecorder when GFDB is enabled */
-static int
-brick_graph_add_changetimerecorder(volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo,
- dict_t *set_dict,
- glusterd_brickinfo_t *brickinfo)
-{
- xlator_t *xl = NULL;
- int ret = -1;
- char *brickname = NULL;
- char *path = NULL;
- char index_basepath[PATH_MAX] = {0};
- char *hotbrick = NULL;
-
- if (!graph || !volinfo || !set_dict || !brickinfo)
- goto out;
-
- path = brickinfo->path;
-
- xl = volgen_graph_add(graph, "features/changetimerecorder",
- volinfo->volname);
- if (!xl)
- goto out;
-
- ret = xlator_set_option(xl, "db-type", "sqlite3");
- if (ret)
- goto out;
-
- if (!set_dict || dict_get_str(set_dict, "hot-brick", &hotbrick))
- hotbrick = "off";
-
- ret = xlator_set_option(xl, "hot-brick", hotbrick);
- if (ret)
- goto out;
-
- brickname = strrchr(path, '/') + 1;
- snprintf(index_basepath, sizeof(index_basepath), "%s.db", brickname);
- ret = xlator_set_option(xl, "db-name", index_basepath);
- if (ret)
- goto out;
-
- snprintf(index_basepath, sizeof(index_basepath), "%s/%s", path,
- ".glusterfs/");
- ret = xlator_set_option(xl, "db-path", index_basepath);
- if (ret)
- goto out;
-
- ret = xlator_set_option(xl, "record-exit", "off");
- if (ret)
- goto out;
-
- ret = xlator_set_option(xl, "ctr_link_consistency", "off");
+ ret = xlator_set_fixed_option(xl, "changelog-dir", changelog_basepath);
if (ret)
goto out;
- ret = xlator_set_option(xl, "ctr_lookupheal_link_timeout", "300");
- if (ret)
- goto out;
-
- ret = xlator_set_option(xl, "ctr_lookupheal_inode_timeout", "300");
- if (ret)
- goto out;
-
- ret = xlator_set_option(xl, "record-entry", "on");
- if (ret)
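+    /* Changelog notifications are consumed by the bitrot daemon, so keep
+     * changelog-notification in step with the volume's bitrot setting. */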
+ ret = glusterd_is_bitrot_enabled(volinfo);
+ if (ret == -1) {
goto out;
-
+ } else if (ret) {
+ ret = xlator_set_fixed_option(xl, "changelog-notification", "on");
+ if (ret)
+ goto out;
+ } else {
+ ret = xlator_set_fixed_option(xl, "changelog-notification", "off");
+ if (ret)
+ goto out;
+ }
out:
return ret;
}
-#endif /* USE_GFDB */
static int
brick_graph_add_acl(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
@@ -1934,14 +1910,31 @@ brick_graph_add_acl(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
- xl = volgen_graph_add(graph, "features/access-control", volinfo->volname);
- if (!xl)
+ ret = dict_get_str_boolean(set_dict, "features.acl", 1);
+ if (!ret) {
+        /* Skip adding this xlator when the option is disabled
+         * (it defaults to 'true') */
goto out;
+ } else if (ret < 0) {
+        /* Don't treat this as an error: the option is not critical and
+           is implemented mainly as a debugging aid */
+        gf_log(this->name, GF_LOG_INFO,
+               "failed to get 'features.acl' flag from dict");
+ }
+ xl = volgen_graph_add(graph, "features/access-control", volinfo->volname);
+ if (!xl) {
+ ret = -1;
+ goto out;
+ }
ret = 0;
out:
return ret;
@@ -1953,9 +1946,13 @@ brick_graph_add_locks(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/locks", volinfo->volname);
if (!xl)
@@ -1966,37 +1963,19 @@ out:
return ret;
}
-/* Add this before (above) io-threads because it's not thread-safe yet. */
-static int
-brick_graph_add_fdl(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
- dict_t *set_dict, glusterd_brickinfo_t *brickinfo)
-{
- xlator_t *xl = NULL;
- int ret = -1;
-
- if (!graph || !volinfo || !set_dict)
- goto out;
-
- if (dict_get_str_boolean(set_dict, "features.fdl", 0)) {
- xl = volgen_graph_add(graph, "experimental/fdl", volinfo->volname);
- if (!xl)
- goto out;
- }
- ret = 0;
-
-out:
- return ret;
-}
-
static int
brick_graph_add_iot(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
dict_t *set_dict, glusterd_brickinfo_t *brickinfo)
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "performance/io-threads", volinfo->volname);
if (!xl)
@@ -2012,9 +1991,12 @@ brick_graph_add_barrier(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
- if (!graph || !volinfo)
+ if (!graph || !volinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/barrier", volinfo->volname);
if (!xl)
@@ -2031,9 +2013,13 @@ brick_graph_add_sdfs(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo)
+ if (!graph || !volinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
if (!dict_get_str_boolean(set_dict, "features.sdfs", 0)) {
/* update only if option is enabled */
@@ -2044,6 +2030,11 @@ brick_graph_add_sdfs(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
xl = volgen_graph_add(graph, "features/sdfs", volinfo->volname);
if (!xl)
goto out;
+ /* If we don't set this option here, the translator by default marks
+ it 'pass-through' */
+ ret = xlator_set_fixed_option(xl, "pass-through", "false");
+ if (ret)
+ goto out;
ret = 0;
out:
@@ -2056,9 +2047,13 @@ brick_graph_add_namespace(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
ret = dict_get_str_boolean(set_dict, "features.tag-namespaces", 0);
if (ret == -1)
@@ -2088,13 +2083,13 @@ add_one_peer(volgen_graph_t *graph, glusterd_brickinfo_t *peer, char *volname,
}
/* TBD: figure out where to get the proper transport list */
- if (xlator_set_option(kid, "transport-type", "socket")) {
+ if (xlator_set_fixed_option(kid, "transport-type", "socket")) {
return NULL;
}
- if (xlator_set_option(kid, "remote-host", peer->hostname)) {
+ if (xlator_set_fixed_option(kid, "remote-host", peer->hostname)) {
return NULL;
}
- if (xlator_set_option(kid, "remote-subvolume", peer->path)) {
+ if (xlator_set_fixed_option(kid, "remote-subvolume", peer->path)) {
return NULL;
}
/* TBD: deal with RDMA, SSL */
@@ -2102,75 +2097,6 @@ add_one_peer(volgen_graph_t *graph, glusterd_brickinfo_t *peer, char *volname,
return kid;
}
-int
-add_jbr_stuff(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
- glusterd_brickinfo_t *brickinfo)
-{
- xlator_t *me;
- glusterd_brickinfo_t *peer;
- glusterd_brickinfo_t *prev_peer;
- char *leader_opt;
- uint16_t index = 0;
- xlator_t *kid;
-
- /* Create the JBR xlator, but defer linkage for now. */
- me = xlator_instantiate("experimental/jbr", "%s-jbr", volinfo->volname);
- if (!me || volgen_xlator_link(me, first_of(graph))) {
- return -1;
- }
-
- /* Figure out if we should start as leader, mark appropriately. */
- peer = list_prev(brickinfo, &volinfo->bricks, glusterd_brickinfo_t,
- brick_list);
- leader_opt = (!peer || (peer->group != brickinfo->group)) ? "yes" : "no";
- if (xlator_set_option(me, "leader", leader_opt)) {
- /*
- * TBD: fix memory leak ("me" and associated dictionary)
- * There seems to be no function already to clean up a
- * just-allocated translator object if something else fails.
- * Apparently the convention elsewhere in this file is to return
- * without freeing anything, but we can't keep being that sloppy
- * forever.
- */
- return -1;
- }
-
- /*
- * Make sure we're at the beginning of the list of bricks in this
- * replica set. This way all bricks' volfiles have peers in a
- * consistent order.
- */
- peer = brickinfo;
- for (;;) {
- prev_peer = list_prev(peer, &volinfo->bricks, glusterd_brickinfo_t,
- brick_list);
- if (!prev_peer || (prev_peer->group != brickinfo->group)) {
- break;
- }
- peer = prev_peer;
- }
-
- /* Actually add the peers. */
- do {
- if (peer != brickinfo) {
- gf_log("glusterd", GF_LOG_INFO, "%s:%s needs client for %s:%s",
- brickinfo->hostname, brickinfo->path, peer->hostname,
- peer->path);
- kid = add_one_peer(graph, peer, volinfo->volname, index++);
- if (!kid || volgen_xlator_link(me, kid)) {
- return -1;
- }
- }
- peer = list_next(peer, &volinfo->bricks, glusterd_brickinfo_t,
- brick_list);
- } while (peer && (peer->group == brickinfo->group));
-
- /* Finish linkage to client file. */
- glusterfs_graph_set_first(&graph->graph, me);
-
- return 0;
-}
-
static int
brick_graph_add_index(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
dict_t *set_dict, glusterd_brickinfo_t *brickinfo)
@@ -2180,13 +2106,12 @@ brick_graph_add_index(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
char index_basepath[PATH_MAX] = {0};
int ret = -1;
int32_t len = 0;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !brickinfo || !set_dict)
+ if (!graph || !volinfo || !brickinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
-
- /* For JBR we don't need/want index. */
- if (glusterd_volinfo_get_boolean(volinfo, "cluster.jbr") > 0) {
- return add_jbr_stuff(graph, volinfo, brickinfo);
}
xl = volgen_graph_add(graph, "features/index", volinfo->volname);
@@ -2196,28 +2121,30 @@ brick_graph_add_index(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
len = snprintf(index_basepath, sizeof(index_basepath), "%s/%s",
brickinfo->path, ".glusterfs/indices");
if ((len < 0) || (len >= sizeof(index_basepath))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
goto out;
}
- ret = xlator_set_option(xl, "index-base", index_basepath);
+ ret = xlator_set_fixed_option(xl, "index-base", index_basepath);
if (ret)
goto out;
if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
- ret = xlator_set_option(xl, "xattrop64-watchlist", "trusted.ec.dirty");
+ ret = xlator_set_fixed_option(xl, "xattrop64-watchlist",
+ "trusted.ec.dirty");
if (ret)
goto out;
}
- if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE ||
- volinfo->type == GF_CLUSTER_TYPE_REPLICATE ||
+ if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE ||
volinfo->type == GF_CLUSTER_TYPE_NONE)) {
- ret = xlator_set_option(xl, "xattrop-dirty-watchlist",
- "trusted.afr.dirty");
+ ret = xlator_set_fixed_option(xl, "xattrop-dirty-watchlist",
+ "trusted.afr.dirty");
if (ret)
goto out;
ret = gf_asprintf(&pending_xattr, "trusted.afr.%s-", volinfo->volname);
if (ret < 0)
goto out;
- ret = xlator_set_option(xl, "xattrop-pending-watchlist", pending_xattr);
+ ret = xlator_set_fixed_option(xl, "xattrop-pending-watchlist",
+ pending_xattr);
if (ret)
goto out;
}
@@ -2241,25 +2168,29 @@ brick_graph_add_marker(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
char buf[32] = {
0,
};
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/marker", volinfo->volname);
if (!xl)
goto out;
gf_uuid_unparse(volinfo->volume_id, volume_id);
- ret = xlator_set_option(xl, "volume-uuid", volume_id);
+ ret = xlator_set_fixed_option(xl, "volume-uuid", volume_id);
if (ret)
goto out;
get_vol_tstamp_file(tstamp_file, volinfo);
- ret = xlator_set_option(xl, "timestamp-file", tstamp_file);
+ ret = xlator_set_fixed_option(xl, "timestamp-file", tstamp_file);
if (ret)
goto out;
snprintf(buf, sizeof(buf), "%d", volinfo->quota_xattr_version);
- ret = xlator_set_option(xl, "quota-version", buf);
+ ret = xlator_set_fixed_option(xl, "quota-version", buf);
if (ret)
goto out;
@@ -2274,21 +2205,25 @@ brick_graph_add_quota(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
int ret = -1;
xlator_t *xl = NULL;
char *value = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/quota", volinfo->volname);
if (!xl)
goto out;
- ret = xlator_set_option(xl, "volume-uuid", volinfo->volname);
+ ret = xlator_set_fixed_option(xl, "volume-uuid", volinfo->volname);
if (ret)
goto out;
ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_QUOTA, &value);
if (value) {
- ret = xlator_set_option(xl, "server-quota", value);
+ ret = xlator_set_fixed_option(xl, "server-quota", value);
if (ret)
goto out;
}
@@ -2302,9 +2237,13 @@ brick_graph_add_ro(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
int ret = -1;
xlator_t *xl = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
if (dict_get_str_boolean(set_dict, "features.read-only", 0) &&
(dict_get_str_boolean(set_dict, "features.worm", 0) ||
@@ -2318,7 +2257,7 @@ brick_graph_add_ro(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
xl = volgen_graph_add(graph, "features/read-only", volinfo->volname);
if (!xl)
return -1;
- ret = xlator_set_option(xl, "read-only", "off");
+ ret = xlator_set_fixed_option(xl, "read-only", "off");
if (ret)
return -1;
@@ -2334,9 +2273,13 @@ brick_graph_add_worm(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
int ret = -1;
xlator_t *xl = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
if (dict_get_str_boolean(set_dict, "features.read-only", 0) &&
(dict_get_str_boolean(set_dict, "features.worm", 0) ||
@@ -2363,9 +2306,13 @@ brick_graph_add_cdc(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
int ret = -1;
xlator_t *xl = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
/* Check for compress volume option, and add it to the graph on
* server side */
@@ -2378,7 +2325,7 @@ brick_graph_add_cdc(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = -1;
goto out;
}
- ret = xlator_set_option(xl, "mode", "server");
+ ret = xlator_set_fixed_option(xl, "mode", "server");
if (ret)
goto out;
}
@@ -2392,18 +2339,29 @@ brick_graph_add_io_stats(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
int ret = -1;
xlator_t *xl = NULL;
+ xlator_t *this = THIS;
+ glusterd_conf_t *priv = this->private;
- if (!graph || !volinfo || !set_dict || !brickinfo)
+ if (!graph || !set_dict || !brickinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
- xl = volgen_graph_add(graph, "debug/io-stats", volinfo->volname);
+ xl = volgen_graph_add_as(graph, "debug/io-stats", brickinfo->path);
if (!xl)
goto out;
- ret = xlator_set_option(xl, "unique-id", brickinfo->path);
+ ret = xlator_set_fixed_option(xl, "unique-id", brickinfo->path);
if (ret)
goto out;
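+    /* volume-id on io-stats is understood only from op-version 7.1, so
+     * gate it to keep volfiles compatible with older bricks. */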
+ if (priv->op_version >= GD_OP_VERSION_7_1) {
+ ret = xlator_set_fixed_option(xl, "volume-id",
+ uuid_utoa(volinfo->volume_id));
+ if (ret)
+ goto out;
+ }
+
ret = 0;
out:
return ret;
@@ -2415,9 +2373,13 @@ brick_graph_add_upcall(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/upcall", volinfo->volname);
if (!xl) {
@@ -2437,9 +2399,13 @@ brick_graph_add_leases(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
{
xlator_t *xl = NULL;
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
xl = volgen_graph_add(graph, "features/leases", volinfo->volname);
if (!xl) {
@@ -2469,9 +2435,13 @@ brick_graph_add_server(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
char *volname = NULL;
char *address_family_data = NULL;
int32_t len = 0;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict || !brickinfo)
+ if (!graph || !volinfo || !set_dict || !brickinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
get_vol_transport_type(volinfo, transt);
@@ -2482,16 +2452,16 @@ brick_graph_add_server(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (!xl)
goto out;
- ret = xlator_set_option(xl, "transport-type", transt);
+ ret = xlator_set_fixed_option(xl, "transport-type", transt);
if (ret)
goto out;
/*In the case of running multiple glusterds on a single machine,
* we should ensure that bricks don't listen on all IPs on that
* machine and break the IP based separation being brought about.*/
- if (dict_get(THIS->options, "transport.socket.bind-address")) {
- ret = xlator_set_option(xl, "transport.socket.bind-address",
- brickinfo->hostname);
+ if (dict_get_sizen(THIS->options, "transport.socket.bind-address")) {
+ ret = xlator_set_fixed_option(xl, "transport.socket.bind-address",
+ brickinfo->hostname);
if (ret)
return -1;
}
@@ -2505,10 +2475,10 @@ brick_graph_add_server(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
RPC_SET_OPT(xl, SSL_DH_PARAM_OPT, "ssl-dh-param", return -1);
RPC_SET_OPT(xl, SSL_EC_CURVE_OPT, "ssl-ec-curve", return -1);
- if (dict_get_str(volinfo->dict, "transport.address-family",
- &address_family_data) == 0) {
- ret = xlator_set_option(xl, "transport.address-family",
- address_family_data);
+ if (dict_get_str_sizen(volinfo->dict, "transport.address-family",
+ &address_family_data) == 0) {
+ ret = xlator_set_fixed_option(xl, "transport.address-family",
+ address_family_data);
if (ret) {
gf_log("glusterd", GF_LOG_WARNING,
"failed to set transport.address-family");
@@ -2523,22 +2493,22 @@ brick_graph_add_server(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
return -1;
}
- ret = xlator_set_option(xl, key, username);
+ ret = xlator_set_option(xl, key, len, username);
if (ret)
return -1;
}
if (password) {
- snprintf(key, sizeof(key), "auth.login.%s.password", username);
-
- ret = xlator_set_option(xl, key, password);
+ len = snprintf(key, sizeof(key), "auth.login.%s.password", username);
+ if ((len < 0) || (len >= sizeof(key))) {
+ return -1;
+ }
+ ret = xlator_set_option(xl, key, len, password);
if (ret)
return -1;
}
- snprintf(key, sizeof(key), "auth-path");
-
- ret = xlator_set_option(xl, key, brickinfo->path);
+ ret = xlator_set_fixed_option(xl, "auth-path", brickinfo->path);
if (ret)
return -1;
@@ -2546,21 +2516,19 @@ brick_graph_add_server(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
: volinfo->volname;
if (volname && !strcmp(volname, GLUSTER_SHARED_STORAGE)) {
- snprintf(key, sizeof(key), "strict-auth-accept");
-
- ret = xlator_set_option(xl, key, "true");
+ ret = xlator_set_fixed_option(xl, "strict-auth-accept", "true");
if (ret)
return -1;
}
- if (dict_get_str(volinfo->dict, "auth.ssl-allow", &ssl_user) == 0) {
+ if (dict_get_str_sizen(volinfo->dict, "auth.ssl-allow", &ssl_user) == 0) {
len = snprintf(key, sizeof(key), "auth.login.%s.ssl-allow",
brickinfo->path);
if ((len < 0) || (len >= sizeof(key))) {
return -1;
}
- ret = xlator_set_option(xl, key, ssl_user);
+ ret = xlator_set_option(xl, key, len, ssl_user);
if (ret)
return -1;
}
@@ -2582,13 +2550,20 @@ brick_graph_add_pump(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
char *password = NULL;
char *ptranst = NULL;
char *address_family_data = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
- if (!graph || !volinfo || !set_dict)
+ if (!graph || !volinfo || !set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
ret = dict_get_int32(volinfo->dict, "enable-pump", &pump);
- if (ret == -ENOENT)
+ if (ret == -ENOENT) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=enable-pump", NULL);
ret = pump = 0;
+ }
if (ret)
return -1;
@@ -2617,26 +2592,26 @@ brick_graph_add_pump(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
RPC_SET_OPT(rbxl, SSL_EC_CURVE_OPT, "ssl-ec-curve", return -1);
if (username) {
- ret = xlator_set_option(rbxl, "username", username);
+ ret = xlator_set_fixed_option(rbxl, "username", username);
if (ret)
return -1;
}
if (password) {
- ret = xlator_set_option(rbxl, "password", password);
+ ret = xlator_set_fixed_option(rbxl, "password", password);
if (ret)
return -1;
}
- ret = xlator_set_option(rbxl, "transport-type", ptranst);
+ ret = xlator_set_fixed_option(rbxl, "transport-type", ptranst);
GF_FREE(ptranst);
if (ret)
return -1;
- if (dict_get_str(volinfo->dict, "transport.address-family",
- &address_family_data) == 0) {
- ret = xlator_set_option(rbxl, "transport.address-family",
- address_family_data);
+ if (dict_get_str_sizen(volinfo->dict, "transport.address-family",
+ &address_family_data) == 0) {
+ ret = xlator_set_fixed_option(rbxl, "transport.address-family",
+ address_family_data);
if (ret) {
gf_log("glusterd", GF_LOG_WARNING,
"failed to set transport.address-family");
@@ -2664,7 +2639,6 @@ out:
* the topology of the brick graph */
static volgen_brick_xlator_t server_graph_table[] = {
{brick_graph_add_server, NULL},
- {brick_graph_add_decompounder, "decompounder"},
{brick_graph_add_io_stats, "NULL"},
{brick_graph_add_sdfs, "sdfs"},
{brick_graph_add_namespace, "namespace"},
@@ -2674,7 +2648,6 @@ static volgen_brick_xlator_t server_graph_table[] = {
{brick_graph_add_barrier, NULL},
{brick_graph_add_marker, "marker"},
{brick_graph_add_selinux, "selinux"},
- {brick_graph_add_fdl, "fdl"},
{brick_graph_add_iot, "io-threads"},
{brick_graph_add_upcall, "upcall"},
{brick_graph_add_leases, "leases"},
@@ -2685,10 +2658,6 @@ static volgen_brick_xlator_t server_graph_table[] = {
{brick_graph_add_acl, "acl"},
{brick_graph_add_bitrot_stub, "bitrot-stub"},
{brick_graph_add_changelog, "changelog"},
-#if USE_GFDB /* changetimerecorder depends on gfdb */
- {brick_graph_add_changetimerecorder, "changetimerecorder"},
-#endif
- {brick_graph_add_bd, "bd"},
{brick_graph_add_trash, "trash"},
{brick_graph_add_arbiter, "arbiter"},
{brick_graph_add_posix, "posix"},
@@ -2815,11 +2784,11 @@ server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
i--;
}
- ret = dict_get_str(set_dict, "xlator", &xlator);
+ ret = dict_get_str_sizen(set_dict, "xlator", &xlator);
/* got a cli log level request */
if (!ret) {
- ret = dict_get_str(set_dict, "loglevel", &loglevel);
+ ret = dict_get_str_sizen(set_dict, "loglevel", &loglevel);
if (ret) {
gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
"could not get both"
@@ -2881,8 +2850,7 @@ perfxl_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
* performance
*/
if (!strcmp(vme->key, "performance.client-io-threads") &&
- (GF_CLUSTER_TYPE_STRIPE_REPLICATE == volinfo->type ||
- GF_CLUSTER_TYPE_REPLICATE == volinfo->type))
+ (GF_CLUSTER_TYPE_REPLICATE == volinfo->type))
return 0;
}
@@ -2930,6 +2898,7 @@ gfproxy_client_perfxl_option_handler(volgen_graph_t *graph,
return 0;
}
+#ifdef BUILD_GNFS
static int
nfsperfxl_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
void *param)
@@ -2952,6 +2921,7 @@ nfsperfxl_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
else
return -1;
}
+#endif
#if (HAVE_LIB_XML)
int
@@ -3109,8 +3079,10 @@ _get_xlator_opt_key_from_vme(struct volopt_map_entry *vme, char **key)
*key = gf_strdup(AUTH_ALLOW_OPT_KEY);
else if (!strcmp(vme->key, AUTH_REJECT_MAP_KEY))
*key = gf_strdup(AUTH_REJECT_OPT_KEY);
+#ifdef BUILD_GNFS
else if (!strcmp(vme->key, NFS_DISABLE_MAP_KEY))
*key = gf_strdup(NFS_DISABLE_OPT_KEY);
+#endif
else {
if (vme->option) {
if (vme->option[0] == '!') {
@@ -3176,34 +3148,34 @@ volgen_graph_build_client(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (!xl)
goto err;
- ret = xlator_set_option(xl, "ping-timeout", "42");
+ ret = xlator_set_fixed_option(xl, "ping-timeout", "42");
if (ret)
goto err;
if (hostname) {
- ret = xlator_set_option(xl, "remote-host", hostname);
+ ret = xlator_set_fixed_option(xl, "remote-host", hostname);
if (ret)
goto err;
}
if (port) {
- ret = xlator_set_option(xl, "remote-port", port);
+ ret = xlator_set_fixed_option(xl, "remote-port", port);
if (ret)
goto err;
}
- ret = xlator_set_option(xl, "remote-subvolume", subvol);
+ ret = xlator_set_fixed_option(xl, "remote-subvolume", subvol);
if (ret)
goto err;
- ret = xlator_set_option(xl, "transport-type", transt);
+ ret = xlator_set_fixed_option(xl, "transport-type", transt);
if (ret)
goto err;
- if (dict_get_str(volinfo->dict, "transport.address-family",
- &address_family_data) == 0) {
- ret = xlator_set_option(xl, "transport.address-family",
- address_family_data);
+ if (dict_get_str_sizen(volinfo->dict, "transport.address-family",
+ &address_family_data) == 0) {
+ ret = xlator_set_fixed_option(xl, "transport.address-family",
+ address_family_data);
if (ret) {
gf_log("glusterd", GF_LOG_WARNING,
"failed to set transport.address-family");
@@ -3218,24 +3190,24 @@ volgen_graph_build_client(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
str = NULL;
str = glusterd_auth_get_username(volinfo);
if (str) {
- ret = xlator_set_option(xl, "username", str);
+ ret = xlator_set_fixed_option(xl, "username", str);
if (ret)
goto err;
}
str = glusterd_auth_get_password(volinfo);
if (str) {
- ret = xlator_set_option(xl, "password", str);
+ ret = xlator_set_fixed_option(xl, "password", str);
if (ret)
goto err;
}
}
- if (dict_get_str(set_dict, "client.ssl", &ssl_str) == 0) {
+ if (dict_get_str_sizen(set_dict, "client.ssl", &ssl_str) == 0) {
if (gf_string2boolean(ssl_str, &ssl_bool) == 0) {
if (ssl_bool) {
- ret = xlator_set_option(xl, "transport.socket.ssl-enabled",
- "true");
+ ret = xlator_set_fixed_option(
+ xl, "transport.socket.ssl-enabled", "true");
if (ret) {
goto err;
}
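
The dict_get_str_sizen() and xlator_set_fixed_option() conversions in these
hunks rely on key lengths that are known at compile time. A standalone sketch
of the idea (not part of the patch; main() is illustrative only):

    #include <stdio.h>

    /* glusterfs defines SLEN(str) as sizeof(str) - 1 for string literals,
     * letting the *_sizen and *_fixed_option helpers skip a runtime
     * strlen() on the key. */
    #define SLEN(str) (sizeof(str) - 1)

    int
    main(void)
    {
        printf("\"transport.address-family\" -> length %zu at compile time\n",
               SLEN("transport.address-family"));
        return 0;
    }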
@@ -3267,7 +3239,10 @@ volgen_graph_build_clients(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
0,
};
glusterd_brickinfo_t *brick = NULL;
+ glusterd_brickinfo_t *ta_brick = NULL;
xlator_t *xl = NULL;
+ int subvol_index = 0;
+ int thin_arbiter_index = 0;
if (volinfo->brick_count == 0) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLUME_INCONSISTENCY,
@@ -3275,8 +3250,7 @@ volgen_graph_build_clients(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
goto out;
}
- if ((volinfo->type != GF_CLUSTER_TYPE_TIER) &&
- (volinfo->dist_leaf_count < volinfo->brick_count) &&
+ if ((volinfo->dist_leaf_count < volinfo->brick_count) &&
((volinfo->brick_count % volinfo->dist_leaf_count) != 0)) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLUME_INCONSISTENCY,
"volume inconsistency: "
@@ -3295,6 +3269,30 @@ volgen_graph_build_clients(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
i = 0;
cds_list_for_each_entry(brick, &volinfo->bricks, brick_list)
{
+ /* Insert the ta client xlator entry.
+ * e.g. if the subvol count is > 1, then after every two client xlator
+ * entries there should be a ta client xlator entry in the volfile. The
+ * ta client xlator indexes are 2, 5, 8, etc., depending on the index of
+ * the subvol (see the index sketch after this function).
+ */
+ if (volinfo->thin_arbiter_count &&
+ (i + 1) % (volinfo->replica_count + 1) == 0) {
+ thin_arbiter_index = 0;
+ cds_list_for_each_entry(ta_brick, &volinfo->ta_bricks, brick_list)
+ {
+ if (thin_arbiter_index == subvol_index) {
+ xl = volgen_graph_build_client(
+ graph, volinfo, ta_brick->hostname, NULL,
+ ta_brick->path, ta_brick->brick_id, transt, set_dict);
+ if (!xl) {
+ ret = -1;
+ goto out;
+ }
+ }
+ thin_arbiter_index++;
+ }
+ subvol_index++;
+ }
xl = volgen_graph_build_client(graph, volinfo, brick->hostname, NULL,
brick->path, brick->brick_id, transt,
set_dict);
@@ -3306,6 +3304,28 @@ volgen_graph_build_clients(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
i++;
}
+ /* Add the ta client xlator entry for the last subvol.
+ * The loop above misses making the ta client xlator
+ * entry for the last subvolume in the volfile.
+ */
+ if (volinfo->thin_arbiter_count) {
+ thin_arbiter_index = 0;
+ cds_list_for_each_entry(ta_brick, &volinfo->ta_bricks, brick_list)
+ {
+ if (thin_arbiter_index == subvol_index) {
+ xl = volgen_graph_build_client(
+ graph, volinfo, ta_brick->hostname, NULL, ta_brick->path,
+ ta_brick->brick_id, transt, set_dict);
+ if (!xl) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ thin_arbiter_index++;
+ }
+ }
+
if (i != volinfo->brick_count) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLUME_INCONSISTENCY,
"volume inconsistency: actual number of bricks (%d) "
@@ -3321,50 +3341,6 @@ out:
}
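
A standalone sketch (illustrative counts, not part of the patch) of the ta
client placement rule described in the comments inside
volgen_graph_build_clients() above:

    #include <stdio.h>

    int
    main(void)
    {
        int replica_count = 2; /* thin-arbiter volumes are replica 2 */
        int subvols = 3;       /* illustrative */
        int s;

        /* Each subvolume contributes replica_count data clients plus one
         * ta client, so the ta client of subvol s lands at volfile index
         * s * (replica_count + 1) + replica_count: 2, 5, 8, ... */
        for (s = 0; s < subvols; s++)
            printf("subvol %d: ta client at index %d\n", s,
                   s * (replica_count + 1) + replica_count);
        return 0;
    }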
static int
-volgen_graph_build_clients_for_tier_shd(volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo,
- dict_t *set_dict)
-{
- int ret = 0;
- glusterd_volinfo_t *dup_volinfo = NULL;
- gf_boolean_t is_hot_tier = _gf_false;
- gf_boolean_t is_hot_shd = _gf_false;
- gf_boolean_t is_cold_shd = _gf_false;
-
- is_cold_shd = glusterd_is_shd_compatible_type(volinfo->tier_info.cold_type);
- is_hot_shd = glusterd_is_shd_compatible_type(volinfo->tier_info.hot_type);
-
- if (is_cold_shd && is_hot_shd) {
- ret = volgen_graph_build_clients(graph, volinfo, set_dict, NULL);
- return ret;
- }
-
- if (is_cold_shd) {
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo,
- is_hot_tier, volinfo->volname);
- if (ret)
- goto out;
- ret = volgen_graph_build_clients(graph, dup_volinfo, set_dict, NULL);
- if (ret)
- goto out;
- }
- if (is_hot_shd) {
- is_hot_tier = _gf_true;
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo,
- is_hot_tier, volinfo->volname);
- if (ret)
- goto out;
- ret = volgen_graph_build_clients(graph, dup_volinfo, set_dict, NULL);
- if (ret)
- goto out;
- }
-out:
- if (dup_volinfo)
- glusterd_volinfo_delete(dup_volinfo);
- return ret;
-}
-
-static int
volgen_link_bricks(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
char *xl_type, char *xl_namefmt, size_t child_count,
size_t sub_count, size_t start_count, xlator_t *trav)
@@ -3383,13 +3359,22 @@ volgen_link_bricks(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if ((i % sub_count) == 0) {
xl = volgen_graph_add_nolink(graph, xl_type, xl_namefmt, volname,
j);
- if (!xl) {
- ret = -1;
- goto out;
- }
j++;
}
+ if (!xl) {
+ ret = -1;
+ goto out;
+ }
+
+ if (strncmp(xl_type, "performance/readdir-ahead",
+ SLEN("performance/readdir-ahead")) == 0) {
+ ret = xlator_set_fixed_option(xl, "performance.readdir-ahead",
+ "on");
+ if (ret)
+ goto out;
+ }
+
ret = volgen_xlator_link(xl, trav);
if (ret)
goto out;
@@ -3425,22 +3410,6 @@ volgen_link_bricks_from_list_tail_start(volgen_graph_t *graph,
}
static int
-volgen_link_bricks_from_list_head_start(volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo,
- char *xl_type, char *xl_namefmt,
- size_t child_count, size_t sub_count,
- size_t start_count)
-{
- xlator_t *trav = NULL;
-
- for (trav = first_of(graph); trav->next; trav = trav->next)
- ;
-
- return volgen_link_bricks(graph, volinfo, xl_type, xl_namefmt, child_count,
- sub_count, start_count, trav);
-}
-
-static int
volgen_link_bricks_from_list_tail(volgen_graph_t *graph,
glusterd_volinfo_t *volinfo, char *xl_type,
char *xl_namefmt, size_t child_count,
@@ -3459,21 +3428,6 @@ volgen_link_bricks_from_list_tail(volgen_graph_t *graph,
sub_count, 0, trav);
}
-static int
-volgen_link_bricks_from_list_head(volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo, char *xl_type,
- char *xl_namefmt, size_t child_count,
- size_t sub_count)
-{
- xlator_t *trav = NULL;
-
- for (trav = first_of(graph); trav->next; trav = trav->next)
- ;
-
- return volgen_link_bricks(graph, volinfo, xl_type, xl_namefmt, child_count,
- sub_count, 0, trav);
-}
-
/**
* This is the build graph function for user-serviceable snapshots.
* Generates snapview-client
@@ -3553,7 +3507,7 @@ _xl_is_client_decommissioned(xlator_t *xl, glusterd_volinfo_t *volinfo)
char *path = NULL;
GF_ASSERT(!strcmp(xl->type, "protocol/client"));
- ret = xlator_get_option(xl, "remote-host", &hostname);
+ ret = xlator_get_fixed_option(xl, "remote-host", &hostname);
if (ret) {
GF_ASSERT(0);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REMOTE_HOST_GET_FAIL,
@@ -3562,7 +3516,7 @@ _xl_is_client_decommissioned(xlator_t *xl, glusterd_volinfo_t *volinfo)
xl->name);
goto out;
}
- ret = xlator_get_option(xl, "remote-subvolume", &path);
+ ret = xlator_get_fixed_option(xl, "remote-subvolume", &path);
if (ret) {
GF_ASSERT(0);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REMOTE_HOST_GET_FAIL,
@@ -3648,13 +3602,13 @@ volgen_graph_build_readdir_ahead(volgen_graph_t *graph,
int32_t clusters = 0;
if (graph->type == GF_QUOTAD || graph->type == GF_SNAPD ||
- !glusterd_volinfo_get_boolean(volinfo, VKEY_PARALLEL_READDIR) ||
- !glusterd_volinfo_get_boolean(volinfo, VKEY_READDIR_AHEAD))
+ !glusterd_volinfo_get_boolean(volinfo, VKEY_PARALLEL_READDIR))
goto out;
clusters = volgen_link_bricks_from_list_tail(
graph, volinfo, "performance/readdir-ahead", "%s-readdir-ahead-%d",
child_count, 1);
+
out:
return clusters;
}
@@ -3698,19 +3652,14 @@ volgen_graph_build_dht_cluster(volgen_graph_t *graph,
if (clusters < 0)
goto out;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = 0;
- goto out;
- }
-
dht = first_of(graph);
ret = _graph_get_decommissioned_children(dht, volinfo,
&decommissioned_children);
if (ret)
goto out;
if (decommissioned_children) {
- ret = xlator_set_option(dht, "decommissioned-bricks",
- decommissioned_children);
+ ret = xlator_set_fixed_option(dht, "decommissioned-bricks",
+ decommissioned_children);
if (ret)
goto out;
}
@@ -3732,11 +3681,6 @@ volgen_graph_build_ec_clusters(volgen_graph_t *graph,
char option[32] = {0};
int start_count = 0;
- if (volinfo->tier_info.cur_tier_hot &&
- volinfo->tier_info.cold_type == GF_CLUSTER_TYPE_DISPERSE)
- start_count = volinfo->tier_info.cold_brick_count /
- volinfo->tier_info.cold_disperse_count;
-
clusters = volgen_link_bricks_from_list_tail_start(
graph, volinfo, disperse_args[0], disperse_args[1],
volinfo->brick_count, volinfo->disperse_count, start_count);
@@ -3746,7 +3690,7 @@ volgen_graph_build_ec_clusters(volgen_graph_t *graph,
sprintf(option, "%d", volinfo->redundancy_count);
ec = first_of(graph);
for (i = 0; i < clusters; i++) {
- ret = xlator_set_option(ec, "redundancy", option);
+ ret = xlator_set_fixed_option(ec, "redundancy", option);
if (ret) {
clusters = -1;
goto out;
@@ -3767,12 +3711,15 @@ set_afr_pending_xattrs_option(volgen_graph_t *graph,
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
glusterd_brickinfo_t *brick = NULL;
+ glusterd_brickinfo_t *ta_brick = NULL;
char *ptr = NULL;
int i = 0;
int index = -1;
int ret = 0;
char *afr_xattrs_list = NULL;
int list_size = -1;
+ int ta_brick_index = 0;
+ int subvol_index = 0;
this = THIS;
GF_VALIDATE_OR_GOTO("glusterd", this, out);
@@ -3811,13 +3758,44 @@ set_afr_pending_xattrs_option(volgen_graph_t *graph,
break;
strncat(ptr, brick->brick_id, strlen(brick->brick_id));
if (i == volinfo->replica_count) {
- ret = xlator_set_option(afr_xlators_list[index++],
- "afr-pending-xattr", afr_xattrs_list);
+ /* Add the ta client xlator to afr-pending-xattrs before making entries
+ * for the client xlators in the volfile.
+ * ta client xlator indexes are 2, 5, 8, depending on the index of the
+ * subvol; e.g. for the first subvol the ta client xlator id is
+ * volname-ta-2. From GD_OP_VERSION_7_3 onwards the ta name used for
+ * pending-xattr is 'volname-ta-2.{{volume-uuid}}' (the assembled value
+ * is sketched after this hunk).
+ */
+ ta_brick_index = 0;
+ if (volinfo->thin_arbiter_count == 1) {
+ ptr[strlen(brick->brick_id)] = ',';
+ cds_list_for_each_entry(ta_brick, &volinfo->ta_bricks,
+ brick_list)
+ {
+ if (ta_brick_index == subvol_index) {
+ break;
+ }
+ ta_brick_index++;
+ }
+ if (conf->op_version < GD_OP_VERSION_7_3) {
+ strncat(ptr, ta_brick->brick_id,
+ strlen(ta_brick->brick_id));
+ } else {
+ char ta_volname[PATH_MAX] = "";
+ int len = snprintf(ta_volname, PATH_MAX, "%s.%s",
+ ta_brick->brick_id,
+ uuid_utoa(volinfo->volume_id));
+ strncat(ptr, ta_volname, len);
+ }
+ }
+
+ ret = xlator_set_fixed_option(afr_xlators_list[index++],
+ "afr-pending-xattr", afr_xattrs_list);
if (ret)
goto out;
memset(afr_xattrs_list, 0, list_size);
ptr = afr_xattrs_list;
i = 1;
+ subvol_index++;
continue;
}
ptr[strlen(brick->brick_id)] = ',';
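
A standalone sketch of the option value assembled above, under assumed names
(volume "demo", replica 2, one ta brick, op-version >= GD_OP_VERSION_7_3; the
uuid is a placeholder):

    #include <stdio.h>

    int
    main(void)
    {
        char xattrs[128];
        const char *uuid = "11111111-2222-3333-4444-555555555555";

        /* afr-pending-xattr is a comma-separated list of client names; with
         * a thin arbiter the ta entry (with its uuid suffix) closes each
         * subvolume's list. */
        snprintf(xattrs, sizeof(xattrs),
                 "demo-client-0,demo-client-1,demo-ta-2.%s", uuid);
        printf("afr-pending-xattr = %s\n", xattrs);
        return 0;
    }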
@@ -3832,36 +3810,70 @@ out:
}
static int
+set_volfile_id_option(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
+ int clusters)
+{
+ xlator_t *xlator = NULL;
+ int i = 0;
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ if (conf->op_version < GD_OP_VERSION_9_0)
+ return 0;
+ xlator = first_of(graph);
+
+ for (i = 0; i < clusters; i++) {
+ ret = xlator_set_fixed_option(xlator, "volume-id",
+ uuid_utoa(volinfo->volume_id));
+ if (ret)
+ goto out;
+
+ xlator = xlator->next;
+ }
+
+out:
+ return ret;
+}
+
+static int
volgen_graph_build_afr_clusters(volgen_graph_t *graph,
glusterd_volinfo_t *volinfo)
{
int i = 0;
int ret = 0;
int clusters = 0;
- char *replicate_type = NULL;
+ char *replicate_type = "cluster/replicate";
char *replicate_name = "%s-replicate-%d";
xlator_t *afr = NULL;
char option[32] = {0};
- int start_count = 0;
+ glusterd_brickinfo_t *ta_brick = NULL;
+ int ta_brick_index = 0;
+ int ta_replica_offset = 0;
+ int ta_brick_offset = 0;
+ char ta_option[4096] = {
+ 0,
+ };
- if (glusterd_volinfo_get_boolean(volinfo, "cluster.jbr") > 0) {
- replicate_type = "experimental/jbrc";
- } else {
- replicate_type = "cluster/replicate";
+ /* In the thin-arbiter case the brick count and the replica count remain
+ * the same, but because the volfile carries additional ta client xlator
+ * entries, GD1 is adjusted with the offsets below so that those client
+ * xlators are linked into the afr/cluster entry of the volfile.
+ */
+ if (volinfo->thin_arbiter_count == 1) {
+ ta_replica_offset = 1;
+ ta_brick_offset = volinfo->subvol_count;
}
- if (volinfo->tier_info.cold_type == GF_CLUSTER_TYPE_REPLICATE)
- start_count = volinfo->tier_info.cold_brick_count /
- volinfo->tier_info.cold_replica_count;
-
- if (volinfo->tier_info.cur_tier_hot)
- clusters = volgen_link_bricks_from_list_head_start(
- graph, volinfo, replicate_type, replicate_name,
- volinfo->brick_count, volinfo->replica_count, start_count);
- else
- clusters = volgen_link_bricks_from_list_tail(
- graph, volinfo, replicate_type, replicate_name,
- volinfo->brick_count, volinfo->replica_count);
+ clusters = volgen_link_bricks_from_list_tail(
+ graph, volinfo, replicate_type, replicate_name,
+ volinfo->brick_count + ta_brick_offset,
+ volinfo->replica_count + ta_replica_offset);
if (clusters < 0)
goto out;
@@ -3871,18 +3883,50 @@ volgen_graph_build_afr_clusters(volgen_graph_t *graph,
clusters = -1;
goto out;
}
- if (!volinfo->arbiter_count)
+
+ ret = set_volfile_id_option(graph, volinfo, clusters);
+ if (ret) {
+ clusters = -1;
+ goto out;
+ }
+
+ if (!volinfo->arbiter_count && !volinfo->thin_arbiter_count)
goto out;
afr = first_of(graph);
- sprintf(option, "%d", volinfo->arbiter_count);
- for (i = 0; i < clusters; i++) {
- ret = xlator_set_option(afr, "arbiter-count", option);
- if (ret) {
- clusters = -1;
- goto out;
+
+ if (volinfo->arbiter_count) {
+ sprintf(option, "%d", volinfo->arbiter_count);
+ for (i = 0; i < clusters; i++) {
+ ret = xlator_set_fixed_option(afr, "arbiter-count", option);
+ if (ret) {
+ clusters = -1;
+ goto out;
+ }
+
+ afr = afr->next;
+ }
+ }
+
+ if (volinfo->thin_arbiter_count == 1) {
+ for (i = 0; i < clusters; i++) {
+ ta_brick_index = 0;
+ cds_list_for_each_entry(ta_brick, &volinfo->ta_bricks, brick_list)
+ {
+ if (ta_brick_index == i) {
+ break;
+ }
+ ta_brick_index++;
+ }
+ snprintf(ta_option, sizeof(ta_option), "%s:%s", ta_brick->hostname,
+ ta_brick->path);
+ ret = xlator_set_fixed_option(afr, "thin-arbiter", ta_option);
+ if (ret) {
+ clusters = -1;
+ goto out;
+ }
+ afr = afr->next;
}
- afr = afr->next;
}
out:
return clusters;
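
The offset arithmetic above can be checked with a standalone sketch
(illustrative counts, not part of the patch):

    #include <stdio.h>

    int
    main(void)
    {
        int brick_count = 6, replica_count = 2, subvol_count = 3;
        int thin_arbiter_count = 1;

        /* Each subvolume carries one extra ta client, so the linker is fed
         * brick_count + subvol_count children in groups of
         * replica_count + 1, which still yields subvol_count clusters. */
        int children = brick_count + (thin_arbiter_count ? subvol_count : 0);
        int group = replica_count + (thin_arbiter_count ? 1 : 0);
        printf("%d children in groups of %d -> %d afr clusters\n", children,
               group, children / group);
        return 0;
    }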
@@ -3893,15 +3937,9 @@ volume_volgen_graph_build_clusters(volgen_graph_t *graph,
glusterd_volinfo_t *volinfo,
gf_boolean_t is_quotad)
{
- char *tier_args[] = {"cluster/tier", "%s-tier-%d"};
- char *stripe_args[] = {"cluster/stripe", "%s-stripe-%d"};
- int rclusters = 0;
int clusters = 0;
int dist_count = 0;
int ret = -1;
- char tmp_volname[GD_VOLUME_NAME_MAX_TIER] = {
- 0,
- };
if (!volinfo->dist_leaf_count)
goto out;
@@ -3916,35 +3954,6 @@ volume_volgen_graph_build_clusters(volgen_graph_t *graph,
if (clusters < 0)
goto out;
break;
- case GF_CLUSTER_TYPE_STRIPE:
- clusters = volgen_link_bricks_from_list_tail(
- graph, volinfo, stripe_args[0], stripe_args[1],
- volinfo->brick_count, volinfo->stripe_count);
- if (clusters < 0)
- goto out;
- break;
- case GF_CLUSTER_TYPE_TIER:
- ret = volgen_link_bricks_from_list_head(
- graph, volinfo, tier_args[0], tier_args[1],
- volinfo->brick_count, volinfo->replica_count);
- break;
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
- /* Replicate after the clients, then stripe */
- if (volinfo->replica_count == 0)
- goto out;
- clusters = volgen_graph_build_afr_clusters(graph, volinfo);
- if (clusters < 0)
- goto out;
-
- rclusters = volinfo->brick_count / volinfo->replica_count;
- GF_ASSERT(rclusters == clusters);
- clusters = volgen_link_bricks_from_list_tail(
- graph, volinfo, stripe_args[0], stripe_args[1], rclusters,
- volinfo->stripe_count);
- if (clusters < 0)
- goto out;
- break;
-
case GF_CLUSTER_TYPE_DISPERSE:
clusters = volgen_graph_build_ec_clusters(graph, volinfo);
if (clusters < 0)
@@ -3964,24 +3973,11 @@ build_distribute:
ret = -1;
goto out;
}
- if (volinfo->tier_info.hot_brick_count) {
- if (snprintf(tmp_volname, GD_VOLUME_NAME_MAX_TIER, "%s",
- volinfo->volname) >= GD_VOLUME_NAME_MAX_TIER) {
- ret = -1;
- goto out;
- }
- if (volinfo->tier_info.cur_tier_hot)
- strcat(volinfo->volname, "-hot");
- else
- strcat(volinfo->volname, "-cold");
- }
clusters = volgen_graph_build_readdir_ahead(graph, volinfo, dist_count);
if (clusters < 0)
goto out;
ret = volgen_graph_build_dht_cluster(graph, volinfo, dist_count, is_quotad);
- if (volinfo->tier_info.hot_brick_count)
- strcpy(volinfo->volname, tmp_volname);
if (ret)
goto out;
@@ -4022,7 +4018,7 @@ client_graph_set_rda_options(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
* glusterd_volinfo_get, so that we consider key value of the in
* progress volume set option.
*/
- ret = dict_get_str(set_dict, VKEY_RDA_CACHE_LIMIT, &rda_cache_s);
+ ret = dict_get_str_sizen(set_dict, VKEY_RDA_CACHE_LIMIT, &rda_cache_s);
if (ret < 0) {
ret = glusterd_volinfo_get(volinfo, VKEY_RDA_CACHE_LIMIT, &rda_cache_s);
if (ret < 0)
@@ -4035,7 +4031,7 @@ client_graph_set_rda_options(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
goto out;
}
- ret = dict_get_str(set_dict, VKEY_RDA_REQUEST_SIZE, &rda_req_s);
+ ret = dict_get_str_sizen(set_dict, VKEY_RDA_REQUEST_SIZE, &rda_req_s);
if (ret < 0) {
ret = glusterd_volinfo_get(volinfo, VKEY_RDA_REQUEST_SIZE, &rda_req_s);
if (ret < 0)
@@ -4061,7 +4057,7 @@ client_graph_set_rda_options(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
else
rda_req_size = new_cache_size;
- snprintf(new_req_size_str, sizeof(new_req_size_str), "%ld%s",
+ snprintf(new_req_size_str, sizeof(new_req_size_str), "%" PRId64 "%s",
rda_req_size, "B");
ret = dict_set_dynstr_with_alloc(set_dict, VKEY_RDA_REQUEST_SIZE,
new_req_size_str);
@@ -4069,7 +4065,7 @@ client_graph_set_rda_options(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
goto out;
}
- snprintf(new_cache_size_str, sizeof(new_cache_size_str), "%ld%s",
+ snprintf(new_cache_size_str, sizeof(new_cache_size_str), "%" PRId64 "%s",
new_cache_size, "B");
ret = dict_set_dynstr_with_alloc(set_dict, VKEY_RDA_CACHE_LIMIT,
new_cache_size_str);
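
The "%ld" to PRId64 fixes above matter on ILP32 targets, where long is only
32 bits wide; a minimal demonstration:

    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
        int64_t rda_req_size = 131072; /* illustrative value */
        char buf[36];

        /* PRId64 expands to the right conversion for int64_t on every
         * platform, unlike a hard-coded "%ld". */
        snprintf(buf, sizeof(buf), "%" PRId64 "B", rda_req_size);
        printf("%s\n", buf);
        return 0;
    }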
@@ -4084,8 +4080,6 @@ static int
client_graph_set_perf_options(volgen_graph_t *graph,
glusterd_volinfo_t *volinfo, dict_t *set_dict)
{
- data_t *tmp_data = NULL;
- char *volname = NULL;
int ret = 0;
/*
@@ -4114,15 +4108,19 @@ client_graph_set_perf_options(volgen_graph_t *graph,
if (ret < 0)
return ret;
- volname = volinfo->volname;
+#ifdef BUILD_GNFS
+ data_t *tmp_data = NULL;
+ char *volname = NULL;
- tmp_data = dict_get(set_dict, "nfs-volume-file");
- if (!tmp_data)
- return volgen_graph_set_options_generic(graph, set_dict, volinfo,
- &perfxl_option_handler);
- else
+ tmp_data = dict_get_sizen(set_dict, "nfs-volume-file");
+ if (tmp_data) {
+ volname = volinfo->volname;
return volgen_graph_set_options_generic(graph, set_dict, volname,
&nfsperfxl_option_handler);
+ } else
+#endif
+ return volgen_graph_set_options_generic(graph, set_dict, volinfo,
+ &perfxl_option_handler);
}
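
Declaring tmp_data and volname inside the #ifdef keeps the non-gNFS build
free of unused-variable warnings. A sketch that compiles with or without
-DBUILD_GNFS (names illustrative):

    #include <stdio.h>

    int
    main(void)
    {
    #ifdef BUILD_GNFS
        /* These locals exist only when the gNFS path is compiled in, so a
         * build without -DBUILD_GNFS never sees them as unused. */
        const char *volname = "demo";
        printf("building nfs volfile for %s\n", volname);
    #endif
        printf("building client volfile\n");
        return 0;
    }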
static int
@@ -4185,138 +4183,15 @@ graph_set_generic_options(xlator_t *this, volgen_graph_t *graph,
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GRAPH_SET_OPT_FAIL,
"Failed to change "
"log-localtime-logging option");
- return 0;
-}
-
-static int
-volume_volgen_graph_build_clusters_tier(volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo,
- gf_boolean_t is_quotad)
-{
- int ret = -1;
- xlator_t *xl, *hxl, *cxl;
- char *rule = NULL;
- int st_brick_count = 0;
- int st_replica_count = 0;
- int st_disperse_count = 0;
- int st_dist_leaf_count = 0;
- int st_type = 0;
- int dist_count = 0;
- int start_count = 0;
- char *decommissioned_children = NULL;
- glusterd_volinfo_t *dup_volinfo = NULL;
- gf_boolean_t is_hot_tier = _gf_false;
-
- st_brick_count = volinfo->brick_count;
- st_replica_count = volinfo->replica_count;
- st_disperse_count = volinfo->disperse_count;
- st_type = volinfo->type;
- st_dist_leaf_count = volinfo->dist_leaf_count;
-
- volinfo->dist_leaf_count = volinfo->tier_info.cold_dist_leaf_count;
- volinfo->brick_count = volinfo->tier_info.cold_brick_count;
- volinfo->replica_count = volinfo->tier_info.cold_replica_count;
- volinfo->disperse_count = volinfo->tier_info.cold_disperse_count;
- volinfo->redundancy_count = volinfo->tier_info.cold_redundancy_count;
- volinfo->type = volinfo->tier_info.cold_type;
- volinfo->tier_info.cur_tier_hot = 0;
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, is_hot_tier,
- volinfo->volname);
- if (ret)
- goto out;
-
- ret = volume_volgen_graph_build_clusters(graph, dup_volinfo, is_quotad);
- if (ret)
- goto out;
- cxl = first_of(graph);
-
- volinfo->type = volinfo->tier_info.hot_type;
- volinfo->brick_count = volinfo->tier_info.hot_brick_count;
- volinfo->replica_count = volinfo->tier_info.hot_replica_count;
- volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo);
- volinfo->disperse_count = 0;
- volinfo->tier_info.cur_tier_hot = 1;
-
- dist_count = volinfo->brick_count / volinfo->dist_leaf_count;
-
- if (volinfo->tier_info.cold_type == GF_CLUSTER_TYPE_REPLICATE) {
- start_count = volinfo->tier_info.cold_brick_count /
- volinfo->tier_info.cold_replica_count;
- }
- if (volinfo->dist_leaf_count != 1) {
- ret = volgen_link_bricks_from_list_head_start(
- graph, volinfo, "cluster/replicate", "%s-replicate-%d",
- volinfo->brick_count, volinfo->replica_count, start_count);
- if (ret != -1) {
- ret = set_afr_pending_xattrs_option(graph, volinfo, ret);
- if (ret)
- goto out;
- volgen_link_bricks_from_list_tail(
- graph, volinfo, "cluster/distribute", "%s-hot-dht", dist_count,
- dist_count);
- }
- } else {
- ret = volgen_link_bricks_from_list_head(
- graph, volinfo, "cluster/distribute", "%s-hot-dht", dist_count,
- dist_count);
- }
- if (ret == -1)
- goto out;
-
- hxl = first_of(graph);
-
- volinfo->type = GF_CLUSTER_TYPE_TIER;
- if (!is_quotad) {
- xl = volgen_graph_add_nolink(graph, "cluster/tier", "%s-%s",
- volinfo->volname, "tier-dht");
- } else {
- xl = volgen_graph_add_nolink(graph, "cluster/tier", "%s",
- volinfo->volname);
- }
- if (!xl)
- goto out;
-
- gf_asprintf(&rule, "%s-hot-dht", volinfo->volname);
-
- ret = xlator_set_option(xl, "rule", rule);
- if (ret)
- goto out;
-
- /*Each dht/tier layer must have a different xattr name*/
- ret = xlator_set_option(xl, "xattr-name", "trusted.tier.tier-dht");
- if (ret)
- goto out;
-
- ret = volgen_xlator_link(xl, cxl);
- ret = volgen_xlator_link(xl, hxl);
-
- st_type = GF_CLUSTER_TYPE_TIER;
+ ret = volgen_graph_set_options_generic(graph, set_dict, "client",
+ &threads_option_handler);
- ret = _graph_get_decommissioned_children(xl, volinfo,
- &decommissioned_children);
if (ret)
- goto out;
- if (decommissioned_children) {
- ret = xlator_set_option(xl, "decommissioned-bricks",
- decommissioned_children);
- if (ret)
- goto out;
- }
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GRAPH_SET_OPT_FAIL,
+ "changing %s threads failed", identifier);
-out:
- volinfo->brick_count = st_brick_count;
- volinfo->replica_count = st_replica_count;
- volinfo->disperse_count = st_disperse_count;
- volinfo->type = st_type;
- volinfo->dist_leaf_count = st_dist_leaf_count;
- volinfo->tier_info.cur_tier_hot = 0;
-
- if (dup_volinfo)
- glusterd_volinfo_delete(dup_volinfo);
- GF_FREE(rule);
- GF_FREE(decommissioned_children);
- return ret;
+ return 0;
}
static int
@@ -4350,9 +4225,6 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (ret)
goto out;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- ret = volume_volgen_graph_build_clusters_tier(graph, volinfo,
- _gf_false);
- else
- ret = volume_volgen_graph_build_clusters(graph, volinfo, _gf_false);
+ ret = volume_volgen_graph_build_clusters(graph, volinfo, _gf_false);
@@ -4371,23 +4243,38 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
"tcp", set_dict);
}
- ret = dict_get_str_boolean(set_dict, "features.shard", _gf_false);
+ ret = dict_get_str_boolean(set_dict, "features.cloudsync", _gf_false);
if (ret == -1)
goto out;
if (ret) {
- xl = volgen_graph_add(graph, "features/shard", volname);
+ xl = volgen_graph_add(graph, "features/cloudsync", volname);
if (!xl) {
ret = -1;
goto out;
}
}
- ret = dict_get_str_boolean(set_dict, "features.utime", _gf_false);
+ ret = dict_get_str_boolean(set_dict, "features.shard", _gf_false);
if (ret == -1)
goto out;
if (ret) {
+ xl = volgen_graph_add(graph, "features/shard", volname);
+ if (!xl) {
+ ret = -1;
+ goto out;
+ }
+ }
+ /* a. ret will be -1 if features.ctime is not set in the volinfo->dict which
+ * means ctime should be loaded into the graph.
+ * b. ret will be 1 if features.ctime is explicitly turned on through
+ * volume set and in that case ctime should be loaded into the graph.
+ * c. ret will be 0 if features.ctime is explicitly turned off and in that
+ * case ctime shouldn't be loaded into the graph.
+ */
+ ret = dict_get_str_boolean(set_dict, "features.ctime", -1);
+ if (conf->op_version >= GD_OP_VERSION_5_0 && ret) {
xl = volgen_graph_add(graph, "features/utime", volname);
if (!xl) {
ret = -1;
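
A standalone sketch of the tri-state convention documented in the comment
above (get_bool_or_default() is a stand-in for dict_get_str_boolean()):

    #include <stdio.h>

    /* Stand-in for dict_get_str_boolean(): the caller's default is returned
     * when the key is absent, else 0 or 1 for an explicit setting. */
    static int
    get_bool_or_default(const char *value, int def)
    {
        if (!value)
            return def;
        return (value[0] == 'o' && value[1] == 'n') ? 1 : 0;
    }

    int
    main(void)
    {
        const char *cases[] = {NULL, "on", "off"};
        int i;

        for (i = 0; i < 3; i++) {
            int ret = get_bool_or_default(cases[i], -1);
            /* utime is loaded for -1 (unset) and 1 (on), skipped for 0 */
            printf("features.ctime=%s -> ret=%d -> %s\n",
                   cases[i] ? cases[i] : "(unset)", ret,
                   ret ? "load utime" : "skip utime");
        }
        return 0;
    }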
@@ -4409,7 +4296,7 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = -1;
goto out;
}
- ret = xlator_set_option(xl, "read-only", "on");
+ ret = xlator_set_fixed_option(xl, "read-only", "on");
if (ret)
goto out;
}
@@ -4425,22 +4312,11 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = -1;
goto out;
}
- ret = xlator_set_option(xl, "mode", "client");
+ ret = xlator_set_fixed_option(xl, "mode", "client");
if (ret)
goto out;
}
- ret = dict_get_str_boolean(set_dict, "features.encryption", _gf_false);
- if (ret == -1)
- goto out;
- if (ret) {
- xl = volgen_graph_add(graph, "encryption/crypt", volname);
- if (!xl) {
- ret = -1;
- goto out;
- }
- }
-
/* gfproxy needs the quiesce translator */
if (gfproxy_clnt) {
xl = volgen_graph_add(graph, "features/quiesce", volname);
@@ -4466,9 +4342,9 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
/* Do not allow changing read-after-open option if root-squash is
enabled.
*/
- ret = dict_get_str(set_dict, "performance.read-after-open", &tmp);
+ ret = dict_get_str_sizen(set_dict, "performance.read-after-open", &tmp);
if (!ret) {
- ret = dict_get_str(volinfo->dict, "server.root-squash", &tmp);
+ ret = dict_get_str_sizen(volinfo->dict, "server.root-squash", &tmp);
if (!ret) {
ob = _gf_false;
ret = gf_string2boolean(tmp, &ob);
@@ -4491,15 +4367,15 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
root-squash is enabled, open-behind's option to read after
open is done is also enabled.
*/
- ret = dict_get_str(set_dict, "server.root-squash", &tmp);
+ ret = dict_get_str_sizen(set_dict, "server.root-squash", &tmp);
if (!ret) {
ret = gf_string2boolean(tmp, &var);
if (ret)
goto out;
if (var) {
- ret = dict_get_str(volinfo->dict, "performance.read-after-open",
- &tmp);
+ ret = dict_get_str_sizen(volinfo->dict,
+ "performance.read-after-open", &tmp);
if (!ret) {
ret = gf_string2boolean(tmp, &ob);
/* go ahead with turning read-after-open on
@@ -4507,11 +4383,11 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
OR if read-after-open option is turned off
*/
if (ret || !ob)
- ret = dict_set_str(set_dict, "performance.read-after-open",
- "yes");
+ ret = dict_set_sizen_str_sizen(
+ set_dict, "performance.read-after-open", "yes");
} else {
- ret = dict_set_str(set_dict, "performance.read-after-open",
- "yes");
+ ret = dict_set_sizen_str_sizen(
+ set_dict, "performance.read-after-open", "yes");
}
} else {
/* When root-squash has to be turned off, open-behind's
@@ -4521,14 +4397,14 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
option was not set before turning on root-squash.
*/
ob = _gf_false;
- ret = dict_get_str(volinfo->dict, "performance.read-after-open",
- &tmp);
+ ret = dict_get_str_sizen(volinfo->dict,
+ "performance.read-after-open", &tmp);
if (!ret) {
ret = gf_string2boolean(tmp, &ob);
if (!ret && ob) {
- ret = dict_set_str(set_dict, "performance.read-after-open",
- "yes");
+ ret = dict_set_sizen_str_sizen(
+ set_dict, "performance.read-after-open", "yes");
}
}
/* consider operation is failure only if read-after-open
@@ -4548,8 +4424,8 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = dict_get_str_boolean(set_dict, "server.manage-gids", _gf_false);
if (ret != -1) {
- ret = dict_set_str(set_dict, "client.send-gids",
- ret ? "false" : "true");
+ ret = dict_set_str_sizen(set_dict, "client.send-gids",
+ ret ? "false" : "true");
if (ret)
gf_msg(THIS->name, GF_LOG_WARNING, errno, GD_MSG_DICT_SET_FAILED,
"changing client"
@@ -4575,22 +4451,21 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (ret)
return -1;
- ret = dict_get_str_boolean(set_dict, "features.cloudsync", _gf_false);
- if (ret == -1)
- goto out;
-
- if (ret) {
- xl = volgen_graph_add(graph, "features/cloudsync", volname);
- if (!xl) {
- ret = -1;
- goto out;
- }
+ /* if the client is part of 'gfproxyd' server, then we need to keep the
+ volume name as 'gfproxyd-<volname>', for better portmapper options */
+ subvol = volname;
+ ret = dict_get_str_boolean(set_dict, "gfproxy-server", 0);
+ if (ret > 0) {
+ namelen = strlen(volinfo->volname) + SLEN("gfproxyd-") + 1;
+ subvol = alloca(namelen);
+ snprintf(subvol, namelen, "gfproxyd-%s", volname);
}
ret = -1;
- xl = volgen_graph_add_as(graph, "debug/io-stats", volname);
- if (!xl)
+ xl = volgen_graph_add_as(graph, "debug/io-stats", subvol);
+ if (!xl) {
goto out;
+ }
ret = graph_set_generic_options(this, graph, set_dict, "client");
out:
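
A minimal sketch of the gfproxyd naming above (values illustrative; the
alloca() sizing follows the hunk):

    #include <alloca.h>
    #include <stdio.h>
    #include <string.h>

    #define SLEN(str) (sizeof(str) - 1)

    int
    main(void)
    {
        const char *volname = "demo";
        char *subvol = (char *)volname;
        int gfproxy_server = 1; /* pretend set_dict said gfproxy-server=1 */

        if (gfproxy_server) {
            /* "gfproxyd-" prefix + volname + terminating NUL */
            size_t namelen = strlen(volname) + SLEN("gfproxyd-") + 1;
            subvol = alloca(namelen);
            snprintf(subvol, namelen, "gfproxyd-%s", volname);
        }
        printf("io-stats name: %s\n", subvol);
        return 0;
    }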
@@ -4621,18 +4496,18 @@ bitrot_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
void *param)
{
xlator_t *xl = NULL;
- char *bitrot_option = NULL;
int ret = 0;
xl = first_of(graph);
if (!strcmp(vme->option, "expiry-time")) {
- ret = gf_asprintf(&bitrot_option, "expiry-time");
- if (ret != -1) {
- ret = xlator_set_option(xl, bitrot_option, vme->value);
- GF_FREE(bitrot_option);
- }
+ ret = xlator_set_fixed_option(xl, "expiry-time", vme->value);
+ if (ret)
+ return -1;
+ }
+ if (!strcmp(vme->option, "signer-threads")) {
+ ret = xlator_set_fixed_option(xl, "signer-threads", vme->value);
if (ret)
return -1;
}
@@ -4645,41 +4520,25 @@ scrubber_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
void *param)
{
xlator_t *xl = NULL;
- char *scrub_option = NULL;
int ret = 0;
xl = first_of(graph);
if (!strcmp(vme->option, "scrub-throttle")) {
- ret = gf_asprintf(&scrub_option, "scrub-throttle");
- if (ret != -1) {
- ret = xlator_set_option(xl, scrub_option, vme->value);
- GF_FREE(scrub_option);
- }
-
+ ret = xlator_set_fixed_option(xl, "scrub-throttle", vme->value);
if (ret)
return -1;
}
if (!strcmp(vme->option, "scrub-frequency")) {
- ret = gf_asprintf(&scrub_option, "scrub-freq");
- if (ret != -1) {
- ret = xlator_set_option(xl, scrub_option, vme->value);
- GF_FREE(scrub_option);
- }
-
+ ret = xlator_set_fixed_option(xl, "scrub-freq", vme->value);
if (ret)
return -1;
}
if (!strcmp(vme->option, "scrubber")) {
if (!strcmp(vme->value, "pause")) {
- ret = gf_asprintf(&scrub_option, "scrub-state");
- if (ret != -1) {
- ret = xlator_set_option(xl, scrub_option, vme->value);
- GF_FREE(scrub_option);
- }
-
+ ret = xlator_set_fixed_option(xl, "scrub-state", vme->value);
if (ret)
return -1;
}
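
A before/after sketch of the simplification in these bitrot and scrubber
handlers (set_option() is a stand-in for the xlator setters):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int
    set_option(const char *key, const char *value)
    {
        printf("%s = %s\n", key, value);
        return 0;
    }

    int
    main(void)
    {
        /* before: a heap round-trip for a compile-time constant key */
        char *key = strdup("scrub-throttle");
        if (!key)
            return 1;
        set_option(key, "lazy");
        free(key);

        /* after: the literal goes straight to the fixed-option setter */
        return set_option("scrub-throttle", "lazy");
    }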
@@ -4709,158 +4568,92 @@ out:
return ret;
}
+#ifdef BUILD_GNFS
static int
nfs_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme,
void *param)
{
+ static struct nfs_opt nfs_opts[] = {
+ /* {pattern, printf_pattern} */
+ {"!rpc-auth.addr.*.allow", "rpc-auth.addr.%s.allow"},
+ {"!rpc-auth.addr.*.reject", "rpc-auth.addr.%s.reject"},
+ {"!rpc-auth.auth-unix.*", "rpc-auth.auth-unix.%s"},
+ {"!rpc-auth.auth-null.*", "rpc-auth.auth-null.%s"},
+ {"!nfs3.*.trusted-sync", "nfs3.%s.trusted-sync"},
+ {"!nfs3.*.trusted-write", "nfs3.%s.trusted-write"},
+ {"!nfs3.*.volume-access", "nfs3.%s.volume-access"},
+ {"!rpc-auth.ports.*.insecure", "rpc-auth.ports.%s.insecure"},
+ {"!nfs-disable", "nfs.%s.disable"},
+ {NULL, NULL}};
xlator_t *xl = NULL;
char *aa = NULL;
int ret = 0;
glusterd_volinfo_t *volinfo = NULL;
+ int keylen;
+ struct nfs_opt *opt = NULL;
volinfo = param;
- xl = first_of(graph);
-
- if (!volinfo || (volinfo->volname[0] == '\0'))
- return 0;
-
- if (!vme || (vme->option[0] == '\0'))
+ if (!volinfo || (volinfo->volname[0] == '\0')) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
return 0;
-
- if (!strcmp(vme->option, "!rpc-auth.addr.*.allow")) {
- ret = gf_asprintf(&aa, "rpc-auth.addr.%s.allow", volinfo->volname);
-
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
- GF_FREE(aa);
- }
-
- if (ret)
- return -1;
- }
-
- if (!strcmp(vme->option, "!rpc-auth.addr.*.reject")) {
- ret = gf_asprintf(&aa, "rpc-auth.addr.%s.reject", volinfo->volname);
-
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
- GF_FREE(aa);
- }
-
- if (ret)
- return -1;
- }
-
- if (!strcmp(vme->option, "!rpc-auth.auth-unix.*")) {
- ret = gf_asprintf(&aa, "rpc-auth.auth-unix.%s", volinfo->volname);
-
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
- GF_FREE(aa);
- }
-
- if (ret)
- return -1;
- }
- if (!strcmp(vme->option, "!rpc-auth.auth-null.*")) {
- ret = gf_asprintf(&aa, "rpc-auth.auth-null.%s", volinfo->volname);
-
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
- GF_FREE(aa);
- }
-
- if (ret)
- return -1;
}
- if (!strcmp(vme->option, "!nfs3.*.trusted-sync")) {
- ret = gf_asprintf(&aa, "nfs3.%s.trusted-sync", volinfo->volname);
-
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
- GF_FREE(aa);
- }
-
- if (ret)
- return -1;
+ if (!vme || !(vme->option)) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
+ return 0;
}
- if (!strcmp(vme->option, "!nfs3.*.trusted-write")) {
- ret = gf_asprintf(&aa, "nfs3.%s.trusted-write", volinfo->volname);
-
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
- GF_FREE(aa);
- }
+ xl = first_of(graph);
- if (ret)
- return -1;
- }
+ for (opt = nfs_opts; opt->pattern; opt++) {
+ if (!strcmp(vme->option, opt->pattern)) {
+ keylen = gf_asprintf(&aa, opt->printf_pattern, volinfo->volname);
- if (!strcmp(vme->option, "!nfs3.*.volume-access")) {
- ret = gf_asprintf(&aa, "nfs3.%s.volume-access", volinfo->volname);
+ if (keylen == -1) {
+ return -1;
+ }
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
+ ret = xlator_set_option(xl, aa, keylen, vme->value);
GF_FREE(aa);
- }
- if (ret)
- return -1;
- }
-
- if (!strcmp(vme->option, "!nfs3.*.export-dir")) {
- ret = gf_asprintf(&aa, "nfs3.%s.export-dir", volinfo->volname);
-
- if (ret != -1) {
- ret = gf_canonicalize_path(vme->value);
if (ret)
return -1;
- ret = xlator_set_option(xl, aa, vme->value);
- GF_FREE(aa);
+ goto out;
}
-
- if (ret)
- return -1;
}
- if (!strcmp(vme->option, "!rpc-auth.ports.*.insecure")) {
- ret = gf_asprintf(&aa, "rpc-auth.ports.%s.insecure", volinfo->volname);
-
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
- GF_FREE(aa);
- }
+ if (!strcmp(vme->option, "!nfs3.*.export-dir")) {
+ keylen = gf_asprintf(&aa, "nfs3.%s.export-dir", volinfo->volname);
- if (ret)
+ if (keylen == -1) {
return -1;
- }
-
- if (!strcmp(vme->option, "!nfs-disable")) {
- ret = gf_asprintf(&aa, "nfs.%s.disable", volinfo->volname);
+ }
- if (ret != -1) {
- ret = xlator_set_option(xl, aa, vme->value);
+ ret = gf_canonicalize_path(vme->value);
+ if (ret) {
GF_FREE(aa);
+ return -1;
}
+ ret = xlator_set_option(xl, aa, keylen, vme->value);
+ GF_FREE(aa);
if (ret)
return -1;
- }
-
- if ((strcmp(vme->voltype, "nfs/server") == 0) &&
- (vme->option && vme->option[0] != '!')) {
- ret = xlator_set_option(xl, vme->option, vme->value);
+ } else if ((strcmp(vme->voltype, "nfs/server") == 0) &&
+ (vme->option[0] != '!')) {
+ ret = xlator_set_option(xl, vme->option, strlen(vme->option),
+ vme->value);
if (ret)
return -1;
}
+out:
return 0;
}
+#endif
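
The rewrite above folds several near-identical strcmp blocks into one walk of
the nfs_opts table. A standalone sketch of that pattern (struct nfs_opt is
assumed to carry exactly the two fields used here; snprintf stands in for
gf_asprintf):

    #include <stdio.h>
    #include <string.h>

    struct nfs_opt {
        const char *pattern;        /* volopt map key, '*' is the volname */
        const char *printf_pattern; /* per-volume xlator option key */
    };

    int
    main(void)
    {
        static const struct nfs_opt nfs_opts[] = {
            {"!nfs-disable", "nfs.%s.disable"},
            {"!nfs3.*.trusted-sync", "nfs3.%s.trusted-sync"},
            {NULL, NULL}};
        const struct nfs_opt *opt;
        char key[128];

        for (opt = nfs_opts; opt->pattern; opt++) {
            if (strcmp("!nfs-disable", opt->pattern) == 0) {
                int keylen = snprintf(key, sizeof(key), opt->printf_pattern,
                                      "demo");
                printf("set %s (keylen %d)\n", key, keylen);
                break;
            }
        }
        return 0;
    }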
char *
volgen_get_shd_key(int type)
{
@@ -4868,7 +4661,6 @@ volgen_get_shd_key(int type)
switch (type) {
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
key = "cluster.self-heal-daemon";
break;
case GF_CLUSTER_TYPE_DISPERSE:
@@ -4882,6 +4674,27 @@ volgen_get_shd_key(int type)
return key;
}
+static int
+volgen_set_shd_key_enable(dict_t *set_dict, const int type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case GF_CLUSTER_TYPE_REPLICATE:
+ ret = dict_set_sizen_str_sizen(set_dict, "cluster.self-heal-daemon",
+ "enable");
+ break;
+ case GF_CLUSTER_TYPE_DISPERSE:
+ ret = dict_set_sizen_str_sizen(
+ set_dict, "cluster.disperse-self-heal-daemon", "enable");
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static gf_boolean_t
volgen_is_shd_compatible_xl(char *xl_type)
{
@@ -4902,7 +4715,7 @@ volgen_graph_set_iam_shd(volgen_graph_t *graph)
if (!volgen_is_shd_compatible_xl(trav->type))
continue;
- ret = xlator_set_option(trav, "iam-self-heal-daemon", "yes");
+ ret = xlator_set_fixed_option(trav, "iam-self-heal-daemon", "yes");
if (ret)
break;
}
@@ -4910,54 +4723,21 @@ volgen_graph_set_iam_shd(volgen_graph_t *graph)
}
static int
-glusterd_prepare_shd_volume_options_for_tier(glusterd_volinfo_t *volinfo,
- dict_t *set_dict)
-{
- int ret = -1;
- char *key = NULL;
-
- key = volgen_get_shd_key(volinfo->tier_info.cold_type);
- if (key) {
- ret = dict_set_str(set_dict, key, "enable");
- if (ret)
- goto out;
- }
-
- key = volgen_get_shd_key(volinfo->tier_info.hot_type);
- if (key) {
- ret = dict_set_str(set_dict, key, "enable");
- if (ret)
- goto out;
- }
-out:
- return ret;
-}
-
-static int
prepare_shd_volume_options(glusterd_volinfo_t *volinfo, dict_t *mod_dict,
dict_t *set_dict)
{
- char *key = NULL;
int ret = 0;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_prepare_shd_volume_options_for_tier(volinfo, set_dict);
- if (ret)
- goto out;
- } else {
- key = volgen_get_shd_key(volinfo->type);
- if (!key) {
- ret = -1;
- goto out;
- }
- ret = dict_set_str(set_dict, key, "enable");
- if (ret)
- goto out;
- }
+ ret = volgen_set_shd_key_enable(set_dict, volinfo->type);
+ if (ret)
+ goto out;
ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=trusted-client", NULL);
goto out;
+ }
dict_copy(volinfo->dict, set_dict);
if (mod_dict)
@@ -4972,7 +4752,6 @@ build_afr_ec_clusters(volgen_graph_t *graph, glusterd_volinfo_t *volinfo)
int clusters = -1;
switch (volinfo->type) {
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
clusters = volgen_graph_build_afr_clusters(graph, volinfo);
break;
@@ -4984,70 +4763,17 @@ build_afr_ec_clusters(volgen_graph_t *graph, glusterd_volinfo_t *volinfo)
}
static int
-build_afr_ec_clusters_for_tier(volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo, dict_t *set_dict)
-{
- int ret = 0;
- glusterd_volinfo_t *dup_volinfo[2] = {NULL, NULL};
- int clusters = 0;
- int i = 0;
- gf_boolean_t is_hot_tier = _gf_false;
-
- if (glusterd_is_shd_compatible_type(volinfo->tier_info.cold_type)) {
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo[0],
- is_hot_tier, volinfo->volname);
- if (ret)
- goto out;
- }
- if (glusterd_is_shd_compatible_type(volinfo->tier_info.hot_type)) {
- is_hot_tier = _gf_true;
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo[1],
- is_hot_tier, volinfo->volname);
- if (ret)
- goto out;
- dup_volinfo[1]->tier_info.cur_tier_hot = 1;
- }
-
- for (i = 0; i < 2; i++) {
- if (!dup_volinfo[i])
- continue;
- ret = build_afr_ec_clusters(graph, dup_volinfo[i]);
- if (ret < 0)
- goto out;
- clusters += ret;
- }
- ret = 0;
-out:
- for (i = 0; i < 2; i++) {
- if (dup_volinfo[i])
- glusterd_volinfo_delete(dup_volinfo[i]);
- }
-
- if (ret)
- clusters = -1;
-
- return clusters;
-}
-
-static int
build_shd_clusters(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
dict_t *set_dict)
{
int ret = 0;
int clusters = -1;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = volgen_graph_build_clients_for_tier_shd(graph, volinfo, set_dict);
- if (ret)
- goto out;
+ ret = volgen_graph_build_clients(graph, volinfo, set_dict, NULL);
+ if (ret)
+ goto out;
+ clusters = build_afr_ec_clusters(graph, volinfo);
- clusters = build_afr_ec_clusters_for_tier(graph, volinfo, set_dict);
- } else {
- ret = volgen_graph_build_clients(graph, volinfo, set_dict, NULL);
- if (ret)
- goto out;
- clusters = build_afr_ec_clusters(graph, volinfo);
- }
out:
return clusters;
}
@@ -5062,21 +4788,10 @@ gd_is_self_heal_enabled(glusterd_volinfo_t *volinfo, dict_t *dict)
switch (volinfo->type) {
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
case GF_CLUSTER_TYPE_DISPERSE:
shd_key = volgen_get_shd_key(volinfo->type);
shd_enabled = dict_get_str_boolean(dict, shd_key, _gf_true);
break;
- case GF_CLUSTER_TYPE_TIER:
- shd_key = volgen_get_shd_key(volinfo->tier_info.cold_type);
- if (shd_key)
- shd_enabled = dict_get_str_boolean(dict, shd_key, _gf_true);
-
- shd_key = volgen_get_shd_key(volinfo->tier_info.hot_type);
- if (shd_key)
- shd_enabled |= dict_get_str_boolean(dict, shd_key, _gf_true);
-
- break;
default:
break;
}
@@ -5127,12 +4842,7 @@ build_rebalance_volfile(glusterd_volinfo_t *volinfo, char *filepath,
if (ret)
goto out;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- ret = volume_volgen_graph_build_clusters_tier(&graph, volinfo,
- _gf_false);
- else
- ret = volume_volgen_graph_build_clusters(&graph, volinfo, _gf_false);
-
+ ret = volume_volgen_graph_build_clusters(&graph, volinfo, _gf_false);
if (ret)
goto out;
@@ -5163,24 +4873,15 @@ out:
static int
build_shd_volume_graph(xlator_t *this, volgen_graph_t *graph,
glusterd_volinfo_t *volinfo, dict_t *mod_dict,
- dict_t *set_dict, gf_boolean_t graph_check,
- gf_boolean_t *valid_config)
+ dict_t *set_dict, gf_boolean_t graph_check)
{
volgen_graph_t cgraph = {0};
int ret = 0;
int clusters = -1;
- if (!graph_check && (volinfo->status != GLUSTERD_STATUS_STARTED))
- goto out;
-
if (!glusterd_is_shd_compatible_volume(volinfo))
goto out;
- /* Shd graph is valid only when there is at least one
- * replica/disperse volume is present
- */
- *valid_config = _gf_true;
-
ret = prepare_shd_volume_options(volinfo, mod_dict, set_dict);
if (ret)
goto out;
@@ -5210,51 +4911,43 @@ out:
}
int
-build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict)
+build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph,
+ dict_t *mod_dict)
{
- glusterd_volinfo_t *voliter = NULL;
xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
dict_t *set_dict = NULL;
int ret = 0;
- gf_boolean_t valid_config = _gf_false;
xlator_t *iostxl = NULL;
gf_boolean_t graph_check = _gf_false;
this = THIS;
- priv = this->private;
set_dict = dict_new();
if (!set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -ENOMEM;
goto out;
}
if (mod_dict)
graph_check = dict_get_str_boolean(mod_dict, "graph-check", 0);
- iostxl = volgen_graph_add_as(graph, "debug/io-stats", "glustershd");
+ iostxl = volgen_graph_add_as(graph, "debug/io-stats", volinfo->volname);
if (!iostxl) {
ret = -1;
goto out;
}
- cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
- {
- ret = build_shd_volume_graph(this, graph, voliter, mod_dict, set_dict,
- graph_check, &valid_config);
- ret = dict_reset(set_dict);
- if (ret)
- goto out;
- }
+ ret = build_shd_volume_graph(this, graph, volinfo, mod_dict, set_dict,
+ graph_check);
out:
if (set_dict)
dict_unref(set_dict);
- if (!valid_config)
- ret = -EINVAL;
return ret;
}
+#ifdef BUILD_GNFS
+
static int
volgen_graph_set_iam_nfsd(const volgen_graph_t *graph)
{
@@ -5265,7 +4958,7 @@ volgen_graph_set_iam_nfsd(const volgen_graph_t *graph)
if (strcmp(trav->type, "cluster/replicate") != 0)
continue;
- ret = xlator_set_option(trav, "iam-nfs-daemon", "yes");
+ ret = xlator_set_fixed_option(trav, "iam-nfs-daemon", "yes");
if (ret)
break;
}
@@ -5309,15 +5002,15 @@ build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict)
ret = -1;
goto out;
}
- ret = xlator_set_option(nfsxl, "nfs.dynamic-volumes", "on");
+ ret = xlator_set_fixed_option(nfsxl, "nfs.dynamic-volumes", "on");
if (ret)
goto out;
- ret = xlator_set_option(nfsxl, "nfs.nlm", "on");
+ ret = xlator_set_fixed_option(nfsxl, "nfs.nlm", "on");
if (ret)
goto out;
- ret = xlator_set_option(nfsxl, "nfs.drc", "off");
+ ret = xlator_set_fixed_option(nfsxl, "nfs.drc", "off");
if (ret)
goto out;
@@ -5335,7 +5028,7 @@ build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict)
"Out of memory");
goto out;
}
- ret = xlator_set_option(nfsxl, skey, "*");
+ ret = xlator_set_option(nfsxl, skey, ret, "*");
GF_FREE(skey);
if (ret)
goto out;
@@ -5346,7 +5039,8 @@ build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict)
"Out of memory");
goto out;
}
- ret = xlator_set_option(nfsxl, skey, uuid_utoa(voliter->volume_id));
+ ret = xlator_set_option(nfsxl, skey, ret,
+ uuid_utoa(voliter->volume_id));
GF_FREE(skey);
if (ret)
goto out;
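
xlator_set_option() now takes an explicit key length, and the return value of
gf_asprintf() is reused for it. A standalone sketch with glibc asprintf():

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        char *skey = NULL;

        /* asprintf (like gf_asprintf) returns the formatted length, which
         * can be passed on as the key length instead of a second strlen()
         * pass; -1 signals allocation failure. */
        int keylen = asprintf(&skey, "%s-id", "demo");
        if (keylen == -1)
            return 1;
        printf("key=%s keylen=%d\n", skey, keylen);
        free(skey);
        return 0;
    }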
@@ -5367,27 +5061,44 @@ build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict)
else
get_transport_type(voliter, voliter->dict, nfs_xprt, _gf_true);
- ret = dict_set_str(set_dict, "performance.stat-prefetch", "off");
- if (ret)
+ ret = dict_set_sizen_str_sizen(set_dict, "performance.stat-prefetch",
+ "off");
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=performance.stat-prefetch", NULL);
goto out;
+ }
- ret = dict_set_str(set_dict, "performance.client-io-threads", "off");
- if (ret)
+ ret = dict_set_sizen_str_sizen(set_dict,
+ "performance.client-io-threads", "off");
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=performance.client-io-threads", NULL);
goto out;
+ }
- ret = dict_set_str(set_dict, "client-transport-type", nfs_xprt);
- if (ret)
+ ret = dict_set_str_sizen(set_dict, "client-transport-type", nfs_xprt);
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=client-transport-type", NULL);
goto out;
+ }
ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=trusted-client", NULL);
goto out;
+ }
- ret = dict_set_str(set_dict, "nfs-volume-file", "yes");
- if (ret)
+ ret = dict_set_sizen_str_sizen(set_dict, "nfs-volume-file", "yes");
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=nfs-volume-file", NULL);
goto out;
+ }
- if (mod_dict && (data = dict_get(mod_dict, "volume-name"))) {
+ if (mod_dict && (data = dict_get_sizen(mod_dict, "volume-name"))) {
volname = data->data;
if (strcmp(volname, voliter->volname) == 0)
dict_copy(mod_dict, set_dict);
@@ -5444,7 +5155,7 @@ out:
return ret;
}
-
+#endif
/****************************
*
* Volume generation interface
@@ -5510,7 +5221,7 @@ glusterd_is_valid_volfpath(char *volname, char *brick)
ret = 0;
goto out;
}
- strncpy(volinfo->volname, volname, strlen(volname));
+ (void)snprintf(volinfo->volname, sizeof(volinfo->volname), "%s", volname);
get_brick_filepath(volfpath, volinfo, brickinfo, NULL);
ret = ((strlen(volfpath) < PATH_MAX) &&
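
The strncpy() to snprintf() swaps here and in the transt hunks below avoid
leaving the destination unterminated on truncation; a minimal demonstration:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        char dst[4];
        int n;

        /* strncpy() leaves dst unterminated when src fills the buffer ... */
        strncpy(dst, "tcp,rdma", sizeof(dst) - 1);
        dst[sizeof(dst) - 1] = '\0'; /* the manual fix-up it needs */

        /* ... snprintf() always NUL-terminates and reports the full size. */
        n = snprintf(dst, sizeof(dst), "%s", "tcp,rdma");
        printf("kept \"%s\", wanted %d bytes\n", dst, n);
        return 0;
    }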
@@ -5627,8 +5338,11 @@ build_quotad_graph(volgen_graph_t *graph, dict_t *mod_dict)
continue;
ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=trusted-client", NULL);
goto out;
+ }
dict_copy(voliter->dict, set_dict);
if (mod_dict)
@@ -5640,7 +5354,7 @@ build_quotad_graph(volgen_graph_t *graph, dict_t *mod_dict)
"Out of memory");
goto out;
}
- ret = xlator_set_option(quotad_xl, skey, voliter->volname);
+ ret = xlator_set_option(quotad_xl, skey, ret, voliter->volname);
GF_FREE(skey);
if (ret)
goto out;
@@ -5650,12 +5364,7 @@ build_quotad_graph(volgen_graph_t *graph, dict_t *mod_dict)
if (ret)
goto out;
- if (voliter->type == GF_CLUSTER_TYPE_TIER)
- ret = volume_volgen_graph_build_clusters_tier(&cgraph, voliter,
- _gf_true);
- else
- ret = volume_volgen_graph_build_clusters(&cgraph, voliter,
- _gf_true);
+ ret = volume_volgen_graph_build_clusters(&cgraph, voliter, _gf_true);
if (ret) {
ret = -1;
goto out;
@@ -5717,23 +5426,6 @@ get_parent_vol_tstamp_file(char *filename, glusterd_volinfo_t *volinfo)
}
}
-void
-assign_jbr_uuids(glusterd_volinfo_t *volinfo)
-{
- glusterd_brickinfo_t *brickinfo = NULL;
- int in_group = 0;
- uuid_t tmp_uuid;
-
- list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
- {
- if (in_group == 0)
- gf_uuid_generate(tmp_uuid);
- gf_uuid_copy(brickinfo->jbr_uuid, tmp_uuid);
- if (++in_group >= volinfo->replica_count)
- in_group = 0;
- }
-}
-
int
generate_brick_volfiles(glusterd_volinfo_t *volinfo)
{
@@ -5801,10 +5493,6 @@ generate_brick_volfiles(glusterd_volinfo_t *volinfo)
}
}
- if (glusterd_volinfo_get_boolean(volinfo, "cluster.jbr") > 0) {
- assign_jbr_uuids(volinfo);
- }
-
ret = glusterd_volume_brick_for_each(volinfo, NULL,
glusterd_generate_brick_volfile);
if (ret)
@@ -5850,16 +5538,23 @@ glusterd_generate_client_per_brick_volfile(glusterd_volinfo_t *volinfo)
int ret = -1;
char *ssl_str = NULL;
gf_boolean_t ssl_bool = _gf_false;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = dict_set_uint32(dict, "trusted-client", GF_CLIENT_TRUSTED);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=trusted-client", NULL);
goto free_dict;
+ }
- if (dict_get_str(volinfo->dict, "client.ssl", &ssl_str) == 0) {
+ if (dict_get_str_sizen(volinfo->dict, "client.ssl", &ssl_str) == 0) {
if (gf_string2boolean(ssl_str, &ssl_bool) == 0) {
if (ssl_bool) {
if (dict_set_dynstr_with_alloc(dict, "client.ssl", "on") != 0) {
@@ -5939,17 +5634,25 @@ generate_dummy_client_volfiles(glusterd_volinfo_t *volinfo)
enumerate_transport_reqs(volinfo->transport_type, types);
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
for (i = 0; types[i]; i++) {
ret = dict_set_str(dict, "client-transport-type", types[i]);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=client-transport-type", NULL);
goto out;
+ }
type = transport_str_to_type(types[i]);
ret = dict_set_uint32(dict, "trusted-client", GF_CLIENT_OTHER);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=trusted-client", NULL);
goto out;
+ }
ret = glusterd_get_dummy_client_filepath(filepath, volinfo, type);
if (ret) {
@@ -6010,24 +5713,31 @@ generate_client_volfiles(glusterd_volinfo_t *volinfo,
enumerate_transport_reqs(volinfo->transport_type, types);
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
for (i = 0; types[i]; i++) {
ret = dict_set_str(dict, "client-transport-type", types[i]);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=client-transport-type", NULL);
goto out;
+ }
type = transport_str_to_type(types[i]);
ret = dict_set_uint32(dict, "trusted-client", client_type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=trusted-client", NULL);
goto out;
+ }
if (client_type == GF_CLIENT_TRUSTED) {
ret = glusterd_get_trusted_client_filepath(filepath, volinfo, type);
} else if (client_type == GF_CLIENT_TRUSTED_PROXY) {
glusterd_get_gfproxy_client_volfile(volinfo, filepath, PATH_MAX);
- ret = dict_set_int32n(dict, "gfproxy-client",
- SLEN("gfproxy-client"), 1);
+ ret = dict_set_int32_sizen(dict, "gfproxy-client", 1);
} else {
ret = glusterd_get_client_filepath(filepath, volinfo, type);
}
@@ -6074,7 +5784,6 @@ glusterd_snapdsvc_generate_volfile(volgen_graph_t *graph,
dict_t *set_dict = NULL;
char *loglevel = NULL;
char *xlator = NULL;
- char auth_path[] = "auth-path";
char *ssl_str = NULL;
gf_boolean_t ssl_bool = _gf_false;
@@ -6082,9 +5791,9 @@ glusterd_snapdsvc_generate_volfile(volgen_graph_t *graph,
if (!set_dict)
return -1;
- ret = dict_get_str(set_dict, "xlator", &xlator);
+ ret = dict_get_str_sizen(set_dict, "xlator", &xlator);
if (!ret) {
- ret = dict_get_str(set_dict, "loglevel", &loglevel);
+ ret = dict_get_str_sizen(set_dict, "loglevel", &loglevel);
if (ret) {
gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
"could not get both"
@@ -6098,7 +5807,7 @@ glusterd_snapdsvc_generate_volfile(volgen_graph_t *graph,
if (!xl)
return -1;
- ret = xlator_set_option(xl, "volname", volinfo->volname);
+ ret = xlator_set_fixed_option(xl, "volname", volinfo->volname);
if (ret)
return -1;
@@ -6115,15 +5824,15 @@ glusterd_snapdsvc_generate_volfile(volgen_graph_t *graph,
if (!xl)
return -1;
- ret = xlator_set_option(xl, "transport-type", "tcp");
+ ret = xlator_set_fixed_option(xl, "transport-type", "tcp");
if (ret)
return -1;
- if (dict_get_str(set_dict, "server.ssl", &ssl_str) == 0) {
+ if (dict_get_str_sizen(set_dict, "server.ssl", &ssl_str) == 0) {
if (gf_string2boolean(ssl_str, &ssl_bool) == 0) {
if (ssl_bool) {
- ret = xlator_set_option(xl, "transport.socket.ssl-enabled",
- "true");
+ ret = xlator_set_fixed_option(
+ xl, "transport.socket.ssl-enabled", "true");
if (ret) {
return -1;
}
@@ -6143,18 +5852,19 @@ glusterd_snapdsvc_generate_volfile(volgen_graph_t *graph,
username = glusterd_auth_get_username(volinfo);
passwd = glusterd_auth_get_password(volinfo);
- snprintf(key, sizeof(key), "auth.login.snapd-%s.allow", volinfo->volname);
- ret = xlator_set_option(xl, key, username);
+ ret = snprintf(key, sizeof(key), "auth.login.snapd-%s.allow",
+ volinfo->volname);
+ ret = xlator_set_option(xl, key, ret, username);
if (ret)
return -1;
- snprintf(key, sizeof(key), "auth.login.%s.password", username);
- ret = xlator_set_option(xl, key, passwd);
+ ret = snprintf(key, sizeof(key), "auth.login.%s.password", username);
+ ret = xlator_set_option(xl, key, ret, passwd);
if (ret)
return -1;
snprintf(key, sizeof(key), "snapd-%s", volinfo->volname);
- ret = xlator_set_option(xl, auth_path, key);
+ ret = xlator_set_fixed_option(xl, "auth-path", key);
if (ret)
return -1;
@@ -6171,10 +5881,15 @@ prepare_bitrot_scrub_volume_options(glusterd_volinfo_t *volinfo,
dict_t *mod_dict, dict_t *set_dict)
{
int ret = 0;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=trusted-client", NULL);
goto out;
+ }
dict_copy(volinfo->dict, set_dict);
if (mod_dict)
@@ -6205,7 +5920,7 @@ build_bitd_clusters(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (ret < 0)
goto out;
- ret = xlator_set_option(xl, "brick-count", brick_hint);
+ ret = xlator_set_fixed_option(xl, "brick-count", brick_hint);
if (ret)
goto out;
@@ -6242,6 +5957,7 @@ build_bitd_volume_graph(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
set_dict = dict_new();
if (!set_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -6252,7 +5968,7 @@ build_bitd_volume_graph(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
get_transport_type(volinfo, set_dict, transt, _gf_false);
if (!strncmp(transt, "tcp,rdma", SLEN("tcp,rdma")))
- strncpy(transt, "tcp", sizeof(transt));
+ (void)snprintf(transt, sizeof(transt), "%s", "tcp");
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
@@ -6366,7 +6082,7 @@ build_scrub_clusters(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
xl = first_of(graph);
- ret = xlator_set_option(xl, "scrubber", "true");
+ ret = xlator_set_fixed_option(xl, "scrubber", "true");
if (ret)
goto out;
@@ -6411,7 +6127,7 @@ build_scrub_volume_graph(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
get_transport_type(volinfo, set_dict, transt, _gf_false);
if (!strncmp(transt, "tcp,rdma", SLEN("tcp,rdma")))
- strncpy(transt, "tcp", sizeof(transt));
+ (void)snprintf(transt, sizeof(transt), "%s", "tcp");
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
@@ -6566,7 +6282,11 @@ glusterd_create_volfiles(glusterd_volinfo_t *volinfo)
if (ret)
gf_log(this->name, GF_LOG_ERROR, "Could not generate gfproxy volfiles");
- dict_del(volinfo->dict, "skip-CLIOT");
+ ret = glusterd_shdsvc_create_volfile(volinfo);
+ if (ret)
+ gf_log(this->name, GF_LOG_ERROR, "Could not generate shd volfiles");
+
+ dict_del_sizen(volinfo->dict, "skip-CLIOT");
out:
return ret;
@@ -6643,10 +6363,13 @@ validate_shdopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
ret = 0;
goto out;
}
- ret = dict_set_int32n(val_dict, "graph-check", SLEN("graph-check"), 1);
- if (ret)
+ ret = dict_set_int32_sizen(val_dict, "graph-check", 1);
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=graph-check", NULL);
goto out;
- ret = build_shd_graph(&graph, val_dict);
+ }
+ ret = build_shd_graph(volinfo, &graph, val_dict);
if (!ret)
ret = graph_reconf_validateopt(&graph.graph, op_errstr);
@@ -6654,11 +6377,12 @@ validate_shdopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
gf_msg_debug("glusterd", 0, "Returning %d", ret);
out:
- dict_deln(val_dict, "graph-check", SLEN("graph-check"));
+ dict_del_sizen(val_dict, "graph-check");
return ret;
}
-int
+#ifdef BUILD_GNFS
+static int
validate_nfsopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
char **op_errstr)
{
@@ -6680,7 +6404,7 @@ validate_nfsopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
graph.errstr = op_errstr;
get_vol_transport_type(volinfo, transport_type);
- ret = dict_get_str(val_dict, "nfs.transport-type", &tt);
+ ret = dict_get_str_sizen(val_dict, "nfs.transport-type", &tt);
if (!ret) {
if (volinfo->transport_type != GF_TRANSPORT_BOTH_TCP_RDMA) {
snprintf(err_str, sizeof(err_str),
@@ -6698,13 +6422,15 @@ validate_nfsopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
"wrong transport "
"type %s",
tt);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INCOMPATIBLE_VALUE,
+ "Type=%s", tt, NULL);
*op_errstr = gf_strdup(err_str);
ret = -1;
goto out;
}
}
- ret = dict_set_str(val_dict, "volume-name", volinfo->volname);
+ ret = dict_set_str_sizen(val_dict, "volume-name", volinfo->volname);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
"Failed to set volume name");
@@ -6718,11 +6444,12 @@ validate_nfsopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
volgen_graph_free(&graph);
out:
- if (dict_get(val_dict, "volume-name"))
- dict_del(val_dict, "volume-name");
+ if (dict_get_sizen(val_dict, "volume-name"))
+ dict_del_sizen(val_dict, "volume-name");
gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
+#endif
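
Reviewer note: validate_nfsopts() is now static and compiled only under BUILD_GNFS, with matching guards at its call sites later in this file. A minimal sketch of the pattern (names illustrative):

    #ifdef BUILD_GNFS
    static int
    validate_nfsopts_stub(void)
    {
        return 0; /* gNFS-specific validation lives here */
    }
    #endif

    static int
    validate_all_stub(void)
    {
    #ifdef BUILD_GNFS
        if (validate_nfsopts_stub())
            return -1;
    #endif
        return 0; /* non-gNFS builds never reference the symbol */
    }
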
int
validate_clientopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
@@ -6765,6 +6492,7 @@ validate_brickopts(glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *brickinfo,
graph.errstr = op_errstr;
full_dict = dict_new();
if (!full_dict) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -6840,13 +6568,13 @@ glusterd_validate_globalopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
gf_msg_debug("glusterd", 0, "Could not Validate client");
goto out;
}
-
+#ifdef BUILD_GNFS
ret = validate_nfsopts(volinfo, val_dict, op_errstr);
if (ret) {
gf_msg_debug("glusterd", 0, "Could not Validate nfs");
goto out;
}
-
+#endif
ret = validate_shdopts(volinfo, val_dict, op_errstr);
if (ret) {
gf_msg_debug("glusterd", 0, "Could not Validate self-heald");
@@ -6896,12 +6624,13 @@ glusterd_validate_reconfopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
goto out;
}
+#ifdef BUILD_GNFS
ret = validate_nfsopts(volinfo, val_dict, op_errstr);
if (ret) {
gf_msg_debug("glusterd", 0, "Could not Validate nfs");
goto out;
}
-
+#endif
ret = validate_shdopts(volinfo, val_dict, op_errstr);
if (ret) {
gf_msg_debug("glusterd", 0, "Could not Validate self-heald");
@@ -6913,13 +6642,16 @@ out:
return ret;
}
-static struct volopt_map_entry *
-_gd_get_vmep(char *key)
+struct volopt_map_entry *
+gd_get_vmep(const char *key)
{
char *completion = NULL;
struct volopt_map_entry *vmep = NULL;
int ret = 0;
+ if (!key)
+ return NULL;
+
COMPLETE_OPTION((char *)key, completion, ret);
for (vmep = glusterd_volopt_map; vmep->key; vmep++) {
if (strcmp(vmep->key, key) == 0)
@@ -6930,13 +6662,8 @@ _gd_get_vmep(char *key)
}
uint32_t
-glusterd_get_op_version_for_key(char *key)
+glusterd_get_op_version_from_vmep(struct volopt_map_entry *vmep)
{
- struct volopt_map_entry *vmep = NULL;
-
- GF_ASSERT(key);
-
- vmep = _gd_get_vmep(key);
if (vmep)
return vmep->op_version;
@@ -6944,13 +6671,8 @@ glusterd_get_op_version_for_key(char *key)
}
gf_boolean_t
-gd_is_client_option(char *key)
+gd_is_client_option(struct volopt_map_entry *vmep)
{
- struct volopt_map_entry *vmep = NULL;
-
- GF_ASSERT(key);
-
- vmep = _gd_get_vmep(key);
if (vmep && (vmep->flags & VOLOPT_FLAG_CLIENT_OPT))
return _gf_true;
@@ -6958,23 +6680,17 @@ gd_is_client_option(char *key)
}
gf_boolean_t
-gd_is_xlator_option(char *key)
+gd_is_xlator_option(struct volopt_map_entry *vmep)
{
- struct volopt_map_entry *vmep = NULL;
-
- GF_ASSERT(key);
-
- vmep = _gd_get_vmep(key);
if (vmep && (vmep->flags & VOLOPT_FLAG_XLATOR_OPT))
return _gf_true;
return _gf_false;
}
-volume_option_type_t
-_gd_get_option_type(char *key)
+static volume_option_type_t
+_gd_get_option_type(struct volopt_map_entry *vmep)
{
- struct volopt_map_entry *vmep = NULL;
void *dl_handle = NULL;
volume_opt_list_t vol_opt_list = {
{0},
@@ -6984,10 +6700,6 @@ _gd_get_option_type(char *key)
char *xlopt_key = NULL;
volume_option_type_t opt_type = GF_OPTION_TYPE_MAX;
- GF_ASSERT(key);
-
- vmep = _gd_get_vmep(key);
-
if (vmep) {
CDS_INIT_LIST_HEAD(&vol_opt_list.list);
ret = xlator_volopt_dynload(vmep->voltype, &dl_handle, &vol_opt_list);
@@ -7014,12 +6726,29 @@ out:
}
gf_boolean_t
-gd_is_boolean_option(char *key)
+gd_is_boolean_option(struct volopt_map_entry *vmep)
{
- GF_ASSERT(key);
-
- if (GF_OPTION_TYPE_BOOL == _gd_get_option_type(key))
+ if (GF_OPTION_TYPE_BOOL == _gd_get_option_type(vmep))
return _gf_true;
return _gf_false;
}
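
Reviewer note: the predicates above used to repeat the volopt-map scan on every query; callers are now expected to look the entry up once with gd_get_vmep() and pass it around. A hypothetical caller (not part of this patch) showing the intended flow:

    #include "glusterd-volgen.h"

    static void
    classify_option(const char *key)
    {
        struct volopt_map_entry *vmep = gd_get_vmep(key);

        if (!vmep)
            return; /* unknown option */

        /* one lookup, then cheap flag/type checks on the cached entry */
        uint32_t opver = glusterd_get_op_version_from_vmep(vmep);

        if (gd_is_client_option(vmep) && gd_is_boolean_option(vmep))
            gf_msg_debug("glusterd", 0,
                         "%s: client-side boolean option, op-version %u",
                         key, opver);
    }
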
+
+int
+glusterd_shdsvc_generate_volfile(glusterd_volinfo_t *volinfo, char *filename,
+ dict_t *mode_dict)
+{
+ int ret = -1;
+ volgen_graph_t graph = {
+ 0,
+ };
+
+ graph.type = GF_SHD;
+ ret = build_shd_graph(volinfo, &graph, mode_dict);
+ if (!ret)
+ ret = volgen_write_volfile(&graph, filename);
+
+ volgen_graph_free(&graph);
+
+ return ret;
+}
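
Reviewer note: glusterd_shdsvc_generate_volfile() follows the per-volume graph-builder shape that the glusterd_vol_graph_builder_t typedef (added to glusterd-volgen.h below) formalizes. A hypothetical caller, with the volfile path purely illustrative and a NULL mod-dict assumed to mean "build from volinfo->dict alone":

    #include <limits.h> /* PATH_MAX */
    #include <stdio.h>
    #include "glusterd-volgen.h"

    static int
    regen_shd_volfile(glusterd_volinfo_t *volinfo)
    {
        char filename[PATH_MAX];

        snprintf(filename, sizeof(filename),
                 "/var/lib/glusterd/vols/%s/%s-shd.vol", volinfo->volname,
                 volinfo->volname);
        return glusterd_shdsvc_generate_volfile(volinfo, filename, NULL);
    }
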
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h
index f9fc068931b..cd4d0c7d0cc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h
@@ -38,6 +38,9 @@
#define VKEY_RDA_CACHE_LIMIT "performance.rda-cache-limit"
#define VKEY_RDA_REQUEST_SIZE "performance.rda-request-size"
#define VKEY_CONFIG_GFPROXY "config.gfproxyd"
+#define VKEY_CONFIG_GLOBAL_THREADING "config.global-threading"
+#define VKEY_CONFIG_CLIENT_THREADS "config.client-threads"
+#define VKEY_CONFIG_BRICK_THREADS "config.brick-threads"
#define AUTH_ALLOW_MAP_KEY "auth.allow"
#define AUTH_REJECT_MAP_KEY "auth.reject"
@@ -66,6 +69,7 @@ typedef enum {
GF_REBALANCED = 1,
GF_QUOTAD,
GF_SNAPD,
+ GF_SHD,
} glusterd_graph_type_t;
struct volgen_graph {
@@ -77,6 +81,8 @@ typedef struct volgen_graph volgen_graph_t;
typedef int (*glusterd_graph_builder_t)(volgen_graph_t *graph,
dict_t *mod_dict);
+typedef int (*glusterd_vol_graph_builder_t)(glusterd_volinfo_t *,
+ char *filename, dict_t *mod_dict);
#define COMPLETE_OPTION(key, completion, ret) \
do { \
@@ -168,6 +174,12 @@ struct volgen_brick_xlator {
* delay-gen before this xlator */
char *dbg_key;
};
+
+struct nfs_opt {
+ const char *pattern;
+ const char *printf_pattern;
+};
+
typedef struct volgen_brick_xlator volgen_brick_xlator_t;
int
@@ -201,11 +213,13 @@ void
glusterd_get_shd_filepath(char *filename);
int
-build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict);
+build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph,
+ dict_t *mod_dict);
+#ifdef BUILD_GNFS
int
build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict);
-
+#endif
int
build_quotad_graph(volgen_graph_t *graph, dict_t *mod_dict);
@@ -284,17 +298,20 @@ end_sethelp_xml_doc(xmlTextWriterPtr writer);
char *
glusterd_get_trans_type_rb(gf_transport_type ttype);
+struct volopt_map_entry *
+gd_get_vmep(const char *key);
+
uint32_t
-glusterd_get_op_version_for_key(char *key);
+glusterd_get_op_version_from_vmep(struct volopt_map_entry *vmep);
gf_boolean_t
-gd_is_client_option(char *key);
+gd_is_client_option(struct volopt_map_entry *vmep);
gf_boolean_t
-gd_is_xlator_option(char *key);
+gd_is_xlator_option(struct volopt_map_entry *vmep);
gf_boolean_t
-gd_is_boolean_option(char *key);
+gd_is_boolean_option(struct volopt_map_entry *vmep);
char *
volgen_get_shd_key(int type);
@@ -313,4 +330,9 @@ glusterd_generate_gfproxyd_volfile(glusterd_volinfo_t *volinfo);
int
glusterd_build_gfproxyd_volfile(glusterd_volinfo_t *volinfo, char *filename);
+
+int
+glusterd_shdsvc_generate_volfile(glusterd_volinfo_t *volinfo, char *filename,
+ dict_t *mode_dict);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index ac42b3ef73c..814ab14fb27 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -7,12 +7,8 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#ifdef HAVE_BD_XLATOR
-#include <lvm2app.h>
-#endif
-
-#include "common-utils.h"
-#include "syscall.h"
+#include <glusterfs/common-utils.h>
+#include <glusterfs/syscall.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
@@ -22,7 +18,7 @@
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
-#include "run.h"
+#include <glusterfs/run.h>
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
@@ -41,234 +37,6 @@
#define glusterd_op_start_volume_args_get(dict, volname, flags) \
glusterd_op_stop_volume_args_get(dict, volname, flags)
-gf_ai_compare_t
-glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next)
-{
- int ret = -1;
- struct addrinfo *tmp1 = NULL;
- struct addrinfo *tmp2 = NULL;
- char firstip[NI_MAXHOST] = {0,};
- char nextip[NI_MAXHOST] = {
- 0,
- };
-
- for (tmp1 = first; tmp1 != NULL; tmp1 = tmp1->ai_next) {
- ret = getnameinfo(tmp1->ai_addr, tmp1->ai_addrlen, firstip, NI_MAXHOST,
- NULL, 0, NI_NUMERICHOST);
- if (ret)
- return GF_AI_COMPARE_ERROR;
- for (tmp2 = next; tmp2 != NULL; tmp2 = tmp2->ai_next) {
- ret = getnameinfo(tmp2->ai_addr, tmp2->ai_addrlen, nextip,
- NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
- if (ret)
- return GF_AI_COMPARE_ERROR;
- if (!strcmp(firstip, nextip)) {
- return GF_AI_COMPARE_MATCH;
- }
- }
- }
- return GF_AI_COMPARE_NO_MATCH;
-}
-
-/* Check for non optimal brick order for replicate :
- * Checks if bricks belonging to a replicate volume
- * are present on the same server
- */
-int32_t
-glusterd_check_brick_order(dict_t *dict, char *err_str)
-{
- int ret = -1;
- int i = 0;
- int j = 0;
- int k = 0;
- xlator_t *this = NULL;
- addrinfo_list_t *ai_list = NULL;
- addrinfo_list_t *ai_list_tmp1 = NULL;
- addrinfo_list_t *ai_list_tmp2 = NULL;
- char *brick = NULL;
- char *brick_list = NULL;
- char *brick_list_dup = NULL;
- char *brick_list_ptr = NULL;
- char *tmpptr = NULL;
- char *volname = NULL;
- int32_t brick_count = 0;
- int32_t type = GF_CLUSTER_TYPE_NONE;
- int32_t sub_count = 0;
- struct addrinfo *ai_info = NULL;
-
- const char failed_string[2048] =
- "Failed to perform brick order "
- "check. Use 'force' at the end of the command"
- " if you want to override this behavior. ";
- const char found_string[2048] =
- "Multiple bricks of a %s "
- "volume are present on the same server. This "
- "setup is not optimal. Bricks should be on "
- "different nodes to have best fault tolerant "
- "configuration. Use 'force' at the end of the "
- "command if you want to override this "
- "behavior. ";
-
- this = THIS;
-
- GF_ASSERT(this);
-
- ai_list = malloc(sizeof(addrinfo_list_t));
- ai_list->info = NULL;
- CDS_INIT_LIST_HEAD(&ai_list->list);
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
- goto out;
- }
-
- ret = dict_get_int32n(dict, "type", SLEN("type"), &type);
- if (ret) {
- snprintf(err_str, 512, "Unable to get type of volume %s", volname);
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s",
- err_str);
- goto out;
- }
-
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &brick_list);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Bricks check : Could not "
- "retrieve bricks list");
- goto out;
- }
-
- ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Bricks check : Could not "
- "retrieve brick count");
- goto out;
- }
-
- if (type != GF_CLUSTER_TYPE_DISPERSE) {
- ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
- &sub_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Bricks check : Could"
- " not retrieve replica count");
- goto out;
- }
- gf_msg_debug(this->name, 0,
- "Replicate cluster type "
- "found. Checking brick order.");
- } else {
- ret = dict_get_int32n(dict, "disperse-count", SLEN("disperse-count"),
- &sub_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Bricks check : Could"
- " not retrieve disperse count");
- goto out;
- }
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DISPERSE_CLUSTER_FOUND,
- "Disperse cluster type"
- " found. Checking brick order.");
- }
-
- brick_list_dup = brick_list_ptr = gf_strdup(brick_list);
- /* Resolve hostnames and get addrinfo */
- while (i < brick_count) {
- ++i;
- brick = strtok_r(brick_list_dup, " \n", &tmpptr);
- brick_list_dup = tmpptr;
- if (brick == NULL)
- goto check_failed;
- brick = strtok_r(brick, ":", &tmpptr);
- if (brick == NULL)
- goto check_failed;
- ret = getaddrinfo(brick, NULL, NULL, &ai_info);
- if (ret != 0) {
- ret = 0;
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
- "unable to resolve "
- "host name");
- goto out;
- }
- ai_list_tmp1 = malloc(sizeof(addrinfo_list_t));
- if (ai_list_tmp1 == NULL) {
- ret = 0;
- gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "failed to allocate "
- "memory");
- freeaddrinfo(ai_info);
- goto out;
- }
- ai_list_tmp1->info = ai_info;
- cds_list_add_tail(&ai_list_tmp1->list, &ai_list->list);
- ai_list_tmp1 = NULL;
- }
-
- i = 0;
- ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list);
-
- /* Check for bad brick order */
- while (i < brick_count) {
- ++i;
- ai_info = ai_list_tmp1->info;
- ai_list_tmp1 = cds_list_entry(ai_list_tmp1->list.next, addrinfo_list_t,
- list);
- if (0 == i % sub_count) {
- j = 0;
- continue;
- }
- ai_list_tmp2 = ai_list_tmp1;
- k = j;
- while (k < sub_count - 1) {
- ++k;
- ret = glusterd_compare_addrinfo(ai_info, ai_list_tmp2->info);
- if (GF_AI_COMPARE_ERROR == ret)
- goto check_failed;
- if (GF_AI_COMPARE_MATCH == ret)
- goto found_bad_brick_order;
- ai_list_tmp2 = cds_list_entry(ai_list_tmp2->list.next,
- addrinfo_list_t, list);
- }
- ++j;
- }
- gf_msg_debug(this->name, 0, "Brick order okay");
- ret = 0;
- goto out;
-
-check_failed:
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER_CHECK_FAIL,
- "Failed bad brick order check");
- snprintf(err_str, sizeof(failed_string), failed_string);
- ret = -1;
- goto out;
-
-found_bad_brick_order:
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BAD_BRKORDER,
- "Bad brick order found");
- if (type == GF_CLUSTER_TYPE_DISPERSE) {
- snprintf(err_str, sizeof(found_string), found_string, "disperse");
- } else {
- snprintf(err_str, sizeof(found_string), found_string, "replicate");
- }
-
- ret = -1;
-out:
- ai_list_tmp2 = NULL;
- GF_FREE(brick_list_ptr);
- cds_list_for_each_entry(ai_list_tmp1, &ai_list->list, list)
- {
- if (ai_list_tmp1->info)
- freeaddrinfo(ai_list_tmp1->info);
- free(ai_list_tmp2);
- ai_list_tmp2 = ai_list_tmp1;
- }
- free(ai_list_tmp2);
- return ret;
-}
-
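
Reviewer note: the brick-order check deleted here survives in reworked form; the stage-create path below calls glusterd_check_brick_order() with an explicit type and sub count. Its core idea, reduced to a compact standalone sketch with hostname resolution stubbed out:

    #include <stdbool.h>
    #include <string.h>

    static bool
    same_host(const char *a, const char *b)
    {
        return strcmp(a, b) == 0; /* stand-in for the addrinfo comparison */
    }

    /* hosts: one resolved address per brick, in command-line order */
    static bool
    brick_order_ok(const char *hosts[], int brick_count, int sub_count)
    {
        for (int base = 0; base + sub_count <= brick_count; base += sub_count)
            for (int i = base; i < base + sub_count; i++)
                for (int j = i + 1; j < base + sub_count; j++)
                    if (same_host(hosts[i], hosts[j]))
                        return false; /* two bricks of one subvol share a host */
        return true;
    }
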
int
__glusterd_handle_create_volume(rpcsvc_request_t *req)
{
@@ -280,6 +48,7 @@ __glusterd_handle_create_volume(rpcsvc_request_t *req)
char *bricks = NULL;
char *volname = NULL;
int brick_count = 0;
+ int thin_arbiter_count = 0;
void *cli_rsp = NULL;
char err_str[2048] = {
0,
@@ -304,6 +73,7 @@ __glusterd_handle_create_volume(rpcsvc_request_t *req)
#else
char *addr_family = "inet";
#endif
+ glusterd_volinfo_t *volinfo = NULL;
GF_ASSERT(req);
@@ -357,7 +127,9 @@ __glusterd_handle_create_volume(rpcsvc_request_t *req)
goto out;
}
- if ((ret = glusterd_check_volume_exists(volname))) {
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (!ret) {
+ ret = -1;
snprintf(err_str, sizeof(err_str), "Volume %s already exists", volname);
gf_msg(this->name, GF_LOG_ERROR, EEXIST, GD_MSG_VOL_ALREADY_EXIST, "%s",
err_str);
@@ -436,6 +208,21 @@ __glusterd_handle_create_volume(rpcsvc_request_t *req)
goto out;
}
+ ret = dict_get_int32n(dict, "thin-arbiter-count",
+ SLEN("thin-arbiter-count"), &thin_arbiter_count);
+ if (thin_arbiter_count && conf->op_version < GD_OP_VERSION_7_0) {
+ snprintf(err_str, sizeof(err_str),
+ "Cannot execute command. "
+ "The cluster is operating at version %d. "
+ "Thin-arbiter volume creation is unavailable in "
+ "this version",
+ conf->op_version);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_OP_FAILED, "%s",
+ err_str);
+ ret = -1;
+ goto out;
+ }
+
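
Reviewer note: the thin-arbiter block added above is the standard op-version gate: a feature that older peers cannot parse is refused until the whole cluster runs at the required version. Reduced to its essentials (parameter names illustrative):

    static int
    check_feature_allowed(int cluster_op_version, int requested,
                          int min_op_version)
    {
        if (requested && cluster_op_version < min_op_version)
            return -1; /* refuse: some peers may not understand it */
        return 0;
    }

    /* e.g. check_feature_allowed(conf->op_version, thin_arbiter_count,
     *                            GD_OP_VERSION_7_0); */
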
if (!dict_getn(dict, "force", SLEN("force"))) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Failed to get 'force' flag");
@@ -780,51 +567,6 @@ glusterd_handle_cli_delete_volume(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req,
__glusterd_handle_cli_delete_volume);
}
-int
-glusterd_handle_shd_option_for_tier(glusterd_volinfo_t *volinfo, char *value,
- dict_t *dict)
-{
- int count = 0;
- char dict_key[64] = {
- 0,
- };
- int keylen;
- char *key = NULL;
- int ret = 0;
-
- key = gd_get_shd_key(volinfo->tier_info.cold_type);
- if (key) {
- count++;
- keylen = snprintf(dict_key, sizeof(dict_key), "key%d", count);
- ret = dict_set_strn(dict, dict_key, keylen, key);
- if (ret)
- goto out;
- keylen = snprintf(dict_key, sizeof(dict_key), "value%d", count);
- ret = dict_set_strn(dict, dict_key, keylen, value);
- if (ret)
- goto out;
- }
-
- key = gd_get_shd_key(volinfo->tier_info.hot_type);
- if (key) {
- count++;
- keylen = snprintf(dict_key, sizeof(dict_key), "key%d", count);
- ret = dict_set_strn(dict, dict_key, keylen, key);
- if (ret)
- goto out;
- keylen = snprintf(dict_key, sizeof(dict_key), "value%d", count);
- ret = dict_set_strn(dict, dict_key, keylen, value);
- if (ret)
- goto out;
- }
-
- ret = dict_set_int32n(dict, "count", SLEN("count"), count);
- if (ret)
- goto out;
-
-out:
- return ret;
-}
static int
glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict,
glusterd_volinfo_t *volinfo)
@@ -833,10 +575,14 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict,
int ret = 0;
char *key = NULL;
char *value = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"),
(int32_t *)&heal_op);
if (ret || (heal_op == GF_SHD_OP_INVALID)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=heal-op", NULL);
ret = -1;
goto out;
}
@@ -864,23 +610,6 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict,
value = "disable";
}
- /* Convert this command to volume-set command based on volume type */
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- switch (heal_op) {
- case GF_SHD_OP_HEAL_ENABLE:
- case GF_SHD_OP_HEAL_DISABLE:
- ret = glusterd_handle_shd_option_for_tier(volinfo, value, dict);
- if (!ret)
- goto set_volume;
- goto out;
- /* For any other heal_op, including granular-entry heal,
- * just break out of the block but don't goto out yet.
- */
- default:
- break;
- }
- }
-
if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
(heal_op == GF_SHD_OP_HEAL_DISABLE)) {
key = volgen_get_shd_key(volinfo->type);
@@ -891,23 +620,34 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict,
} else {
key = "cluster.granular-entry-heal";
ret = dict_set_int8(dict, "is-special-key", 1);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=is-special-key", NULL);
goto out;
+ }
}
ret = dict_set_strn(dict, "key1", SLEN("key1"), key);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=key1", NULL);
goto out;
+ }
ret = dict_set_strn(dict, "value1", SLEN("value1"), value);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=value1", NULL);
goto out;
+ }
ret = dict_set_int32n(dict, "count", SLEN("count"), 1);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
-set_volume:
ret = glusterd_op_begin_synctask(req, GD_OP_SET_VOLUME, dict);
out:
@@ -930,18 +670,19 @@ __glusterd_handle_cli_heal_volume(rpcsvc_request_t *req)
0,
};
+ this = THIS;
+ GF_ASSERT(this);
+
GF_ASSERT(req);
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
- this = THIS;
- GF_ASSERT(this);
-
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new();
@@ -1002,8 +743,11 @@ __glusterd_handle_cli_heal_volume(rpcsvc_request_t *req)
goto out;
ret = dict_set_int32n(dict, "count", SLEN("count"), volinfo->brick_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
ret = glusterd_op_begin_synctask(req, GD_OP_HEAL_VOLUME, dict);
@@ -1055,6 +799,7 @@ __glusterd_handle_cli_statedump_volume(rpcsvc_request_t *req)
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
req->rpc_err = GARBAGE_ARGS;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
if (cli_req.dict.dict_len) {
@@ -1133,108 +878,6 @@ glusterd_handle_cli_statedump_volume(rpcsvc_request_t *req)
__glusterd_handle_cli_statedump_volume);
}
-#ifdef HAVE_BD_XLATOR
-/*
- * Validates if given VG in the brick exists or not. Also checks if VG has
- * GF_XATTR_VOL_ID_KEY tag set to avoid using same VG for multiple bricks.
- * Tag is checked only during glusterd_op_stage_create_volume. Tag is set during
- * glusterd_validate_and_create_brickpath().
- * @brick - brick info, @check_tag - check for VG tag or not
- * @msg - Error message to return to caller
- */
-int
-glusterd_is_valid_vg(glusterd_brickinfo_t *brick, int check_tag, char *msg)
-{
- lvm_t handle = NULL;
- vg_t vg = NULL;
- char *vg_name = NULL;
- int retval = 0;
- char *p = NULL;
- char *ptr = NULL;
- struct dm_list *dm_lvlist = NULL;
- struct dm_list *dm_seglist = NULL;
- struct lvm_lv_list *lv_list = NULL;
- struct lvm_property_value prop = {
- 0,
- };
- struct lvm_lvseg_list *seglist = NULL;
- struct dm_list *taglist = NULL;
- struct lvm_str_list *strl = NULL;
-
- handle = lvm_init(NULL);
- if (!handle) {
- sprintf(msg, "lvm_init failed, could not validate vg");
- return -1;
- }
- if (*brick->vg == '\0') { /* BD xlator has vg in brick->path */
- p = gf_strdup(brick->path);
- vg_name = strtok_r(p, "/", &ptr);
- } else
- vg_name = brick->vg;
-
- vg = lvm_vg_open(handle, vg_name, "r", 0);
- if (!vg) {
- sprintf(msg, "no such vg: %s", vg_name);
- retval = -1;
- goto out;
- }
- if (!check_tag)
- goto next;
-
- taglist = lvm_vg_get_tags(vg);
- if (!taglist)
- goto next;
-
- dm_list_iterate_items(strl, taglist)
- {
- if (!strncmp(strl->str, GF_XATTR_VOL_ID_KEY,
- SLEN(GF_XATTR_VOL_ID_KEY))) {
- sprintf(msg,
- "VG %s is already part of"
- " a brick",
- vg_name);
- retval = -1;
- goto out;
- }
- }
-next:
-
- brick->caps = CAPS_BD | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
-
- dm_lvlist = lvm_vg_list_lvs(vg);
- if (!dm_lvlist)
- goto out;
-
- dm_list_iterate_items(lv_list, dm_lvlist)
- {
- dm_seglist = lvm_lv_list_lvsegs(lv_list->lv);
- dm_list_iterate_items(seglist, dm_seglist)
- {
- prop = lvm_lvseg_get_property(seglist->lvseg, "segtype");
- if (!prop.is_valid || !prop.value.string)
- continue;
- if (!strcmp(prop.value.string, "thin-pool")) {
- brick->caps |= CAPS_THIN;
- gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_THINPOOLS_FOR_THINLVS,
- "Thin Pool "
- "\"%s\" will be used for thin LVs",
- lvm_lv_get_name(lv_list->lv));
- break;
- }
- }
- }
-
- retval = 0;
-out:
- if (vg)
- lvm_vg_close(vg);
- lvm_quit(handle);
- if (p)
- GF_FREE(p);
- return retval;
-}
-#endif
-
/* op-sm */
int
glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
@@ -1242,7 +885,6 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
{
int ret = 0;
char *volname = NULL;
- gf_boolean_t exists = _gf_false;
char *bricks = NULL;
char *brick_list = NULL;
char *free_ptr = NULL;
@@ -1252,6 +894,8 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
int32_t local_brick_count = 0;
int32_t i = 0;
int32_t type = 0;
+ int32_t replica_count = 0;
+ int32_t disperse_count = 0;
char *brick = NULL;
char *tmpptr = NULL;
xlator_t *this = NULL;
@@ -1260,6 +904,7 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
uuid_t volume_uuid;
char *volume_uuid_str;
gf_boolean_t is_force = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
this = THIS;
GF_ASSERT(this);
@@ -1274,13 +919,11 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
goto out;
}
- exists = glusterd_check_volume_exists(volname);
- if (exists) {
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (!ret) {
snprintf(msg, sizeof(msg), "Volume %s already exists", volname);
ret = -1;
goto out;
- } else {
- ret = 0;
}
ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
@@ -1331,6 +974,64 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
}
}
+ /* Check the brick order if the volume type is replicate or disperse,
+ * unless 'force' was given at the end of the command.
+ */
+ if (is_origin_glusterd(dict)) {
+ ret = dict_get_int32n(dict, "type", SLEN("type"), &type);
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "Unable to get type of "
+ "volume %s",
+ volname);
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ msg);
+ goto out;
+ }
+
+ if (!is_force) {
+ if (type == GF_CLUSTER_TYPE_REPLICATE) {
+ ret = dict_get_int32n(dict, "replica-count",
+ SLEN("replica-count"), &replica_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Bricks check : Could"
+ " not retrieve replica count");
+ goto out;
+ }
+ gf_msg_debug(this->name, 0,
+ "Replicate cluster type "
+ "found. Checking brick order.");
+ ret = glusterd_check_brick_order(dict, msg, type, &volname,
+ &bricks, &brick_count,
+ replica_count);
+ } else if (type == GF_CLUSTER_TYPE_DISPERSE) {
+ ret = dict_get_int32n(dict, "disperse-count",
+ SLEN("disperse-count"), &disperse_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Bricks check : Could"
+ " not retrieve disperse count");
+ goto out;
+ }
+ gf_msg_debug(this->name, 0,
+ "Disperse cluster type"
+ " found. Checking brick order.");
+ ret = glusterd_check_brick_order(dict, msg, type, &volname,
+ &bricks, &brick_count,
+ disperse_count);
+ }
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
+ "Not creating the volume because of "
+ "bad brick order. %s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+ }
+ }
+
while (i < brick_count) {
i++;
brick = strtok_r(brick_list, " \n", &tmpptr);
@@ -1373,13 +1074,6 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
}
if (!gf_uuid_compare(brick_info->uuid, MY_UUID)) {
-#ifdef HAVE_BD_XLATOR
- if (brick_info->vg[0]) {
- ret = glusterd_is_valid_vg(brick_info, 1, msg);
- if (ret)
- goto out;
- }
-#endif
ret = glusterd_validate_and_create_brickpath(
brick_info, volume_uuid, volname, op_errstr, is_force,
_gf_false);
@@ -1417,37 +1111,6 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
brick_info = NULL;
}
- /*Check brick order if the volume type is replicate or disperse. If
- * force at the end of command not given then check brick order.
- */
- if (is_origin_glusterd(dict)) {
- ret = dict_get_int32n(dict, "type", SLEN("type"), &type);
- if (ret) {
- snprintf(msg, sizeof(msg),
- "Unable to get type of "
- "volume %s",
- volname);
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s",
- msg);
- goto out;
- }
-
- if (!is_force) {
- if ((type == GF_CLUSTER_TYPE_REPLICATE) ||
- (type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) ||
- (type == GF_CLUSTER_TYPE_DISPERSE)) {
- ret = glusterd_check_brick_order(dict, msg);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
- "Not "
- "creating volume because of "
- "bad brick order");
- goto out;
- }
- }
- }
- }
-
ret = dict_set_int32n(rsp_dict, "brick_count", SLEN("brick_count"),
local_brick_count);
if (ret) {
@@ -1479,20 +1142,32 @@ glusterd_op_stop_volume_args_get(dict_t *dict, char **volname, int *flags)
this = THIS;
GF_ASSERT(this);
- if (!dict || !volname || !flags)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
+
+ if (!volname) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL);
+ goto out;
+ }
+
+ if (!flags) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL);
+ goto out;
+ }
ret = dict_get_strn(dict, "volname", SLEN("volname"), volname);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
}
ret = dict_get_int32n(dict, "flags", SLEN("flags"), flags);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get flags");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=flags", NULL);
goto out;
}
out:
@@ -1505,27 +1180,29 @@ glusterd_op_statedump_volume_args_get(dict_t *dict, char **volname,
{
int ret = -1;
- if (!dict || !volname || !options || !option_cnt)
+ if (!dict || !volname || !options || !option_cnt) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
ret = dict_get_strn(dict, "volname", SLEN("volname"), volname);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get volname");
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
}
ret = dict_get_strn(dict, "options", SLEN("options"), options);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get options");
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=options", NULL);
goto out;
}
ret = dict_get_int32n(dict, "option_cnt", SLEN("option_cnt"), option_cnt);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get option count");
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=option_cnt", NULL);
goto out;
}
@@ -1542,7 +1219,6 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
int flags = 0;
int32_t brick_count = 0;
int32_t local_brick_count = 0;
- gf_boolean_t exists = _gf_false;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
char msg[2048] = {
@@ -1559,7 +1235,6 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
char xattr_volid[50] = {
0,
};
- int caps = 0;
int32_t len = 0;
this = THIS;
@@ -1572,16 +1247,9 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (ret)
goto out;
- exists = glusterd_check_volume_exists(volname);
-
- if (!exists) {
- snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
- ret = -1;
- goto out;
- }
-
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
+ snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
FMTSTR_CHECK_VOL_EXISTS, volname);
goto out;
@@ -1721,22 +1389,6 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
local_brick_count = brick_count;
}
}
-
-#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0])
- caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY |
- CAPS_OFFLOAD_SNAPSHOT;
- /* Check for VG/thin pool if its BD volume */
- if (brickinfo->vg[0]) {
- ret = glusterd_is_valid_vg(brickinfo, 0, msg);
- if (ret)
- goto out;
- /* if anyone of the brick does not have thin support,
- disable it for entire volume */
- caps &= brickinfo->caps;
- } else
- caps = 0;
-#endif
}
ret = dict_set_int32n(rsp_dict, "brick_count", SLEN("brick_count"),
@@ -1747,7 +1399,6 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
- volinfo->caps = caps;
ret = 0;
out:
if (volinfo)
@@ -1767,7 +1418,6 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr)
int ret = -1;
char *volname = NULL;
int flags = 0;
- gf_boolean_t exists = _gf_false;
glusterd_volinfo_t *volinfo = NULL;
char msg[2048] = {0};
xlator_t *this = NULL;
@@ -1779,15 +1429,11 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr)
GF_ASSERT(this);
ret = glusterd_op_stop_volume_args_get(dict, &volname, &flags);
- if (ret)
- goto out;
-
- exists = glusterd_check_volume_exists(volname);
-
- if (!exists) {
- snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
- ret = -1;
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Failed to get details of volume %s",
+ volname);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_STOP_ARGS_GET_FAILED,
+ "Volume name=%s", volname, NULL);
goto out;
}
@@ -1824,6 +1470,18 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr)
goto out;
}
+ ret = glusterd_check_ganesha_export(volinfo);
+ if (ret) {
+ ret = ganesha_manage_export(dict, "off", _gf_false, op_errstr);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_WARNING, 0,
+ GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL,
+ "Could not "
+ "unexport volume via NFS-Ganesha");
+ ret = 0;
+ }
+ }
+
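
Reviewer note: note the ret = 0 above; the unexport is best-effort, so a Ganesha failure is logged but never blocks the volume stop. The same logic, lifted into a hypothetical helper to make the downgrade explicit:

    static int
    unexport_if_needed(dict_t *dict, glusterd_volinfo_t *volinfo,
                       char **op_errstr)
    {
        if (!glusterd_check_ganesha_export(volinfo))
            return 0;

        if (ganesha_manage_export(dict, "off", _gf_false, op_errstr)) {
            gf_msg(THIS->name, GF_LOG_WARNING, 0,
                   GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL,
                   "Could not unexport volume via NFS-Ganesha");
        }
        return 0; /* deliberately non-fatal */
    }
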
if (glusterd_is_defrag_on(volinfo)) {
snprintf(msg, sizeof(msg),
"rebalance session is "
@@ -1847,7 +1505,6 @@ glusterd_op_stage_delete_volume(dict_t *dict, char **op_errstr)
{
int ret = 0;
char *volname = NULL;
- gf_boolean_t exists = _gf_false;
glusterd_volinfo_t *volinfo = NULL;
char msg[2048] = {0};
xlator_t *this = NULL;
@@ -1862,15 +1519,6 @@ glusterd_op_stage_delete_volume(dict_t *dict, char **op_errstr)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
- if (!exists) {
- snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
- ret = -1;
- goto out;
- } else {
- ret = 0;
- }
-
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
@@ -1908,6 +1556,10 @@ glusterd_op_stage_delete_volume(dict_t *dict, char **op_errstr)
goto out;
}
volinfo->stage_deleted = _gf_true;
+ gf_log(this->name, GF_LOG_INFO,
+ "Setting stage deleted flag to true for "
+ "volume %s",
+ volinfo->volname);
ret = 0;
out:
@@ -1925,7 +1577,7 @@ static int
glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
dict_t *dict, char **op_errstr)
{
- glusterd_conf_t *priv = NULL;
+ glusterd_svc_t *svc = NULL;
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
int ret = 0;
char msg[2408] = {
@@ -1935,7 +1587,6 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
"Self-heal daemon is not running. "
"Check self-heal daemon log file.";
- priv = this->private;
ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"),
(int32_t *)&heal_op);
if (ret) {
@@ -1944,6 +1595,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
goto out;
}
+ svc = &(volinfo->shd.svc);
switch (heal_op) {
case GF_SHD_OP_INVALID:
case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/
@@ -1973,7 +1625,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
goto out;
}
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
ret = -1;
*op_errstr = gf_strdup(offline_msg);
goto out;
@@ -1987,14 +1639,16 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
if (!glusterd_is_volume_replicate(volinfo)) {
ret = -1;
snprintf(msg, sizeof(msg),
- "Volume %s is not of type "
+ "This command is supported "
+ "for only volume of replicated "
+ "type. Volume %s is not of type "
"replicate",
volinfo->volname);
*op_errstr = gf_strdup(msg);
goto out;
}
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
ret = -1;
*op_errstr = gf_strdup(offline_msg);
goto out;
@@ -2063,14 +1717,15 @@ glusterd_op_stage_heal_volume(dict_t *dict, char **op_errstr)
if (!glusterd_is_volume_started(volinfo)) {
ret = -1;
snprintf(msg, sizeof(msg), "Volume %s is not started.", volname);
- gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_STARTED, "%s",
- msg);
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_STARTED,
+ "Volume=%s", volname, NULL);
*op_errstr = gf_strdup(msg);
goto out;
}
opt_dict = volinfo->dict;
if (!opt_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, NULL);
ret = 0;
goto out;
}
@@ -2126,6 +1781,8 @@ glusterd_op_stage_statedump_volume(dict_t *dict, char **op_errstr)
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL,
+ "Volume=%s", volname, NULL);
goto out;
}
@@ -2246,25 +1903,30 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
glusterd_volinfo_t *volinfo = NULL;
gf_boolean_t vol_added = _gf_false;
glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *ta_brickinfo = NULL;
xlator_t *this = NULL;
char *brick = NULL;
+ char *ta_brick = NULL;
int32_t count = 0;
int32_t i = 1;
char *bricks = NULL;
+ char *ta_bricks = NULL;
char *brick_list = NULL;
+ char *ta_brick_list = NULL;
char *free_ptr = NULL;
+ char *ta_free_ptr = NULL;
char *saveptr = NULL;
+ char *ta_saveptr = NULL;
char *trans_type = NULL;
char *str = NULL;
char *username = NULL;
char *password = NULL;
- int caps = 0;
int brickid = 0;
char msg[1024] __attribute__((unused)) = {
0,
};
char *brick_mount_dir = NULL;
- char key[PATH_MAX] = "";
+ char key[64] = "";
char *address_family_str = NULL;
struct statvfs brickstat = {
0,
@@ -2284,8 +1946,6 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
goto out;
}
- pthread_mutex_init(&volinfo->store_volinfo_lock, NULL);
-
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
@@ -2372,56 +2032,20 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
/* coverity[unused_value] arbiter count is optional */
ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
&volinfo->arbiter_count);
- } else if (GF_CLUSTER_TYPE_STRIPE == volinfo->type) {
- ret = dict_get_int32n(dict, "stripe-count", SLEN("stripe-count"),
- &volinfo->stripe_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get stripe"
- " count for volume %s",
- volname);
- goto out;
- }
- } else if (GF_CLUSTER_TYPE_STRIPE_REPLICATE == volinfo->type) {
- /* performance.client-io-threads is turned on to default,
- * however this has adverse effects on replicate volumes due to
- * replication design issues, till that get addressed
- * performance.client-io-threads option is turned off for all
- * replicate volumes
- */
- if (priv->op_version >= GD_OP_VERSION_3_12_2) {
- ret = dict_set_nstrn(volinfo->dict, "performance.client-io-threads",
- SLEN("performance.client-io-threads"), "off",
- SLEN("off"));
+ ret = dict_get_int32n(dict, "thin-arbiter-count",
+ SLEN("thin-arbiter-count"),
+ &volinfo->thin_arbiter_count);
+ if (volinfo->thin_arbiter_count) {
+ ret = dict_get_strn(dict, "ta-brick", SLEN("ta-brick"), &ta_bricks);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to set "
- "performance.client-io-threads to off");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get thin arbiter brick for "
+ "volume %s",
+ volname);
goto out;
}
}
- ret = dict_get_int32n(dict, "stripe-count", SLEN("stripe-count"),
- &volinfo->stripe_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get stripe"
- " count for volume %s",
- volname);
- goto out;
- }
- ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
- &volinfo->replica_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get "
- "replica count for volume %s",
- volname);
- goto out;
- }
- /* coverity[unused_value] arbiter count is optional */
- ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
- &volinfo->arbiter_count);
} else if (GF_CLUSTER_TYPE_DISPERSE == volinfo->type) {
ret = dict_get_int32n(dict, "disperse-count", SLEN("disperse-count"),
&volinfo->disperse_count);
@@ -2510,6 +2134,38 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
volinfo->transport_type = GF_TRANSPORT_BOTH_TCP_RDMA;
}
+ if (ta_bricks) {
+ ta_brick_list = gf_strdup(ta_bricks);
+ ta_free_ptr = ta_brick_list;
+ }
+
+ if (volinfo->thin_arbiter_count) {
+ ta_brick = strtok_r(ta_brick_list + 1, " \n", &ta_saveptr);
+
+ count = 1;
+ brickid = volinfo->replica_count;
+ /* Assign brick ids to the ta-bricks.
+ * The following loop runs once per subvolume. Although a volume has
+ * only one ta-brick, the volume fuse volfile requires a ta-brick
+ * entry for each subvolume, and the ta-brick id must be adjusted to
+ * the subvol count. E.g. with replica 2, the ta-brick id is
+ * volname-ta-2 for the first subvolume and volname-ta-5 for the
+ * second.
+ */
+ while (count <= volinfo->subvol_count) {
+ ret = glusterd_brickinfo_new_from_brick(ta_brick, &ta_brickinfo,
+ _gf_false, op_errstr);
+ if (ret)
+ goto out;
+
+ GLUSTERD_ASSIGN_BRICKID_TO_TA_BRICKINFO(ta_brickinfo, volinfo,
+ brickid);
+ cds_list_add_tail(&ta_brickinfo->brick_list, &volinfo->ta_bricks);
+ count++;
+ brickid += volinfo->replica_count + 1;
+ }
+ }
+
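
Reviewer note: a worked example of the id arithmetic above, assuming replica_count == 2: the first ta-brick id is replica_count (2) and each subsequent subvolume advances it by replica_count + 1, yielding 2, 5, 8, and so on. The data-brick loop below skips those reserved slots via its (brickid + 1) % (replica_count + 1) check.

    #include <stdio.h>

    int
    main(void)
    {
        int replica_count = 2, subvol_count = 3;
        int ta_id = replica_count; /* first ta-brick id */

        for (int s = 1; s <= subvol_count; s++) {
            printf("subvol %d -> volname-ta-%d\n", s, ta_id);
            ta_id += replica_count + 1; /* prints 2, 5, 8 */
        }
        return 0;
    }
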
if (bricks) {
brick_list = gf_strdup(bricks);
free_ptr = brick_list;
@@ -2519,7 +2175,6 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
if (count)
brick = strtok_r(brick_list + 1, " \n", &saveptr);
- caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
brickid = glusterd_get_next_available_brickid(volinfo);
if (brickid < 0)
@@ -2529,7 +2184,10 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
op_errstr);
if (ret)
goto out;
-
+ if (volinfo->thin_arbiter_count == 1 &&
+ (brickid + 1) % (volinfo->replica_count + 1) == 0) {
+ brickid = brickid + 1;
+ }
GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO(brickinfo, volinfo, brickid++);
ret = glusterd_resolve_brick(brickinfo);
@@ -2566,25 +2224,6 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
goto out;
}
brickinfo->statfs_fsid = brickstat.f_fsid;
-
-#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0]) {
- ret = glusterd_is_valid_vg(brickinfo, 0, msg);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_VG, "%s",
- msg);
- goto out;
- }
-
- /* if anyone of the brick does not have thin
- support, disable it for entire volume */
- caps &= brickinfo->caps;
- } else {
- caps = 0;
- }
-#endif
- } else {
- caps = 0;
}
cds_list_add_tail(&brickinfo->brick_list, &volinfo->bricks);
@@ -2617,8 +2256,6 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
gd_update_volume_op_versions(volinfo);
- volinfo->caps = caps;
-
ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
glusterd_store_delete_volume(volinfo);
@@ -2641,6 +2278,7 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr)
out:
GF_FREE(free_ptr);
+ GF_FREE(ta_free_ptr);
if (!vol_added && volinfo)
glusterd_volinfo_unref(volinfo);
return ret;
@@ -2693,6 +2331,7 @@ glusterd_start_volume(glusterd_volinfo_t *volinfo, int flags, gf_boolean_t wait)
attach_brick_callback can also call store_volinfo for same
volume to update volinfo on disk
*/
+ /* coverity[ORDER_REVERSAL] */
LOCK(&volinfo->lock);
ret = glusterd_store_volinfo(volinfo, verincrement);
UNLOCK(&volinfo->lock);
@@ -2722,6 +2361,8 @@ glusterd_op_start_volume(dict_t *dict, char **op_errstr)
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
glusterd_svc_t *svc = NULL;
+ char *str = NULL;
+ gf_boolean_t option = _gf_false;
this = THIS;
GF_ASSERT(this);
@@ -2779,6 +2420,29 @@ glusterd_op_start_volume(dict_t *dict, char **op_errstr)
}
}
+ ret = dict_get_str(conf->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "Global dict not present.");
+ ret = 0;
+
+ } else {
+ ret = gf_string2boolean(str, &option);
+ /* Check if the feature is enabled and set nfs-disable to true */
+ if (option) {
+ gf_msg_debug(this->name, 0, "NFS-Ganesha is enabled");
+ /* Gluster-nfs should not start when NFS-Ganesha is enabled*/
+ ret = dict_set_str(volinfo->dict, NFS_DISABLE_MAP_KEY, "on");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set nfs.disable for"
+ "volume %s",
+ volname);
+ goto out;
+ }
+ }
+ }
+
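
Reviewer note: the block above gates gluster-nfs on the cluster-wide ganesha option; if the stored string parses to boolean true, nfs.disable is forced on before the volume starts. A stand-in for the parse step (gf_string2boolean's exact token set lives in libglusterfs; this models only the common forms):

    #include <stdbool.h>
    #include <string.h>
    #include <strings.h>

    static int
    string2boolean_stub(const char *s, bool *out)
    {
        if (!strcasecmp(s, "on") || !strcasecmp(s, "true") || !strcmp(s, "1")) {
            *out = true;
            return 0;
        }
        if (!strcasecmp(s, "off") || !strcasecmp(s, "false") ||
            !strcmp(s, "0")) {
            *out = false;
            return 0;
        }
        return -1; /* unrecognized token */
    }
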
ret = glusterd_start_volume(volinfo, flags, _gf_true);
if (ret)
goto out;
@@ -2789,25 +2453,6 @@ glusterd_op_start_volume(dict_t *dict, char **op_errstr)
if (ret)
goto out;
}
- if (conf->op_version <= GD_OP_VERSION_3_7_6) {
- /*
- * Starting tier daemon on originator node will fail if
- * at least one of the peer host brick for the volume.
- * Because The bricks in the peer haven't started when you
- * commit on originator node.
- * Please upgrade to version greater than GD_OP_VERSION_3_7_6
- */
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- if (volinfo->rebal.op != GD_OP_REMOVE_BRICK) {
- glusterd_defrag_info_set(volinfo, dict,
- GF_DEFRAG_CMD_START_TIER,
- GF_DEFRAG_CMD_START, GD_OP_REBALANCE);
- }
- glusterd_restart_rebalance_for_volume(volinfo);
- }
- } else {
- /* Starting tier daemon is moved into post validate phase */
- }
svc = &(volinfo->gfproxyd.svc);
ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
@@ -2846,16 +2491,6 @@ glusterd_stop_volume(glusterd_volinfo_t *volinfo)
}
}
- /* call tier manager before the voluem status is set as stopped
- * as tier uses that as a check in the manager
- * */
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
-
glusterd_set_volume_status(volinfo, GLUSTERD_STATUS_STOPPED);
ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
@@ -2947,6 +2582,15 @@ glusterd_op_delete_volume(dict_t *dict)
goto out;
}
+ if (glusterd_check_ganesha_export(volinfo) && is_origin_glusterd(dict)) {
+ ret = manage_export_config(volname, "off", NULL);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ "Could not delete ganesha export conf file "
+ "for %s",
+ volname);
+ }
+
ret = glusterd_delete_volume(volinfo);
out:
gf_msg_debug(this->name, 0, "returning %d", ret);
@@ -2981,16 +2625,16 @@ glusterd_op_statedump_volume(dict_t *dict, char **op_errstr)
if (ret)
goto out;
gf_msg_debug("glusterd", 0, "Performing statedump on volume %s", volname);
- if (strstr(options, "nfs") != NULL) {
- ret = glusterd_nfs_statedump(options, option_cnt, op_errstr);
+ if (strstr(options, "quotad")) {
+ ret = glusterd_quotad_statedump(options, option_cnt, op_errstr);
if (ret)
goto out;
-
- } else if (strstr(options, "quotad")) {
- ret = glusterd_quotad_statedump(options, option_cnt, op_errstr);
+#ifdef BUILD_GNFS
+ } else if (strstr(options, "nfs") != NULL) {
+ ret = glusterd_nfs_statedump(options, option_cnt, op_errstr);
if (ret)
goto out;
-
+#endif
} else if (strstr(options, "client")) {
ret = glusterd_client_statedump(volname, options, option_cnt,
op_errstr);
@@ -3140,8 +2784,7 @@ glusterd_clearlocks_mount(glusterd_volinfo_t *volinfo, char **xl_opts,
runner_add_args(&runner, SBIN_DIR "/glusterfs", "-f", NULL);
runner_argprintf(&runner, "%s", client_volfpath);
runner_add_arg(&runner, "-l");
- runner_argprintf(&runner,
- DEFAULT_LOG_FILE_DIRECTORY "/%s-clearlocks-mnt.log",
+ runner_argprintf(&runner, "%s/%s-clearlocks-mnt.log", priv->logdir,
volinfo->volname);
if (volinfo->memory_accounting)
runner_add_arg(&runner, "--mem-accounting");
@@ -3254,33 +2897,35 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
char *mntpt = NULL;
char **xl_opts = NULL;
glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get volume name");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
}
gf_msg_debug("glusterd", 0, "Performing clearlocks on volume %s", volname);
ret = dict_get_strn(dict, "path", SLEN("path"), &path);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get path");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=path",
+ NULL);
goto out;
}
ret = dict_get_strn(dict, "kind", SLEN("kind"), &kind);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get kind");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=kind",
+ NULL);
goto out;
}
ret = dict_get_strn(dict, "type", SLEN("type"), &type);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get type");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=type",
+ NULL);
goto out;
}
@@ -3288,10 +2933,9 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (ret)
ret = 0;
- gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD,
- "Received clear-locks request for "
- "volume %s with kind %s type %s and options %s",
- volname, kind, type, opts);
+ gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD,
+ "Volume=%s, Kind=%s, Type=%s, Options=%s", volname, kind, type,
+ opts, NULL);
if (opts)
ret = gf_asprintf(&cmd_str, GF_XATTR_CLRLK_CMD ".t%s.k%s.%s", type,
@@ -3304,22 +2948,25 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
snprintf(msg, sizeof(msg), "Volume %s doesn't exist.", volname);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "Volume=%s",
+ volname, NULL);
goto out;
}
xl_opts = GF_CALLOC(volinfo->brick_count + 1, sizeof(char *),
gf_gld_mt_charptr);
- if (!xl_opts)
+ if (!xl_opts) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
ret = glusterd_clearlocks_get_local_client_ports(volinfo, xl_opts);
if (ret) {
snprintf(msg, sizeof(msg),
"Couldn't get port numbers of "
"local bricks");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRK_PORT_NUM_GET_FAIL, "%s",
- msg);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRK_PORT_NUM_GET_FAIL,
+ NULL);
goto out;
}
@@ -3328,8 +2975,8 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
snprintf(msg, sizeof(msg),
"Creating mount directory "
"for clear-locks failed.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL, "%s", msg);
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL, NULL);
goto out;
}
@@ -3338,16 +2985,15 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
snprintf(msg, sizeof(msg),
"Failed to mount clear-locks "
"maintenance client.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL,
- "%s", msg);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL,
+ NULL);
goto out;
}
ret = glusterd_clearlocks_send_cmd(volinfo, cmd_str, path, result, msg,
sizeof(msg), mntpt);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CLRCLK_SND_CMD_FAIL, "%s",
- msg);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLRCLK_SND_CMD_FAIL, NULL);
goto umount;
}
@@ -3358,16 +3004,16 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
snprintf(msg, sizeof(msg),
"Failed to set clear-locks "
"result");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "%s", msg);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=lk-summary", NULL);
}
umount:
glusterd_clearlocks_unmount(volinfo, mntpt);
if (glusterd_clearlocks_rmdir_mount(volinfo, mntpt))
- gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL,
- "Couldn't unmount "
- "clear-locks mount point");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL,
+ NULL);
out:
if (ret)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index fa260253a80..398b4d76f52 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -8,477 +8,10 @@ later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
+#include <glusterfs/syscall.h>
#include "glusterd-volgen.h"
#include "glusterd-utils.h"
-#if USE_GFDB /* no GFDB means tiering is disabled */
-
-static int
-get_tier_freq_threshold(glusterd_volinfo_t *volinfo, char *threshold_key)
-{
- int threshold = 0;
- char *str_thresold = NULL;
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT(this);
-
- glusterd_volinfo_get(volinfo, threshold_key, &str_thresold);
- if (str_thresold) {
- ret = gf_string2int(str_thresold, &threshold);
- if (ret == -1) {
- threshold = ret;
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "Failed to convert "
- "string to integer");
- }
- }
-
- return threshold;
-}
-
-/*
- * Validation function for record-counters
- * if write-freq-threshold and read-freq-threshold both have non-zero values
- * record-counters cannot be set to off
- * if record-counters is set to on
- * check if both the frequency thresholds are zero, then pop
- * a note, but volume set is not failed.
- * */
-static int
-validate_tier_counters(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
- char *value, char **op_errstr)
-{
- char errstr[2048] = "";
- int ret = -1;
- xlator_t *this = NULL;
- gf_boolean_t origin_val = -1;
- int current_wt = 0;
- int current_rt = 0;
-
- this = THIS;
- GF_ASSERT(this);
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(errstr, sizeof(errstr),
- "Volume %s is not a tier "
- "volume. Option %s is only valid for tier volume.",
- volinfo->volname, key);
- goto out;
- }
-
- ret = gf_string2boolean(value, &origin_val);
- if (ret) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a compatible "
- "value. %s expects an boolean value",
- value, key);
- goto out;
- }
-
- current_rt = get_tier_freq_threshold(volinfo,
- "cluster.read-freq-threshold");
- if (current_rt == -1) {
- snprintf(errstr, sizeof(errstr),
- " Failed to retrieve value"
- " of cluster.read-freq-threshold");
- goto out;
- }
- current_wt = get_tier_freq_threshold(volinfo,
- "cluster.write-freq-threshold");
- if (current_wt == -1) {
- snprintf(errstr, sizeof(errstr),
- " Failed to retrieve value "
- "of cluster.write-freq-threshold");
- goto out;
- }
- /* If record-counters is set to off */
- if (!origin_val) {
- /* Both the thresholds should be zero to set
- * record-counters to off*/
- if (current_rt || current_wt) {
- snprintf(errstr, sizeof(errstr),
- "Cannot set features.record-counters to \"%s\""
- " as cluster.write-freq-threshold is %d"
- " and cluster.read-freq-threshold is %d. Please"
- " set both cluster.write-freq-threshold and "
- " cluster.read-freq-threshold to 0, to set "
- " features.record-counters to \"%s\".",
- value, current_wt, current_rt, value);
- ret = -1;
- goto out;
- }
- }
- /* TODO give a warning message to the user. errstr without re = -1 will
- * not result in a warning on cli for now.
- else {
- if (!current_rt && !current_wt) {
- snprintf (errstr, sizeof (errstr),
- " Note : cluster.write-freq-threshold is %d"
- " and cluster.read-freq-threshold is %d. Please"
- " set both cluster.write-freq-threshold and "
- " cluster.read-freq-threshold to"
- " appropriate positive values.",
- current_wt, current_rt);
- }
- }*/
-
- ret = 0;
-out:
-
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "%s", errstr);
- *op_errstr = gf_strdup(errstr);
- }
-
- return ret;
-}
-
-/*
- * Validation function for ctr sql params
- * features.ctr-sql-db-cachesize (Range: 1000 to 262144 pages)
- * features.ctr-sql-db-wal-autocheckpoint (Range: 1000 to 262144 pages)
- * */
-static int
-validate_ctr_sql_params(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
- char *value, char **op_errstr)
-{
- int ret = -1;
- xlator_t *this = NULL;
- char errstr[2048] = "";
- int origin_val = -1;
-
- this = THIS;
- GF_ASSERT(this);
-
- ret = gf_string2int(value, &origin_val);
- if (ret) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a compatible "
- "value. %s expects an integer value.",
- value, key);
- ret = -1;
- goto out;
- }
-
- if (origin_val < 0) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a "
- "compatible value. %s expects a positive"
- "integer value.",
- value, key);
- ret = -1;
- goto out;
- }
-
- if (strstr(key, "sql-db-cachesize") ||
- strstr(key, "sql-db-wal-autocheckpoint")) {
- if ((origin_val < 1000) || (origin_val > 262144)) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a "
- "compatible value. %s "
- "expects a value between : "
- "1000 to 262144.",
- value, key);
- ret = -1;
- goto out;
- }
- }
-
- ret = 0;
-out:
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "%s", errstr);
- *op_errstr = gf_strdup(errstr);
- }
- return ret;
-}
-
-/* Validation for tiering frequency thresholds
- * If any of the frequency thresholds are set to a non-zero value,
- * switch record-counters on, if not already on
- * If both the frequency thresholds are set to zero,
- * switch record-counters off, if not already off
- * */
-static int
-validate_tier_thresholds(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
- char *value, char **op_errstr)
-{
- char errstr[2048] = "";
- int ret = -1;
- xlator_t *this = NULL;
- int origin_val = -1;
- gf_boolean_t current_rc = _gf_false;
- int current_wt = 0;
- int current_rt = 0;
- gf_boolean_t is_set_rc = _gf_false;
- char *proposed_rc = NULL;
-
- this = THIS;
- GF_ASSERT(this);
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(errstr, sizeof(errstr),
- "Volume %s is not a tier "
- "volume. Option %s is only valid for tier volume.",
- volinfo->volname, key);
- goto out;
- }
-
- ret = gf_string2int(value, &origin_val);
- if (ret) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a compatible "
- "value. %s expects an integer value.",
- value, key);
- ret = -1;
- goto out;
- }
-
- if (origin_val < 0) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a "
- "compatible value. %s expects a positive"
- "integer value.",
- value, key);
- ret = -1;
- goto out;
- }
-
- /* Get the record-counters value */
- ret = glusterd_volinfo_get_boolean(volinfo, "features.record-counters");
- if (ret == -1) {
- snprintf(errstr, sizeof(errstr),
- "Failed to retrieve value of"
- "features.record-counters from volume info");
- goto out;
- }
- current_rc = ret;
-
- /* if any of the thresholds are set to a non-zero value
- * switch record-counters on, if not already on*/
- if (origin_val > 0) {
- if (!current_rc) {
- is_set_rc = _gf_true;
- current_rc = _gf_true;
- }
- } else {
- /* if the set is for write-freq-threshold */
- if (strstr(key, "write-freq-threshold")) {
- current_rt = get_tier_freq_threshold(volinfo,
- "cluster.read-freq-threshold");
- if (current_rt == -1) {
- snprintf(errstr, sizeof(errstr),
- " Failed to retrieve value of"
- "cluster.read-freq-threshold");
- goto out;
- }
- current_wt = origin_val;
- }
- /* else it should be read-freq-threshold */
- else {
- current_wt = get_tier_freq_threshold(
- volinfo, "cluster.write-freq-threshold");
- if (current_wt == -1) {
- snprintf(errstr, sizeof(errstr),
- " Failed to retrieve value of"
- "cluster.write-freq-threshold");
- goto out;
- }
- current_rt = origin_val;
- }
-
- /* Since both the thresholds are zero, set record-counters
- * to off, if not already off */
- if (current_rt == 0 && current_wt == 0) {
- if (current_rc) {
- is_set_rc = _gf_true;
- current_rc = _gf_false;
- }
- }
- }
-
- /* if record-counter has to be set to proposed value */
- if (is_set_rc) {
- if (current_rc) {
- ret = gf_asprintf(&proposed_rc, "on");
- } else {
- ret = gf_asprintf(&proposed_rc, "off");
- }
- if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "Failed to allocate memory to dict_value");
- goto error;
- }
- ret = dict_set_str(volinfo->dict, "features.record-counters",
- proposed_rc);
- error:
- if (ret) {
- snprintf(errstr, sizeof(errstr),
- "Failed to set features.record-counters"
- "to \"%s\" automatically."
- "Please try to set features.record-counters "
- "\"%s\" manually. The options "
- "cluster.write-freq-threshold and "
- "cluster.read-freq-threshold can only "
- "be set to a non zero value, if "
- "features.record-counters is "
- "set to \"on\".",
- proposed_rc, proposed_rc);
- goto out;
- }
- }
- ret = 0;
-out:
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "%s", errstr);
- *op_errstr = gf_strdup(errstr);
- if (proposed_rc)
- GF_FREE(proposed_rc);
- }
- return ret;
-}
-
-static int
-validate_tier(glusterd_volinfo_t *volinfo, dict_t *dict, char *key, char *value,
- char **op_errstr)
-{
- char errstr[2048] = "";
- int ret = 0;
- xlator_t *this = NULL;
- int origin_val = -1;
- char *current_wm_hi = NULL;
- char *current_wm_low = NULL;
- uint64_t wm_hi = 0;
- uint64_t wm_low = 0;
-
- this = THIS;
- GF_ASSERT(this);
-
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(errstr, sizeof(errstr),
- "Volume %s is not a tier "
- "volume. Option %s is only valid for tier volume.",
- volinfo->volname, key);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "%s", errstr);
- *op_errstr = gf_strdup(errstr);
- ret = -1;
- goto out;
- }
-
- if (strstr(key, "cluster.tier-mode")) {
- if (strcmp(value, "test") && strcmp(value, "cache")) {
- ret = -1;
- goto out;
- }
- goto out;
- } else if (strstr(key, "tier-pause")) {
- if (strcmp(value, "off") && strcmp(value, "on")) {
- ret = -1;
- goto out;
- }
- goto out;
- } else if (strstr(key, "tier-compact")) {
- if (strcmp(value, "on") && strcmp(value, "off")) {
- ret = -1;
- goto out;
- }
-
- goto out;
- }
-
- /*
- * Rest of the volume set options for tier are expecting a positive
- * Integer. Change the function accordingly if this constraint is
- * changed.
- */
- ret = gf_string2int(value, &origin_val);
- if (ret) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a compatible "
- "value. %s expects an integer value.",
- value, key);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "%s", errstr);
- *op_errstr = gf_strdup(errstr);
- ret = -1;
- goto out;
- }
-
- if (strstr(key, "watermark-hi") || strstr(key, "watermark-low")) {
- if ((origin_val < 1) || (origin_val > 99)) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a "
- "compatible value. %s expects a "
- "percentage from 1-99.",
- value, key);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "%s", errstr);
- *op_errstr = gf_strdup(errstr);
- ret = -1;
- goto out;
- }
-
- if (strstr(key, "watermark-hi")) {
- wm_hi = origin_val;
- } else {
- glusterd_volinfo_get(volinfo, "cluster.watermark-hi",
- &current_wm_hi);
- gf_string2bytesize_uint64(current_wm_hi, &wm_hi);
- }
-
- if (strstr(key, "watermark-low")) {
- wm_low = origin_val;
- } else {
- glusterd_volinfo_get(volinfo, "cluster.watermark-low",
- &current_wm_low);
- gf_string2bytesize_uint64(current_wm_low, &wm_low);
- }
- if (wm_low >= wm_hi) {
- snprintf(errstr, sizeof(errstr),
- "lower watermark"
- " cannot be equal or exceed upper "
- "watermark.");
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "%s", errstr);
- *op_errstr = gf_strdup(errstr);
- ret = -1;
- goto out;
- }
- } else if (strstr(key, "tier-promote-frequency") ||
- strstr(key, "tier-max-mb") ||
- strstr(key, "tier-max-promote-file-size") ||
- strstr(key, "tier-max-files") ||
- strstr(key, "tier-demote-frequency") ||
- strstr(key, "tier-hot-compact-frequency") ||
- strstr(key, "tier-cold-compact-frequency") ||
- strstr(key, "tier-query-limit")) {
- if (origin_val < 1) {
- snprintf(errstr, sizeof(errstr),
- "%s is not a "
- " compatible value. %s expects a positive "
- "integer value greater than 0.",
- value, key);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INCOMPATIBLE_VALUE,
- "%s", errstr);
- *op_errstr = gf_strdup(errstr);
- ret = -1;
- goto out;
- }
- }
-out:
- gf_msg_debug(this->name, 0, "Returning %d", ret);
-
- return ret;
-}
-
-#endif /* End for USE_GFDB */
-
static int
validate_cache_max_min_size(glusterd_volinfo_t *volinfo, dict_t *dict,
char *key, char *value, char **op_errstr)
@@ -766,36 +299,6 @@ out:
}
static int
-validate_stripe(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
- char *value, char **op_errstr)
-{
- char errstr[2048] = "";
- glusterd_conf_t *priv = NULL;
- int ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT(this);
- priv = this->private;
- GF_ASSERT(priv);
-
- if (volinfo->stripe_count == 1) {
- snprintf(errstr, sizeof(errstr),
- "Cannot set %s for a non-stripe volume.", key);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NON_STRIPE_VOL, "%s",
- errstr);
- *op_errstr = gf_strdup(errstr);
- ret = -1;
- goto out;
- }
-
-out:
- gf_msg_debug(this->name, 0, "Returning %d", ret);
-
- return ret;
-}
-
-static int
validate_disperse(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
char *value, char **op_errstr)
{
@@ -972,18 +475,6 @@ validate_disperse_heal_enable_disable(glusterd_volinfo_t *volinfo, dict_t *dict,
char *key, char *value, char **op_errstr)
{
int ret = 0;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- if (volinfo->tier_info.cold_type != GF_CLUSTER_TYPE_DISPERSE &&
- volinfo->tier_info.hot_type != GF_CLUSTER_TYPE_DISPERSE) {
- gf_asprintf(op_errstr,
- "Volume %s is not containing "
- "disperse type",
- volinfo->volname);
-
- return -1;
- } else
- return 0;
- }
if (volinfo->type != GF_CLUSTER_TYPE_DISPERSE) {
gf_asprintf(op_errstr, "Volume %s is not of disperse type",
@@ -1006,8 +497,7 @@ validate_lock_migration_option(glusterd_volinfo_t *volinfo, dict_t *dict,
this = THIS;
GF_ASSERT(this);
- if (volinfo->replica_count > 1 || volinfo->disperse_count ||
- volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ if (volinfo->replica_count > 1 || volinfo->disperse_count) {
snprintf(errstr, sizeof(errstr),
"Lock migration is "
"a experimental feature. Currently works with"
@@ -1088,6 +578,51 @@ out:
}
static int
+validate_volume_per_thread_limit(glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char *value, char **op_errstr)
+{
+ xlator_t *this = NULL;
+ uint val = 0;
+ int ret = -1;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ if (!is_brick_mx_enabled()) {
+ gf_asprintf(op_errstr,
+ "Brick-multiplexing is not enabled. "
+ "Please enable brick multiplexing before trying "
+ "to set this option.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_WRONG_OPTS_SETTING, "%s",
+ *op_errstr);
+ goto out;
+ }
+
+ ret = gf_string2uint(value, &val);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "%s is not a valid count. "
+ "%s expects an unsigned integer.",
+ value, key);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
+ *op_errstr);
+        goto out;
+    }
+
+ if ((val < 5) || (val > 200)) {
+        gf_asprintf(
+            op_errstr,
+            "Please set this option to a value between 5 and 200 to "
+            "optimize processing large numbers of volumes in parallel.");
+ ret = -1;
+ goto out;
+ }
+out:
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+
+ return ret;
+}
+
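[editor's note] The validator above follows glusterd's usual parse-then-range shape: convert the string, then bound-check it. A minimal standalone sketch of the same pattern; parse_bounded_uint is an illustrative name, not a glusterd API.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the gf_string2uint + range-check sequence. */
static int
parse_bounded_uint(const char *value, unsigned int lo, unsigned int hi,
                   unsigned int *out)
{
    char *end = NULL;
    unsigned long v;

    errno = 0;
    v = strtoul(value, &end, 10);
    if (errno != 0 || end == value || *end != '\0' || v > UINT_MAX)
        return -1; /* not a parsable unsigned integer */
    if (v < lo || v > hi)
        return -1; /* outside the accepted range */
    *out = (unsigned int)v;
    return 0;
}

int
main(void)
{
    unsigned int val = 0;

    /* glusterd.vol-count-per-thread accepts values in [5, 200] */
    printf("%d\n", parse_bounded_uint("50", 5, 200, &val));  /* 0  */
    printf("%d\n", parse_bounded_uint("201", 5, 200, &val)); /* -1 */
    return 0;
}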
+static int
validate_boolean(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
char *value, char **op_errstr)
{
@@ -1113,6 +648,42 @@ out:
}
static int
+validate_disperse_quorum_count(glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char *value, char **op_errstr)
+{
+ int ret = -1;
+ int quorum_count = 0;
+ int data_count = 0;
+
+ ret = gf_string2int(value, &quorum_count);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "%s is not an integer. %s expects a "
+ "valid integer value.",
+ value, key);
+ goto out;
+ }
+
+ if (volinfo->type != GF_CLUSTER_TYPE_DISPERSE) {
+ gf_asprintf(op_errstr, "Cannot set %s for a non-disperse volume.", key);
+ ret = -1;
+ goto out;
+ }
+
+ data_count = volinfo->disperse_count - volinfo->redundancy_count;
+ if (quorum_count < data_count || quorum_count > volinfo->disperse_count) {
+ gf_asprintf(op_errstr, "%d for %s is out of range [%d - %d]",
+ quorum_count, key, data_count, volinfo->disperse_count);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
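[editor's note] To make the accepted range above concrete: on an assumed 4+2 disperse volume (disperse_count = 6, redundancy_count = 2), data_count works out to 4, so disperse.quorum-count must lie in [4, 6]. A self-contained check:

#include <stdio.h>

int
main(void)
{
    int disperse_count = 6, redundancy_count = 2; /* a 4+2 disperse volume */
    int data_count = disperse_count - redundancy_count; /* 4 */
    int q;

    for (q = 3; q <= 7; q++)
        printf("quorum-count=%d -> %s\n", q,
               (q >= data_count && q <= disperse_count) ? "accepted"
                                                        : "rejected");
    return 0;
}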
+static int
validate_parallel_readdir(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
char *value, char **op_errstr)
{
@@ -1216,6 +787,32 @@ out:
return ret;
}
+static int
+is_directory(const char *path)
+{
+ struct stat statbuf;
+ if (sys_stat(path, &statbuf) != 0)
+ return 0;
+ return S_ISDIR(statbuf.st_mode);
+}
+
+static int
+validate_statedump_path(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char *value, char **op_errstr)
+{
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
+
+ int ret = 0;
+ if (!is_directory(value)) {
+ gf_asprintf(op_errstr, "Failed: %s is not a directory", value);
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
+ *op_errstr);
+ }
+
+ return ret;
+}
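[editor's note] sys_stat() above is glusterfs's thin wrapper over stat(2), so the directory test reduces to plain POSIX. An equivalent standalone version; path_is_directory is an illustrative name.

#include <stdio.h>
#include <sys/stat.h>

static int
path_is_directory(const char *path)
{
    struct stat statbuf;

    /* missing or unreadable paths are rejected along with non-directories */
    if (stat(path, &statbuf) != 0)
        return 0;
    return S_ISDIR(statbuf.st_mode);
}

int
main(void)
{
    printf("/tmp -> %d\n", path_is_directory("/tmp"));             /* 1 */
    printf("/etc/hosts -> %d\n", path_is_directory("/etc/hosts")); /* 0 */
    return 0;
}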
/* dispatch table for VOLUME SET
* -----------------------------
@@ -1538,18 +1135,10 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.type = NO_DOC,
.op_version = GD_OP_VERSION_3_13_2,
.flags = VOLOPT_FLAG_CLIENT_OPT},
-
- /* stripe xlator options */
- {.key = "cluster.stripe-block-size",
- .voltype = "cluster/stripe",
- .option = "block-size",
- .op_version = 1,
- .validate_fn = validate_stripe,
- .flags = VOLOPT_FLAG_CLIENT_OPT},
- {.key = "cluster.stripe-coalesce",
- .voltype = "cluster/stripe",
- .option = "coalesce",
- .op_version = 1,
+ {.key = "cluster.optimistic-change-log",
+ .voltype = "cluster/replicate",
+ .type = NO_DOC,
+ .op_version = GD_OP_VERSION_7_2,
.flags = VOLOPT_FLAG_CLIENT_OPT},
/* IO-stats xlator options */
@@ -1677,10 +1266,21 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.option = "priority",
.op_version = 1,
.flags = VOLOPT_FLAG_CLIENT_OPT},
- {.key = "performance.cache-size",
+ {.key = "performance.io-cache-size",
.voltype = "performance/io-cache",
- .op_version = 1,
+ .option = "cache-size",
+ .op_version = GD_OP_VERSION_8_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
+ {
+ .key = "performance.cache-size",
+ .voltype = "performance/io-cache",
+ .op_version = 1,
+ .flags = VOLOPT_FLAG_CLIENT_OPT,
+ .description = "Deprecated option. Use performance.io-cache-size "
+ "to adjust the cache size of the io-cache translator, "
+ "and use performance.quick-read-cache-size to adjust "
+ "the cache size of the quick-read translator.",
+ },
/* IO-threads xlator options */
{.key = "performance.io-thread-count",
@@ -1720,19 +1320,32 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "performance/io-cache",
.option = "pass-through",
.op_version = GD_OP_VERSION_4_1_0},
+ {.key = "performance.quick-read-cache-size",
+ .voltype = "performance/quick-read",
+ .option = "cache-size",
+ .op_version = GD_OP_VERSION_8_0,
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
{.key = "performance.cache-size",
.voltype = "performance/quick-read",
.type = NO_DOC,
.op_version = 1,
.flags = VOLOPT_FLAG_CLIENT_OPT},
+ {.key = "performance.quick-read-cache-timeout",
+ .voltype = "performance/quick-read",
+ .option = "cache-timeout",
+ .op_version = GD_OP_VERSION_8_0,
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
{.key = "performance.qr-cache-timeout",
.voltype = "performance/quick-read",
.option = "cache-timeout",
.op_version = 1,
- .flags = VOLOPT_FLAG_CLIENT_OPT},
- {.key = "performance.cache-invalidation",
+ .flags = VOLOPT_FLAG_CLIENT_OPT,
+ .description =
+ "Deprecated option. Use performance.quick-read-cache-timeout "
+ "instead."},
+ {.key = "performance.quick-read-cache-invalidation",
.voltype = "performance/quick-read",
- .option = "cache-invalidation",
+ .option = "quick-read-cache-invalidation",
.op_version = GD_OP_VERSION_4_0_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
{.key = "performance.ctime-invalidation",
@@ -1894,29 +1507,6 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "performance/nl-cache",
.option = "pass-through",
.op_version = GD_OP_VERSION_4_1_0},
- /* Crypt xlator options */
-
- {.key = "features.encryption",
- .voltype = "encryption/crypt",
- .option = "!feat",
- .value = "off",
- .op_version = 3,
- .description = "enable/disable client-side encryption for "
- "the volume.",
- .flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT},
-
- {.key = "encryption.master-key",
- .voltype = "encryption/crypt",
- .op_version = 3,
- .flags = VOLOPT_FLAG_CLIENT_OPT},
- {.key = "encryption.data-key-size",
- .voltype = "encryption/crypt",
- .op_version = 3,
- .flags = VOLOPT_FLAG_CLIENT_OPT},
- {.key = "encryption.block-size",
- .voltype = "encryption/crypt",
- .op_version = 3,
- .flags = VOLOPT_FLAG_CLIENT_OPT},
/* Client xlator options */
{.key = "network.frame-timeout",
@@ -1926,7 +1516,6 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = "network.ping-timeout",
.voltype = "protocol/client",
.op_version = 1,
- .value = "42",
.flags = VOLOPT_FLAG_CLIENT_OPT},
{.key = "network.tcp-window-size",
.voltype = "protocol/client",
@@ -1935,6 +1524,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = "client.ssl",
.voltype = "protocol/client",
.option = "transport.socket.ssl-enabled",
+ .value = "off",
.op_version = 2,
.description = "enable/disable client.ssl flag in the "
"volume.",
@@ -1980,6 +1570,27 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = GD_OP_VERSION_3_10_2,
.value = "9",
.flags = VOLOPT_FLAG_CLIENT_OPT},
+ {.key = "client.strict-locks",
+ .voltype = "protocol/client",
+ .option = "strict-locks",
+ .value = "off",
+ .op_version = GD_OP_VERSION_8_0,
+ .validate_fn = validate_boolean,
+ .type = GLOBAL_DOC,
+ .description = "When set, doesn't reopen saved fds after reconnect "
+ "if POSIX locks are held on them. Hence subsequent "
+ "operations on these fds will fail. This is "
+ "necessary for stricter lock complaince as bricks "
+ "cleanup any granted locks when a client "
+ "disconnects."},
+
+    /* Although the following option is named ta-remote-port, it will be
+     * added as remote-port in the client volfile for ta-bricks only.
+ */
+ {.key = "client.ta-brick-port",
+ .voltype = "protocol/client",
+ .option = "ta-remote-port",
+ .op_version = GD_OP_VERSION_7_0},
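[editor's note] The entry above is a good illustration of the dispatch table's key/option split: .key is what "gluster volume set" accepts, while .option is what volgen writes into the volfile. A simplified, self-contained sketch of that lookup; opt_map is an illustrative stand-in for volopt_map_entry.

#include <stdio.h>
#include <string.h>

struct opt_map {
    const char *key;    /* user-facing name accepted by the CLI */
    const char *option; /* name emitted into the generated volfile */
};

static const struct opt_map map[] = {
    {"client.ta-brick-port", "ta-remote-port"},
    {NULL, NULL}, /* sentinel, like {.key = NULL} at the end of the table */
};

int
main(void)
{
    const struct opt_map *e;

    for (e = map; e->key != NULL; e++)
        if (strcmp(e->key, "client.ta-brick-port") == 0)
            printf("volfile option: %s\n", e->option); /* ta-remote-port */
    return 0;
}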
/* Server xlator options */
{.key = "network.tcp-window-size",
@@ -2013,6 +1624,10 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "protocol/server",
.option = "root-squash",
.op_version = 2},
+ {.key = "server.all-squash",
+ .voltype = "protocol/server",
+ .option = "all-squash",
+ .op_version = GD_OP_VERSION_6_0},
{.key = "server.anonuid",
.voltype = "protocol/server",
.option = "anonuid",
@@ -2024,7 +1639,8 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = "server.statedump-path",
.voltype = "protocol/server",
.option = "statedump-path",
- .op_version = 1},
+ .op_version = 1,
+ .validate_fn = validate_statedump_path},
{.key = "server.outstanding-rpc-limit",
.voltype = "protocol/server",
.option = "rpc.outstanding-rpc-limit",
@@ -2032,6 +1648,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = 3},
{.key = "server.ssl",
.voltype = "protocol/server",
+ .value = "off",
.option = "transport.socket.ssl-enabled",
.description = "enable/disable server.ssl flag in the "
"volume.",
@@ -2041,7 +1658,11 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "protocol/server",
.option = "!ssl-allow",
.value = "*",
- .type = NO_DOC,
+ .type = DOC,
+ .description = "Allow a comma separated list of common names (CN) of "
+ "the clients that are allowed to access the server."
+ "By default, all TLS authenticated clients are "
+ "allowed to access the server.",
.op_version = GD_OP_VERSION_3_6_0,
},
{
@@ -2082,7 +1703,6 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "protocol/server",
.option = "transport.tcp-user-timeout",
.op_version = GD_OP_VERSION_3_10_2,
- .value = "0", /* 0 - implies "use system default" */
},
{
.key = "server.keepalive-time",
@@ -2186,33 +1806,24 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = "performance.read-ahead",
.voltype = "performance/read-ahead",
.option = "!perf",
- .value = "on",
+ .value = "off",
.op_version = 1,
.description = "enable/disable read-ahead translator in the volume.",
.flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT},
{.key = "performance.readdir-ahead",
.voltype = "performance/readdir-ahead",
.option = "!perf",
- .value = "on",
+ .value = "off",
.op_version = 3,
.description = "enable/disable readdir-ahead translator in the volume.",
.flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT},
{.key = "performance.io-cache",
.voltype = "performance/io-cache",
.option = "!perf",
- .value = "on",
+ .value = "off",
.op_version = 1,
.description = "enable/disable io-cache translator in the volume.",
.flags = VOLOPT_FLAG_CLIENT_OPT},
- {.key = "performance.quick-read",
- .voltype = "performance/quick-read",
- .option = "!perf",
- .value = "on",
- .op_version = 1,
- .description = "enable/disable quick-read translator in the volume.",
- .flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT
-
- },
{.key = "performance.open-behind",
.voltype = "performance/open-behind",
.option = "!perf",
@@ -2222,6 +1833,13 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT
},
+ {.key = "performance.quick-read",
+ .voltype = "performance/quick-read",
+ .option = "!perf",
+ .value = "on",
+ .op_version = 1,
+ .description = "enable/disable quick-read translator in the volume.",
+ .flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT},
{.key = "performance.nl-cache",
.voltype = "performance/nl-cache",
.option = "!perf",
@@ -2300,6 +1918,12 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = GD_OP_VERSION_3_9_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
+ {.key = "performance.global-cache-invalidation",
+ .voltype = "performance/md-cache",
+ .option = "global-cache-invalidation",
+ .op_version = GD_OP_VERSION_6_0,
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
+
/* Feature translators */
{.key = "features.uss",
.voltype = "features/snapview-server",
@@ -2858,11 +2482,10 @@ struct volopt_map_entry glusterd_volopt_map[] = {
},
{
.option = "ctime",
- .key = "storage.ctime",
+ .key = "features.ctime",
.voltype = "storage/posix",
.op_version = GD_OP_VERSION_4_1_0,
},
- {.key = "storage.bd-aio", .voltype = "storage/bd", .op_version = 3},
{.key = "config.memory-accounting",
.voltype = "mgmt/glusterd",
.option = "!config",
@@ -2887,7 +2510,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = 2},
{.key = GLUSTERD_QUORUM_RATIO_KEY,
.voltype = "mgmt/glusterd",
- .value = "0",
+ .value = "51",
.op_version = 2},
/* changelog translator - global tunables */
{.key = "changelog.changelog",
@@ -2979,261 +2602,6 @@ struct volopt_map_entry glusterd_volopt_map[] = {
"/var/run/gluster/shared_storage on enabling this "
"option. Unmount and delete the shared storage volume "
" on disabling this option."},
-#if USE_GFDB /* no GFDB means tiering is disabled */
- /* tier translator - global tunables */
- {.key = "cluster.write-freq-threshold",
- .voltype = "cluster/tier",
- .value = "0",
- .option = "write-freq-threshold",
- .op_version = GD_OP_VERSION_3_7_0,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier_thresholds,
- .description = "Defines the number of writes, in a promotion/demotion"
- " cycle, that would mark a file HOT for promotion. Any"
- " file that has write hits less than this value will "
- "be considered as COLD and will be demoted."},
- {.key = "cluster.read-freq-threshold",
- .voltype = "cluster/tier",
- .value = "0",
- .option = "read-freq-threshold",
- .op_version = GD_OP_VERSION_3_7_0,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier_thresholds,
- .description = "Defines the number of reads, in a promotion/demotion "
- "cycle, that would mark a file HOT for promotion. Any "
- "file that has read hits less than this value will be "
- "considered as COLD and will be demoted."},
- {
- .key = "cluster.tier-pause",
- .voltype = "cluster/tier",
- .option = "tier-pause",
- .op_version = GD_OP_VERSION_3_7_6,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- },
- {
- .key = "cluster.tier-promote-frequency",
- .voltype = "cluster/tier",
- .value = "120",
- .option = "tier-promote-frequency",
- .op_version = GD_OP_VERSION_3_7_0,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- },
- {
- .key = "cluster.tier-demote-frequency",
- .voltype = "cluster/tier",
- .value = "3600",
- .option = "tier-demote-frequency",
- .op_version = GD_OP_VERSION_3_7_0,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- },
- {.key = "cluster.watermark-hi",
- .voltype = "cluster/tier",
- .value = "90",
- .option = "watermark-hi",
- .op_version = GD_OP_VERSION_3_7_6,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- .description =
- "Upper % watermark for promotion. If hot tier fills"
- " above this percentage, no promotion will happen and demotion will "
- "happen with high probability."},
- {.key = "cluster.watermark-low",
- .voltype = "cluster/tier",
- .value = "75",
- .option = "watermark-low",
- .op_version = GD_OP_VERSION_3_7_6,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- .description =
- "Lower % watermark. If hot tier is less "
- "full than this, promotion will happen and demotion will not happen. "
- "If greater than this, promotion/demotion will happen at a "
- "probability "
- "relative to how full the hot tier is."},
- {.key = "cluster.tier-mode",
- .voltype = "cluster/tier",
- .option = "tier-mode",
- .value = "cache",
- .op_version = GD_OP_VERSION_3_7_6,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- .description =
- "Either 'test' or 'cache'. Test mode periodically"
- " demotes or promotes files automatically based on access."
- " Cache mode does so based on whether the cache is full or not,"
- " as specified with watermarks."},
- {.key = "cluster.tier-max-promote-file-size",
- .voltype = "cluster/tier",
- .option = "tier-max-promote-file-size",
- .value = "0",
- .op_version = GD_OP_VERSION_3_7_10,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- .description =
- "The maximum file size in bytes that is promoted. If 0, there"
- " is no maximum size (default)."},
- {.key = "cluster.tier-max-mb",
- .voltype = "cluster/tier",
- .option = "tier-max-mb",
- .value = "4000",
- .op_version = GD_OP_VERSION_3_7_6,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- .description = "The maximum number of MB that may be migrated"
- " in any direction in a given cycle by a single node."},
- {.key = "cluster.tier-max-files",
- .voltype = "cluster/tier",
- .option = "tier-max-files",
- .value = "10000",
- .op_version = GD_OP_VERSION_3_7_6,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- .description = "The maximum number of files that may be migrated"
- " in any direction in a given cycle by a single node."},
- {.key = "cluster.tier-query-limit",
- .voltype = "cluster/tier",
- .option = "tier-query-limit",
- .value = "100",
- .op_version = GD_OP_VERSION_3_9_1,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- .type = NO_DOC,
- .description = "The maximum number of files that may be migrated "
- "during an emergency demote. An emergency condition "
- "is flagged when writes breach the hi-watermark."},
- {.key = "cluster.tier-compact",
- .voltype = "cluster/tier",
- .option = "tier-compact",
- .value = "on",
- .op_version = GD_OP_VERSION_3_9_0,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- .description = "Activate or deactivate the compaction of the DB"
- " for the volume's metadata."},
- {
- .key = "cluster.tier-hot-compact-frequency",
- .voltype = "cluster/tier",
- .value = "604800",
- .option = "tier-hot-compact-frequency",
- .op_version = GD_OP_VERSION_3_9_0,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- },
- {
- .key = "cluster.tier-cold-compact-frequency",
- .voltype = "cluster/tier",
- .value = "604800",
- .option = "tier-cold-compact-frequency",
- .op_version = GD_OP_VERSION_3_9_0,
- .flags = VOLOPT_FLAG_CLIENT_OPT,
- .validate_fn = validate_tier,
- },
- {.key = "features.ctr-enabled",
- .voltype = "features/changetimerecorder",
- .value = "off",
- .option = "ctr-enabled",
- .op_version = GD_OP_VERSION_3_7_0,
- .description = "Enable CTR xlator"},
- {.key = "features.record-counters",
- .voltype = "features/changetimerecorder",
- .value = "off",
- .option = "record-counters",
- .op_version = GD_OP_VERSION_3_7_0,
- .validate_fn = validate_tier_counters,
- .description = "Its a Change Time Recorder Xlator option to "
- "enable recording write "
- "and read heat counters. The default is disabled. "
- "If enabled, \"cluster.write-freq-threshold\" and "
- "\"cluster.read-freq-threshold\" defined the number "
- "of writes (or reads) to a given file are needed "
- "before triggering migration."},
- {.key = "features.ctr-record-metadata-heat",
- .voltype = "features/changetimerecorder",
- .value = "off",
- .option = "ctr-record-metadata-heat",
- .op_version = GD_OP_VERSION_3_7_0,
- .type = NO_DOC,
- .description = "Its a Change Time Recorder Xlator option to "
- "enable recording write heat on metadata of the file. "
- "The default is disabled. "
- "Metadata is inode attributes like atime, mtime,"
- " permissions etc and "
- "extended attributes of a file ."},
- {.key = "features.ctr_link_consistency",
- .voltype = "features/changetimerecorder",
- .value = "off",
- .option = "ctr_link_consistency",
- .op_version = GD_OP_VERSION_3_7_0,
- .type = NO_DOC,
- .description = "Enable a crash consistent way of recording hardlink "
- "updates by Change Time Recorder Xlator. "
- "When recording in a crash "
- "consistent way the data operations will "
- "experience more latency."},
- {.key = "features.ctr_lookupheal_link_timeout",
- .voltype = "features/changetimerecorder",
- .value = "300",
- .option = "ctr_lookupheal_link_timeout",
- .op_version = GD_OP_VERSION_3_7_2,
- .type = NO_DOC,
- .description = "Defines the expiry period of in-memory "
- "hardlink of an inode,"
- "used by lookup heal in Change Time Recorder."
- "Once the expiry period"
- "hits an attempt to heal the database per "
- "hardlink is done and the "
- "in-memory hardlink period is reset"},
- {.key = "features.ctr_lookupheal_inode_timeout",
- .voltype = "features/changetimerecorder",
- .value = "300",
- .option = "ctr_lookupheal_inode_timeout",
- .op_version = GD_OP_VERSION_3_7_2,
- .type = NO_DOC,
- .description = "Defines the expiry period of in-memory inode,"
- "used by lookup heal in Change Time Recorder. "
- "Once the expiry period"
- "hits an attempt to heal the database per "
- "inode is done"},
- {.key = "features.ctr-sql-db-cachesize",
- .voltype = "features/changetimerecorder",
- .value = "12500",
- .option = "sql-db-cachesize",
- .validate_fn = validate_ctr_sql_params,
- .op_version = GD_OP_VERSION_3_7_7,
- .description = "Defines the cache size of the sqlite database of "
- "changetimerecorder xlator."
- "The input to this option is in pages."
- "Each page is 4096 bytes. Default value is 12500 "
- "pages."
- "The max value is 262144 pages i.e 1 GB and "
- "the min value is 1000 pages i.e ~ 4 MB. "},
- {.key = "features.ctr-sql-db-wal-autocheckpoint",
- .voltype = "features/changetimerecorder",
- .value = "25000",
- .option = "sql-db-wal-autocheckpoint",
- .validate_fn = validate_ctr_sql_params,
- .op_version = GD_OP_VERSION_3_7_7,
- .description = "Defines the autocheckpoint of the sqlite database of "
- " changetimerecorder. "
- "The input to this option is in pages. "
- "Each page is 4096 bytes. Default value is 25000 "
- "pages."
- "The max value is 262144 pages i.e 1 GB and "
- "the min value is 1000 pages i.e ~4 MB."},
- {.key = VKEY_FEATURES_SELINUX,
- .voltype = "features/selinux",
- .type = NO_DOC,
- .value = "on",
- .op_version = GD_OP_VERSION_3_11_0,
- .description = "Convert security.selinux xattrs to "
- "trusted.gluster.selinux on the bricks. Recommended "
- "to have enabled when clients and/or bricks support "
- "SELinux."},
-
-#endif /* USE_GFDB */
{
.key = "locks.trace",
.voltype = "features/locks",
@@ -3334,6 +2702,15 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = GD_OP_VERSION_3_7_0,
.type = NO_DOC,
},
+ {
+ .key = "features.signer-threads",
+ .voltype = "features/bit-rot",
+ .value = BR_WORKERS,
+ .option = "signer-threads",
+ .op_version = GD_OP_VERSION_8_0,
+ .type = NO_DOC,
+ },
/* Upcall translator options */
{
.key = "features.cache-invalidation",
@@ -3346,6 +2723,13 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "features/upcall",
.op_version = GD_OP_VERSION_3_7_0,
},
+ {
+ .key = "ganesha.enable",
+ .voltype = "mgmt/ganesha",
+ .value = "off",
+ .option = "ganesha.enable",
+ .op_version = GD_OP_VERSION_7_0,
+ },
/* Lease translator options */
{
.key = "features.leases",
@@ -3531,7 +2915,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
/* Brick multiplexing options */
{.key = GLUSTERD_BRICK_MULTIPLEX_KEY,
.voltype = "mgmt/glusterd",
- .value = "off",
+ .value = "disable",
.op_version = GD_OP_VERSION_3_10_0,
.validate_fn = validate_boolean,
.type = GLOBAL_DOC,
@@ -3539,9 +2923,19 @@ struct volopt_map_entry glusterd_volopt_map[] = {
"brick multiplexing. Brick multiplexing ensures that "
"compatible brick instances can share one single "
"brick process."},
+ {.key = GLUSTERD_VOL_CNT_PER_THRD,
+ .voltype = "mgmt/glusterd",
+ .value = GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE,
+ .op_version = GD_OP_VERSION_7_0,
+ .validate_fn = validate_volume_per_thread_limit,
+ .type = GLOBAL_NO_DOC,
+    .description =
+        "This option can be used to limit the number of volumes "
+        "handled per thread to populate peer data. The option accepts "
+        "values in the range of 5 to 200"},
{.key = GLUSTERD_BRICKMUX_LIMIT_KEY,
.voltype = "mgmt/glusterd",
- .value = "0",
+ .value = GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE,
.op_version = GD_OP_VERSION_3_12_0,
.validate_fn = validate_mux_limit,
.type = GLOBAL_DOC,
@@ -3588,6 +2982,15 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "cluster/replicate",
.op_version = GD_OP_VERSION_3_11_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
+ {.key = VKEY_FEATURES_SELINUX,
+ .voltype = "features/selinux",
+ .type = NO_DOC,
+ .value = "on",
+ .op_version = GD_OP_VERSION_3_11_0,
+ .description = "Convert security.selinux xattrs to "
+ "trusted.gluster.selinux on the bricks. Recommended "
+ "to have enabled when clients and/or bricks support "
+ "SELinux."},
{.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
.voltype = "mgmt/glusterd",
.type = GLOBAL_DOC,
@@ -3628,10 +3031,20 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.type = NO_DOC,
.op_version = GD_OP_VERSION_3_13_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
+ {.key = "disperse.quorum-count",
+ .voltype = "cluster/disperse",
+ .type = NO_DOC,
+ .op_version = GD_OP_VERSION_8_0,
+ .validate_fn = validate_disperse_quorum_count,
+ .description = "This option can be used to define how many successes on"
+ "the bricks constitute a success to the application. This"
+ " count should be in the range"
+ "[disperse-data-count, disperse-count] (inclusive)",
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
{
.key = "features.sdfs",
.voltype = "features/sdfs",
- .value = "on",
+ .value = "off",
.option = "!features",
.op_version = GD_OP_VERSION_4_0_0,
.description = "enable/disable dentry serialization xlator in volume",
@@ -3642,10 +3055,10 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.value = "off",
.op_version = GD_OP_VERSION_4_1_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
- {.key = "features.utime",
+ {.key = "features.ctime",
.voltype = "features/utime",
.validate_fn = validate_boolean,
- .value = "off",
+ .value = "on",
.option = "!utime",
.op_version = GD_OP_VERSION_4_1_0,
.description = "enable/disable utime translator on the volume.",
@@ -3658,7 +3071,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = GD_OP_VERSION_5_0,
.description = "enable/disable noatime option with ctime enabled.",
.flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT},
- {.key = "feature.cloudsync-storetype",
+ {.key = "features.cloudsync-storetype",
.voltype = "features/cloudsync",
.op_version = GD_OP_VERSION_5_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
@@ -3678,4 +3091,56 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "features/cloudsync",
.op_version = GD_OP_VERSION_5_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
+ {.key = "features.enforce-mandatory-lock",
+ .voltype = "features/locks",
+ .value = "off",
+ .type = NO_DOC,
+ .op_version = GD_OP_VERSION_6_0,
+ .validate_fn = validate_boolean,
+ .description = "option to enforce mandatory lock on a file",
+ .flags = VOLOPT_FLAG_XLATOR_OPT},
+ {.key = VKEY_CONFIG_GLOBAL_THREADING,
+ .voltype = "debug/io-stats",
+ .option = "global-threading",
+ .value = "off",
+ .op_version = GD_OP_VERSION_6_0},
+ {.key = VKEY_CONFIG_CLIENT_THREADS,
+ .voltype = "debug/io-stats",
+ .option = "!client-threads",
+ .value = "16",
+ .op_version = GD_OP_VERSION_6_0},
+ {.key = VKEY_CONFIG_BRICK_THREADS,
+ .voltype = "debug/io-stats",
+ .option = "!brick-threads",
+ .value = "16",
+ .op_version = GD_OP_VERSION_6_0},
+ {.key = "features.cloudsync-remote-read",
+ .voltype = "features/cloudsync",
+ .value = "off",
+ .op_version = GD_OP_VERSION_7_0,
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
+ {.key = "features.cloudsync-store-id",
+ .voltype = "features/cloudsync",
+ .op_version = GD_OP_VERSION_7_0,
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
+ {.key = "features.cloudsync-product-id",
+ .voltype = "features/cloudsync",
+ .op_version = GD_OP_VERSION_7_0,
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
+ {
+ .key = "features.acl",
+ .voltype = "features/access-control",
+ .value = "enable",
+ .option = "!features",
+ .op_version = GD_OP_VERSION_8_0,
+ .description = "(WARNING: for debug purpose only) enable/disable "
+ "access-control xlator in volume",
+ .type = NO_DOC,
+ },
+
+ {.key = "cluster.use-anonymous-inode",
+ .voltype = "cluster/replicate",
+ .op_version = GD_OP_VERSION_9_0,
+ .value = "yes",
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
{.key = NULL}};
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 187038fc029..7a86c2997b1 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -14,20 +14,20 @@
#include <sys/resource.h>
#include <libgen.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
#include "glusterd.h"
#include "rpcsvc.h"
#include "fnmatch.h"
-#include "xlator.h"
-#include "call-stub.h"
-#include "defaults.h"
-#include "list.h"
-#include "dict.h"
-#include "options.h"
-#include "compat.h"
-#include "compat-errno.h"
-#include "syscall.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/list.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/options.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/syscall.h>
#include "glusterd-statedump.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
@@ -37,19 +37,21 @@
#include "glusterd-locks.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-shd-svc.h"
+#ifdef BUILD_GNFS
#include "glusterd-nfs-svc.h"
+#endif
#include "glusterd-bitd-svc.h"
#include "glusterd-scrub-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-snapd-svc.h"
#include "glusterd-messages.h"
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "glusterd-geo-rep.h"
-#include "run.h"
+#include <glusterfs/run.h>
#include "rpc-clnt-ping.h"
#include "rpc-common-xdr.h"
-#include "syncop.h"
+#include <glusterfs/syncop.h>
#include "glusterd-mountbroker.h"
@@ -65,7 +67,7 @@ extern struct rpcsvc_program gd_svc_cli_trusted_progs;
extern struct rpc_clnt_program gd_brick_prog;
extern struct rpcsvc_program glusterd_mgmt_hndsk_prog;
-extern char snap_mount_dir[PATH_MAX];
+extern char snap_mount_dir[VALID_GLUSTERD_PATHMAX];
rpcsvc_cbk_program_t glusterd_cbk_prog = {
.progname = "Gluster Callback",
@@ -200,8 +202,10 @@ glusterd_options_init(xlator_t *this)
priv = this->private;
priv->opts = dict_new();
- if (!priv->opts)
+ if (!priv->opts) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = glusterd_store_retrieve_options(this);
if (ret == 0) {
@@ -245,6 +249,7 @@ glusterd_client_statedump_submit_req(char *volname, char *target_ip, char *pid)
GF_ASSERT(conf);
if (target_ip == NULL || pid == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
ret = -1;
goto out;
}
@@ -445,14 +450,19 @@ glusterd_rpcsvc_options_build(dict_t *options)
{
int ret = 0;
uint32_t backlog = 0;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
ret = dict_get_uint32(options, "transport.listen-backlog", &backlog);
if (ret) {
backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG;
ret = dict_set_uint32(options, "transport.listen-backlog", backlog);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=transport.listen-backlog", NULL);
goto out;
+ }
}
gf_msg_debug("glusterd", 0, "listen-backlog value: %d", backlog);
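[editor's note] gf_smsg() takes a message ID followed by optional "Key=value" strings terminated by NULL, which is why the converted call above ends with the key string and NULL. A standalone sketch of that calling shape; smsg and the message ID below are illustrative stand-ins, not the glusterfs API.

#include <stdarg.h>
#include <stdio.h>

/* Illustrative stand-in for gf_smsg()'s NULL-terminated key=value tail. */
static void
smsg(const char *domain, int msgid, ...)
{
    va_list ap;
    const char *kv;

    printf("[%s] MSGID %d", domain, msgid);
    va_start(ap, msgid);
    while ((kv = va_arg(ap, const char *)) != NULL)
        printf(" [%s]", kv);
    va_end(ap);
    printf("\n");
}

int
main(void)
{
    smsg("glusterd", 1234, "Key=transport.listen-backlog", NULL);
    return 0;
}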
@@ -555,7 +565,9 @@ glusterd_crt_georep_folders(char *georepdir, glusterd_conf_t *conf)
char *greplg_s = NULL;
struct group *gr = NULL;
int ret = 0;
+ int gr_ret = 0;
int32_t len = 0;
+ char logdir[PATH_MAX] = {0};
GF_ASSERT(georepdir);
GF_ASSERT(conf);
@@ -570,87 +582,106 @@ glusterd_crt_georep_folders(char *georepdir, glusterd_conf_t *conf)
len = snprintf(georepdir, PATH_MAX, "%s/" GEOREP, conf->workdir);
if ((len < 0) || (len >= PATH_MAX)) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
- ret = mkdir_p(georepdir, 0777, _gf_true);
+ ret = mkdir_p(georepdir, 0755, _gf_true);
if (-1 == ret) {
gf_msg("glusterd", GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create " GEOREP " directory %s", georepdir);
goto out;
}
- if (SLEN(DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP) >= PATH_MAX) {
+ ret = dict_get_str(THIS->options, GEOREP "-log-group", &greplg_s);
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=log-group", NULL);
+ ret = 0;
+ } else {
+ gr = getgrnam(greplg_s);
+ if (!gr) {
+ gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_LOGGROUP_INVALID,
+ "group " GEOREP "-log-group %s does not exist", greplg_s);
+ gr_ret = -1;
+ }
+ }
+ if ((strlen(conf->logdir) + 2 + SLEN(GEOREP)) >= PATH_MAX) {
ret = -1;
gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_DIRPATH_TOO_LONG,
- "directory path " DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
- " is longer than PATH_MAX");
+ "directory path %s/" GEOREP " is longer than PATH_MAX",
+ conf->logdir);
goto out;
}
- ret = mkdir_p(DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP, 0777, _gf_true);
+ len = snprintf(logdir, PATH_MAX, "%s/" GEOREP, conf->logdir);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto out;
+ }
+ ret = mkdir_p(logdir, 0755, _gf_true);
if (-1 == ret) {
gf_msg("glusterd", GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create " GEOREP " log directory");
goto out;
}
+ if (gr) {
+ gr_ret = group_write_allow(logdir, gr->gr_gid);
+ }
- /* Slave log file directory */
- if (SLEN(DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP "-slaves") >= PATH_MAX) {
+ if ((strlen(conf->logdir) + 2 + SLEN(GEOREP "-slaves")) >= PATH_MAX) {
ret = -1;
gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_DIRPATH_TOO_LONG,
- "directory path " DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
+ "directory path %s/" GEOREP
"-slaves"
- " is longer than PATH_MAX");
+ " is longer than PATH_MAX",
+ conf->logdir);
+ goto out;
+ }
+ len = snprintf(logdir, PATH_MAX, "%s/" GEOREP "-slaves", conf->logdir);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
+ ret = -1;
goto out;
}
- ret = mkdir_p(DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP "-slaves", 0777,
- _gf_true);
+ ret = mkdir_p(logdir, 0755, _gf_true);
if (-1 == ret) {
gf_msg("glusterd", GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create " GEOREP " slave log directory");
goto out;
}
+ if (gr && !gr_ret) {
+ gr_ret = group_write_allow(logdir, gr->gr_gid);
+ }
/* MountBroker log file directory */
- if (SLEN(DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP "-slaves/mbr") >= PATH_MAX) {
+ if ((strlen(conf->logdir) + 2 + SLEN(GEOREP "-slaves/mbr")) >= PATH_MAX) {
ret = -1;
gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_DIRPATH_TOO_LONG,
- "directory path " DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
+ "directory path %s/" GEOREP
"-slaves/mbr"
- " is longer than PATH_MAX");
+ " is longer than PATH_MAX",
+ conf->logdir);
+ goto out;
+ }
+
+ len = snprintf(logdir, PATH_MAX, "%s/" GEOREP "-slaves/mbr", conf->logdir);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
+ ret = -1;
goto out;
}
- ret = mkdir_p(DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP "-slaves/mbr", 0777,
- _gf_true);
+
+ ret = mkdir_p(logdir, 0755, _gf_true);
if (-1 == ret) {
gf_msg("glusterd", GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create " GEOREP " mountbroker slave log directory");
goto out;
}
-
- ret = dict_get_str(THIS->options, GEOREP "-log-group", &greplg_s);
- if (ret)
- ret = 0;
- else {
- gr = getgrnam(greplg_s);
- if (!gr) {
- gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_LOGGROUP_INVALID,
- "group " GEOREP "-log-group %s does not exist", greplg_s);
- ret = -1;
- goto out;
- }
-
- ret = group_write_allow(DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP,
- gr->gr_gid);
- if (ret == 0)
- ret = group_write_allow(
- DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP "-slaves", gr->gr_gid);
- if (ret == 0)
- ret = group_write_allow(DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
- "-slaves/mbr",
- gr->gr_gid);
+ if (gr && !gr_ret) {
+ gr_ret = group_write_allow(logdir, gr->gr_gid);
}
-
+ if (gr_ret)
+ ret = gr_ret;
out:
gf_msg_debug("glusterd", 0, "Returning %d", ret);
return ret;
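[editor's note] The rework above tightens each geo-replication log directory from 0777 to 0755 and then widens it only for the configured log group via group_write_allow(). A hedged approximation of that create-then-widen sequence with plain POSIX calls; make_group_writable_dir and the group name are illustrative, and group_write_allow() may differ in detail.

#include <errno.h>
#include <grp.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int
make_group_writable_dir(const char *path, const char *group)
{
    struct group *gr = getgrnam(group);

    if (mkdir(path, 0755) != 0 && errno != EEXIST)
        return -1;
    if (gr == NULL)
        return -1; /* unknown group: directory stays 0755 */
    if (chown(path, (uid_t)-1, gr->gr_gid) != 0)
        return -1; /* keep owner, switch group */
    return chmod(path, 0775); /* add group write, keep others at r-x */
}

int
main(void)
{
    return make_group_writable_dir("/tmp/georep-logs", "adm") ? 1 : 0;
}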
@@ -814,18 +845,19 @@ configure_syncdaemon(glusterd_conf_t *conf)
/* log-file */
runinit_gsyncd_setrx(&runner, conf);
- runner_add_args(&runner, "log-file",
- DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
- "/${mastervol}/${eSlave}.log",
- ".", ".", NULL);
+ runner_add_arg(&runner, "log-file");
+ runner_argprintf(&runner, "%s/" GEOREP "/${mastervol}/${eSlave}.log",
+ conf->logdir);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* gluster-log-file */
runinit_gsyncd_setrx(&runner, conf);
- runner_add_args(&runner, "gluster-log-file",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP "/${mastervol}/${eSlave}${local_id}.gluster.log",
- ".", ".", NULL);
+ runner_add_arg(&runner, "gluster-log-file");
+ runner_argprintf(
+ &runner, "%s/" GEOREP "/${mastervol}/${eSlave}${local_id}.gluster.log",
+ conf->logdir);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* ignore-deletes */
@@ -867,33 +899,36 @@ configure_syncdaemon(glusterd_conf_t *conf)
/* log-file */
runinit_gsyncd_setrx(&runner, conf);
- runner_add_args(
- &runner, "log-file",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP
+ runner_add_arg(&runner, "log-file");
+ runner_argprintf(
+ &runner,
+ "%s/" GEOREP
"-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.log",
- ".", NULL);
+ conf->logdir);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* MountBroker log-file */
runinit_gsyncd_setrx(&runner, conf);
- runner_add_args(
- &runner, "log-file-mbr",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP
+ runner_add_arg(&runner, "log-file-mbr");
+ runner_argprintf(
+ &runner,
+ "%s/" GEOREP
"-slaves/mbr/${session_owner}:${local_node}${local_id}.${slavevol}.log",
- ".", NULL);
+ conf->logdir);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* gluster-log-file */
runinit_gsyncd_setrx(&runner, conf);
- runner_add_args(
- &runner, "gluster-log-file",
- DEFAULT_LOG_FILE_DIRECTORY
- "/" GEOREP
+ runner_add_arg(&runner, "gluster-log-file");
+ runner_argprintf(
+ &runner,
+ "%s/" GEOREP
"-slaves/"
"${session_owner}:${local_node}${local_id}.${slavevol}.gluster.log",
- ".", NULL);
+ conf->logdir);
+ runner_add_args(&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
out:
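[editor's note] The runner changes above exist because the log prefix is now a runtime value (conf->logdir) rather than the compile-time DEFAULT_LOG_FILE_DIRECTORY literal, so the argument must be formatted with runner_argprintf() instead of concatenated string constants. A standalone illustration of the formatting step; GEOREP expands to "geo-replication" in glusterd, and the logdir value is an example.

#include <stdio.h>

int
main(void)
{
    const char *logdir = "/var/log/glusterfs"; /* conf->logdir at runtime */
    char arg[4096];

    /* the ${...} placeholders are expanded later by gsyncd, not here */
    snprintf(arg, sizeof(arg),
             "%s/geo-replication/${mastervol}/${eSlave}.log", logdir);
    printf("log-file %s\n", arg);
    return 0;
}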
@@ -1018,37 +1053,34 @@ _install_mount_spec(dict_t *opts, char *key, data_t *value, void *data)
glusterd_conf_t *priv = THIS->private;
char *label = NULL;
gf_boolean_t georep = _gf_false;
- gf_boolean_t ghadoop = _gf_false;
char *pdesc = value->data;
char *volname = NULL;
int rv = 0;
gf_mount_spec_t *mspec = NULL;
char *user = NULL;
- char *volfile_server = NULL;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
label = strtail(key, "mountbroker.");
- /* check for presence of geo-rep/hadoop label */
+ /* check for presence of geo-rep label */
if (!label) {
label = strtail(key, "mountbroker-" GEOREP ".");
if (label)
georep = _gf_true;
- else {
- label = strtail(key, "mountbroker-" GHADOOP ".");
- if (label)
- ghadoop = _gf_true;
- }
}
if (!label)
return 0;
mspec = GF_CALLOC(1, sizeof(*mspec), gf_gld_mt_mount_spec);
- if (!mspec)
+ if (!mspec) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto err;
+ }
mspec->label = label;
- if (georep || ghadoop) {
+ if (georep) {
volname = gf_strdup(pdesc);
if (!volname)
goto err;
@@ -1059,18 +1091,7 @@ _install_mount_spec(dict_t *opts, char *key, data_t *value, void *data)
} else
user = label;
- if (georep)
- rv = make_georep_mountspec(mspec, volname, user);
-
- if (ghadoop) {
- volfile_server = strchr(user, ':');
- if (volfile_server)
- *volfile_server++ = '\0';
- else
- volfile_server = "localhost";
-
- rv = make_ghadoop_mountspec(mspec, volname, user, volfile_server);
- }
+ rv = make_georep_mountspec(mspec, volname, user, priv->logdir);
GF_FREE(volname);
if (rv != 0)
@@ -1085,7 +1106,7 @@ err:
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_MOUNT_SPEC_INSTALL_FAIL,
"adding %smount spec failed: label: %s desc: %s",
- georep ? GEOREP " " : "", label, pdesc);
+ georep ? GEOREP " " : "", label, pdesc ? pdesc : "");
if (mspec) {
if (mspec->patterns) {
@@ -1106,25 +1127,22 @@ glusterd_init_uds_listener(xlator_t *this)
dict_t *options = NULL;
rpcsvc_t *rpc = NULL;
data_t *sock_data = NULL;
- char sockfile[UNIX_PATH_MAX + 1] = {
- 0,
- };
+ char sockfile[UNIX_PATH_MAX] = {0};
int i = 0;
GF_ASSERT(this);
- sock_data = dict_get(this->options, "glusterd-sockfile");
- if (!sock_data) {
- strncpy(sockfile, DEFAULT_GLUSTERD_SOCKFILE, UNIX_PATH_MAX);
- } else {
- strncpy(sockfile, sock_data->data, UNIX_PATH_MAX);
- }
-
options = dict_new();
- if (!options)
+ if (!options) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
+
+ sock_data = dict_get(this->options, "glusterd-sockfile");
+ (void)snprintf(sockfile, sizeof(sockfile), "%s",
+ sock_data ? sock_data->data : DEFAULT_GLUSTERD_SOCKFILE);
- ret = rpcsvc_transport_unix_options_build(&options, sockfile);
+ ret = rpcsvc_transport_unix_options_build(options, sockfile);
if (ret)
goto out;
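[editor's note] Replacing the strncpy() branches with one snprintf() has two effects: the result is always NUL-terminated (strncpy does not guarantee that at exactly UNIX_PATH_MAX bytes), and the fallback collapses into a single expression. A standalone sketch; the default path is illustrative, and UNIX_PATH_MAX is commonly 108 on Linux.

#include <stdio.h>

#define DEFAULT_SOCKFILE "/var/run/glusterd.socket" /* illustrative */

int
main(void)
{
    const char *configured = NULL; /* as if dict_get() found no option */
    char sockfile[108];            /* UNIX_PATH_MAX on Linux */

    (void)snprintf(sockfile, sizeof(sockfile), "%s",
                   configured ? configured : DEFAULT_SOCKFILE);
    printf("%s\n", sockfile);
    return 0;
}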
@@ -1182,9 +1200,7 @@ glusterd_stop_uds_listener(xlator_t *this)
rpcsvc_listener_t *listener = NULL;
rpcsvc_listener_t *next = NULL;
data_t *sock_data = NULL;
- char sockfile[UNIX_PATH_MAX + 1] = {
- 0,
- };
+ char sockfile[UNIX_PATH_MAX] = {0};
GF_ASSERT(this);
conf = this->private;
@@ -1200,11 +1216,8 @@ glusterd_stop_uds_listener(xlator_t *this)
(void)rpcsvc_unregister_notify(conf->uds_rpc, glusterd_rpcsvc_notify, this);
sock_data = dict_get(this->options, "glusterd-sockfile");
- if (!sock_data) {
- strncpy(sockfile, DEFAULT_GLUSTERD_SOCKFILE, UNIX_PATH_MAX);
- } else {
- strncpy(sockfile, sock_data->data, UNIX_PATH_MAX);
- }
+ (void)snprintf(sockfile, sizeof(sockfile), "%s",
+ sock_data ? sock_data->data : DEFAULT_GLUSTERD_SOCKFILE);
sys_unlink(sockfile);
return;
@@ -1314,7 +1327,7 @@ glusterd_init_var_run_dirs(xlator_t *this, char *var_run_dir,
if ((-1 == ret) && (ENOENT == errno)) {
/* Create missing dirs */
- ret = mkdir_p(abs_path, 0777, _gf_true);
+ ret = mkdir_p(abs_path, 0755, _gf_true);
if (-1 == ret) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
@@ -1401,13 +1414,16 @@ init(xlator_t *this)
char rundir[PATH_MAX] = {
0,
};
+ char logdir[VALID_GLUSTERD_PATHMAX] = {
+ 0,
+ };
char cmd_log_filename[PATH_MAX] = {
0,
};
char *mountbroker_root = NULL;
int i = 0;
int total_transport = 0;
- gf_boolean_t valgrind = _gf_false;
+ gf_valgrind_tool vgtool;
char *valgrind_str = NULL;
char *transport_type = NULL;
char var_run_dir[PATH_MAX] = {
@@ -1418,6 +1434,15 @@ init(xlator_t *this)
gf_boolean_t downgrade = _gf_false;
char *localtime_logging = NULL;
int32_t len = 0;
+ int op_version = 0;
+
+#if defined(RUN_WITH_MEMCHECK)
+ vgtool = _gf_memcheck;
+#elif defined(RUN_WITH_DRD)
+ vgtool = _gf_drd;
+#else
+ vgtool = _gf_none;
+#endif
#ifndef GF_DARWIN_HOST_OS
{
@@ -1426,9 +1451,8 @@ init(xlator_t *this)
lim.rlim_max = 65536;
if (setrlimit(RLIMIT_NOFILE, &lim) == -1) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_SETXATTR_FAIL,
- "Failed to set 'ulimit -n "
- " 65536'");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL,
+ "Failed to set 'ulimit -n 65536'", NULL);
} else {
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_FILE_DESC_LIMIT_SET,
"Maximum allowed open file descriptors "
@@ -1448,6 +1472,26 @@ init(xlator_t *this)
if (len < 0 || len >= PATH_MAX)
exit(2);
+ dir_data = dict_get(this->options, "cluster-test-mode");
+ if (!dir_data) {
+        /* Use the default log directory */
+ len = snprintf(logdir, VALID_GLUSTERD_PATHMAX, "%s",
+ DEFAULT_LOG_FILE_DIRECTORY);
+ } else {
+ len = snprintf(logdir, VALID_GLUSTERD_PATHMAX, "%s", dir_data->data);
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CLUSTER_RC_ENABLE,
+ "cluster-test-mode is enabled logdir is %s", dir_data->data);
+ }
+    if (len < 0 || len >= VALID_GLUSTERD_PATHMAX)
+ exit(2);
+
+ ret = mkdir_p(logdir, 0777, _gf_true);
+ if ((ret == -1) && (EEXIST != errno)) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create log dir %s", logdir);
+ exit(1);
+ }
+
dir_data = dict_get(this->options, "working-directory");
if (!dir_data) {
@@ -1475,7 +1519,7 @@ init(xlator_t *this)
}
if ((-1 == ret) && (ENOENT == errno)) {
- ret = mkdir_p(workdir, 0777, _gf_true);
+ ret = mkdir_p(workdir, 0755, _gf_true);
if (-1 == ret) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
@@ -1522,7 +1566,7 @@ init(xlator_t *this)
exit(1);
}
- ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0777, _gf_true);
+ ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0755, _gf_true);
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_DIR_OP_FAILED,
"Unable to create "
@@ -1546,14 +1590,7 @@ init(xlator_t *this)
exit(1);
}
- ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_GLUSTERSHD_RUN_DIR);
- if (ret) {
- gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CREATE_DIR_FAILED,
- "Unable to create "
- "glustershd running directory");
- exit(1);
- }
-
+#ifdef BUILD_GNFS
ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_NFS_RUN_DIR);
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CREATE_DIR_FAILED,
@@ -1561,6 +1598,7 @@ init(xlator_t *this)
"nfs running directory");
exit(1);
}
+#endif
ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_QUOTAD_RUN_DIR);
if (ret) {
@@ -1570,8 +1608,7 @@ init(xlator_t *this)
exit(1);
}
- snprintf(cmd_log_filename, PATH_MAX, "%s/cmd_history.log",
- DEFAULT_LOG_FILE_DIRECTORY);
+ snprintf(cmd_log_filename, PATH_MAX, "%s/cmd_history.log", logdir);
ret = gf_cmd_log_init(cmd_log_filename);
if (ret == -1) {
@@ -1585,7 +1622,7 @@ init(xlator_t *this)
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
@@ -1601,7 +1638,7 @@ init(xlator_t *this)
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
@@ -1616,7 +1653,7 @@ init(xlator_t *this)
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
@@ -1631,7 +1668,7 @@ init(xlator_t *this)
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
@@ -1641,13 +1678,12 @@ init(xlator_t *this)
exit(1);
}
- len = snprintf(storedir, sizeof(storedir), "%s/bricks",
- DEFAULT_LOG_FILE_DIRECTORY);
+ len = snprintf(storedir, sizeof(storedir), "%s/bricks", logdir);
if ((len < 0) || (len >= sizeof(storedir))) {
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create logs directory %s"
@@ -1656,11 +1692,12 @@ init(xlator_t *this)
exit(1);
}
+#ifdef BUILD_GNFS
len = snprintf(storedir, sizeof(storedir), "%s/nfs", workdir);
if ((len < 0) || (len >= sizeof(storedir))) {
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create nfs directory %s"
@@ -1668,12 +1705,12 @@ init(xlator_t *this)
storedir, errno);
exit(1);
}
-
+#endif
len = snprintf(storedir, sizeof(storedir), "%s/bitd", workdir);
if ((len < 0) || (len >= sizeof(storedir))) {
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create bitrot directory %s", storedir);
@@ -1684,7 +1721,7 @@ init(xlator_t *this)
if ((len < 0) || (len >= sizeof(storedir))) {
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create scrub directory %s", storedir);
@@ -1695,7 +1732,7 @@ init(xlator_t *this)
if ((len < 0) || (len >= sizeof(storedir))) {
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create glustershd directory %s"
@@ -1708,7 +1745,7 @@ init(xlator_t *this)
if ((len < 0) || (len >= sizeof(storedir))) {
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create quotad directory %s"
@@ -1721,7 +1758,7 @@ init(xlator_t *this)
if ((len < 0) || (len >= sizeof(storedir))) {
exit(1);
}
- ret = sys_mkdir(storedir, 0777);
+ ret = sys_mkdir(storedir, 0755);
if ((-1 == ret) && (errno != EEXIST)) {
gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create glustershd directory %s"
@@ -1828,15 +1865,42 @@ init(xlator_t *this)
CDS_INIT_LIST_HEAD(&conf->snapshots);
CDS_INIT_LIST_HEAD(&conf->missed_snaps_list);
CDS_INIT_LIST_HEAD(&conf->brick_procs);
+ CDS_INIT_LIST_HEAD(&conf->shd_procs);
+ pthread_mutex_init(&conf->attach_lock, NULL);
+ pthread_mutex_init(&conf->volume_lock, NULL);
pthread_mutex_init(&conf->mutex, NULL);
conf->rpc = rpc;
conf->uds_rpc = uds_rpc;
conf->gfs_mgmt = &gd_brick_prog;
- (void)strncpy(conf->workdir, workdir, strlen(workdir) + 1);
- (void)strncpy(conf->rundir, rundir, strlen(rundir) + 1);
+ conf->restart_shd = _gf_false;
+ this->private = conf;
+    /* conf->workdir and conf->rundir are smaller than PATH_MAX, so gcc's
+     * snprintf checking would reject a plain sprintf here. Unfortunately,
+     * between gcc-8 and coverity, coverity isn't smart enough to detect
+     * that these strncpy calls are safe, and the coverity annotations
+     * below have no effect. */
+ if (strlen(workdir) >= sizeof(conf->workdir)) {
+ ret = -1;
+ goto out;
+ }
+ /* coverity[BUFFER_SIZE_WARNING] */
+ (void)strncpy(conf->workdir, workdir, sizeof(conf->workdir));
+    /* separate tests because combined tests confuse gcc */
+ if (strlen(rundir) >= sizeof(conf->rundir)) {
+ ret = -1;
+ goto out;
+ }
+ /* coverity[BUFFER_SIZE_WARNING] */
+ (void)strncpy(conf->rundir, rundir, sizeof(conf->rundir));
+
+ /* coverity[BUFFER_SIZE_WARNING] */
+ (void)strncpy(conf->logdir, logdir, sizeof(conf->logdir));
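/* The same guard expressed as a hypothetical helper (a sketch, not part of
 * this patch): reject any source that would truncate, then let strncpy pad
 * the rest of the destination with NUL bytes. Keeping the length test apart
 * from the copy is what satisfies both gcc-8 and coverity. */
static int
copy_bounded(char *dst, size_t dstlen, const char *src)
{
    if (strlen(src) >= dstlen)
        return -1;
    /* coverity[BUFFER_SIZE_WARNING] */
    (void)strncpy(dst, src, dstlen);
    return 0;
}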
synclock_init(&conf->big_lock, SYNC_LOCK_RECURSIVE);
+ synccond_init(&conf->cond_restart_bricks);
+ synccond_init(&conf->cond_restart_shd);
+ synccond_init(&conf->cond_blockers);
pthread_mutex_init(&conf->xprt_lock, NULL);
INIT_LIST_HEAD(&conf->xprt_list);
pthread_mutex_init(&conf->import_volumes, NULL);
@@ -1869,31 +1933,37 @@ init(xlator_t *this)
}
/* Set option to run bricks on valgrind if enabled in glusterd.vol */
- this->ctx->cmd_args.valgrind = valgrind;
+ this->ctx->cmd_args.vgtool = vgtool;
ret = dict_get_str(this->options, "run-with-valgrind", &valgrind_str);
if (ret < 0) {
gf_msg_debug(this->name, 0, "cannot get run-with-valgrind value");
}
if (valgrind_str) {
- if (gf_string2boolean(valgrind_str, &valgrind)) {
+ gf_boolean_t vg = _gf_false;
+
+ if (!strcmp(valgrind_str, "memcheck"))
+ this->ctx->cmd_args.vgtool = _gf_memcheck;
+ else if (!strcmp(valgrind_str, "drd"))
+ this->ctx->cmd_args.vgtool = _gf_drd;
+ else if (!gf_string2boolean(valgrind_str, &vg))
+ this->ctx->cmd_args.vgtool = (vg ? _gf_memcheck : _gf_none);
+ else
gf_msg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY,
- "run-with-valgrind value not a boolean string");
- } else {
- this->ctx->cmd_args.valgrind = valgrind;
- }
+ "run-with-valgrind is neither boolean"
+ " nor one of 'memcheck' or 'drd'");
}
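/* A condensed sketch of the mapping above, assuming the vgtool enum values
 * used elsewhere in this patch (_gf_none, _gf_memcheck, _gf_drd); boolean
 * strings remain accepted for backward compatibility: */
static int
parse_run_with_valgrind(const char *val)
{
    gf_boolean_t vg = _gf_false;

    if (!strcmp(val, "memcheck"))
        return _gf_memcheck;
    if (!strcmp(val, "drd"))
        return _gf_drd;
    if (!gf_string2boolean(val, &vg)) /* returns 0 on success */
        return vg ? _gf_memcheck : _gf_none;
    return _gf_none; /* invalid value: the caller logs a warning */
}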
/* Store ping-timeout in conf */
ret = dict_get_int32(this->options, "ping-timeout", &conf->ping_timeout);
/* Not failing here since ping-timeout can be optional as well */
- this->private = conf;
glusterd_mgmt_v3_lock_init();
glusterd_mgmt_v3_lock_timer_init();
glusterd_txn_opinfo_dict_init();
- glusterd_shdsvc_build(&conf->shd_svc);
+#ifdef BUILD_GNFS
glusterd_nfssvc_build(&conf->nfs_svc);
+#endif
glusterd_quotadsvc_build(&conf->quotad_svc);
glusterd_bitdsvc_build(&conf->bitd_svc);
glusterd_scrubsvc_build(&conf->scrub_svc);
@@ -1976,18 +2046,36 @@ init(xlator_t *this)
}
}
- conf->blockers = 0;
+ GF_ATOMIC_INIT(conf->blockers, 0);
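/* Illustrative sketch (assuming libglusterfs' GF_ATOMIC helpers and the
 * synccond API introduced by this change; not part of the patch): with
 * blockers as a gf_atomic_t, producers and the drain path can pair up as: */
    GF_ATOMIC_INC(conf->blockers); /* register an in-flight blocker */
    /* ... blocking work ... */
    if (GF_ATOMIC_DEC(conf->blockers) == 0)
        synccond_broadcast(&conf->cond_blockers); /* wake any waiter */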
+ ret = glusterd_handle_upgrade_downgrade(this->options, conf, upgrade,
+ downgrade);
+ if (ret)
+ goto out;
+
+ ret = glusterd_retrieve_max_op_version(this, &op_version);
+    /* The first condition indicates the file isn't present, which means this
+     * code is running for the first time or someone has deleted the file from
+     * the backend. The second condition is when the max op_version differs.
+     * In both cases the volfiles should be regenerated.
+     */
+ if (op_version == 0 || op_version != GD_OP_VERSION_MAX) {
+ gf_log(this->name, GF_LOG_INFO,
+ "Regenerating volfiles due to a max op-version mismatch or "
+ "glusterd.upgrade file not being present, op_version retrieved:"
+ "%d, max op_version: %d",
+ op_version, GD_OP_VERSION_MAX);
+ glusterd_recreate_volfiles(conf);
+ ret = glusterd_store_max_op_version(this);
+ if (ret)
+ gf_log(this->name, GF_LOG_ERROR, "Failed to store max op-version");
+ }
+
/* If the peer count is less than 2 then this would be the best time to
* spawn process/bricks that may need (re)starting since last time
* (this) glusterd was up. */
if (glusterd_get_peers_count() < 2)
glusterd_launch_synctask(glusterd_spawn_daemons, NULL);
- ret = glusterd_handle_upgrade_downgrade(this->options, conf, upgrade,
- downgrade);
- if (ret)
- goto out;
-
ret = glusterd_hooks_spawn_worker(this);
if (ret)
goto out;
@@ -1995,7 +2083,7 @@ init(xlator_t *this)
GF_OPTION_INIT("event-threads", workers, int32, out);
if (workers > 0 && workers != conf->workers) {
conf->workers = workers;
- ret = event_reconfigure_threads(this->ctx->event_pool, workers);
+ ret = gf_event_reconfigure_threads(this->ctx->event_pool, workers);
if (ret)
goto out;
}
@@ -2137,10 +2225,6 @@ struct volume_options options[] = {
.type = GF_OPTION_TYPE_ANY,
},
{
- .key = {"mountbroker-" GHADOOP ".*"},
- .type = GF_OPTION_TYPE_ANY,
- },
- {
.key = {GEOREP "-log-group"},
.type = GF_OPTION_TYPE_ANY,
},
@@ -2151,13 +2235,17 @@ struct volume_options options[] = {
{.key = {"server-quorum-type"},
.type = GF_OPTION_TYPE_STR,
.value = {"none", "server"},
- .description = "This feature is on the server-side i.e. in glusterd."
- " Whenever the glusterd on a machine observes that "
+ .default_value = "none",
+ .description = "It can be set to none or server. When set to server, "
+ "this option enables the specified volume to "
+ "participate in the server-side quorum. "
+ "This feature is on the server-side i.e. in glusterd. "
+ "Whenever the glusterd on a machine observes that "
"the quorum is not met, it brings down the bricks to "
"prevent data split-brains. When the network "
"connections are brought back up and the quorum is "
- "restored the bricks in the volume are brought back "
- "up."},
+ "restored the bricks in "
+ "the volume are brought back up."},
{.key = {"server-quorum-ratio"},
.type = GF_OPTION_TYPE_PERCENT,
.description = "Sets the quorum percentage for the trusted "
@@ -2202,3 +2290,16 @@ struct volume_options options[] = {
" power. Range 1-32 threads."},
{.key = {NULL}},
};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .dumpops = &dumpops,
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "glusterd",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 412ba7415f0..cc4f98ecf47 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -15,30 +15,32 @@
#include <pthread.h>
#include <libgen.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
#include "rpc-clnt.h"
-#include "glusterfs.h"
-#include "xlator.h"
-#include "logging.h"
-#include "call-stub.h"
-#include "byte-order.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/byte-order.h>
#include "glusterd-mem-types.h"
#include "rpcsvc.h"
#include "glusterd-sm.h"
#include "glusterd-snapd-svc.h"
-#include "glusterd-tierd-svc.h"
+#include "glusterd-shd-svc.h"
#include "glusterd-bitd-svc.h"
#include "glusterd1-xdr.h"
#include "protocol-common.h"
#include "glusterd-pmap.h"
#include "cli1-xdr.h"
-#include "syncop.h"
-#include "store.h"
+#include <glusterfs/syncop.h>
+#include <glusterfs/store.h>
#include "glusterd-rcu.h"
-#include "events.h"
+#include <glusterfs/events.h>
#include "glusterd-gfproxyd-svc.h"
+#include "gd-common-utils.h"
+
#define GLUSTERD_TR_LOG_SIZE 50
#define GLUSTERD_QUORUM_TYPE_KEY "cluster.server-quorum-type"
#define GLUSTERD_QUORUM_RATIO_KEY "cluster.server-quorum-ratio"
@@ -56,10 +58,16 @@
#define GLUSTER_SHARED_STORAGE "gluster_shared_storage"
#define GLUSTERD_SHARED_STORAGE_KEY "cluster.enable-shared-storage"
#define GLUSTERD_BRICK_MULTIPLEX_KEY "cluster.brick-multiplex"
+#define GLUSTERD_VOL_CNT_PER_THRD "glusterd.vol_count_per_thread"
#define GLUSTERD_BRICKMUX_LIMIT_KEY "cluster.max-bricks-per-process"
+#define GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE "250"
+#define GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE "100"
#define GLUSTERD_LOCALTIME_LOGGING_KEY "cluster.localtime-logging"
#define GLUSTERD_DAEMON_LOG_LEVEL_KEY "cluster.daemon-log-level"
+#define GANESHA_HA_CONF CONFDIR "/ganesha-ha.conf"
+#define GANESHA_EXPORT_DIRECTORY CONFDIR "/exports"
+
#define GLUSTERD_SNAPS_MAX_HARD_LIMIT 256
#define GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT 90
#define GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT 100
@@ -160,13 +168,8 @@ typedef struct {
struct _volfile_ctx *volfile;
pthread_mutex_t mutex;
struct cds_list_head peers;
- gf_boolean_t verify_volfile_checksum;
- gf_boolean_t trace;
uuid_t uuid;
- char workdir[VALID_GLUSTERD_PATHMAX];
- char rundir[VALID_GLUSTERD_PATHMAX];
rpcsvc_t *rpc;
- glusterd_svc_t shd_svc;
glusterd_svc_t nfs_svc;
glusterd_svc_t bitd_svc;
glusterd_svc_t scrub_svc;
@@ -175,6 +178,7 @@ typedef struct {
struct cds_list_head volumes;
struct cds_list_head snapshots; /*List of snap volumes */
struct cds_list_head brick_procs; /* List of brick processes */
+ struct cds_list_head shd_procs; /* List of shd processes */
pthread_mutex_t xprt_lock;
struct list_head xprt_list;
pthread_mutex_t import_volumes;
@@ -196,13 +200,18 @@ typedef struct {
pthread_t brick_thread;
void *hooks_priv;
+ xlator_t *xl; /* Should be set to 'THIS' before creating thread */
/* need for proper handshake_t */
int op_version; /* Starts with 1 for 3.3.0 */
- xlator_t *xl; /* Should be set to 'THIS' before creating thread */
gf_boolean_t pending_quorum_action;
+ gf_boolean_t verify_volfile_checksum;
+ gf_boolean_t trace;
+ gf_boolean_t restart_done;
dict_t *opts;
synclock_t big_lock;
- gf_boolean_t restart_done;
+ synccond_t cond_restart_bricks;
+ synccond_t cond_restart_shd;
+ synccond_t cond_blockers;
rpcsvc_t *uds_rpc; /* RPCSVC for the unix domain socket */
uint32_t base_port;
uint32_t max_port;
@@ -212,11 +221,35 @@ typedef struct {
int ping_timeout;
uint32_t generation;
int32_t workers;
- uint32_t blockers;
uint32_t mgmt_v3_lock_timeout;
+ gf_atomic_t blockers;
+ pthread_mutex_t attach_lock; /* Lock can be per process or a common one */
+    pthread_mutex_t volume_lock; /* We release the big_lock from a lot of
+                                    places, which might lead to modification
+                                    of the volinfo list.
+                                  */
+ gf_atomic_t thread_count;
gf_boolean_t restart_bricks;
+    gf_boolean_t restart_shd;    /* This flag prevents running two shd
+                                    managers simultaneously
+                                  */
+ char workdir[VALID_GLUSTERD_PATHMAX];
+ char rundir[VALID_GLUSTERD_PATHMAX];
+ char logdir[VALID_GLUSTERD_PATHMAX];
} glusterd_conf_t;
+typedef struct glusterd_add_dict_args {
+ xlator_t *this;
+ dict_t *voldict;
+ int start;
+ int end;
+} glusterd_add_dict_args_t;
+
+typedef struct glusterd_friend_synctask_args {
+ char *dict_buf;
+ u_int dictlen;
+} glusterd_friend_synctask_args_t;
+
typedef enum gf_brick_status {
GF_BRICK_STOPPED,
GF_BRICK_STARTED,
@@ -224,26 +257,27 @@ typedef enum gf_brick_status {
GF_BRICK_STARTING
} gf_brick_status_t;
+typedef struct glusterd_brickinfo glusterd_brickinfo_t;
+
+struct glusterd_brick_proc {
+ int port;
+ uint32_t brick_count;
+ struct cds_list_head brick_proc_list;
+ struct cds_list_head bricks;
+};
+
+typedef struct glusterd_brick_proc glusterd_brick_proc_t;
+
struct glusterd_brickinfo {
- char hostname[NAME_MAX];
- char path[VALID_GLUSTERD_PATHMAX];
- char real_path[VALID_GLUSTERD_PATHMAX];
- char device_path[VALID_GLUSTERD_PATHMAX];
- char mount_dir[VALID_GLUSTERD_PATHMAX];
- char brick_id[1024]; /*Client xlator name, AFR changelog name*/
- char fstype[NAME_MAX]; /* Brick file-system type */
- char mnt_opts[1024]; /* Brick mount options */
struct cds_list_head brick_list;
uuid_t uuid;
int port;
int rdma_port;
char *logfile;
gf_store_handle_t *shandle;
- gf_brick_status_t status;
struct rpc_clnt *rpc;
int decommissioned;
- char vg[PATH_MAX]; /* FIXME: Use max size for length of vg */
- int caps; /* Capability */
+ gf_brick_status_t status;
int32_t snap_status;
/*
* The group is used to identify which bricks are part of the same
@@ -253,31 +287,30 @@ struct glusterd_brickinfo {
* a replica 3 volume with arbiter enabled.
*/
uint16_t group;
- uuid_t jbr_uuid;
+ gf_boolean_t port_registered;
+ gf_boolean_t start_triggered;
/* Below are used for handling the case of multiple bricks sharing
the backend filesystem */
uint64_t statfs_fsid;
- uint32_t fs_share_count;
- gf_boolean_t port_registered;
- gf_boolean_t start_triggered;
pthread_mutex_t restart_mutex;
+ glusterd_brick_proc_t *brick_proc; /* Information regarding mux bricks */
+    struct cds_list_head mux_bricks; /* List to store the bricks in brick_proc */
+ uint32_t fs_share_count;
+ char hostname[NAME_MAX];
+ char path[VALID_GLUSTERD_PATHMAX];
+ char real_path[VALID_GLUSTERD_PATHMAX];
+ char device_path[VALID_GLUSTERD_PATHMAX];
+ char mount_dir[VALID_GLUSTERD_PATHMAX];
+    char brick_id[1024];   /* Client xlator name, AFR changelog name */
+ char fstype[NAME_MAX]; /* Brick file-system type */
+ char mnt_opts[1024]; /* Brick mount options */
+ char vg[PATH_MAX]; /* FIXME: Use max size for length of vg */
};
-typedef struct glusterd_brickinfo glusterd_brickinfo_t;
-
-struct glusterd_brick_proc {
- int port;
- uint32_t brick_count;
- struct cds_list_head brick_proc_list;
- struct cds_list_head bricks;
-};
-
-typedef struct glusterd_brick_proc glusterd_brick_proc_t;
-
struct glusterd_gfproxyd_info {
- short port;
char *logfile;
+ short port;
};
struct gf_defrag_brickinfo_ {
@@ -296,14 +329,13 @@ struct glusterd_defrag_info_ {
uint64_t total_failures;
gf_lock_t lock;
int cmd;
+ uint32_t connected;
pthread_t th;
- gf_defrag_status_t defrag_status;
struct rpc_clnt *rpc;
- uint32_t connected;
- char mount[1024];
struct gf_defrag_brickinfo_ *bricks; /* volinfo->brick_count */
-
defrag_cbk_fn_t cbk_fn;
+ gf_defrag_status_t defrag_status;
+ char mount[1024];
};
typedef struct glusterd_defrag_info_ glusterd_defrag_info_t;
@@ -348,20 +380,20 @@ struct glusterd_bitrot_scrub_ {
typedef struct glusterd_bitrot_scrub_ glusterd_bitrot_scrub_t;
struct glusterd_rebalance_ {
- gf_defrag_status_t defrag_status;
uint64_t rebalance_files;
uint64_t rebalance_data;
uint64_t lookedup_files;
uint64_t skipped_files;
+ uint64_t rebalance_failures;
glusterd_defrag_info_t *defrag;
gf_cli_defrag_type defrag_cmd;
- uint64_t rebalance_failures;
+ gf_defrag_status_t defrag_status;
uuid_t rebalance_id;
double rebalance_time;
uint64_t time_left;
- glusterd_op_t op;
dict_t *dict; /* Dict to store misc information
* like list of bricks being removed */
+ glusterd_op_t op;
uint32_t commit_hash;
};
@@ -380,44 +412,10 @@ typedef enum gd_quorum_status_ {
DOESNT_MEET_QUORUM, // Follows quorum and does not meet.
} gd_quorum_status_t;
-typedef struct tier_info_ {
- int cold_type;
- int cold_brick_count;
- int cold_replica_count;
- int cold_disperse_count;
- int cold_dist_leaf_count;
- int cold_redundancy_count;
- int hot_type;
- int hot_brick_count;
- int hot_replica_count;
- int promoted;
- int demoted;
- uint16_t cur_tier_hot;
-} gd_tier_info_t;
-
struct glusterd_volinfo_ {
gf_lock_t lock;
- gf_boolean_t is_snap_volume;
glusterd_snap_t *snapshot;
uuid_t restored_from_snap;
- gd_tier_info_t tier_info;
- gf_boolean_t is_tier_enabled;
- char parent_volname[GD_VOLUME_NAME_MAX];
- /* In case of a snap volume
- i.e (is_snap_volume == TRUE) this
- field will contain the name of
- the volume which is snapped. In
- case of a non-snap volume, this
- field will be initialized as N/A */
- char volname[NAME_MAX + 1];
- /* NAME_MAX + 1 will be equal to
- * GD_VOLUME_NAME_MAX + 5.(also to
- * GD_VOLUME_NAME_MAX_TIER). An extra 5
- * bytes are added to GD_VOLUME_NAME_MAX
- * because, as part of the tiering
- * volfile generation code, we are
- * temporarily appending either "-hot"
- * or "-cold" */
int type;
int brick_count;
uint64_t snap_count;
@@ -432,6 +430,7 @@ struct glusterd_volinfo_ {
/* This is a current pointer for
glusterd_volinfo_t->snap_volumes */
struct cds_list_head bricks;
+ struct cds_list_head ta_bricks;
struct cds_list_head snap_volumes;
/* TODO : Need to remove this, as this
* is already part of snapshot object.
@@ -441,6 +440,7 @@ struct glusterd_volinfo_ {
int stripe_count;
int replica_count;
int arbiter_count;
+ int thin_arbiter_count;
int disperse_count;
int redundancy_count;
int subvol_count; /* Number of subvolumes in a
@@ -461,13 +461,10 @@ struct glusterd_volinfo_ {
/* Bitrot scrub status*/
glusterd_bitrot_scrub_t bitrot_scrub;
- glusterd_rebalance_t tier;
-
int version;
uint32_t quota_conf_version;
uint32_t cksum;
uint32_t quota_conf_cksum;
- gf_transport_type transport_type;
dict_t *dict;
@@ -478,28 +475,48 @@ struct glusterd_volinfo_ {
dict_t *gsync_slaves;
dict_t *gsync_active_slaves;
- int decommission_in_progress;
xlator_t *xl;
-
- gf_boolean_t memory_accounting;
- int caps; /* Capability */
+ int decommission_in_progress;
int op_version;
int client_op_version;
+ int32_t quota_xattr_version;
pthread_mutex_t reflock;
int refcnt;
gd_quorum_status_t quorum_status;
glusterd_snapdsvc_t snapd;
- glusterd_tierdsvc_t tierd;
+ glusterd_shdsvc_t shd;
glusterd_gfproxydsvc_t gfproxyd;
- int32_t quota_xattr_version;
- gf_boolean_t stage_deleted; /* volume has passed staging
- * for delete operation
- */
pthread_mutex_t store_volinfo_lock; /* acquire lock for
* updating the volinfo
*/
+ gf_transport_type transport_type;
+ gf_boolean_t is_snap_volume;
+ gf_boolean_t memory_accounting;
+ gf_boolean_t stage_deleted; /* volume has passed staging
+ * for delete operation
+ */
+ char parent_volname[GD_VOLUME_NAME_MAX];
+ /* In case of a snap volume
+ i.e (is_snap_volume == TRUE) this
+ field will contain the name of
+ the volume which is snapped. In
+ case of a non-snap volume, this
+ field will be initialized as N/A */
+ char volname[NAME_MAX + 1];
+ /* NAME_MAX + 1 will be equal to
+ * GD_VOLUME_NAME_MAX + 5.(also to
+ * GD_VOLUME_NAME_MAX_TIER). An extra 5
+ * bytes are added to GD_VOLUME_NAME_MAX
+ * because, as part of the tiering
+ * volfile generation code, we are
+ * temporarily appending either "-hot"
+ * or "-cold" */
+ gf_atomic_t volpeerupdate;
+    /* Flag to check whether the volume has received
+       updates from a peer
+    */
};
typedef enum gd_snap_status_ {
@@ -515,22 +532,22 @@ struct glusterd_snap_ {
gf_lock_t lock;
struct cds_list_head volumes;
struct cds_list_head snap_list;
- char snapname[GLUSTERD_MAX_SNAP_NAME];
- uuid_t snap_id;
char *description;
+ uuid_t snap_id;
time_t time_stamp;
- gf_boolean_t snap_restored;
- gd_snap_status_t snap_status;
gf_store_handle_t *shandle;
+ gd_snap_status_t snap_status;
+ gf_boolean_t snap_restored;
+ char snapname[GLUSTERD_MAX_SNAP_NAME];
};
typedef struct glusterd_snap_op_ {
char *snap_vol_id;
- int32_t brick_num;
char *brick_path;
+ struct cds_list_head snap_ops_list;
+ int32_t brick_num;
int32_t op;
int32_t status;
- struct cds_list_head snap_ops_list;
} glusterd_snap_op_t;
typedef struct glusterd_missed_snap_ {
@@ -568,9 +585,9 @@ typedef struct glusterd_pending_node_ {
struct gsync_config_opt_vals_ {
char *op_name;
+ char *values[GEO_CONF_MAX_OPT_VALS];
int no_of_pos_vals;
gf_boolean_t case_sensitive;
- char *values[GEO_CONF_MAX_OPT_VALS];
};
enum glusterd_op_ret {
@@ -597,6 +614,9 @@ typedef enum {
#define GLUSTERD_DEFAULT_PORT GF_DEFAULT_BASE_PORT
#define GLUSTERD_INFO_FILE "glusterd.info"
+#define GLUSTERD_UPGRADE_FILE \
+ "glusterd.upgrade" /* zero byte file to detect a need for regenerating \
+ volfiles in container mode */
#define GLUSTERD_VOLUME_QUOTA_CONFIG "quota.conf"
#define GLUSTERD_VOLUME_DIR_PREFIX "vols"
#define GLUSTERD_PEER_DIR_PREFIX "peers"
@@ -615,7 +635,6 @@ typedef enum {
#define GLUSTERD_DEFAULT_SNAPS_BRICK_DIR "/gluster/snaps"
#define GLUSTERD_BITD_RUN_DIR "/bitd"
#define GLUSTERD_SCRUB_RUN_DIR "/scrub"
-#define GLUSTERD_GLUSTERSHD_RUN_DIR "/glustershd"
#define GLUSTERD_NFS_RUN_DIR "/nfs"
#define GLUSTERD_QUOTAD_RUN_DIR "/quotad"
#define GLUSTER_SHARED_STORAGE_BRICK_DIR GLUSTERD_DEFAULT_WORKDIR "/ss_brick"
@@ -649,24 +668,36 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
-#define GLUSTERD_GET_TIER_DIR(path, volinfo, priv) \
+#define GLUSTERD_GET_DEFRAG_DIR(path, volinfo, priv) \
+ do { \
+ char vol_path[PATH_MAX]; \
+ int32_t _defrag_dir_len; \
+ GLUSTERD_GET_VOLUME_DIR(vol_path, volinfo, priv); \
+ _defrag_dir_len = snprintf(path, PATH_MAX, "%s/%s", vol_path, \
+ "rebalance"); \
+ if ((_defrag_dir_len < 0) || (_defrag_dir_len >= PATH_MAX)) { \
+ path[0] = 0; \
+ } \
+ } while (0)
+
+#define GLUSTERD_GET_DEFRAG_PID_FILE(path, volinfo, priv) \
do { \
- int32_t _tier_dir_len; \
- _tier_dir_len = snprintf(path, PATH_MAX, "%s/tier/%s", priv->workdir, \
- volinfo->volname); \
- if ((_tier_dir_len < 0) || (_tier_dir_len >= PATH_MAX)) { \
+ char defrag_path[PATH_MAX]; \
+ int32_t _defrag_pidfile_len; \
+ GLUSTERD_GET_DEFRAG_DIR(defrag_path, volinfo, priv); \
+ _defrag_pidfile_len = snprintf(path, PATH_MAX, "%s/%s.pid", \
+ defrag_path, uuid_utoa(MY_UUID)); \
+ if ((_defrag_pidfile_len < 0) || (_defrag_pidfile_len >= PATH_MAX)) { \
path[0] = 0; \
} \
} while (0)
-#define GLUSTERD_GET_TIER_PID_FILE(path, volinfo, priv) \
+#define GLUSTERD_GET_SHD_RUNDIR(path, volinfo, priv) \
do { \
- char tier_path[PATH_MAX]; \
- int32_t _tier_pid_len; \
- GLUSTERD_GET_TIER_DIR(tier_path, volinfo, priv); \
- _tier_pid_len = snprintf(path, PATH_MAX, "%s/run/%s-tierd.pid", \
- tier_path, volinfo->volname); \
- if ((_tier_pid_len < 0) || (_tier_pid_len >= PATH_MAX)) { \
+ int32_t _shd_dir_len; \
+ _shd_dir_len = snprintf(path, PATH_MAX, "%s/shd/%s", priv->rundir, \
+ volinfo->volname); \
+ if ((_shd_dir_len < 0) || (_shd_dir_len >= PATH_MAX)) { \
path[0] = 0; \
} \
} while (0)
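/* Usage sketch for the path macros above (the surrounding buffer handling is
 * illustrative, not from this patch): the macros write an empty string on
 * snprintf truncation, so callers are expected to test path[0] before use. */
    char pidfile[PATH_MAX] = "";

    GLUSTERD_GET_DEFRAG_PID_FILE(pidfile, volinfo, priv);
    if (!pidfile[0])
        goto out; /* the composed path would have exceeded PATH_MAX */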
@@ -687,16 +718,6 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
-#define GLUSTERD_GET_SNAP_DIR(path, snap, priv) \
- do { \
- int32_t _snap_dir_len; \
- _snap_dir_len = snprintf(path, PATH_MAX, "%s/snaps/%s", priv->workdir, \
- snap->snapname); \
- if ((_snap_dir_len < 0) || (_snap_dir_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
#define GLUSTERD_GET_SNAP_GEO_REP_DIR(path, snap, priv) \
do { \
int32_t _snap_geo_len; \
@@ -707,42 +728,6 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
-#define GLUSTERD_GET_BRICK_DIR(path, volinfo, priv) \
- do { \
- int32_t _brick_len; \
- if (volinfo->is_snap_volume) { \
- _brick_len = snprintf(path, PATH_MAX, "%s/snaps/%s/%s/%s", \
- priv->workdir, volinfo->snapshot->snapname, \
- volinfo->volname, GLUSTERD_BRICK_INFO_DIR); \
- } else { \
- _brick_len = snprintf(path, PATH_MAX, "%s/%s/%s/%s", \
- priv->workdir, GLUSTERD_VOLUME_DIR_PREFIX, \
- volinfo->volname, GLUSTERD_BRICK_INFO_DIR); \
- } \
- if ((_brick_len < 0) || (_brick_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_NFS_DIR(path, priv) \
- do { \
- int32_t _nfs_dir_len; \
- _nfs_dir_len = snprintf(path, PATH_MAX, "%s/nfs", priv->workdir); \
- if ((_nfs_dir_len < 0) || (_nfs_dir_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_QUOTAD_DIR(path, priv) \
- do { \
- int32_t _quotad_dir_len; \
- _quotad_dir_len = snprintf(path, PATH_MAX, "%s/quotad", \
- priv->workdir); \
- if ((_quotad_dir_len < 0) || (_quotad_dir_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
#define GLUSTERD_GET_QUOTA_LIMIT_MOUNT_PATH(abspath, volname, path) \
do { \
snprintf(abspath, sizeof(abspath) - 1, \
@@ -750,18 +735,6 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
path); \
} while (0)
-#define GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(abspath, volname, path) \
- do { \
- snprintf(abspath, sizeof(abspath) - 1, \
- DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_list%s", volname, path); \
- } while (0)
-
-#define GLUSTERD_GET_TMP_PATH(abspath, path) \
- do { \
- snprintf(abspath, sizeof(abspath) - 1, \
- DEFAULT_VAR_RUN_DIRECTORY "/tmp%s", path); \
- } while (0)
-
#define GLUSTERD_REMOVE_SLASH_FROM_PATH(path, string) \
do { \
int i = 0; \
@@ -790,138 +763,19 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
-#define GLUSTERD_GET_NFS_PIDFILE(pidfile, nfspath, priv) \
- do { \
- int32_t _nfs_pid_len; \
- _nfs_pid_len = snprintf(pidfile, PATH_MAX, "%s/nfs/nfs.pid", \
- priv->rundir); \
- if ((_nfs_pid_len < 0) || (_nfs_pid_len >= PATH_MAX)) { \
- pidfile[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_QUOTAD_PIDFILE(pidfile, quotadpath, priv) \
- do { \
- int32_t _quotad_pid_len; \
- _quotad_pid_len = snprintf(pidfile, PATH_MAX, "%s/quotad/quotad.pid", \
- priv->rundir); \
- if ((_quotad_pid_len < 0) || (_quotad_pid_len >= PATH_MAX)) { \
- pidfile[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_QUOTA_CRAWL_PIDDIR(piddir, volinfo, type) \
- do { \
- char _volpath[PATH_MAX] = { \
- 0, \
- }; \
- int32_t _crawl_pid_len; \
- GLUSTERD_GET_VOLUME_DIR(_volpath, volinfo, priv); \
- if (type == GF_QUOTA_OPTION_TYPE_ENABLE || \
- type == GF_QUOTA_OPTION_TYPE_ENABLE_OBJECTS) \
- _crawl_pid_len = snprintf(piddir, PATH_MAX, "%s/run/quota/enable", \
- _volpath); \
- else \
- _crawl_pid_len = snprintf(piddir, PATH_MAX, \
- "%s/run/quota/disable", _volpath); \
- if ((_crawl_pid_len < 0) || (_crawl_pid_len >= PATH_MAX)) { \
- piddir[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_STACK_DESTROY(frame) \
- do { \
- frame->local = NULL; \
- STACK_DESTROY(frame->root); \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_PROCESS(path, volinfo) \
- do { \
- if (volinfo->rebal.defrag_cmd == GF_DEFRAG_CMD_START_TIER) \
- snprintf(path, NAME_MAX, "tier"); \
- else \
- snprintf(path, NAME_MAX, "rebalance"); \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_DIR(path, volinfo, priv) \
- do { \
- char vol_path[PATH_MAX]; \
- char operation[NAME_MAX]; \
- int32_t _defrag_dir_len; \
- GLUSTERD_GET_VOLUME_DIR(vol_path, volinfo, priv); \
- GLUSTERD_GET_DEFRAG_PROCESS(operation, volinfo); \
- _defrag_dir_len = snprintf(path, PATH_MAX, "%s/%s", vol_path, \
- operation); \
- if ((_defrag_dir_len < 0) || (_defrag_dir_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD(path, volinfo, priv) \
- do { \
- char defrag_path[PATH_MAX]; \
- int32_t _sockfile_old_len; \
- GLUSTERD_GET_DEFRAG_DIR(defrag_path, volinfo, priv); \
- _sockfile_old_len = snprintf(path, PATH_MAX, "%s/%s.sock", \
- defrag_path, uuid_utoa(MY_UUID)); \
- if ((_sockfile_old_len < 0) || (_sockfile_old_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_SOCK_FILE(path, volinfo) \
- do { \
- char operation[NAME_MAX]; \
- int32_t _defrag_sockfile_len; \
- GLUSTERD_GET_DEFRAG_PROCESS(operation, volinfo); \
- _defrag_sockfile_len = snprintf( \
- path, UNIX_PATH_MAX, \
- DEFAULT_VAR_RUN_DIRECTORY "/gluster-%s-%s.sock", operation, \
- uuid_utoa(volinfo->volume_id)); \
- if ((_defrag_sockfile_len < 0) || \
- (_defrag_sockfile_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_PID_FILE(path, volinfo, priv) \
- do { \
- char defrag_path[PATH_MAX]; \
- int32_t _defrag_pidfile_len; \
- GLUSTERD_GET_DEFRAG_DIR(defrag_path, volinfo, priv); \
- _defrag_pidfile_len = snprintf(path, PATH_MAX, "%s/%s.pid", \
- defrag_path, uuid_utoa(MY_UUID)); \
- if ((_defrag_pidfile_len < 0) || (_defrag_pidfile_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERFS_GET_QUOTA_LIMIT_MOUNT_PIDFILE(pidfile, volname) \
+#define RCU_READ_LOCK \
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
{ \
- snprintf(pidfile, PATH_MAX - 1, \
- DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_limit.pid", volname); \
- }
+ rcu_read_lock(); \
+ } \
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);
-#define GLUSTERFS_GET_QUOTA_LIST_MOUNT_PIDFILE(pidfile, volname) \
+#define RCU_READ_UNLOCK \
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
{ \
- snprintf(pidfile, PATH_MAX - 1, \
- DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_list.pid", volname); \
- }
-
-#define GLUSTERD_GET_UUID_NOHYPHEN(ret_string, uuid) \
- do { \
- char *snap_volname_ptr = ret_string; \
- char tmp_uuid[64]; \
- char *snap_volid_ptr = uuid_utoa_r(uuid, tmp_uuid); \
- while (*snap_volid_ptr) { \
- if (*snap_volid_ptr == '-') { \
- snap_volid_ptr++; \
- } else { \
- (*snap_volname_ptr++) = (*snap_volid_ptr++); \
- } \
- } \
- *snap_volname_ptr = '\0'; \
- } while (0)
+ rcu_read_unlock(); \
+ } \
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);
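/* Hedged usage sketch (not part of this patch): the wrappers serialize the
 * RCU read-side critical section against glusterfs_ctx cleanup through
 * cleanup_lock, so a peer traversal now reads: */
    glusterd_peerinfo_t *peerinfo = NULL;

    RCU_READ_LOCK;
    cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
    {
        /* read-side access only; do not block inside the section */
    }
    RCU_READ_UNLOCK;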
#define GLUSTERD_DUMP_PEERS(head, member, xpeers) \
do { \
@@ -931,7 +785,7 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
\
key = xpeers ? "glusterd.xaction_peer" : "glusterd.peer"; \
\
- rcu_read_lock(); \
+ RCU_READ_LOCK; \
cds_list_for_each_entry_rcu(_peerinfo, head, member) \
{ \
glusterd_dump_peer(_peerinfo, key, index, xpeers); \
@@ -939,7 +793,7 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
glusterd_dump_peer_rpcstat(_peerinfo, key, index); \
index++; \
} \
- rcu_read_unlock(); \
+ RCU_READ_UNLOCK; \
\
} while (0)
@@ -1202,15 +1056,11 @@ int
glusterd_fetchsnap_notify(xlator_t *this);
int
-glusterd_add_tier_volume_detail_to_dict(glusterd_volinfo_t *volinfo,
- dict_t *volumes, int count);
-
-int
glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
int count);
int
-glusterd_restart_bricks();
+glusterd_restart_bricks(void *opaque);
int32_t
glusterd_volume_txn(rpcsvc_request_t *req, char *volname, int flags,
@@ -1336,13 +1186,33 @@ glusterd_op_stop_volume(dict_t *dict);
int
glusterd_op_delete_volume(dict_t *dict);
int
+glusterd_handle_ganesha_op(dict_t *dict, char **op_errstr, char *key,
+ char *value);
+int
+glusterd_check_ganesha_cmd(char *key, char *value, char **errstr, dict_t *dict);
+int
+glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr);
+int
+glusterd_op_set_ganesha(dict_t *dict, char **errstr);
+int
+ganesha_manage_export(dict_t *dict, char *value,
+ gf_boolean_t update_cache_invalidation, char **op_errstr);
+int
+gd_ganesha_send_dbus(char *volname, char *value);
+gf_boolean_t
+glusterd_is_ganesha_cluster();
+gf_boolean_t
+glusterd_check_ganesha_export(glusterd_volinfo_t *volinfo);
+int
+stop_ganesha(char **op_errstr);
+int
+tear_down_cluster(gf_boolean_t run_teardown);
+int
manage_export_config(char *volname, char *value, char **op_errstr);
int
glusterd_op_add_brick(dict_t *dict, char **op_errstr);
int
-glusterd_op_add_tier_brick(dict_t *dict, char **op_errstr);
-int
glusterd_op_remove_brick(dict_t *dict, char **op_errstr);
int
glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
@@ -1350,6 +1220,18 @@ int
glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr);
int
+glusterd_set_rebalance_id_for_remove_brick(dict_t *req_dict, dict_t *rsp_dict);
+
+int
+glusterd_set_rebalance_id_in_rsp_dict(dict_t *req_dict, dict_t *rsp_dict);
+
+int
+glusterd_mgmt_v3_op_stage_rebalance(dict_t *dict, char **op_errstr);
+
+int
+glusterd_mgmt_v3_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
+
+int
glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr);
int
glusterd_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
@@ -1475,20 +1357,19 @@ glusterd_should_i_stop_bitd();
int
glusterd_remove_brick_migrate_cbk(glusterd_volinfo_t *volinfo,
gf_defrag_status_t status);
-/* tier */
-
int
__glusterd_handle_reset_brick(rpcsvc_request_t *req);
-int
-glusterd_op_stage_tier(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
-int
-glusterd_op_tier_start_stop(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
-int
-glusterd_op_remove_tier_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
-int
-glusterd_tier_prevalidate(dict_t *dict, char **op_errstr, dict_t *rsp_dict,
- uint32_t *op_errno);
int
glusterd_options_init(xlator_t *this);
+
+int32_t
+glusterd_recreate_volfiles(glusterd_conf_t *conf);
+
+void
+glusterd_add_peers_to_auth_list(char *volname);
+
+int
+glusterd_replace_old_auth_allow_list(char *volname);
+
#endif